#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 * 	              Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/u64_stats_sync.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
#include <linux/atomic.h>

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

#ifdef CONFIG_BLK_CGROUP

enum blkg_rwstat_type {
	BLKG_RWSTAT_READ,
	BLKG_RWSTAT_WRITE,
	BLKG_RWSTAT_SYNC,
	BLKG_RWSTAT_ASYNC,

	BLKG_RWSTAT_NR,
	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};

struct blkcg_gq;

struct blkcg {
	struct cgroup_subsys_state	css;
	spinlock_t			lock;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq			*blkg_hint;
	struct hlist_head		blkg_list;

	struct blkcg_policy_data	*pd[BLKCG_MAX_POLS];

#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head		cgwb_list;
#endif
};

struct blkg_stat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt;
};

struct blkg_rwstat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt[BLKG_RWSTAT_NR];
};

/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each has its private
 * data on each blkg, the size of which is determined by
 * blkcg_policy->pd_size.  blkcg core allocates and frees such areas
 * together with blkg and invokes pd_init/exit_fn() methods.
 *
 * Such private data must embed struct blkg_policy_data (pd) at the
 * beginning and pd_size can't be smaller than sizeof(pd).
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq			*blkg;
	int				plid;

	/* used during policy activation */
	struct list_head		alloc_node;
};
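
/*
 * An illustrative sketch, not part of this interface: a policy (the
 * "ioprio" names below are hypothetical) embeds struct blkg_policy_data
 * at the start of its private data, reports the total size through
 * blkcg_policy->pd_size, and converts back from a pd pointer with
 * container_of().
 *
 *	struct ioprio_pd {
 *		struct blkg_policy_data pd;	<- must be the first member
 *		u64			ios_dispatched;
 *	};
 *
 *	static struct ioprio_pd *pd_to_ioprio(struct blkg_policy_data *pd)
 *	{
 *		return pd ? container_of(pd, struct ioprio_pd, pd) : NULL;
 *	}
 */
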
/*
 * Policies that need to keep per-blkcg data which is independent
 * of any request_queue associated with it must specify its size
 * with the cpd_size field of the blkcg_policy structure and
 * embed a blkcg_policy_data in it. blkcg core allocates
 * policy-specific per-blkcg structures lazily the first time
 * they are actually needed, so it handles them together with
 * blkgs. cpd_init() is invoked to let each policy handle
 * per-blkcg data.
 */
struct blkcg_policy_data {
	/* the policy id this per-policy data belongs to */
	int				plid;

	/* used during policy activation */
	struct list_head		alloc_node;
};
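
/*
 * An illustrative sketch (hypothetical "ioprio" names): per-blkcg data
 * is laid out the same way, with blkcg_policy_data embedded first
 * (mirroring blkg_policy_data above) and the total size reported
 * through blkcg_policy->cpd_size.
 *
 *	struct ioprio_cpd {
 *		struct blkcg_policy_data cpd;	<- embedded first
 *		unsigned int		weight;
 *	};
 */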

/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;

	/*
	 * Each blkg gets congested separately and the congestion state is
	 * propagated to the matching bdi_writeback_congested.
	 */
	struct bdi_writeback_congested	*wb_congested;

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq			*parent;

	/* request allocation list for this blkcg-q pair */
	struct request_list		rl;

	/* reference count */
	atomic_t			refcnt;

	/* is this blkg online? protected by both blkcg and q locks */
	bool				online;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];

	struct rcu_head			rcu_head;
};

typedef void (blkcg_pol_init_cpd_fn)(const struct blkcg *blkcg);
typedef void (blkcg_pol_init_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_online_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_offline_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_exit_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkcg_gq *blkg);

struct blkcg_policy {
	int				plid;
	/* policy specific private data size */
	size_t				pd_size;
	/* policy specific per-blkcg data size */
	size_t				cpd_size;
	/* cgroup files for the policy */
	struct cftype			*cftypes;

	/* operations */
	blkcg_pol_init_cpd_fn		*cpd_init_fn;
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_online_pd_fn		*pd_online_fn;
	blkcg_pol_offline_pd_fn	*pd_offline_fn;
	blkcg_pol_exit_pd_fn		*pd_exit_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
};
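
/*
 * An illustrative sketch (hypothetical "ioprio" callbacks and files): a
 * policy fills in a blkcg_policy descriptor; plid is assigned by blkcg
 * core at registration time and need not be initialized here.
 *
 *	static struct blkcg_policy ioprio_policy = {
 *		.pd_size	= sizeof(struct ioprio_pd),
 *		.cpd_size	= sizeof(struct ioprio_cpd),
 *		.cftypes	= ioprio_files,
 *		.cpd_init_fn	= ioprio_cpd_init,
 *		.pd_init_fn	= ioprio_pd_init,
 *		.pd_exit_fn	= ioprio_pd_exit,
 *	};
 */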

extern struct blkcg blkcg_root;
extern struct cgroup_subsys_state * const blkcg_root_css;

struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);
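
/*
 * Illustrative call sequence (a sketch only, reusing the hypothetical
 * ioprio_policy above): a policy registers itself once, e.g. at module
 * init, and is then activated on each request_queue it should apply to;
 * teardown undoes the two steps in reverse order.
 *
 *	ret = blkcg_policy_register(&ioprio_policy);
 *	...
 *	ret = blkcg_activate_policy(q, &ioprio_policy);
 *	...
 *	blkcg_deactivate_policy(q, &ioprio_policy);
 *	blkcg_policy_unregister(&ioprio_policy);
 */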

void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat);
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off);

u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off);
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
					     int off);

struct blkg_conf_ctx {
	struct gendisk			*disk;
	struct blkcg_gq			*blkg;
	u64				v;
};

int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   const char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);
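
/*
 * An illustrative sketch of a cgroup file write handler (hypothetical
 * names): blkg_conf_prep() parses a "MAJ:MIN value" style @input, pins
 * the matching blkg and fills @ctx; a successful call must be paired
 * with blkg_conf_finish() to drop the references taken.
 *
 *	struct blkg_conf_ctx ctx;
 *	int ret;
 *
 *	ret = blkg_conf_prep(blkcg, &ioprio_policy, input, &ctx);
 *	if (ret)
 *		return ret;
 *	... apply ctx.v to blkg_to_pd(ctx.blkg, &ioprio_policy) ...
 *	blkg_conf_finish(&ctx);
 */
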
static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct blkcg, css) : NULL;
}

static inline struct blkcg *task_blkcg(struct task_struct *tsk)
{
	return css_to_blkcg(task_css(tsk, blkio_cgrp_id));
}

static inline struct blkcg *bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_css)
		return css_to_blkcg(bio->bi_css);
	return task_blkcg(current);
}

static inline struct cgroup_subsys_state *
task_get_blkcg_css(struct task_struct *task)
{
	return task_get_css(task, blkio_cgrp_id);
}

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg.  Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
	return css_to_blkcg(blkcg->css.parent);
}

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

/**
 * blkcg_to_cpd - get per-blkcg policy data
 * @blkcg: blkcg of interest
 * @pol: policy of interest
 *
 * Return pointer to the per-blkcg data associated with the @blkcg-@pol pair.
 */
static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
						     struct blkcg_policy *pol)
{
	return blkcg ? blkcg->pd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	char *p;

	p = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
	if (!p) {
		strncpy(buf, "<unavailable>", buflen);
		return -ENAMETOOLONG;
	}

	memmove(buf, p, buf + buflen - p);
	return 0;
}
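
/*
 * Usage sketch: format the owning cgroup's path for debug output.
 *
 *	char path[PATH_MAX];
 *
 *	if (!blkg_path(blkg, path, sizeof(path)))
 *		pr_debug("blkg %s\n", path);
 */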

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
	atomic_inc(&blkg->refcnt);
}

void __blkg_release_rcu(struct rcu_head *rcu);

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
	if (atomic_dec_and_test(&blkg->refcnt))
		call_rcu(&blkg->rcu_head, __blkg_release_rcu);
}

struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q,
			       bool update_hint);

/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip subtree.
 * @p_blkg is included in the iteration and the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))
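
/*
 * Usage sketch (ioprio_update_blkg() is a hypothetical per-blkg helper):
 * visit every online blkg below @blkg, including @blkg itself, under RCU.
 *
 *	struct blkcg_gq *pos;
 *	struct cgroup_subsys_state *pos_css;
 *
 *	rcu_read_lock();
 *	blkg_for_each_descendant_pre(pos, pos_css, blkg)
 *		ioprio_update_blkg(pos);
 *	rcu_read_unlock();
 */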

/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

/**
 * blk_get_rl - get request_list to use
 * @q: request_queue of interest
 * @bio: bio which will be attached to the allocated request (may be %NULL)
 *
 * The caller wants to allocate a request from @q to use for @bio.  Find
 * the request_list to use and obtain a reference on it.  Should be called
 * under queue_lock.  This function is guaranteed to return non-%NULL
 * request_list.
 */
static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio)
{
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;

	rcu_read_lock();

	blkcg = bio_blkcg(bio);

	/* bypass blkg lookup and use @q->root_rl directly for root */
	if (blkcg == &blkcg_root)
		goto root_rl;

	/*
	 * Try to use blkg->rl.  blkg lookup may fail under memory pressure
	 * or if either the blkcg or queue is going away.  Fall back to
	 * root_rl in such cases.
	 */
	blkg = blkg_lookup_create(blkcg, q);
	if (unlikely(IS_ERR(blkg)))
		goto root_rl;

	blkg_get(blkg);
	rcu_read_unlock();
	return &blkg->rl;
root_rl:
	rcu_read_unlock();
	return &q->root_rl;
}

/**
 * blk_put_rl - put request_list
 * @rl: request_list to put
 *
 * Put the reference acquired by blk_get_rl().  Should be called under
 * queue_lock.
 */
static inline void blk_put_rl(struct request_list *rl)
{
	/* root_rl may not have blkg set */
	if (rl->blkg && rl->blkg->blkcg != &blkcg_root)
		blkg_put(rl->blkg);
}

/**
 * blk_rq_set_rl - associate a request with a request_list
 * @rq: request of interest
 * @rl: target request_list
 *
 * Associate @rq with @rl so that accounting and freeing can know the
 * request_list @rq came from.
 */
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
{
	rq->rl = rl;
}

/**
 * blk_rq_rl - return the request_list a request came from
 * @rq: request of interest
 *
 * Return the request_list @rq is allocated from.
 */
static inline struct request_list *blk_rq_rl(struct request *rq)
{
	return rq->rl;
}
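
/*
 * An illustrative sketch of how the helpers above pair up in the request
 * allocation path (simplified, not the actual allocator code): the rl
 * reference taken at allocation time is recorded in the request and
 * dropped against the same list on free.
 *
 *	rl = blk_get_rl(q, bio);		... under queue_lock
 *	blk_rq_set_rl(rq, rl);
 *	...
 *	blk_put_rl(blk_rq_rl(rq));		... when rq is freed
 */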

struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q);
/**
 * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
 *
 * Should be used under queue_lock.
 */
#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))
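
/*
 * Usage sketch: wake up sleepers on every request_list of @q, e.g. while
 * draining (queue_lock held); this mirrors the pattern used in blk-core.
 *
 *	struct request_list *rl;
 *
 *	blk_queue_for_each_rl(rl, q) {
 *		wake_up_all(&rl->wait[BLK_RW_SYNC]);
 *		wake_up_all(&rl->wait[BLK_RW_ASYNC]);
 *	}
 */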

static inline void blkg_stat_init(struct blkg_stat *stat)
{
	u64_stats_init(&stat->syncp);
}

/**
 * blkg_stat_add - add a value to a blkg_stat
 * @stat: target blkg_stat
 * @val: value to add
 *
 * Add @val to @stat.  The caller is responsible for synchronizing calls to
 * this function.
 */
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
	u64_stats_update_begin(&stat->syncp);
	stat->cnt += val;
	u64_stats_update_end(&stat->syncp);
}

/**
 * blkg_stat_read - read the current value of a blkg_stat
 * @stat: blkg_stat to read
 *
 * Read the current value of @stat.  This function can be called without
 * synchronization and takes care of u64 atomicity.
 */
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
	unsigned int start;
	uint64_t v;

	do {
		start = u64_stats_fetch_begin_irq(&stat->syncp);
		v = stat->cnt;
	} while (u64_stats_fetch_retry_irq(&stat->syncp, start));

	return v;
}
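
/*
 * Usage sketch (the "iop" stat field is hypothetical): updates rely on
 * caller-side serialization, e.g. the queue lock, while reads may happen
 * from any context.
 *
 *	blkg_stat_add(&iop->stat_ios, 1);		... writer, serialized
 *	...
 *	u64 ios = blkg_stat_read(&iop->stat_ios);	... any reader
 */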

/**
 * blkg_stat_reset - reset a blkg_stat
 * @stat: blkg_stat to reset
 */
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
	stat->cnt = 0;
}

/**
 * blkg_stat_merge - merge a blkg_stat into another
 * @to: the destination blkg_stat
 * @from: the source
 *
 * Add @from's count to @to.
 */
static inline void blkg_stat_merge(struct blkg_stat *to, struct blkg_stat *from)
{
	blkg_stat_add(to, blkg_stat_read(from));
}

static inline void blkg_rwstat_init(struct blkg_rwstat *rwstat)
{
	u64_stats_init(&rwstat->syncp);
}

/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @rw: mask of REQ_{WRITE|SYNC}
 * @val: value to add
 *
 * Add @val to @rwstat.  The counters are chosen according to @rw.  The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
				   int rw, uint64_t val)
{
	u64_stats_update_begin(&rwstat->syncp);

	if (rw & REQ_WRITE)
		rwstat->cnt[BLKG_RWSTAT_WRITE] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_READ] += val;
	if (rw & REQ_SYNC)
		rwstat->cnt[BLKG_RWSTAT_SYNC] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_ASYNC] += val;

	u64_stats_update_end(&rwstat->syncp);
}
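
/*
 * Usage sketch (hypothetical "iop" fields): account a bio's byte count
 * and direction, with bio->bi_rw supplying the REQ_WRITE/REQ_SYNC bits;
 * the caller serializes, e.g. by holding the queue lock.
 *
 *	blkg_rwstat_add(&iop->stat_bytes, bio->bi_rw, bio->bi_iter.bi_size);
 *	blkg_rwstat_add(&iop->stat_ios, bio->bi_rw, 1);
 */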

/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Read the current snapshot of @rwstat and return it as the return value.
 * This function can be called without synchronization and takes care of
 * u64 atomicity.
 */
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
	unsigned int start;
	struct blkg_rwstat tmp;

	do {
		start = u64_stats_fetch_begin_irq(&rwstat->syncp);
		tmp = *rwstat;
	} while (u64_stats_fetch_retry_irq(&rwstat->syncp, start));

	return tmp;
}

/**
 * blkg_rwstat_total - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction.  This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);

	return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
}

/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
	memset(rwstat->cnt, 0, sizeof(rwstat->cnt));
}

/**
 * blkg_rwstat_merge - merge a blkg_rwstat into another
 * @to: the destination blkg_rwstat
 * @from: the source
 *
 * Add @from's counts to @to.
 */
static inline void blkg_rwstat_merge(struct blkg_rwstat *to,
				     struct blkg_rwstat *from)
{
	struct blkg_rwstat v = blkg_rwstat_read(from);
	int i;

	u64_stats_update_begin(&to->syncp);
	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		to->cnt[i] += v.cnt[i];
	u64_stats_update_end(&to->syncp);
}

#else	/* CONFIG_BLK_CGROUP */

struct blkcg {
};

struct blkg_policy_data {
};

struct blkcg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

#define blkcg_root_css	((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))

static inline struct cgroup_subsys_state *
task_get_blkcg_css(struct task_struct *task)
{
	return NULL;
}

#ifdef CONFIG_BLOCK

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }

static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio) { return &q->root_rl; }
static inline void blk_put_rl(struct request_list *rl) { }
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }

#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLOCK */
#endif	/* CONFIG_BLK_CGROUP */
#endif	/* _BLK_CGROUP_H */