#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 * 	              Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/u64_stats_sync.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
#include <linux/atomic.h>

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

#ifdef CONFIG_BLK_CGROUP

enum blkg_rwstat_type {
	BLKG_RWSTAT_READ,
	BLKG_RWSTAT_WRITE,
	BLKG_RWSTAT_SYNC,
	BLKG_RWSTAT_ASYNC,

	BLKG_RWSTAT_NR,
	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};

struct blkcg_gq;

struct blkcg {
	struct cgroup_subsys_state	css;
	spinlock_t			lock;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq			*blkg_hint;
	struct hlist_head		blkg_list;

	struct blkcg_policy_data	*cpd[BLKCG_MAX_POLS];

	struct list_head		all_blkcgs_node;
#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head		cgwb_list;
#endif
};

struct blkg_stat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt;
};

struct blkg_rwstat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt[BLKG_RWSTAT_NR];
};

/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each blkg:policy pair is
 * represented by a blkg_policy_data which is allocated and freed by each
 * policy's pd_alloc/free_fn() methods.  A policy can allocate a private
 * data area by allocating a larger data structure which embeds
 * blkg_policy_data at the beginning.
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq			*blkg;
	int				plid;
};
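
/*
 * Illustrative sketch, not part of the original header: a policy would
 * typically embed blkg_policy_data at the start of its own per-blkg
 * structure and convert between the two with container_of().  The names
 * below (example_blkg_data, pd_to_example) are hypothetical.
 *
 *	struct example_blkg_data {
 *		struct blkg_policy_data	pd;	(must be the first field)
 *		uint64_t		bytes_dispatched;
 *	};
 *
 *	static struct example_blkg_data *
 *	pd_to_example(struct blkg_policy_data *pd)
 *	{
 *		return pd ? container_of(pd, struct example_blkg_data, pd)
 *			  : NULL;
 *	}
 */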

/*
 * Policies that need to keep per-blkcg data which is independent
 * from any request_queue associated with it must specify its size
 * with the cpd_size field of the blkcg_policy structure and
 * embed a blkcg_policy_data in it.  cpd_init() is invoked to let
 * each policy handle per-blkcg data.
 */
struct blkcg_policy_data {
	/* the blkcg and policy id this per-policy data belongs to */
	struct blkcg			*blkcg;
	int				plid;
};

/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;

	/*
	 * Each blkg gets congested separately and the congestion state is
	 * propagated to the matching bdi_writeback_congested.
	 */
	struct bdi_writeback_congested	*wb_congested;

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq			*parent;

	/* request allocation list for this blkcg-q pair */
	struct request_list		rl;

	/* reference count */
	atomic_t			refcnt;

	/* is this blkg online? protected by both blkcg and q locks */
	bool				online;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];

	struct rcu_head			rcu_head;
};

typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp, int node);
typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);

struct blkcg_policy {
	int				plid;
	/* policy specific per-blkcg data size */
	size_t				cpd_size;
	/* cgroup files for the policy */
	struct cftype			*cftypes;

	/* operations */
	blkcg_pol_init_cpd_fn		*cpd_init_fn;
	blkcg_pol_alloc_pd_fn		*pd_alloc_fn;
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_online_pd_fn		*pd_online_fn;
	blkcg_pol_offline_pd_fn		*pd_offline_fn;
	blkcg_pol_free_pd_fn		*pd_free_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
};
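
/*
 * Illustrative sketch, not part of the original header: a minimal policy
 * built on the ops table above might look roughly as follows, reusing the
 * hypothetical example_blkg_data/pd_to_example helpers sketched earlier and
 * assuming kzalloc_node()/kfree() from <linux/slab.h>.
 *
 *	static struct blkg_policy_data *example_pd_alloc(gfp_t gfp, int node)
 *	{
 *		struct example_blkg_data *ed;
 *
 *		ed = kzalloc_node(sizeof(*ed), gfp, node);
 *		return ed ? &ed->pd : NULL;
 *	}
 *
 *	static void example_pd_free(struct blkg_policy_data *pd)
 *	{
 *		kfree(pd_to_example(pd));
 *	}
 *
 *	static struct blkcg_policy blkcg_policy_example = {
 *		.pd_alloc_fn	= example_pd_alloc,
 *		.pd_free_fn	= example_pd_free,
 *	};
 *
 * The policy would then be registered from module init with
 * blkcg_policy_register(&blkcg_policy_example) and enabled on a queue with
 * blkcg_activate_policy(q, &blkcg_policy_example), both declared below.
 */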

extern struct blkcg blkcg_root;
extern struct cgroup_subsys_state * const blkcg_root_css;

struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);

void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat);
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off);

u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off);
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
					     int off);
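
/*
 * Illustrative sketch, not part of the original header: a policy's cgroup
 * file read method would typically pair blkcg_print_blkgs() with a prfill
 * callback.  example_prfill/example_print are hypothetical, and seq_css()
 * is assumed from the cgroup core.
 *
 *	static u64 example_prfill(struct seq_file *sf,
 *				  struct blkg_policy_data *pd, int off)
 *	{
 *		return __blkg_prfill_u64(sf, pd,
 *					 pd_to_example(pd)->bytes_dispatched);
 *	}
 *
 *	static int example_print(struct seq_file *sf, void *v)
 *	{
 *		blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
 *				  example_prfill, &blkcg_policy_example,
 *				  0, false);
 *		return 0;
 *	}
 */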

struct blkg_conf_ctx {
	struct gendisk			*disk;
	struct blkcg_gq			*blkg;
	u64				v;
};

int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   const char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);
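
/*
 * Illustrative sketch, not part of the original header: a configuration
 * write handler would bracket the per-blkg update with blkg_conf_prep()
 * and blkg_conf_finish().  example_set_limit and the ->limit field are
 * hypothetical.
 *
 *	static ssize_t example_set_limit(struct kernfs_open_file *of,
 *					 char *buf, size_t nbytes, loff_t off)
 *	{
 *		struct blkcg *blkcg = css_to_blkcg(of_css(of));
 *		struct blkg_conf_ctx ctx;
 *		int ret;
 *
 *		ret = blkg_conf_prep(blkcg, &blkcg_policy_example, buf, &ctx);
 *		if (ret)
 *			return ret;
 *		pd_to_example(blkg_to_pd(ctx.blkg,
 *					 &blkcg_policy_example))->limit = ctx.v;
 *		blkg_conf_finish(&ctx);
 *		return nbytes;
 *	}
 */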


static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct blkcg, css) : NULL;
}

static inline struct blkcg *task_blkcg(struct task_struct *tsk)
{
	return css_to_blkcg(task_css(tsk, blkio_cgrp_id));
}

static inline struct blkcg *bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_css)
		return css_to_blkcg(bio->bi_css);
	return task_blkcg(current);
}

static inline struct cgroup_subsys_state *
task_get_blkcg_css(struct task_struct *task)
{
	return task_get_css(task, blkio_cgrp_id);
}

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg.  Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
	return css_to_blkcg(blkcg->css.parent);
}

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
						     struct blkcg_policy *pol)
{
	return blkcg ? blkcg->cpd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}

static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
{
	return cpd ? cpd->blkcg : NULL;
}

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	char *p;

	p = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
	if (!p) {
		strncpy(buf, "<unavailable>", buflen);
		return -ENAMETOOLONG;
	}

	memmove(buf, p, buf + buflen - p);
	return 0;
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
	atomic_inc(&blkg->refcnt);
}

void __blkg_release_rcu(struct rcu_head *rcu);

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
	if (atomic_dec_and_test(&blkg->refcnt))
		call_rcu(&blkg->rcu_head, __blkg_release_rcu);
}

struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q,
			       bool update_hint);

/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip a subtree.
 * @p_blkg is included in the iteration and the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))
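
/*
 * Illustrative sketch, not part of the original header: visiting every
 * descendant of @p_blkg under RCU read lock, with a hypothetical
 * example_update_blkg() callback.
 *
 *	struct cgroup_subsys_state *pos_css;
 *	struct blkcg_gq *d_blkg;
 *
 *	rcu_read_lock();
 *	blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)
 *		example_update_blkg(d_blkg);
 *	rcu_read_unlock();
 */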

/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

/**
 * blk_get_rl - get request_list to use
 * @q: request_queue of interest
 * @bio: bio which will be attached to the allocated request (may be %NULL)
 *
 * The caller wants to allocate a request from @q to use for @bio.  Find
 * the request_list to use and obtain a reference on it.  Should be called
 * under queue_lock.  This function is guaranteed to return a non-%NULL
 * request_list.
 */
static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio)
{
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;

	rcu_read_lock();

	blkcg = bio_blkcg(bio);

	/* bypass blkg lookup and use @q->root_rl directly for root */
	if (blkcg == &blkcg_root)
		goto root_rl;

	/*
	 * Try to use blkg->rl.  blkg lookup may fail under memory pressure
	 * or if either the blkcg or queue is going away.  Fall back to
	 * root_rl in such cases.
	 */
	blkg = blkg_lookup_create(blkcg, q);
	if (unlikely(IS_ERR(blkg)))
		goto root_rl;

	blkg_get(blkg);
	rcu_read_unlock();
	return &blkg->rl;
root_rl:
	rcu_read_unlock();
	return &q->root_rl;
}
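
/*
 * Illustrative sketch, not part of the original header: the request
 * allocation path would pair the two helpers under queue_lock, roughly:
 *
 *	struct request_list *rl = blk_get_rl(q, bio);
 *
 *	(allocate a request from @rl; once the request is freed, or on
 *	 allocation failure, drop the reference)
 *
 *	blk_put_rl(rl);
 */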

/**
 * blk_put_rl - put request_list
 * @rl: request_list to put
 *
 * Put the reference acquired by blk_get_rl().  Should be called under
 * queue_lock.
 */
static inline void blk_put_rl(struct request_list *rl)
{
	if (rl->blkg->blkcg != &blkcg_root)
		blkg_put(rl->blkg);
}

/**
 * blk_rq_set_rl - associate a request with a request_list
 * @rq: request of interest
 * @rl: target request_list
 *
 * Associate @rq with @rl so that accounting and freeing can know the
 * request_list @rq came from.
 */
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
{
	rq->rl = rl;
}

/**
 * blk_rq_rl - return the request_list a request came from
 * @rq: request of interest
 *
 * Return the request_list @rq is allocated from.
 */
static inline struct request_list *blk_rq_rl(struct request *rq)
{
	return rq->rl;
}

struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q);
/**
 * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
 *
 * Should be used under queue_lock.
 */
#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))
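
/*
 * Illustrative sketch, not part of the original header: draining every
 * request_list of a queue under queue_lock, with a hypothetical
 * example_drain_rl() helper.
 *
 *	struct request_list *rl;
 *
 *	blk_queue_for_each_rl(rl, q)
 *		example_drain_rl(rl);
 */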

static inline void blkg_stat_init(struct blkg_stat *stat)
{
	u64_stats_init(&stat->syncp);
}

/**
 * blkg_stat_add - add a value to a blkg_stat
 * @stat: target blkg_stat
 * @val: value to add
 *
 * Add @val to @stat.  The caller is responsible for synchronizing calls to
 * this function.
 */
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
	u64_stats_update_begin(&stat->syncp);
	stat->cnt += val;
	u64_stats_update_end(&stat->syncp);
}

/**
 * blkg_stat_read - read the current value of a blkg_stat
 * @stat: blkg_stat to read
 *
 * Read the current value of @stat.  This function can be called without
 * synchronization and takes care of u64 atomicity.
 */
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
	unsigned int start;
	uint64_t v;

	do {
		start = u64_stats_fetch_begin_irq(&stat->syncp);
		v = stat->cnt;
	} while (u64_stats_fetch_retry_irq(&stat->syncp, start));

	return v;
}
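
/*
 * Illustrative sketch, not part of the original header: a policy would
 * typically bump a blkg_stat on the issue path and report it from its
 * prfill callback, with a hypothetical dispatch_count field:
 *
 *	blkg_stat_add(&ed->dispatch_count, 1);
 *
 * and later, in the prfill callback:
 *
 *	return __blkg_prfill_u64(sf, pd, blkg_stat_read(&ed->dispatch_count));
 */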

/**
 * blkg_stat_reset - reset a blkg_stat
 * @stat: blkg_stat to reset
 */
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
	stat->cnt = 0;
}

/**
 * blkg_stat_merge - merge a blkg_stat into another
 * @to: the destination blkg_stat
 * @from: the source
 *
 * Add @from's count to @to.
 */
static inline void blkg_stat_merge(struct blkg_stat *to, struct blkg_stat *from)
{
	blkg_stat_add(to, blkg_stat_read(from));
}

static inline void blkg_rwstat_init(struct blkg_rwstat *rwstat)
{
	u64_stats_init(&rwstat->syncp);
}

/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @rw: mask of REQ_{WRITE|SYNC}
 * @val: value to add
 *
 * Add @val to @rwstat.  The counters are chosen according to @rw.  The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
				   int rw, uint64_t val)
{
	u64_stats_update_begin(&rwstat->syncp);

	if (rw & REQ_WRITE)
		rwstat->cnt[BLKG_RWSTAT_WRITE] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_READ] += val;
	if (rw & REQ_SYNC)
		rwstat->cnt[BLKG_RWSTAT_SYNC] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_ASYNC] += val;

	u64_stats_update_end(&rwstat->syncp);
}
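
/*
 * Illustrative sketch, not part of the original header: accounting a bio
 * in a hypothetical per-blkg rwstat field, with the direction and sync
 * bits taken from bio->bi_rw:
 *
 *	blkg_rwstat_add(&ed->serviced_bytes, bio->bi_rw,
 *			bio->bi_iter.bi_size);
 */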

/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Read the current snapshot of @rwstat and return it as the return value.
 * This function can be called without synchronization and takes care of
 * u64 atomicity.
 */
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
	unsigned int start;
	struct blkg_rwstat tmp;

	do {
		start = u64_stats_fetch_begin_irq(&rwstat->syncp);
		tmp = *rwstat;
	} while (u64_stats_fetch_retry_irq(&rwstat->syncp, start));

	return tmp;
}

/**
 * blkg_rwstat_total - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction.  This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);

	return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
}

/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
	memset(rwstat->cnt, 0, sizeof(rwstat->cnt));
}

/**
 * blkg_rwstat_merge - merge a blkg_rwstat into another
 * @to: the destination blkg_rwstat
 * @from: the source
 *
 * Add @from's counts to @to.
 */
static inline void blkg_rwstat_merge(struct blkg_rwstat *to,
				     struct blkg_rwstat *from)
{
	struct blkg_rwstat v = blkg_rwstat_read(from);
	int i;

	u64_stats_update_begin(&to->syncp);
	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		to->cnt[i] += v.cnt[i];
	u64_stats_update_end(&to->syncp);
}

#else	/* CONFIG_BLK_CGROUP */

struct blkcg {
};

struct blkg_policy_data {
};

struct blkcg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

#define blkcg_root_css	((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))

static inline struct cgroup_subsys_state *
task_get_blkcg_css(struct task_struct *task)
{
	return NULL;
}

#ifdef CONFIG_BLOCK

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
					   struct request_queue *q) { return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }

static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio) { return &q->root_rl; }
static inline void blk_put_rl(struct request_list *rl) { }
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }

#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLOCK */
#endif	/* CONFIG_BLK_CGROUP */
#endif	/* _BLK_CGROUP_H */