#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 * 	              Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/u64_stats_sync.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
#include <linux/atomic.h>

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

#ifdef CONFIG_BLK_CGROUP

enum blkg_rwstat_type {
	BLKG_RWSTAT_READ,
	BLKG_RWSTAT_WRITE,
	BLKG_RWSTAT_SYNC,
	BLKG_RWSTAT_ASYNC,

	BLKG_RWSTAT_NR,
	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};

struct blkcg_gq;

struct blkcg {
	struct cgroup_subsys_state	css;
	spinlock_t			lock;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq			*blkg_hint;
	struct hlist_head		blkg_list;

	struct blkcg_policy_data	*cpd[BLKCG_MAX_POLS];

	struct list_head		all_blkcgs_node;
#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head		cgwb_list;
#endif
};

/*
 * blkg_[rw]stat->aux_cnt is excluded for local stats but included for
 * recursive ones.  It is used to carry over the stats of dead children.
 */
struct blkg_stat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt;
	atomic64_t			aux_cnt;
};

struct blkg_rwstat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt[BLKG_RWSTAT_NR];
	atomic64_t			aux_cnt[BLKG_RWSTAT_NR];
};

/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each blkg:policy pair is
 * represented by a blkg_policy_data which is allocated and freed by each
 * policy's pd_alloc/free_fn() methods.  A policy can allocate a private
 * data area by allocating a larger data structure which embeds
 * blkg_policy_data at the beginning.
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq			*blkg;
	int				plid;
};
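
/*
 * Illustrative sketch (not part of this header; all names hypothetical):
 * a policy wanting per-blkg state embeds blkg_policy_data as the first
 * member of its own structure and converts back with container_of().
 *
 *	struct example_grp {
 *		struct blkg_policy_data	pd;
 *		u64			nr_dispatched;
 *	};
 *
 *	static struct example_grp *pd_to_eg(struct blkg_policy_data *pd)
 *	{
 *		return pd ? container_of(pd, struct example_grp, pd) : NULL;
 *	}
 */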

/*
 * Policies that need to keep per-blkcg data which is independent of any
 * request_queue associated with it should implement cpd_alloc/free_fn()
 * methods.  A policy can allocate a private data area by allocating a
 * larger data structure which embeds blkcg_policy_data at the beginning.
 * cpd_init_fn() is invoked to let each policy handle per-blkcg data.
 */
struct blkcg_policy_data {
	/* the blkcg and policy id this per-policy data belongs to */
	struct blkcg			*blkcg;
	int				plid;
};
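
/*
 * Per-blkcg data follows the same embedding pattern; hypothetical sketch:
 *
 *	struct example_blkcg_data {
 *		struct blkcg_policy_data	cpd;
 *		unsigned int			default_weight;
 *	};
 */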

/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;

	/*
	 * Each blkg gets congested separately and the congestion state is
	 * propagated to the matching bdi_writeback_congested.
	 */
	struct bdi_writeback_congested	*wb_congested;

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq			*parent;

	/* request allocation list for this blkcg-q pair */
	struct request_list		rl;

	/* reference count */
	atomic_t			refcnt;

	/* is this blkg online? protected by both blkcg and q locks */
	bool				online;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];

	struct rcu_head			rcu_head;
};

typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp, int node);
typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);

struct blkcg_policy {
	int				plid;
	/* cgroup files for the policy */
	struct cftype			*cftypes;

	/* operations */
	blkcg_pol_alloc_cpd_fn		*cpd_alloc_fn;
	blkcg_pol_init_cpd_fn		*cpd_init_fn;
	blkcg_pol_free_cpd_fn		*cpd_free_fn;

	blkcg_pol_alloc_pd_fn		*pd_alloc_fn;
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_online_pd_fn		*pd_online_fn;
	blkcg_pol_offline_pd_fn		*pd_offline_fn;
	blkcg_pol_free_pd_fn		*pd_free_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
};
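
/*
 * Hypothetical sketch of a minimal policy: fill in the callbacks that are
 * needed and register once at init time; plid is assigned by
 * blkcg_policy_register().  Callback bodies elided.
 *
 *	static struct blkcg_policy blkcg_policy_example = {
 *		.pd_alloc_fn	= example_pd_alloc,
 *		.pd_init_fn	= example_pd_init,
 *		.pd_free_fn	= example_pd_free,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return blkcg_policy_register(&blkcg_policy_example);
 *	}
 *
 * blkcg_activate_policy() below must additionally be called for each
 * request_queue the policy should apply to.
 */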

extern struct blkcg blkcg_root;
extern struct cgroup_subsys_state * const blkcg_root_css;

struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
				      struct request_queue *q, bool update_hint);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);

void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat);
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off);

u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off);
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
					     int off);

struct blkg_conf_ctx {
	struct gendisk			*disk;
	struct blkcg_gq			*blkg;
	u64				v;
};

int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   const char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);
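
/*
 * Sketched usage from a policy's cgroup file write handler (everything
 * except the blkg_conf_*() helpers is hypothetical): blkg_conf_prep()
 * parses "MAJ:MIN VAL" style input, looks up the blkg and returns with it
 * held; blkg_conf_finish() releases it.
 *
 *	struct blkg_conf_ctx ctx;
 *	int ret;
 *
 *	ret = blkg_conf_prep(blkcg, &blkcg_policy_example, input, &ctx);
 *	if (ret)
 *		return ret;
 *	example_apply_limit(ctx.blkg, ctx.v);
 *	blkg_conf_finish(&ctx);
 */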


static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct blkcg, css) : NULL;
}

static inline struct blkcg *task_blkcg(struct task_struct *tsk)
{
	return css_to_blkcg(task_css(tsk, blkio_cgrp_id));
}

static inline struct blkcg *bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_css)
		return css_to_blkcg(bio->bi_css);
	return task_blkcg(current);
}

static inline struct cgroup_subsys_state *
task_get_blkcg_css(struct task_struct *task)
{
	return task_get_css(task, blkio_cgrp_id);
}

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg.  Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
	return css_to_blkcg(blkcg->css.parent);
}

/**
 * __blkg_lookup - internal version of blkg_lookup()
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 * @update_hint: whether to update lookup hint with the result or not
 *
 * This is the internal version and shouldn't be used by policy
 * implementations.  Looks up the blkg for the @blkcg - @q pair regardless
 * of @q's bypass state.  If @update_hint is %true, the caller should be
 * holding @q->queue_lock and the lookup hint is updated on success.
 */
static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
					     struct request_queue *q,
					     bool update_hint)
{
	struct blkcg_gq *blkg;

	if (blkcg == &blkcg_root)
		return q->root_blkg;

	blkg = rcu_dereference(blkcg->blkg_hint);
	if (blkg && blkg->q == q)
		return blkg;

	return blkg_lookup_slowpath(blkcg, q, update_hint);
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  This function should be called
 * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
 * - see blk_queue_bypass_start() for details.
 */
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
					   struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (unlikely(blk_queue_bypass(q)))
		return NULL;
	return __blkg_lookup(blkcg, q, false);
}
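
/*
 * Sketch of a typical hot-path lookup (example_account_bio() is
 * hypothetical): the caller holds the RCU read lock, tolerates a %NULL
 * result, and must not use the blkg past the RCU section unless it takes
 * a reference with blkg_get().
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(bio_blkcg(bio), q);
 *	if (blkg)
 *		example_account_bio(blkg, bio);
 *	rcu_read_unlock();
 */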

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
						     struct blkcg_policy *pol)
{
	return blkcg ? blkcg->cpd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}

static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
{
	return cpd ? cpd->blkcg : NULL;
}

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	char *p;

	p = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
	if (!p) {
		strncpy(buf, "<unavailable>", buflen);
		return -ENAMETOOLONG;
	}

	memmove(buf, p, buf + buflen - p);
	return 0;
}
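
/*
 * Sketch: callers usually format into a short on-stack buffer for log or
 * trace messages.
 *
 *	char path[64];
 *
 *	blkg_path(blkg, path, sizeof(path));
 *	pr_debug("%s: limit updated\n", path);
 */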

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
	atomic_inc(&blkg->refcnt);
}

void __blkg_release_rcu(struct rcu_head *rcu);

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
	if (atomic_dec_and_test(&blkg->refcnt))
		call_rcu(&blkg->rcu_head, __blkg_release_rcu);
}

/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip a subtree.
 * @p_blkg is included in the iteration and the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))
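
/*
 * Sketch of a descendant walk, e.g. propagating a configuration change
 * from @p_blkg downwards (example_update_limit() is hypothetical).  The
 * RCU read lock satisfies the synchronization rules above.
 *
 *	struct cgroup_subsys_state *pos_css;
 *	struct blkcg_gq *blkg;
 *
 *	rcu_read_lock();
 *	blkg_for_each_descendant_pre(blkg, pos_css, p_blkg)
 *		example_update_limit(blkg);
 *	rcu_read_unlock();
 */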

/**
 * blk_get_rl - get request_list to use
 * @q: request_queue of interest
 * @bio: bio which will be attached to the allocated request (may be %NULL)
 *
 * The caller wants to allocate a request from @q to use for @bio.  Find
 * the request_list to use and obtain a reference on it.  Should be called
 * under queue_lock.  This function is guaranteed to return a non-%NULL
 * request_list.
 */
static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio)
{
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;

	rcu_read_lock();

	blkcg = bio_blkcg(bio);

	/* bypass blkg lookup and use @q->root_rl directly for root */
	if (blkcg == &blkcg_root)
		goto root_rl;

	/*
	 * Try to use blkg->rl.  blkg lookup may fail under memory pressure
	 * or if either the blkcg or queue is going away.  Fall back to
	 * root_rl in such cases.
	 */
	blkg = blkg_lookup(blkcg, q);
	if (unlikely(!blkg))
		goto root_rl;

	blkg_get(blkg);
	rcu_read_unlock();
	return &blkg->rl;
root_rl:
	rcu_read_unlock();
	return &q->root_rl;
}

/**
 * blk_put_rl - put request_list
 * @rl: request_list to put
 *
 * Put the reference acquired by blk_get_rl().  Should be called under
 * queue_lock.
 */
static inline void blk_put_rl(struct request_list *rl)
{
	if (rl->blkg->blkcg != &blkcg_root)
		blkg_put(rl->blkg);
}

/**
 * blk_rq_set_rl - associate a request with a request_list
 * @rq: request of interest
 * @rl: target request_list
 *
 * Associate @rq with @rl so that accounting and freeing can know the
 * request_list @rq came from.
 */
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
{
	rq->rl = rl;
}

/**
 * blk_rq_rl - return the request_list a request came from
 * @rq: request of interest
 *
 * Return the request_list @rq is allocated from.
 */
static inline struct request_list *blk_rq_rl(struct request *rq)
{
	return rq->rl;
}
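
/*
 * Sketch of how the allocation path is expected to pair these helpers
 * (the in-tree callers live in blk-core.c; example_alloc_request() is
 * hypothetical): the reference taken by blk_get_rl() is recorded in the
 * request via blk_rq_set_rl() and dropped at free time via blk_rq_rl().
 *
 *	rl = blk_get_rl(q, bio);
 *	rq = example_alloc_request(rl);
 *	blk_rq_set_rl(rq, rl);
 *	...
 *	blk_put_rl(blk_rq_rl(rq));
 */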

struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q);
/**
 * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
 *
 * Should be used under queue_lock.
 */
#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))

static inline void blkg_stat_init(struct blkg_stat *stat)
{
	u64_stats_init(&stat->syncp);
	atomic64_set(&stat->aux_cnt, 0);
}

/**
 * blkg_stat_add - add a value to a blkg_stat
 * @stat: target blkg_stat
 * @val: value to add
 *
 * Add @val to @stat.  The caller is responsible for synchronizing calls to
 * this function.
 */
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
	u64_stats_update_begin(&stat->syncp);
	stat->cnt += val;
	u64_stats_update_end(&stat->syncp);
}

/**
 * blkg_stat_read - read the current value of a blkg_stat
 * @stat: blkg_stat to read
 *
 * Read the current value of @stat.  The returned value doesn't include the
 * aux count.  This function can be called without synchronization and takes
 * care of u64 atomicity.
 */
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
	unsigned int start;
	uint64_t v;

	do {
		start = u64_stats_fetch_begin_irq(&stat->syncp);
		v = stat->cnt;
	} while (u64_stats_fetch_retry_irq(&stat->syncp, start));

	return v;
}

/**
 * blkg_stat_reset - reset a blkg_stat
 * @stat: blkg_stat to reset
 */
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
	stat->cnt = 0;
	atomic64_set(&stat->aux_cnt, 0);
}
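
/*
 * Sketch of typical blkg_stat usage by a policy ("eg" points to a
 * hypothetical per-blkg structure holding the counter): writers are
 * serialized by the caller, e.g. under the queue lock, while readers
 * need no locking.
 *
 *	blkg_stat_init(&eg->stat_dispatched);
 *	...
 *	blkg_stat_add(&eg->stat_dispatched, 1);
 *	...
 *	seq_printf(sf, "%llu\n",
 *		   (unsigned long long)blkg_stat_read(&eg->stat_dispatched));
 */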

/**
 * blkg_stat_add_aux - add a blkg_stat into another's aux count
 * @to: the destination blkg_stat
 * @from: the source
 *
 * Add @from's count including the aux one to @to's aux count.
 */
static inline void blkg_stat_add_aux(struct blkg_stat *to,
				     struct blkg_stat *from)
{
	atomic64_add(blkg_stat_read(from) + atomic64_read(&from->aux_cnt),
		     &to->aux_cnt);
}

static inline void blkg_rwstat_init(struct blkg_rwstat *rwstat)
{
	int i;

	u64_stats_init(&rwstat->syncp);

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		atomic64_set(&rwstat->aux_cnt[i], 0);
}

/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @rw: mask of REQ_{WRITE|SYNC}
 * @val: value to add
 *
 * Add @val to @rwstat.  The counters are chosen according to @rw.  The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
				   int rw, uint64_t val)
{
	u64_stats_update_begin(&rwstat->syncp);

	if (rw & REQ_WRITE)
		rwstat->cnt[BLKG_RWSTAT_WRITE] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_READ] += val;
	if (rw & REQ_SYNC)
		rwstat->cnt[BLKG_RWSTAT_SYNC] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_ASYNC] += val;

	u64_stats_update_end(&rwstat->syncp);
}
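
/*
 * Sketch: a bio's direction and sync-ness are accounted with a single
 * call by passing its rw flags (eg->stat_ios is hypothetical; in this
 * kernel's bio layout bio->bi_rw carries REQ_WRITE and REQ_SYNC).
 *
 *	blkg_rwstat_add(&eg->stat_ios, bio->bi_rw, 1);
 */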

/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Read the current snapshot of @rwstat and return it as the return value.
 * This function can be called without synchronization and takes care of
 * u64 atomicity.
 */
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
	unsigned int start;
	struct blkg_rwstat tmp;

	do {
		start = u64_stats_fetch_begin_irq(&rwstat->syncp);
		tmp = *rwstat;
	} while (u64_stats_fetch_retry_irq(&rwstat->syncp, start));

	return tmp;
}

/**
 * blkg_rwstat_total - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction.  This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);

	return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
}

/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
	int i;

	memset(rwstat->cnt, 0, sizeof(rwstat->cnt));

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		atomic64_set(&rwstat->aux_cnt[i], 0);
}

/**
 * blkg_rwstat_add_aux - add a blkg_rwstat into another's aux count
 * @to: the destination blkg_rwstat
 * @from: the source
 *
 * Add @from's count including the aux one to @to's aux count.
 */
static inline void blkg_rwstat_add_aux(struct blkg_rwstat *to,
				       struct blkg_rwstat *from)
{
	struct blkg_rwstat v = blkg_rwstat_read(from);
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		atomic64_add(v.cnt[i] + atomic64_read(&from->aux_cnt[i]),
			     &to->aux_cnt[i]);
}

#ifdef CONFIG_BLK_DEV_THROTTLING
extern bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
			   struct bio *bio);
#else
static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
				  struct bio *bio) { return false; }
#endif

static inline bool blkcg_bio_issue_check(struct request_queue *q,
					 struct bio *bio)
{
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;
	bool throtl = false;

	rcu_read_lock();
	blkcg = bio_blkcg(bio);

	blkg = blkg_lookup(blkcg, q);
	if (unlikely(!blkg)) {
		spin_lock_irq(q->queue_lock);
		blkg = blkg_lookup_create(blkcg, q);
		if (IS_ERR(blkg))
			blkg = NULL;
		spin_unlock_irq(q->queue_lock);
	}

	throtl = blk_throtl_bio(q, blkg, bio);

	rcu_read_unlock();
	return !throtl;
}
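
/*
 * Sketch of the intended call site in the bio submission path (the
 * in-tree caller is generic_make_request_checks() in blk-core.c): the
 * bio proceeds only if this returns %true.
 *
 *	if (!blkcg_bio_issue_check(q, bio))
 *		return;
 */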

#else	/* CONFIG_BLK_CGROUP */

struct blkcg {
};

struct blkg_policy_data {
};

struct blkcg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

#define blkcg_root_css	((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))

static inline struct cgroup_subsys_state *
task_get_blkcg_css(struct task_struct *task)
{
	return NULL;
}

#ifdef CONFIG_BLOCK

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
					   struct request_queue *q) { return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }

static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio) { return &q->root_rl; }
static inline void blk_put_rl(struct request_list *rl) { }
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }

static inline bool blkcg_bio_issue_check(struct request_queue *q,
					 struct bio *bio) { return true; }

#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLOCK */
#endif	/* CONFIG_BLK_CGROUP */
#endif	/* _BLK_CGROUP_H */