#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/percpu_counter.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
#include <linux/atomic.h>

/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
#define BLKG_STAT_CPU_BATCH	(INT_MAX / 2)

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

#ifdef CONFIG_BLK_CGROUP

enum blkg_rwstat_type {
	BLKG_RWSTAT_READ,
	BLKG_RWSTAT_WRITE,
	BLKG_RWSTAT_SYNC,
	BLKG_RWSTAT_ASYNC,

	BLKG_RWSTAT_NR,
	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};

struct blkcg_gq;

struct blkcg {
	struct cgroup_subsys_state	css;
	spinlock_t			lock;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq			*blkg_hint;
	struct hlist_head		blkg_list;

	struct blkcg_policy_data	*cpd[BLKCG_MAX_POLS];

	struct list_head		all_blkcgs_node;
#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head		cgwb_list;
#endif
};

/*
 * blkg_[rw]stat->aux_cnt is excluded for local stats but included for
 * recursive.  Used to carry stats of dead children, and, for blkg_rwstat,
 * to carry result values from read and sum operations.
 */
struct blkg_stat {
	struct percpu_counter		cpu_cnt;
	atomic64_t			aux_cnt;
};

struct blkg_rwstat {
	struct percpu_counter		cpu_cnt[BLKG_RWSTAT_NR];
	atomic64_t			aux_cnt[BLKG_RWSTAT_NR];
};

/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each blkg:policy pair is
 * represented by a blkg_policy_data which is allocated and freed by each
 * policy's pd_alloc/free_fn() methods.  A policy can allocate a private
 * data area by allocating a larger data structure which embeds
 * blkg_policy_data at the beginning.
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq			*blkg;
	int				plid;
};
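
/*
 * Illustrative sketch, not part of this header: a policy typically wraps
 * blkg_policy_data in a larger per-blkg structure and converts back with
 * container_of().  All "examplepol" names below are hypothetical.
 */
struct examplepol_blkg_data {
	struct blkg_policy_data	pd;		/* embedded at the beginning */
	u64			nr_dispatched;	/* policy-private field */
};

static inline struct examplepol_blkg_data *
pd_to_examplepol(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct examplepol_blkg_data, pd) : NULL;
}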

/*
 * Policies that need to keep per-blkcg data which is independent from any
 * request_queue associated to it should implement cpd_alloc/free_fn()
 * methods.  A policy can allocate a private data area by allocating a
 * larger data structure which embeds blkcg_policy_data at the beginning.
 * cpd_init() is invoked to let each policy handle per-blkcg data.
 */
struct blkcg_policy_data {
	/* the blkcg and policy id this per-policy data belongs to */
	struct blkcg			*blkcg;
	int				plid;
};

/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;

	/*
	 * Each blkg gets congested separately and the congestion state is
	 * propagated to the matching bdi_writeback_congested.
	 */
	struct bdi_writeback_congested	*wb_congested;

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq			*parent;

	/* request allocation list for this blkcg-q pair */
	struct request_list		rl;

	/* reference count */
	atomic_t			refcnt;

	/* is this blkg online? protected by both blkcg and q locks */
	bool				online;

	struct blkg_rwstat		stat_bytes;
	struct blkg_rwstat		stat_ios;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];

	struct rcu_head			rcu_head;
};

typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp, int node);
typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);

struct blkcg_policy {
	int				plid;
	/* cgroup files for the policy */
	struct cftype			*legacy_cftypes;

	/* operations */
	blkcg_pol_alloc_cpd_fn		*cpd_alloc_fn;
	blkcg_pol_init_cpd_fn		*cpd_init_fn;
	blkcg_pol_free_cpd_fn		*cpd_free_fn;

	blkcg_pol_alloc_pd_fn		*pd_alloc_fn;
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_online_pd_fn		*pd_online_fn;
	blkcg_pol_offline_pd_fn		*pd_offline_fn;
	blkcg_pol_free_pd_fn		*pd_free_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
};

extern struct blkcg blkcg_root;
extern struct cgroup_subsys_state * const blkcg_root_css;

struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
				      struct request_queue *q, bool update_hint);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);
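
/*
 * Illustrative sketch, not part of this header: a minimal policy fills in a
 * blkcg_policy table and registers it once.  The "examplepol" callbacks are
 * hypothetical; pd_alloc_fn/pd_free_fn are the minimum needed for per-blkg
 * data, and ->plid is assigned by blkcg_policy_register().
 */
struct blkg_policy_data *examplepol_pd_alloc(gfp_t gfp, int node);
void examplepol_pd_init(struct blkg_policy_data *pd);
void examplepol_pd_free(struct blkg_policy_data *pd);

static struct blkcg_policy examplepol_policy = {
	.pd_alloc_fn	= examplepol_pd_alloc,
	.pd_init_fn	= examplepol_pd_init,
	.pd_free_fn	= examplepol_pd_free,
};

static inline int examplepol_register(void)
{
	/* queues that want the policy then call blkcg_activate_policy() */
	return blkcg_policy_register(&examplepol_policy);
}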

const char *blkg_dev_name(struct blkcg_gq *blkg);
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat);
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off);
int blkg_print_stat_bytes(struct seq_file *sf, void *v);
int blkg_print_stat_ios(struct seq_file *sf, void *v);
int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v);
int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v);

u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
			    struct blkcg_policy *pol, int off);
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
					     struct blkcg_policy *pol, int off);

struct blkg_conf_ctx {
	struct gendisk			*disk;
	struct blkcg_gq			*blkg;
	char				*body;
};

int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);
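
/*
 * Illustrative sketch, not part of this header: a policy's cgroup-file
 * write handler usually brackets a per-device update with
 * blkg_conf_prep()/blkg_conf_finish().  "examplepol" is hypothetical.
 */
static inline int examplepol_conf_write(struct blkcg *blkcg,
					const struct blkcg_policy *pol,
					char *input)
{
	struct blkg_conf_ctx ctx;
	int ret;

	ret = blkg_conf_prep(blkcg, pol, input, &ctx);
	if (ret)
		return ret;

	/*
	 * ctx.blkg is the blkg for the device named at the start of @input
	 * and ctx.body points at the rest of the line; the new setting
	 * would be applied to the policy's pd here.
	 */

	blkg_conf_finish(&ctx);
	return 0;
}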


static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct blkcg, css) : NULL;
}

static inline struct blkcg *task_blkcg(struct task_struct *tsk)
{
	return css_to_blkcg(task_css(tsk, io_cgrp_id));
}

static inline struct blkcg *bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_css)
		return css_to_blkcg(bio->bi_css);
	return task_blkcg(current);
}

static inline struct cgroup_subsys_state *
task_get_blkcg_css(struct task_struct *task)
{
	return task_get_css(task, io_cgrp_id);
}

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg.  Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
	return css_to_blkcg(blkcg->css.parent);
}

/**
 * __blkg_lookup - internal version of blkg_lookup()
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 * @update_hint: whether to update lookup hint with the result or not
 *
 * This is the internal version and shouldn't be used by policy
 * implementations.  Looks up blkgs for the @blkcg - @q pair regardless of
 * @q's bypass state.  If @update_hint is %true, the caller should be
 * holding @q->queue_lock and lookup hint is updated on success.
 */
static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
					     struct request_queue *q,
					     bool update_hint)
{
	struct blkcg_gq *blkg;

	if (blkcg == &blkcg_root)
		return q->root_blkg;

	blkg = rcu_dereference(blkcg->blkg_hint);
	if (blkg && blkg->q == q)
		return blkg;

	return blkg_lookup_slowpath(blkcg, q, update_hint);
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  This function should be called
 * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
 * - see blk_queue_bypass_start() for details.
 */
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
					   struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (unlikely(blk_queue_bypass(q)))
		return NULL;
	return __blkg_lookup(blkcg, q, false);
}
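
/*
 * Illustrative sketch, not part of this header: blkg_lookup() must run
 * under the RCU read lock, and the returned blkg is only stable inside
 * that section unless a reference is taken with blkg_get().
 */
static inline bool example_blkg_exists(struct blkcg *blkcg,
				       struct request_queue *q)
{
	bool found;

	rcu_read_lock();
	found = blkg_lookup(blkcg, q) != NULL;
	rcu_read_unlock();

	return found;
}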

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
						     struct blkcg_policy *pol)
{
	return blkcg ? blkcg->cpd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}

static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
{
	return cpd ? cpd->blkcg : NULL;
}

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	char *p;

	p = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
	if (!p) {
		strncpy(buf, "<unavailable>", buflen);
		return -ENAMETOOLONG;
	}

	memmove(buf, p, buf + buflen - p);
	return 0;
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
	atomic_inc(&blkg->refcnt);
}

void __blkg_release_rcu(struct rcu_head *rcu);

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
	if (atomic_dec_and_test(&blkg->refcnt))
		call_rcu(&blkg->rcu_head, __blkg_release_rcu);
}

/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip subtree.
 * @p_blkg is included in the iteration and the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))
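
/*
 * Illustrative sketch, not part of this header: counting the blkgs
 * populated in a subtree.  The walk itself requires the RCU read lock;
 * holding the blkcg or queue lock additionally pins the set of online
 * blkgs.
 */
static inline unsigned int example_count_blkgs(struct blkcg_gq *p_blkg)
{
	struct cgroup_subsys_state *pos_css;
	struct blkcg_gq *blkg;
	unsigned int nr = 0;

	blkg_for_each_descendant_pre(blkg, pos_css, p_blkg)
		nr++;			/* @p_blkg itself is visited first */

	return nr;
}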

/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

/**
 * blk_get_rl - get request_list to use
 * @q: request_queue of interest
 * @bio: bio which will be attached to the allocated request (may be %NULL)
 *
 * The caller wants to allocate a request from @q to use for @bio.  Find
 * the request_list to use and obtain a reference on it.  Should be called
 * under queue_lock.  This function is guaranteed to return non-%NULL
 * request_list.
 */
static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio)
{
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;

	rcu_read_lock();

	blkcg = bio_blkcg(bio);

	/* bypass blkg lookup and use @q->root_rl directly for root */
	if (blkcg == &blkcg_root)
		goto root_rl;

	/*
	 * Try to use blkg->rl.  blkg lookup may fail under memory pressure
	 * or if either the blkcg or queue is going away.  Fall back to
	 * root_rl in such cases.
	 */
	blkg = blkg_lookup(blkcg, q);
	if (unlikely(!blkg))
		goto root_rl;

	blkg_get(blkg);
	rcu_read_unlock();
	return &blkg->rl;
root_rl:
	rcu_read_unlock();
	return &q->root_rl;
}

/**
 * blk_put_rl - put request_list
 * @rl: request_list to put
 *
 * Put the reference acquired by blk_get_rl().  Should be called under
 * queue_lock.
 */
static inline void blk_put_rl(struct request_list *rl)
{
	if (rl->blkg->blkcg != &blkcg_root)
		blkg_put(rl->blkg);
}

/**
 * blk_rq_set_rl - associate a request with a request_list
 * @rq: request of interest
 * @rl: target request_list
 *
 * Associate @rq with @rl so that accounting and freeing can know the
 * request_list @rq came from.
 */
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
{
	rq->rl = rl;
}

/**
 * blk_rq_rl - return the request_list a request came from
 * @rq: request of interest
 *
 * Return the request_list @rq is allocated from.
 */
static inline struct request_list *blk_rq_rl(struct request *rq)
{
	return rq->rl;
}

struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q);
/**
 * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
 *
 * Should be used under queue_lock.
 */
#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))
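
/*
 * Illustrative sketch, not part of this header: walking every request_list
 * of a queue with the iterator above.  The queue_lock is assumed to be
 * held; the helper name is hypothetical.
 */
static inline unsigned int example_count_rls(struct request_queue *q)
{
	struct request_list *rl;
	unsigned int nr = 0;

	blk_queue_for_each_rl(rl, q)
		nr++;			/* root_rl first, then per-blkg rls */

	return nr;
}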

static inline int blkg_stat_init(struct blkg_stat *stat, gfp_t gfp)
{
	int ret;

	ret = percpu_counter_init(&stat->cpu_cnt, 0, gfp);
	if (ret)
		return ret;

	atomic64_set(&stat->aux_cnt, 0);
	return 0;
}

static inline void blkg_stat_exit(struct blkg_stat *stat)
{
	percpu_counter_destroy(&stat->cpu_cnt);
}

/**
 * blkg_stat_add - add a value to a blkg_stat
 * @stat: target blkg_stat
 * @val: value to add
 *
 * Add @val to @stat.  The caller must ensure that IRQs on the same CPU
 * don't re-enter this function for the same counter.
 */
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
	__percpu_counter_add(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH);
}

/**
 * blkg_stat_read - read the current value of a blkg_stat
 * @stat: blkg_stat to read
 */
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
	return percpu_counter_sum_positive(&stat->cpu_cnt);
}

/**
 * blkg_stat_reset - reset a blkg_stat
 * @stat: blkg_stat to reset
 */
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
	percpu_counter_set(&stat->cpu_cnt, 0);
	atomic64_set(&stat->aux_cnt, 0);
}

/**
 * blkg_stat_add_aux - add a blkg_stat into another's aux count
 * @to: the destination blkg_stat
 * @from: the source
 *
 * Add @from's count including the aux one to @to's aux count.
 */
static inline void blkg_stat_add_aux(struct blkg_stat *to,
				     struct blkg_stat *from)
{
	atomic64_add(blkg_stat_read(from) + atomic64_read(&from->aux_cnt),
		     &to->aux_cnt);
}
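
/*
 * Illustrative sketch, not part of this header: typical blkg_stat usage in
 * a policy - bump it from the issue path and fold a child's counts into
 * the parent's aux count when the child goes offline.  "examplepol" names
 * are hypothetical.
 */
static inline void examplepol_account_bytes(struct blkg_stat *stat, u64 bytes)
{
	blkg_stat_add(stat, bytes);		/* per-cpu, cheap on the hot path */
}

static inline void examplepol_offline_stat(struct blkg_stat *parent,
					   struct blkg_stat *child)
{
	blkg_stat_add_aux(parent, child);	/* preserve the dead child's count */
}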

static inline int blkg_rwstat_init(struct blkg_rwstat *rwstat, gfp_t gfp)
{
	int i, ret;

	for (i = 0; i < BLKG_RWSTAT_NR; i++) {
		ret = percpu_counter_init(&rwstat->cpu_cnt[i], 0, gfp);
		if (ret) {
			while (--i >= 0)
				percpu_counter_destroy(&rwstat->cpu_cnt[i]);
			return ret;
		}
		atomic64_set(&rwstat->aux_cnt[i], 0);
	}
	return 0;
}

static inline void blkg_rwstat_exit(struct blkg_rwstat *rwstat)
{
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		percpu_counter_destroy(&rwstat->cpu_cnt[i]);
}

/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @rw: mask of REQ_{WRITE|SYNC}
 * @val: value to add
 *
 * Add @val to @rwstat.  The counters are chosen according to @rw.  The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
				   int rw, uint64_t val)
{
	struct percpu_counter *cnt;

	if (rw & REQ_WRITE)
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_WRITE];
	else
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ];

	__percpu_counter_add(cnt, val, BLKG_STAT_CPU_BATCH);

	if (rw & REQ_SYNC)
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC];
	else
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC];

	__percpu_counter_add(cnt, val, BLKG_STAT_CPU_BATCH);
}
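
/*
 * Illustrative sketch, not part of this header: accounting one bio in a
 * pair of policy-owned rwstats, keyed on the bio's REQ_* bits (bi_rw).
 * The "examplepol" name is hypothetical.
 */
static inline void examplepol_account_bio(struct blkg_rwstat *bytes,
					  struct blkg_rwstat *ios,
					  struct bio *bio)
{
	blkg_rwstat_add(bytes, bio->bi_rw, bio->bi_iter.bi_size);
	blkg_rwstat_add(ios, bio->bi_rw, 1);
}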

/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Read the current snapshot of @rwstat and return it in the aux counts.
 */
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat result;
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		atomic64_set(&result.aux_cnt[i],
			     percpu_counter_sum_positive(&rwstat->cpu_cnt[i]));
	return result;
}

/**
 * blkg_rwstat_total - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction.  This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);

	return atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
		atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);
}

/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++) {
		percpu_counter_set(&rwstat->cpu_cnt[i], 0);
		atomic64_set(&rwstat->aux_cnt[i], 0);
	}
}

/**
 * blkg_rwstat_add_aux - add a blkg_rwstat into another's aux count
 * @to: the destination blkg_rwstat
 * @from: the source
 *
 * Add @from's count including the aux one to @to's aux count.
 */
static inline void blkg_rwstat_add_aux(struct blkg_rwstat *to,
				       struct blkg_rwstat *from)
{
	struct blkg_rwstat v = blkg_rwstat_read(from);
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		atomic64_add(atomic64_read(&v.aux_cnt[i]) +
			     atomic64_read(&from->aux_cnt[i]),
			     &to->aux_cnt[i]);
}

#ifdef CONFIG_BLK_DEV_THROTTLING
extern bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
			   struct bio *bio);
#else
static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
				  struct bio *bio) { return false; }
#endif

static inline bool blkcg_bio_issue_check(struct request_queue *q,
					 struct bio *bio)
{
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;
	bool throtl = false;

	rcu_read_lock();
	blkcg = bio_blkcg(bio);

	blkg = blkg_lookup(blkcg, q);
	if (unlikely(!blkg)) {
		spin_lock_irq(q->queue_lock);
		blkg = blkg_lookup_create(blkcg, q);
		if (IS_ERR(blkg))
			blkg = NULL;
		spin_unlock_irq(q->queue_lock);
	}

	throtl = blk_throtl_bio(q, blkg, bio);

	if (!throtl) {
		blkg = blkg ?: q->root_blkg;
		blkg_rwstat_add(&blkg->stat_bytes, bio->bi_rw,
				bio->bi_iter.bi_size);
		blkg_rwstat_add(&blkg->stat_ios, bio->bi_rw, 1);
	}

	rcu_read_unlock();
	return !throtl;
}

#else	/* CONFIG_BLK_CGROUP */

struct blkcg {
};

struct blkg_policy_data {
};

struct blkcg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

#define blkcg_root_css	((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))

static inline struct cgroup_subsys_state *
task_get_blkcg_css(struct task_struct *task)
{
	return NULL;
}

#ifdef CONFIG_BLOCK

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }

static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio) { return &q->root_rl; }
static inline void blk_put_rl(struct request_list *rl) { }
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }

static inline bool blkcg_bio_issue_check(struct request_queue *q,
					 struct bio *bio) { return true; }

#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLOCK */
#endif	/* CONFIG_BLK_CGROUP */
#endif	/* _BLK_CGROUP_H */