/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 * 	              Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/percpu_counter.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
#include <linux/atomic.h>
#include <linux/kthread.h>
#include <linux/fs.h>

/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
#define BLKG_STAT_CPU_BATCH	(INT_MAX / 2)

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

#ifdef CONFIG_BLK_CGROUP

enum blkg_rwstat_type {
	BLKG_RWSTAT_READ,
	BLKG_RWSTAT_WRITE,
	BLKG_RWSTAT_SYNC,
	BLKG_RWSTAT_ASYNC,
	BLKG_RWSTAT_DISCARD,

	BLKG_RWSTAT_NR,
	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};

struct blkcg_gq;

struct blkcg {
	struct cgroup_subsys_state	css;
	spinlock_t			lock;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq	__rcu		*blkg_hint;
	struct hlist_head		blkg_list;

	struct blkcg_policy_data	*cpd[BLKCG_MAX_POLS];

	struct list_head		all_blkcgs_node;
#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head		cgwb_list;
	refcount_t			cgwb_refcnt;
#endif
};

/*
 * blkg_[rw]stat->aux_cnt is excluded for local stats but included for
 * recursive.  Used to carry stats of dead children, and, for blkg_rwstat,
 * to carry result values from read and sum operations.
 */
struct blkg_stat {
	struct percpu_counter		cpu_cnt;
	atomic64_t			aux_cnt;
};

struct blkg_rwstat {
	struct percpu_counter		cpu_cnt[BLKG_RWSTAT_NR];
	atomic64_t			aux_cnt[BLKG_RWSTAT_NR];
};

/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each blkg:policy pair is
 * represented by a blkg_policy_data which is allocated and freed by each
 * policy's pd_alloc/free_fn() methods.  A policy can allocate a private data
 * area by allocating a larger data structure which embeds blkg_policy_data
 * at the beginning.
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq			*blkg;
	int				plid;
};

/*
 * Policies that need to keep per-blkcg data which is independent from any
 * request_queue associated with it should implement cpd_alloc/free_fn()
 * methods.  A policy can allocate a private data area by allocating a larger
 * data structure which embeds blkcg_policy_data at the beginning.
 * cpd_init() is invoked to let each policy handle per-blkcg data.
 */
struct blkcg_policy_data {
	/* the blkcg and policy id this per-policy data belongs to */
	struct blkcg			*blkcg;
	int				plid;
};

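/*
 * Illustrative sketch (not part of this header): a policy typically embeds
 * blkg_policy_data at the start of its own structure so that the pd pointer
 * handed to the pd_*_fn() callbacks maps back to the policy's type, e.g.
 * with container_of().  The "ioplex" names are hypothetical.
 *
 *	struct ioplex_grp {
 *		struct blkg_policy_data	pd;	// must come first
 *		u64			budget;	// policy-private state
 *	};
 *
 *	static struct blkg_policy_data *ioplex_pd_alloc(gfp_t gfp, int node)
 *	{
 *		struct ioplex_grp *ig = kzalloc_node(sizeof(*ig), gfp, node);
 *
 *		return ig ? &ig->pd : NULL;
 *	}
 */
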
/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;

	/*
	 * Each blkg gets congested separately and the congestion state is
	 * propagated to the matching bdi_writeback_congested.
	 */
	struct bdi_writeback_congested	*wb_congested;

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq			*parent;

	/* reference count */
	atomic_t			refcnt;

	/* is this blkg online? protected by both blkcg and q locks */
	bool				online;

	struct blkg_rwstat		stat_bytes;
	struct blkg_rwstat		stat_ios;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];

	struct rcu_head			rcu_head;

	atomic_t			use_delay;
	atomic64_t			delay_nsec;
	atomic64_t			delay_start;
	u64				last_delay;
	int				last_use;
};

typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd);
typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp, int node);
typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
typedef size_t (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd, char *buf,
				      size_t size);

struct blkcg_policy {
	int				plid;
	/* cgroup files for the policy */
	struct cftype			*dfl_cftypes;
	struct cftype			*legacy_cftypes;

	/* operations */
	blkcg_pol_alloc_cpd_fn		*cpd_alloc_fn;
	blkcg_pol_init_cpd_fn		*cpd_init_fn;
	blkcg_pol_free_cpd_fn		*cpd_free_fn;
	blkcg_pol_bind_cpd_fn		*cpd_bind_fn;

	blkcg_pol_alloc_pd_fn		*pd_alloc_fn;
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_online_pd_fn		*pd_online_fn;
	blkcg_pol_offline_pd_fn		*pd_offline_fn;
	blkcg_pol_free_pd_fn		*pd_free_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
	blkcg_pol_stat_pd_fn		*pd_stat_fn;
};

extern struct blkcg blkcg_root;
extern struct cgroup_subsys_state * const blkcg_root_css;

struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
				      struct request_queue *q, bool update_hint);
struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
				      struct request_queue *q);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);

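/*
 * Illustrative sketch (hypothetical "ioplex" policy): a policy is typically
 * registered from module init and unregistered on exit; per-queue activation
 * via blkcg_activate_policy() is what triggers the pd_*_fn() callbacks.
 *
 *	static struct blkcg_policy ioplex_policy = {
 *		.pd_alloc_fn	= ioplex_pd_alloc,
 *		.pd_init_fn	= ioplex_pd_init,
 *		.pd_free_fn	= ioplex_pd_free,
 *	};
 *
 *	static int __init ioplex_init(void)
 *	{
 *		return blkcg_policy_register(&ioplex_policy);
 *	}
 *
 *	static void __exit ioplex_exit(void)
 *	{
 *		blkcg_policy_unregister(&ioplex_policy);
 *	}
 */
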
const char *blkg_dev_name(struct blkcg_gq *blkg);
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat);
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off);
int blkg_print_stat_bytes(struct seq_file *sf, void *v);
int blkg_print_stat_ios(struct seq_file *sf, void *v);
int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v);
int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v);

u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
			    struct blkcg_policy *pol, int off);
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
					     struct blkcg_policy *pol, int off);

struct blkg_conf_ctx {
	struct gendisk			*disk;
	struct blkcg_gq			*blkg;
	char				*body;
};

int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);

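/*
 * Illustrative sketch: a policy's cgroup file ->write() handler brackets its
 * parsing with blkg_conf_prep()/blkg_conf_finish(); the body handling shown
 * here is hypothetical.
 *
 *	struct blkg_conf_ctx ctx;
 *	int ret;
 *
 *	ret = blkg_conf_prep(blkcg, &ioplex_policy, buf, &ctx);
 *	if (ret)
 *		return ret;
 *
 *	// ctx.blkg is valid here; parse ctx.body and update the blkg's
 *	// policy data, then release with:
 *	blkg_conf_finish(&ctx);
 */
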
/**
 * blkcg_css - find the current css
 *
 * Find the css associated with either the kthread or the current task.
 * This may return a dying css, so it is up to the caller to use tryget logic
 * to confirm it is alive and well.
 */
static inline struct cgroup_subsys_state *blkcg_css(void)
{
	struct cgroup_subsys_state *css;

	css = kthread_blkcg();
	if (css)
		return css;
	return task_css(current, io_cgrp_id);
}

/**
 * blkcg_get_css - find and get a reference to the css
 *
 * Find the css associated with either the kthread or the current task.
 * This takes a reference on the blkcg which will need to be managed by the
 * caller.
 */
static inline struct cgroup_subsys_state *blkcg_get_css(void)
{
	struct cgroup_subsys_state *css;

	rcu_read_lock();

	css = kthread_blkcg();
	if (css) {
		css_get(css);
	} else {
		/*
		 * This is a bit complicated.  It is possible task_css() is
		 * seeing an old css pointer here.  This is caused by the
		 * current thread migrating away from this cgroup and this
		 * cgroup dying.  css_tryget() will fail when trying to take a
		 * ref on a cgroup whose ref count has hit 0.
		 *
		 * Therefore, if it does fail, this means current must have
		 * been swapped away already and this is waiting for it to
		 * propagate on the polling cpu.  Hence the use of cpu_relax().
		 */
		while (true) {
			css = task_css(current, io_cgrp_id);
			if (likely(css_tryget(css)))
				break;
			cpu_relax();
		}
	}

	rcu_read_unlock();

	return css;
}

static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct blkcg, css) : NULL;
}

/**
 * __bio_blkcg - internal, inconsistent version to get blkcg
 *
 * DO NOT USE.
 * This function is inconsistent and consequently is dangerous to use.  The
 * first part of the function returns a blkcg where a reference is owned by the
 * bio.  This means it does not need to be rcu protected as it cannot go away
 * with the bio owning a reference to it.  However, the latter potentially gets
 * it from task_css().  This can race against task migration and the cgroup
 * dying.  It is also semantically different as it must be called rcu protected
 * and is susceptible to failure when trying to get a reference to it.
 * Therefore, it is not ok to assume that *_get() will always succeed on the
 * blkcg returned here.
 */
static inline struct blkcg *__bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_css)
		return css_to_blkcg(bio->bi_css);
	return css_to_blkcg(blkcg_css());
}

/**
 * bio_blkcg - grab the blkcg associated with a bio
 * @bio: target bio
 *
 * This returns the blkcg associated with a bio, %NULL if not associated.
 * Callers are expected to either handle %NULL or know association has been
 * done prior to calling this.
 */
static inline struct blkcg *bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_css)
		return css_to_blkcg(bio->bi_css);
	return NULL;
}

static inline bool blk_cgroup_congested(void)
{
	struct cgroup_subsys_state *css;
	bool ret = false;

	rcu_read_lock();
	css = kthread_blkcg();
	if (!css)
		css = task_css(current, io_cgrp_id);
	while (css) {
		if (atomic_read(&css->cgroup->congestion_count)) {
			ret = true;
			break;
		}
		css = css->parent;
	}
	rcu_read_unlock();
	return ret;
}

/**
 * bio_issue_as_root_blkg - see if this bio needs to be issued as root blkg
 * @return: true if this bio needs to be submitted with the root blkg context.
 *
 * In order to avoid priority inversions we sometimes need to issue a bio as if
 * it were attached to the root blkg, and then backcharge to the actual owning
 * blkg.  The idea is we do bio_blkcg() to look up the actual context for the
 * bio and attach the appropriate blkg to the bio.  Then we call this helper and
 * if it is true run with the root blkg for that queue and then do any
 * backcharging to the originating cgroup once the io is complete.
 */
static inline bool bio_issue_as_root_blkg(struct bio *bio)
{
	return (bio->bi_opf & (REQ_META | REQ_SWAP)) != 0;
}

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg.  Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
	return css_to_blkcg(blkcg->css.parent);
}

/**
 * __blkg_lookup - internal version of blkg_lookup()
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 * @update_hint: whether to update lookup hint with the result or not
 *
 * This is the internal version and shouldn't be used by policy
 * implementations.  Looks up blkgs for the @blkcg - @q pair regardless of
 * @q's bypass state.  If @update_hint is %true, the caller should be
 * holding @q->queue_lock and lookup hint is updated on success.
 */
static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
					     struct request_queue *q,
					     bool update_hint)
{
	struct blkcg_gq *blkg;

	if (blkcg == &blkcg_root)
		return q->root_blkg;

	blkg = rcu_dereference(blkcg->blkg_hint);
	if (blkg && blkg->q == q)
		return blkg;

	return blkg_lookup_slowpath(blkcg, q, update_hint);
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  This function should be called
 * under RCU read lock.
 */
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
					   struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return __blkg_lookup(blkcg, q, false);
}

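/*
 * Illustrative sketch of a fast-path lookup (the accounting call is
 * hypothetical):
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(blkcg, q);
 *	if (blkg)
 *		ioplex_account(blkg);	// blkg_try_get() it if used after
 *					// rcu_read_unlock()
 *	rcu_read_unlock();
 */
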
/**
 * blk_queue_root_blkg - return blkg for the (blkcg_root, @q) pair
 * @q: request_queue of interest
 *
 * Lookup blkg for @q at the root level. See also blkg_lookup().
 */
static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
{
	return q->root_blkg;
}

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
						     struct blkcg_policy *pol)
{
	return blkcg ? blkcg->cpd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}

static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
{
	return cpd ? cpd->blkcg : NULL;
}

extern void blkcg_destroy_blkgs(struct blkcg *blkcg);

#ifdef CONFIG_CGROUP_WRITEBACK

/**
 * blkcg_cgwb_get - get a reference for blkcg->cgwb_list
 * @blkcg: blkcg of interest
 *
 * This is used to track the number of active wb's related to a blkcg.
 */
static inline void blkcg_cgwb_get(struct blkcg *blkcg)
{
	refcount_inc(&blkcg->cgwb_refcnt);
}

/**
 * blkcg_cgwb_put - put a reference for @blkcg->cgwb_list
 * @blkcg: blkcg of interest
 *
 * This is used to track the number of active wb's related to a blkcg.
 * When this count goes to zero, all active wbs have finished so the
 * blkcg can continue destruction by calling blkcg_destroy_blkgs().
 * This work may occur in cgwb_release_workfn() on the cgwb_release
 * workqueue.
 */
static inline void blkcg_cgwb_put(struct blkcg *blkcg)
{
	if (refcount_dec_and_test(&blkcg->cgwb_refcnt))
		blkcg_destroy_blkgs(blkcg);
}

#else

static inline void blkcg_cgwb_get(struct blkcg *blkcg) { }

static inline void blkcg_cgwb_put(struct blkcg *blkcg)
{
	/* wb isn't being accounted, so trigger destruction right away */
	blkcg_destroy_blkgs(blkcg);
}

#endif

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	return cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
	atomic_inc(&blkg->refcnt);
}

/**
 * blkg_try_get - try and get a blkg reference
 * @blkg: blkg to get
 *
 * This is for use when doing an RCU lookup of the blkg.  We may be in the midst
 * of freeing this blkg, so we can only use it if the refcnt is not zero.
 */
static inline struct blkcg_gq *blkg_try_get(struct blkcg_gq *blkg)
{
	if (atomic_inc_not_zero(&blkg->refcnt))
		return blkg;
	return NULL;
}

/**
 * blkg_try_get_closest - try and get a blkg ref on the closest blkg
 * @blkg: blkg to get
 *
 * This walks up the blkg tree to find the closest non-dying blkg and returns
 * the blkg it took a reference on, which may not be the blkg passed in.
 */
static inline struct blkcg_gq *blkg_try_get_closest(struct blkcg_gq *blkg)
{
	while (!atomic_inc_not_zero(&blkg->refcnt))
		blkg = blkg->parent;

	return blkg;
}

void __blkg_release_rcu(struct rcu_head *rcu);

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
	if (atomic_dec_and_test(&blkg->refcnt))
		call_rcu(&blkg->rcu_head, __blkg_release_rcu);
}

/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip subtree.
 * @p_blkg is included in the iteration and the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

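/*
 * Illustrative sketch: pushing a configuration change down the hierarchy
 * (the per-blkg update is hypothetical):
 *
 *	struct cgroup_subsys_state *pos_css;
 *	struct blkcg_gq *blkg;
 *
 *	rcu_read_lock();
 *	blkg_for_each_descendant_pre(blkg, pos_css, p_blkg)
 *		ioplex_update_limit(blkg);
 *	rcu_read_unlock();
 */
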
static inline int blkg_stat_init(struct blkg_stat *stat, gfp_t gfp)
{
	int ret;

	ret = percpu_counter_init(&stat->cpu_cnt, 0, gfp);
	if (ret)
		return ret;

	atomic64_set(&stat->aux_cnt, 0);
	return 0;
}

static inline void blkg_stat_exit(struct blkg_stat *stat)
{
	percpu_counter_destroy(&stat->cpu_cnt);
}

/**
 * blkg_stat_add - add a value to a blkg_stat
 * @stat: target blkg_stat
 * @val: value to add
 *
 * Add @val to @stat.  The caller must ensure that IRQs on the same CPU
 * don't re-enter this function for the same counter.
 */
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
	percpu_counter_add_batch(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH);
}

/**
 * blkg_stat_read - read the current value of a blkg_stat
 * @stat: blkg_stat to read
 */
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
	return percpu_counter_sum_positive(&stat->cpu_cnt);
}

/**
 * blkg_stat_reset - reset a blkg_stat
 * @stat: blkg_stat to reset
 */
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
	percpu_counter_set(&stat->cpu_cnt, 0);
	atomic64_set(&stat->aux_cnt, 0);
}

/**
 * blkg_stat_add_aux - add a blkg_stat into another's aux count
 * @to: the destination blkg_stat
 * @from: the source
 *
 * Add @from's count including the aux one to @to's aux count.
 */
static inline void blkg_stat_add_aux(struct blkg_stat *to,
				     struct blkg_stat *from)
{
	atomic64_add(blkg_stat_read(from) + atomic64_read(&from->aux_cnt),
		     &to->aux_cnt);
}

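/*
 * Illustrative sketch of the blkg_stat lifecycle (names hypothetical, error
 * handling elided):
 *
 *	struct blkg_stat st;
 *	u64 total;
 *
 *	if (blkg_stat_init(&st, GFP_KERNEL))
 *		return -ENOMEM;
 *	blkg_stat_add(&st, nbytes);	// hot path, per-cpu
 *	total = blkg_stat_read(&st);	// slow path, sums all CPUs
 *	blkg_stat_exit(&st);
 */
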
static inline int blkg_rwstat_init(struct blkg_rwstat *rwstat, gfp_t gfp)
{
	int i, ret;

	for (i = 0; i < BLKG_RWSTAT_NR; i++) {
		ret = percpu_counter_init(&rwstat->cpu_cnt[i], 0, gfp);
		if (ret) {
			while (--i >= 0)
				percpu_counter_destroy(&rwstat->cpu_cnt[i]);
			return ret;
		}
		atomic64_set(&rwstat->aux_cnt[i], 0);
	}
	return 0;
}

static inline void blkg_rwstat_exit(struct blkg_rwstat *rwstat)
{
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		percpu_counter_destroy(&rwstat->cpu_cnt[i]);
}

/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @op: REQ_OP and flags
 * @val: value to add
 *
 * Add @val to @rwstat.  The counters are chosen according to @op.  The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
				   unsigned int op, uint64_t val)
{
	struct percpu_counter *cnt;

	if (op_is_discard(op))
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_DISCARD];
	else if (op_is_write(op))
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_WRITE];
	else
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ];

	percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);

	if (op_is_sync(op))
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC];
	else
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC];

	percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);
}

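/*
 * Illustrative sketch: accounting an issued bio into a policy-private
 * rwstat (field names hypothetical), mirroring what blkcg_bio_issue_check()
 * below does for stat_bytes/stat_ios:
 *
 *	blkg_rwstat_add(&ig->served_bytes, bio->bi_opf, bio->bi_iter.bi_size);
 *	blkg_rwstat_add(&ig->served_ios, bio->bi_opf, 1);
 */
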
/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Read the current snapshot of @rwstat and return it in the aux counts.
 */
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat result;
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		atomic64_set(&result.aux_cnt[i],
			     percpu_counter_sum_positive(&rwstat->cpu_cnt[i]));
	return result;
}

/**
 * blkg_rwstat_total - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction.  This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);

	return atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
		atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);
}

/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++) {
		percpu_counter_set(&rwstat->cpu_cnt[i], 0);
		atomic64_set(&rwstat->aux_cnt[i], 0);
	}
}

/**
 * blkg_rwstat_add_aux - add a blkg_rwstat into another's aux count
 * @to: the destination blkg_rwstat
 * @from: the source
 *
 * Add @from's count including the aux one to @to's aux count.
 */
static inline void blkg_rwstat_add_aux(struct blkg_rwstat *to,
				       struct blkg_rwstat *from)
{
	u64 sum[BLKG_RWSTAT_NR];
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		sum[i] = percpu_counter_sum_positive(&from->cpu_cnt[i]);

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		atomic64_add(sum[i] + atomic64_read(&from->aux_cnt[i]),
			     &to->aux_cnt[i]);
}

#ifdef CONFIG_BLK_DEV_THROTTLING
extern bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
			   struct bio *bio);
#else
static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
				  struct bio *bio) { return false; }
#endif

static inline void blkcg_bio_issue_init(struct bio *bio)
{
	bio_issue_init(&bio->bi_issue, bio_sectors(bio));
}

static inline bool blkcg_bio_issue_check(struct request_queue *q,
					 struct bio *bio)
{
	struct blkcg_gq *blkg;
	bool throtl = false;

	if (!bio->bi_blkg) {
		char b[BDEVNAME_SIZE];

		WARN_ONCE(1,
			  "no blkg associated for bio on block-device: %s\n",
			  bio_devname(bio, b));
		bio_associate_blkg(bio);
	}

	blkg = bio->bi_blkg;

	throtl = blk_throtl_bio(q, blkg, bio);

	if (!throtl) {
		/*
		 * If the bio is flagged with BIO_QUEUE_ENTERED it means this
		 * is a split bio and we would have already accounted for the
		 * size of the bio.
		 */
		if (!bio_flagged(bio, BIO_QUEUE_ENTERED))
			blkg_rwstat_add(&blkg->stat_bytes, bio->bi_opf,
					bio->bi_iter.bi_size);
		blkg_rwstat_add(&blkg->stat_ios, bio->bi_opf, 1);
	}

	blkcg_bio_issue_init(bio);

	return !throtl;
}

static inline void blkcg_use_delay(struct blkcg_gq *blkg)
{
	if (atomic_add_return(1, &blkg->use_delay) == 1)
		atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);
}

static inline int blkcg_unuse_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	if (old == 0)
		return 0;

	/*
	 * We do this song and dance because we can race with somebody else
	 * adding or removing delay.  If we just did an atomic_dec we'd end up
	 * negative and we'd already be in trouble.  We need to subtract 1 and
	 * then check to see if we were the last delay so we can drop the
	 * congestion count on the cgroup.
	 */
	while (old) {
		int cur = atomic_cmpxchg(&blkg->use_delay, old, old - 1);
		if (cur == old)
			break;
		old = cur;
	}

	if (old == 0)
		return 0;
	if (old == 1)
		atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
	return 1;
}

static inline void blkcg_clear_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);
	if (!old)
		return;
	/* We only want 1 person clearing the congestion count for this blkg. */
	while (old) {
		int cur = atomic_cmpxchg(&blkg->use_delay, old, 0);
		if (cur == old) {
			atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
			break;
		}
		old = cur;
	}
}

void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta);
void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay);
void blkcg_maybe_throttle_current(void);
#else	/* CONFIG_BLK_CGROUP */

struct blkcg {
};

struct blkg_policy_data {
};

struct blkcg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

#define blkcg_root_css	((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))

static inline void blkcg_maybe_throttle_current(void) { }
static inline bool blk_cgroup_congested(void) { return false; }

#ifdef CONFIG_BLOCK

static inline void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay) { }

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
{ return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }

static inline struct blkcg *__bio_blkcg(struct bio *bio) { return NULL; }
static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline void blkcg_bio_issue_init(struct bio *bio) { }
static inline bool blkcg_bio_issue_check(struct request_queue *q,
					 struct bio *bio) { return true; }

#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLOCK */
#endif	/* CONFIG_BLK_CGROUP */
#endif	/* _BLK_CGROUP_H */