/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 * 	              Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/percpu.h>
#include <linux/percpu_counter.h>
#include <linux/u64_stats_sync.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
#include <linux/atomic.h>
#include <linux/kthread.h>
#include <linux/fs.h>

/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
#define BLKG_STAT_CPU_BATCH	(INT_MAX / 2)

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

#ifdef CONFIG_BLK_CGROUP

enum blkg_iostat_type {
	BLKG_IOSTAT_READ,
	BLKG_IOSTAT_WRITE,
	BLKG_IOSTAT_DISCARD,

	BLKG_IOSTAT_NR,
};

struct blkcg_gq;

struct blkcg {
	struct cgroup_subsys_state	css;
	spinlock_t			lock;
	refcount_t			online_pin;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq	__rcu		*blkg_hint;
	struct hlist_head		blkg_list;

	struct blkcg_policy_data	*cpd[BLKCG_MAX_POLS];

	struct list_head		all_blkcgs_node;
#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head		cgwb_list;
#endif
};

struct blkg_iostat {
	u64				bytes[BLKG_IOSTAT_NR];
	u64				ios[BLKG_IOSTAT_NR];
};

struct blkg_iostat_set {
	struct u64_stats_sync		sync;
	struct blkg_iostat		cur;
	struct blkg_iostat		last;
};

/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each blkg:policy pair is
 * represented by a blkg_policy_data which is allocated and freed by each
 * policy's pd_alloc/free_fn() methods.  A policy can allocate a private
 * data area by allocating a larger data structure which embeds
 * blkg_policy_data at the beginning.
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq			*blkg;
	int				plid;
};
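
/*
 * Illustrative sketch only (not part of this interface; the "foo" names are
 * hypothetical): a policy usually wraps blkg_policy_data in its own per-blkg
 * structure and converts back with container_of():
 *
 *	struct foo_grp {
 *		struct blkg_policy_data	pd;
 *		u64			bytes_dispatched;
 *	};
 *
 *	static inline struct foo_grp *pd_to_foo(struct blkg_policy_data *pd)
 *	{
 *		return pd ? container_of(pd, struct foo_grp, pd) : NULL;
 *	}
 *
 * pd_alloc_fn() returns &foo_grp->pd, and the helpers below (blkg_to_pd(),
 * pd_to_blkg()) operate on that embedded member.
 */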

/*
 * Policies that need to keep per-blkcg data which is independent of any
 * request_queue associated with it should implement cpd_alloc/free_fn()
 * methods.  A policy can allocate a private data area by allocating a
 * larger data structure which embeds blkcg_policy_data at the beginning.
 * cpd_init() is invoked to let each policy handle per-blkcg data.
 */
struct blkcg_policy_data {
	/* the blkcg and policy id this per-policy data belongs to */
	struct blkcg			*blkcg;
	int				plid;
};

/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;

	/*
	 * Each blkg gets congested separately and the congestion state is
	 * propagated to the matching bdi_writeback_congested.
	 */
	struct bdi_writeback_congested	*wb_congested;

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq			*parent;

	/* reference count */
	struct percpu_ref		refcnt;

	/* is this blkg online? protected by both blkcg and q locks */
	bool				online;

	struct blkg_iostat_set __percpu	*iostat_cpu;
	struct blkg_iostat_set		iostat;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];

	spinlock_t			async_bio_lock;
	struct bio_list			async_bios;
	struct work_struct		async_bio_work;

	atomic_t			use_delay;
	atomic64_t			delay_nsec;
	atomic64_t			delay_start;
	u64				last_delay;
	int				last_use;

	struct rcu_head			rcu_head;
};

typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd);
typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp,
				struct request_queue *q, struct blkcg *blkcg);
typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
typedef size_t (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd, char *buf,
				      size_t size);

struct blkcg_policy {
	int				plid;
	/* cgroup files for the policy */
	struct cftype			*dfl_cftypes;
	struct cftype			*legacy_cftypes;

	/* operations */
	blkcg_pol_alloc_cpd_fn		*cpd_alloc_fn;
	blkcg_pol_init_cpd_fn		*cpd_init_fn;
	blkcg_pol_free_cpd_fn		*cpd_free_fn;
	blkcg_pol_bind_cpd_fn		*cpd_bind_fn;

	blkcg_pol_alloc_pd_fn		*pd_alloc_fn;
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_online_pd_fn		*pd_online_fn;
	blkcg_pol_offline_pd_fn		*pd_offline_fn;
	blkcg_pol_free_pd_fn		*pd_free_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
	blkcg_pol_stat_pd_fn		*pd_stat_fn;
};

extern struct blkcg blkcg_root;
extern struct cgroup_subsys_state * const blkcg_root_css;
extern bool blkcg_debug_stats;

struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
				      struct request_queue *q, bool update_hint);
int blkcg_init_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);
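
/*
 * Illustrative registration sketch (hypothetical "foo" policy, not part of
 * this header):
 *
 *	static struct blkcg_policy blkcg_policy_foo = {
 *		.dfl_cftypes	= foo_files,
 *		.pd_alloc_fn	= foo_pd_alloc,
 *		.pd_init_fn	= foo_pd_init,
 *		.pd_free_fn	= foo_pd_free,
 *	};
 *
 * blkcg_policy_register(&blkcg_policy_foo) is called once at init time and
 * assigns ->plid; blkcg_activate_policy(q, &blkcg_policy_foo) then enables
 * the policy on a specific request_queue, allocating a blkg_policy_data for
 * each blkg already hanging off that queue.
 */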

const char *blkg_dev_name(struct blkcg_gq *blkg);
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);

struct blkg_conf_ctx {
	struct gendisk			*disk;
	struct blkcg_gq			*blkg;
	char				*body;
};

struct gendisk *blkcg_conf_get_disk(char **inputp);
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);
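
/*
 * Typical blkg_conf usage (illustrative sketch; the "foo" names are
 * hypothetical).  A cgroup file write handler parses "MAJ:MIN ..." input
 * with blkg_conf_prep(), updates its policy data, and releases what prep
 * acquired with blkg_conf_finish():
 *
 *	struct blkg_conf_ctx ctx;
 *	int ret;
 *
 *	ret = blkg_conf_prep(blkcg, &blkcg_policy_foo, buf, &ctx);
 *	if (ret)
 *		return ret;
 *	// ctx.blkg is the blkg for the named device, ctx.body is the rest
 *	foo_update_limit(blkg_to_pd(ctx.blkg, &blkcg_policy_foo), ctx.body);
 *	blkg_conf_finish(&ctx);
 */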

/**
 * blkcg_css - find the current css
 *
 * Find the css associated with either the kthread or the current task.
 * This may return a dying css, so it is up to the caller to use tryget logic
 * to confirm it is alive and well.
 */
static inline struct cgroup_subsys_state *blkcg_css(void)
{
	struct cgroup_subsys_state *css;

	css = kthread_blkcg();
	if (css)
		return css;
	return task_css(current, io_cgrp_id);
}

static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct blkcg, css) : NULL;
}

/**
 * __bio_blkcg - internal, inconsistent version to get blkcg
 *
 * DO NOT USE.
 * This function is inconsistent and consequently is dangerous to use.  The
 * first part of the function returns a blkcg where a reference is owned by the
 * bio.  This means it does not need to be rcu protected as it cannot go away
 * with the bio owning a reference to it.  However, the latter potentially gets
 * it from task_css().  This can race against task migration and the cgroup
 * dying.  It is also semantically different as it must be called rcu protected
 * and is susceptible to failure when trying to get a reference to it.
 * Therefore, it is not ok to assume that *_get() will always succeed on the
 * blkcg returned here.
 */
static inline struct blkcg *__bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_blkg)
		return bio->bi_blkg->blkcg;
	return css_to_blkcg(blkcg_css());
}

/**
 * bio_blkcg - grab the blkcg associated with a bio
 * @bio: target bio
 *
 * This returns the blkcg associated with a bio, %NULL if not associated.
 * Callers are expected to either handle %NULL or know association has been
 * done prior to calling this.
 */
static inline struct blkcg *bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_blkg)
		return bio->bi_blkg->blkcg;
	return NULL;
}

static inline bool blk_cgroup_congested(void)
{
	struct cgroup_subsys_state *css;
	bool ret = false;

	rcu_read_lock();
	css = kthread_blkcg();
	if (!css)
		css = task_css(current, io_cgrp_id);
	while (css) {
		if (atomic_read(&css->cgroup->congestion_count)) {
			ret = true;
			break;
		}
		css = css->parent;
	}
	rcu_read_unlock();
	return ret;
}

/**
 * bio_issue_as_root_blkg - see if this bio needs to be issued as root blkg
 * @bio: bio of interest
 *
 * Return: %true if this bio needs to be submitted with the root blkg context.
 *
 * In order to avoid priority inversions we sometimes need to issue a bio as if
 * it were attached to the root blkg, and then backcharge to the actual owning
 * blkg.  The idea is we do bio_blkcg() to look up the actual context for the
 * bio and attach the appropriate blkg to the bio.  Then we call this helper and
 * if it is true run with the root blkg for that queue and then do any
 * backcharging to the originating cgroup once the io is complete.
 */
static inline bool bio_issue_as_root_blkg(struct bio *bio)
{
	return (bio->bi_opf & (REQ_META | REQ_SWAP)) != 0;
}

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg.  Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
	return css_to_blkcg(blkcg->css.parent);
}

/**
 * __blkg_lookup - internal version of blkg_lookup()
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 * @update_hint: whether to update lookup hint with the result or not
 *
 * This is the internal version and shouldn't be used by policy
 * implementations.  Looks up blkgs for the @blkcg - @q pair regardless of
 * @q's bypass state.  If @update_hint is %true, the caller should be
 * holding @q->queue_lock and lookup hint is updated on success.
 */
static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
					     struct request_queue *q,
					     bool update_hint)
{
	struct blkcg_gq *blkg;

	if (blkcg == &blkcg_root)
		return q->root_blkg;

	blkg = rcu_dereference(blkcg->blkg_hint);
	if (blkg && blkg->q == q)
		return blkg;

	return blkg_lookup_slowpath(blkcg, q, update_hint);
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  This function should be called
 * under RCU read lock.
 */
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
					   struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return __blkg_lookup(blkcg, q, false);
}

/**
 * blk_queue_root_blkg - return blkg for the (blkcg_root, @q) pair
 * @q: request_queue of interest
 *
 * Lookup blkg for @q at the root level. See also blkg_lookup().
 */
static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
{
	return q->root_blkg;
}

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
						     struct blkcg_policy *pol)
{
	return blkcg ? blkcg->cpd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}

static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
{
	return cpd ? cpd->blkcg : NULL;
}

extern void blkcg_destroy_blkgs(struct blkcg *blkcg);

/**
 * blkcg_pin_online - pin online state
 * @blkcg: blkcg of interest
 *
 * While pinned, a blkcg is kept online.  This is primarily used to
 * impedance-match blkg and cgwb lifetimes so that blkg doesn't go offline
 * while an associated cgwb is still active.
 */
static inline void blkcg_pin_online(struct blkcg *blkcg)
{
	refcount_inc(&blkcg->online_pin);
}

/**
 * blkcg_unpin_online - unpin online state
 * @blkcg: blkcg of interest
 *
 * This is primarily used to impedance-match blkg and cgwb lifetimes so
 * that blkg doesn't go offline while an associated cgwb is still active.
 * When this count goes to zero, all active cgwbs have finished so the
 * blkcg can continue destruction by calling blkcg_destroy_blkgs().
 */
static inline void blkcg_unpin_online(struct blkcg *blkcg)
{
	do {
		if (!refcount_dec_and_test(&blkcg->online_pin))
			break;
		blkcg_destroy_blkgs(blkcg);
		blkcg = blkcg_parent(blkcg);
	} while (blkcg);
}

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	return cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	percpu_ref_get(&blkg->refcnt);
}

/**
 * blkg_tryget - try and get a blkg reference
 * @blkg: blkg to get
 *
 * This is for use when doing an RCU lookup of the blkg.  We may be in the midst
 * of freeing this blkg, so we can only use it if the refcnt is not zero.
 */
static inline bool blkg_tryget(struct blkcg_gq *blkg)
{
	return blkg && percpu_ref_tryget(&blkg->refcnt);
}

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	percpu_ref_put(&blkg->refcnt);
}
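
/*
 * Illustrative sketch of the lookup/tryget/put pattern described above
 * (not a definitive recipe):
 *
 *	struct blkcg_gq *blkg;
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(blkcg, q);
 *	if (blkg && !blkg_tryget(blkg))
 *		blkg = NULL;
 *	rcu_read_unlock();
 *
 *	if (blkg) {
 *		// use blkg
 *		blkg_put(blkg);
 *	}
 */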

/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip subtree.
 * @p_blkg is included in the iteration and the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))
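
/*
 * Illustrative sketch (not part of this header): walking every blkg hanging
 * off a queue's root blkg under RCU:
 *
 *	struct cgroup_subsys_state *pos_css;
 *	struct blkcg_gq *blkg;
 *
 *	rcu_read_lock();
 *	blkg_for_each_descendant_pre(blkg, pos_css, q->root_blkg) {
 *		// visit blkg
 *	}
 *	rcu_read_unlock();
 */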

/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

#ifdef CONFIG_BLK_DEV_THROTTLING
extern bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
			   struct bio *bio);
#else
static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
				  struct bio *bio) { return false; }
#endif

bool __blkcg_punt_bio_submit(struct bio *bio);

static inline bool blkcg_punt_bio_submit(struct bio *bio)
{
	if (bio->bi_opf & REQ_CGROUP_PUNT)
		return __blkcg_punt_bio_submit(bio);
	else
		return false;
}

static inline void blkcg_bio_issue_init(struct bio *bio)
{
	bio_issue_init(&bio->bi_issue, bio_sectors(bio));
}

static inline bool blkcg_bio_issue_check(struct request_queue *q,
					 struct bio *bio)
{
	struct blkcg_gq *blkg = bio->bi_blkg;
	bool throtl = false;

	throtl = blk_throtl_bio(q, blkg, bio);
	if (!throtl) {
		struct blkg_iostat_set *bis;
		int rwd, cpu;

		if (op_is_discard(bio->bi_opf))
			rwd = BLKG_IOSTAT_DISCARD;
		else if (op_is_write(bio->bi_opf))
			rwd = BLKG_IOSTAT_WRITE;
		else
			rwd = BLKG_IOSTAT_READ;

		cpu = get_cpu();
		bis = per_cpu_ptr(blkg->iostat_cpu, cpu);
		u64_stats_update_begin(&bis->sync);

		/*
		 * If the bio is flagged with BIO_CGROUP_ACCT it means this is a
		 * split bio and we would have already accounted for the size of
		 * the bio.
		 */
		if (!bio_flagged(bio, BIO_CGROUP_ACCT)) {
			bio_set_flag(bio, BIO_CGROUP_ACCT);
			bis->cur.bytes[rwd] += bio->bi_iter.bi_size;
		}
		bis->cur.ios[rwd]++;

		u64_stats_update_end(&bis->sync);
		if (cgroup_subsys_on_dfl(io_cgrp_subsys))
			cgroup_rstat_updated(blkg->blkcg->css.cgroup, cpu);
		put_cpu();
	}

	blkcg_bio_issue_init(bio);

	return !throtl;
}

static inline void blkcg_use_delay(struct blkcg_gq *blkg)
{
	if (WARN_ON_ONCE(atomic_read(&blkg->use_delay) < 0))
		return;
	if (atomic_add_return(1, &blkg->use_delay) == 1)
		atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);
}

static inline int blkcg_unuse_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	if (WARN_ON_ONCE(old < 0))
		return 0;
	if (old == 0)
		return 0;

	/*
	 * We do this song and dance because we can race with somebody else
	 * adding or removing delay.  If we just did an atomic_dec we'd end up
	 * negative and we'd already be in trouble.  We need to subtract 1 and
	 * then check to see if we were the last delay so we can drop the
	 * congestion count on the cgroup.
	 */
	while (old) {
		int cur = atomic_cmpxchg(&blkg->use_delay, old, old - 1);
		if (cur == old)
			break;
		old = cur;
	}

	if (old == 0)
		return 0;
	if (old == 1)
		atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
	return 1;
}

/**
 * blkcg_set_delay - Enable allocator delay mechanism with the specified delay amount
 * @blkg: target blkg
 * @delay: delay duration in nsecs
 *
 * When enabled with this function, the delay is not decayed and must be
 * explicitly cleared with blkcg_clear_delay(). Must not be mixed with
 * blkcg_[un]use_delay() and blkcg_add_delay() usages.
 */
static inline void blkcg_set_delay(struct blkcg_gq *blkg, u64 delay)
{
	int old = atomic_read(&blkg->use_delay);

	/* We only want 1 person setting the congestion count for this blkg. */
	if (!old && atomic_cmpxchg(&blkg->use_delay, old, -1) == old)
		atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);

	atomic64_set(&blkg->delay_nsec, delay);
}

/**
 * blkcg_clear_delay - Disable allocator delay mechanism
 * @blkg: target blkg
 *
 * Disable use_delay mechanism. See blkcg_set_delay().
 */
static inline void blkcg_clear_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	/* We only want 1 person clearing the congestion count for this blkg. */
	if (old && atomic_cmpxchg(&blkg->use_delay, old, 0) == old)
		atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
}

void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta);
void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay);
void blkcg_maybe_throttle_current(void);
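
/*
 * Usage sketch (illustrative only; the behaviour summarised here lives in
 * blk-cgroup.c): a policy that detects congestion typically charges extra
 * delay against the blkg and asks the core to throttle the issuing task,
 * e.g.
 *
 *	blkcg_add_delay(blkg, now, extra_delay_nsec);
 *	blkcg_schedule_throttle(q, false);	// second arg: use_memdelay
 *
 * The accumulated delay is then applied when the task returns to userspace
 * via blkcg_maybe_throttle_current().
 */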
#else	/* CONFIG_BLK_CGROUP */

struct blkcg {
};

struct blkg_policy_data {
};

struct blkcg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

#define blkcg_root_css	((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))

static inline void blkcg_maybe_throttle_current(void) { }
static inline bool blk_cgroup_congested(void) { return false; }

#ifdef CONFIG_BLOCK

static inline void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay) { }

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
					   struct request_queue *q) { return NULL; }
static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
{ return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }

static inline struct blkcg *__bio_blkcg(struct bio *bio) { return NULL; }
static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline bool blkcg_punt_bio_submit(struct bio *bio) { return false; }
static inline void blkcg_bio_issue_init(struct bio *bio) { }
static inline bool blkcg_bio_issue_check(struct request_queue *q,
					 struct bio *bio) { return true; }

#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLOCK */
#endif	/* CONFIG_BLK_CGROUP */
#endif	/* _BLK_CGROUP_H */