/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 * 	              Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/percpu.h>
#include <linux/percpu_counter.h>
#include <linux/u64_stats_sync.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
#include <linux/atomic.h>
#include <linux/kthread.h>
#include <linux/fs.h>

/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
#define BLKG_STAT_CPU_BATCH	(INT_MAX / 2)

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

#ifdef CONFIG_BLK_CGROUP

enum blkg_iostat_type {
	BLKG_IOSTAT_READ,
	BLKG_IOSTAT_WRITE,
	BLKG_IOSTAT_DISCARD,

	BLKG_IOSTAT_NR,
};

struct blkcg_gq;

struct blkcg {
	struct cgroup_subsys_state	css;
	spinlock_t			lock;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq	__rcu		*blkg_hint;
	struct hlist_head		blkg_list;

	struct blkcg_policy_data	*cpd[BLKCG_MAX_POLS];

	struct list_head		all_blkcgs_node;
#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head		cgwb_list;
	refcount_t			cgwb_refcnt;
#endif
};

struct blkg_iostat {
	u64				bytes[BLKG_IOSTAT_NR];
	u64				ios[BLKG_IOSTAT_NR];
};

struct blkg_iostat_set {
	struct u64_stats_sync		sync;
	struct blkg_iostat		cur;
	struct blkg_iostat		last;
};
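
/*
 * Readers are expected to sample a blkg_iostat_set with the usual
 * u64_stats retry loop so that the 64bit counters are seen consistently
 * on 32bit machines.  Illustrative sketch only; @bis here is assumed to
 * come from per_cpu_ptr(blkg->iostat_cpu, cpu):
 *
 *	unsigned int seq;
 *	u64 rbytes;
 *
 *	do {
 *		seq = u64_stats_fetch_begin(&bis->sync);
 *		rbytes = bis->cur.bytes[BLKG_IOSTAT_READ];
 *	} while (u64_stats_fetch_retry(&bis->sync, seq));
 */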

/*
 * A blkcg_gq (blkg) is the association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each blkg:policy pair is
 * represented by a blkg_policy_data which is allocated and freed by each
 * policy's pd_alloc/free_fn() methods.  A policy can allocate a private
 * data area by allocating a larger data structure which embeds
 * blkg_policy_data at the beginning.
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq			*blkg;
	int				plid;
};

/*
 * Policies that need to keep per-blkcg data which is independent of any
 * request_queue associated with it should implement cpd_alloc/free_fn()
 * methods.  A policy can allocate a private data area by allocating a
 * larger data structure which embeds blkcg_policy_data at the beginning.
 * cpd_init() is invoked to let each policy handle per-blkcg data.
 */
struct blkcg_policy_data {
	/* the blkcg and policy id this per-policy data belongs to */
	struct blkcg			*blkcg;
	int				plid;
};
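
/*
 * Illustrative sketch of the embedding convention described above (the
 * "foo" names are hypothetical): a policy wraps blkg_policy_data at the
 * start of its own per-blkg structure and converts back with
 * container_of(), mirroring what pd_to_blkg() does for the reverse
 * direction:
 *
 *	struct foo_grp {
 *		struct blkg_policy_data	pd;	(must be the first member)
 *		u64			dispatched;
 *	};
 *
 *	static inline struct foo_grp *pd_to_foo(struct blkg_policy_data *pd)
 *	{
 *		return pd ? container_of(pd, struct foo_grp, pd) : NULL;
 *	}
 */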

/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;

	/*
	 * Each blkg gets congested separately and the congestion state is
	 * propagated to the matching bdi_writeback_congested.
	 */
	struct bdi_writeback_congested	*wb_congested;

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq			*parent;

	/* reference count */
	struct percpu_ref		refcnt;

	/* is this blkg online? protected by both blkcg and q locks */
	bool				online;

	struct blkg_iostat_set __percpu	*iostat_cpu;
	struct blkg_iostat_set		iostat;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];

	spinlock_t			async_bio_lock;
	struct bio_list			async_bios;
	struct work_struct		async_bio_work;

	atomic_t			use_delay;
	atomic64_t			delay_nsec;
	atomic64_t			delay_start;
	u64				last_delay;
	int				last_use;

	struct rcu_head			rcu_head;
};

typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd);
typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp,
				struct request_queue *q, struct blkcg *blkcg);
typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
typedef size_t (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd, char *buf,
				      size_t size);

struct blkcg_policy {
	int				plid;
	/* cgroup files for the policy */
	struct cftype			*dfl_cftypes;
	struct cftype			*legacy_cftypes;

	/* operations */
	blkcg_pol_alloc_cpd_fn		*cpd_alloc_fn;
	blkcg_pol_init_cpd_fn		*cpd_init_fn;
	blkcg_pol_free_cpd_fn		*cpd_free_fn;
	blkcg_pol_bind_cpd_fn		*cpd_bind_fn;

	blkcg_pol_alloc_pd_fn		*pd_alloc_fn;
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_online_pd_fn		*pd_online_fn;
	blkcg_pol_offline_pd_fn		*pd_offline_fn;
	blkcg_pol_free_pd_fn		*pd_free_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
	blkcg_pol_stat_pd_fn		*pd_stat_fn;
};
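
/*
 * Sketch of how a policy wires itself up, modeled on existing policies
 * such as blk-throttle; the "foo" identifiers are hypothetical:
 *
 *	static struct blkcg_policy blkcg_policy_foo = {
 *		.dfl_cftypes	= foo_files,
 *		.pd_alloc_fn	= foo_pd_alloc,
 *		.pd_init_fn	= foo_pd_init,
 *		.pd_free_fn	= foo_pd_free,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return blkcg_policy_register(&blkcg_policy_foo);
 *	}
 *
 * blkcg_policy_register() assigns ->plid; enabling the policy on a given
 * queue is a separate step done through blkcg_activate_policy().
 */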

extern struct blkcg blkcg_root;
extern struct cgroup_subsys_state * const blkcg_root_css;
extern bool blkcg_debug_stats;

struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
				      struct request_queue *q, bool update_hint);
struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
				      struct request_queue *q);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);

const char *blkg_dev_name(struct blkcg_gq *blkg);
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);

struct blkg_conf_ctx {
	struct gendisk			*disk;
	struct blkcg_gq			*blkg;
	char				*body;
};

struct gendisk *blkcg_conf_get_disk(char **inputp);
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);
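
/*
 * Typical shape of a per-device configuration writer (sketch only; the
 * "foo" names are hypothetical and error handling is trimmed).
 * blkg_conf_prep() parses the "MAJ:MIN" prefix of @input, pins the blkg
 * and returns with the queue lock held; blkg_conf_finish() undoes all of
 * that:
 *
 *	ret = blkg_conf_prep(blkcg, &blkcg_policy_foo, buf, &ctx);
 *	if (ret)
 *		return ret;
 *	foo = blkg_to_pd(ctx.blkg, &blkcg_policy_foo);
 *	(parse ctx.body and update foo)
 *	blkg_conf_finish(&ctx);
 */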

/**
 * blkcg_css - find the current css
 *
 * Find the css associated with either the kthread or the current task.
 * This may return a dying css, so it is up to the caller to use tryget logic
 * to confirm it is alive and well.
 */
static inline struct cgroup_subsys_state *blkcg_css(void)
{
	struct cgroup_subsys_state *css;

	css = kthread_blkcg();
	if (css)
		return css;
	return task_css(current, io_cgrp_id);
}

static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct blkcg, css) : NULL;
}

/**
 * __bio_blkcg - internal, inconsistent version to get blkcg
 *
 * DO NOT USE.
 * This function is inconsistent and consequently is dangerous to use.  The
 * first part of the function returns a blkcg where a reference is owned by the
 * bio.  This means it does not need to be rcu protected as it cannot go away
 * with the bio owning a reference to it.  However, the latter potentially gets
 * it from task_css().  This can race against task migration and the cgroup
 * dying.  It is also semantically different as it must be called rcu protected
 * and is susceptible to failure when trying to get a reference to it.
 * Therefore, it is not ok to assume that *_get() will always succeed on the
 * blkcg returned here.
 */
static inline struct blkcg *__bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_blkg)
		return bio->bi_blkg->blkcg;
	return css_to_blkcg(blkcg_css());
}

/**
 * bio_blkcg - grab the blkcg associated with a bio
 * @bio: target bio
 *
 * This returns the blkcg associated with a bio, %NULL if not associated.
 * Callers are expected to either handle %NULL or know association has been
 * done prior to calling this.
 */
static inline struct blkcg *bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_blkg)
		return bio->bi_blkg->blkcg;
	return NULL;
}

static inline bool blk_cgroup_congested(void)
{
	struct cgroup_subsys_state *css;
	bool ret = false;

	rcu_read_lock();
	css = kthread_blkcg();
	if (!css)
		css = task_css(current, io_cgrp_id);
	while (css) {
		if (atomic_read(&css->cgroup->congestion_count)) {
			ret = true;
			break;
		}
		css = css->parent;
	}
	rcu_read_unlock();
	return ret;
}

/**
 * bio_issue_as_root_blkg - see if this bio needs to be issued as root blkg
 * @return: true if this bio needs to be submitted with the root blkg context.
 *
 * In order to avoid priority inversions we sometimes need to issue a bio as if
 * it were attached to the root blkg, and then backcharge to the actual owning
 * blkg.  The idea is we do bio_blkcg() to look up the actual context for the
 * bio and attach the appropriate blkg to the bio.  Then we call this helper and
 * if it is true run with the root blkg for that queue and then do any
 * backcharging to the originating cgroup once the io is complete.
 */
static inline bool bio_issue_as_root_blkg(struct bio *bio)
{
	return (bio->bi_opf & (REQ_META | REQ_SWAP)) != 0;
}

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg.  Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
	return css_to_blkcg(blkcg->css.parent);
}

/**
 * __blkg_lookup - internal version of blkg_lookup()
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 * @update_hint: whether to update lookup hint with the result or not
 *
 * This is the internal version and shouldn't be used by policy
 * implementations.  Looks up blkgs for the @blkcg - @q pair regardless of
 * @q's bypass state.  If @update_hint is %true, the caller should be
 * holding @q->queue_lock and lookup hint is updated on success.
 */
static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
					     struct request_queue *q,
					     bool update_hint)
{
	struct blkcg_gq *blkg;

	if (blkcg == &blkcg_root)
		return q->root_blkg;

	blkg = rcu_dereference(blkcg->blkg_hint);
	if (blkg && blkg->q == q)
		return blkg;

	return blkg_lookup_slowpath(blkcg, q, update_hint);
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  This function should be called
 * under RCU read lock.
 */
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
					   struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return __blkg_lookup(blkcg, q, false);
}
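
/*
 * Illustrative calling pattern: the returned blkg is only valid inside
 * the RCU section unless a reference is taken, e.g.:
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(blkcg, q);
 *	if (blkg && blkg_tryget(blkg)) {
 *		(blkg may now be used beyond the RCU section)
 *	}
 *	rcu_read_unlock();
 */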

/**
 * blk_queue_root_blkg - return blkg for the (blkcg_root, @q) pair
 * @q: request_queue of interest
 *
 * Lookup blkg for @q at the root level. See also blkg_lookup().
 */
static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
{
	return q->root_blkg;
}

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
						     struct blkcg_policy *pol)
{
	return blkcg ? blkcg->cpd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}

static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
{
	return cpd ? cpd->blkcg : NULL;
}

extern void blkcg_destroy_blkgs(struct blkcg *blkcg);

#ifdef CONFIG_CGROUP_WRITEBACK

/**
 * blkcg_cgwb_get - get a reference for blkcg->cgwb_list
 * @blkcg: blkcg of interest
 *
 * This is used to track the number of active wb's related to a blkcg.
 */
static inline void blkcg_cgwb_get(struct blkcg *blkcg)
{
	refcount_inc(&blkcg->cgwb_refcnt);
}

/**
 * blkcg_cgwb_put - put a reference for @blkcg->cgwb_list
 * @blkcg: blkcg of interest
 *
 * This is used to track the number of active wb's related to a blkcg.
 * When this count goes to zero, all active wb's have finished so the
 * blkcg can continue destruction by calling blkcg_destroy_blkgs().
 * This work may occur in cgwb_release_workfn() on the cgwb_release
 * workqueue.
 */
static inline void blkcg_cgwb_put(struct blkcg *blkcg)
{
	if (refcount_dec_and_test(&blkcg->cgwb_refcnt))
		blkcg_destroy_blkgs(blkcg);
}

#else

static inline void blkcg_cgwb_get(struct blkcg *blkcg) { }

static inline void blkcg_cgwb_put(struct blkcg *blkcg)
{
	/* wb isn't being accounted, so trigger destruction right away */
	blkcg_destroy_blkgs(blkcg);
}

#endif

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	return cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	percpu_ref_get(&blkg->refcnt);
}

/**
 * blkg_tryget - try and get a blkg reference
 * @blkg: blkg to get
 *
 * This is for use when doing an RCU lookup of the blkg.  We may be in the midst
 * of freeing this blkg, so we can only use it if the refcnt is not zero.
 */
static inline bool blkg_tryget(struct blkcg_gq *blkg)
{
	return blkg && percpu_ref_tryget(&blkg->refcnt);
}

/**
 * blkg_tryget_closest - try and get a blkg ref on the closest blkg
 * @blkg: blkg to get
 *
 * This needs to be called rcu protected.  As the failure mode here is to walk
 * up the blkg tree, this ensures that the blkg->parent pointers are always
 * valid.  This returns the blkg that it ended up taking a reference on or %NULL
 * if no reference was taken.
 */
static inline struct blkcg_gq *blkg_tryget_closest(struct blkcg_gq *blkg)
{
	struct blkcg_gq *ret_blkg = NULL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	while (blkg) {
		if (blkg_tryget(blkg)) {
			ret_blkg = blkg;
			break;
		}
		blkg = blkg->parent;
	}

	return ret_blkg;
}
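
/*
 * Sketch of the association pattern this helper exists for (cf. the bio
 * association helpers in block/bio.c); @candidate_blkg is a hypothetical
 * starting point:
 *
 *	rcu_read_lock();
 *	blkg = blkg_tryget_closest(candidate_blkg);
 *	if (blkg)
 *		bio->bi_blkg = blkg;	(the reference is now owned by the bio)
 *	rcu_read_unlock();
 */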

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	percpu_ref_put(&blkg->refcnt);
}

/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip subtree.
 * @p_blkg is included in the iteration and the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))
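
/*
 * Usage sketch: walking a subtree to update per-policy state, e.g. when
 * propagating a configuration change (hypothetical loop body):
 *
 *	struct cgroup_subsys_state *pos_css;
 *	struct blkcg_gq *blkg;
 *
 *	rcu_read_lock();
 *	blkg_for_each_descendant_pre(blkg, pos_css, parent_blkg) {
 *		struct blkg_policy_data *pd = blkg->pd[pol->plid];
 *
 *		if (pd)
 *			(update pd; blkg may be offline unless a lock is held)
 *	}
 *	rcu_read_unlock();
 */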

/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

#ifdef CONFIG_BLK_DEV_THROTTLING
extern bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
			   struct bio *bio);
#else
static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
				  struct bio *bio) { return false; }
#endif

bool __blkcg_punt_bio_submit(struct bio *bio);

static inline bool blkcg_punt_bio_submit(struct bio *bio)
{
	if (bio->bi_opf & REQ_CGROUP_PUNT)
		return __blkcg_punt_bio_submit(bio);
	else
		return false;
}
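
/*
 * Submission-path usage (sketch): the bio submission code bails out early
 * when a bio is punted to the per-blkg async_bios list:
 *
 *	if (blkcg_punt_bio_submit(bio))
 *		return;
 *	(otherwise continue issuing the bio synchronously)
 */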

static inline void blkcg_bio_issue_init(struct bio *bio)
{
	bio_issue_init(&bio->bi_issue, bio_sectors(bio));
}

static inline bool blkcg_bio_issue_check(struct request_queue *q,
					 struct bio *bio)
{
	struct blkcg_gq *blkg;
	bool throtl = false;

	rcu_read_lock();

	if (!bio->bi_blkg) {
		char b[BDEVNAME_SIZE];

		WARN_ONCE(1,
			  "no blkg associated for bio on block-device: %s\n",
			  bio_devname(bio, b));
		bio_associate_blkg(bio);
	}

	blkg = bio->bi_blkg;

	throtl = blk_throtl_bio(q, blkg, bio);

	if (!throtl) {
		struct blkg_iostat_set *bis;
		int rwd, cpu;

		if (op_is_discard(bio->bi_opf))
			rwd = BLKG_IOSTAT_DISCARD;
		else if (op_is_write(bio->bi_opf))
			rwd = BLKG_IOSTAT_WRITE;
		else
			rwd = BLKG_IOSTAT_READ;

		cpu = get_cpu();
		bis = per_cpu_ptr(blkg->iostat_cpu, cpu);
		u64_stats_update_begin(&bis->sync);

		/*
		 * If the bio is flagged with BIO_QUEUE_ENTERED it means this
		 * is a split bio and we would have already accounted for the
		 * size of the bio.
		 */
		if (!bio_flagged(bio, BIO_QUEUE_ENTERED))
			bis->cur.bytes[rwd] += bio->bi_iter.bi_size;
		bis->cur.ios[rwd]++;

		u64_stats_update_end(&bis->sync);
		cgroup_rstat_updated(blkg->blkcg->css.cgroup, cpu);
		put_cpu();
	}

	blkcg_bio_issue_init(bio);

	rcu_read_unlock();
	return !throtl;
}

static inline void blkcg_use_delay(struct blkcg_gq *blkg)
{
	if (atomic_add_return(1, &blkg->use_delay) == 1)
		atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);
}

static inline int blkcg_unuse_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	if (old == 0)
		return 0;

	/*
	 * We do this song and dance because we can race with somebody else
	 * adding or removing delay.  If we just did an atomic_dec we'd end up
	 * negative and we'd already be in trouble.  We need to subtract 1 and
	 * then check to see if we were the last delay so we can drop the
	 * congestion count on the cgroup.
	 */
	while (old) {
		int cur = atomic_cmpxchg(&blkg->use_delay, old, old - 1);
		if (cur == old)
			break;
		old = cur;
	}

	if (old == 0)
		return 0;
	if (old == 1)
		atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
	return 1;
}

static inline void blkcg_clear_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);
	if (!old)
		return;
	/* We only want 1 person clearing the congestion count for this blkg. */
	while (old) {
		int cur = atomic_cmpxchg(&blkg->use_delay, old, 0);
		if (cur == old) {
			atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
			break;
		}
		old = cur;
	}
}

void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta);
void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay);
void blkcg_maybe_throttle_current(void);
#else	/* CONFIG_BLK_CGROUP */

struct blkcg {
};

struct blkg_policy_data {
};

struct blkcg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

#define blkcg_root_css	((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))

static inline void blkcg_maybe_throttle_current(void) { }
static inline bool blk_cgroup_congested(void) { return false; }

#ifdef CONFIG_BLOCK

static inline void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay) { }

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
					   struct request_queue *q) { return NULL; }
static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
{ return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }

static inline struct blkcg *__bio_blkcg(struct bio *bio) { return NULL; }
static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen) { return 0; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline bool blkcg_punt_bio_submit(struct bio *bio) { return false; }
static inline void blkcg_bio_issue_init(struct bio *bio) { }
static inline bool blkcg_bio_issue_check(struct request_queue *q,
					 struct bio *bio) { return true; }

#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLOCK */
#endif	/* CONFIG_BLK_CGROUP */
#endif	/* _BLK_CGROUP_H */