Commit e43473b7 authored by Vivek Goyal, committed by Jens Axboe

blkio: Core implementation of throttle policy

o Actual implementation of throttling policy in block layer. Currently it
  implements READ and WRITE bytes per second throttling logic. IOPS throttling
  comes in later patches.
Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
Parent 4c9eefa1
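The core idea of bytes-per-second throttling can be pictured with a small self-contained sketch: over an accounting slice, a group may dispatch at most rate times elapsed-time bytes, and a bio that would overshoot that budget has to wait. Everything below is illustrative; the names and the user-space framing are assumptions, not code from this patch (the real implementation is in the collapsed blk-throttle.c diff further down).

```c
/*
 * Illustrative sketch only, not the patch's code. It shows the basic
 * bytes-per-second accounting idea behind the throttle policy.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct sketch_tg {
	uint64_t bps;         /* allowed bytes per second for one direction */
	uint64_t bytes_disp;  /* bytes already dispatched in the current slice */
	uint64_t slice_start; /* slice start time, in jiffies */
};

#define SKETCH_HZ 1000 /* assumed jiffies per second */

/* May 'bio_size' more bytes be dispatched at time 'now' (in jiffies)? */
static bool sketch_may_dispatch(const struct sketch_tg *tg, uint64_t now,
				uint64_t bio_size)
{
	uint64_t elapsed = now - tg->slice_start;
	/* Budget accrued so far in this slice at the configured rate. */
	uint64_t bytes_allowed = tg->bps * elapsed / SKETCH_HZ;

	return tg->bytes_disp + bio_size <= bytes_allowed;
}

int main(void)
{
	/* 1 MB/s group, slice started at jiffy 0, nothing dispatched yet. */
	struct sketch_tg tg = { 1024 * 1024, 0, 0 };

	/* After 100 jiffies (0.1 s) the budget is ~104857 bytes... */
	printf("4KB at t=100: %d\n", sketch_may_dispatch(&tg, 100, 4096));    /* 1 */
	/* ...so a 1 MB bio exceeds it and must be deferred. */
	printf("1MB at t=100: %d\n", sketch_may_dispatch(&tg, 100, 1 << 20)); /* 0 */
	return 0;
}
```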
@@ -77,6 +77,18 @@ config BLK_DEV_INTEGRITY
T10/SCSI Data Integrity Field or the T13/ATA External Path
Protection. If in doubt, say N.
config BLK_DEV_THROTTLING
bool "Block layer bio throttling support"
depends on BLK_CGROUP=y && EXPERIMENTAL
default n
---help---
Block layer bio throttling support. It can be used to limit
the IO rate to a device. IO rate policies are per cgroup and
one needs to mount and use blkio cgroup controller for creating
cgroups and specifying per device IO rate policies.
See Documentation/cgroups/blkio-controller.txt for more information.
endif # BLOCK

config BLOCK_COMPAT
...
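The help text above describes the admin-side workflow: mount the blkio cgroup controller, create a cgroup, and write per-device rate rules into it. As a hypothetical illustration only, the rule format ("major:minor bytes_per_second"), the blkio.throttle.read_bps_device file name, and the mount point are assumptions based on the blkio-controller documentation the help text references:

```c
/*
 * Hypothetical usage sketch: write a "major:minor bytes_per_second"
 * rule into a blkio cgroup's throttle file. Path and file name are
 * assumptions based on Documentation/cgroups/blkio-controller.txt.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/cgroup/blkio/test/blkio.throttle.read_bps_device", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* Limit READs on device 8:16 to 1 MB/s. */
	fprintf(f, "8:16 1048576\n");
	fclose(f);
	return 0;
}
```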
@@ -9,6 +9,7 @@ obj-$(CONFIG_BLOCK) := elevator.o blk-core.o blk-tag.o blk-sysfs.o \
obj-$(CONFIG_BLK_DEV_BSG) += bsg.o
obj-$(CONFIG_BLK_CGROUP) += blk-cgroup.o
obj-$(CONFIG_BLK_DEV_THROTTLING) += blk-throttle.o
obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o
obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o
obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o
...
@@ -382,6 +382,7 @@ void blk_sync_queue(struct request_queue *q)
del_timer_sync(&q->unplug_timer);
del_timer_sync(&q->timeout);
cancel_work_sync(&q->unplug_work);
throtl_shutdown_timer_wq(q);
}
EXPORT_SYMBOL(blk_sync_queue);

@@ -459,6 +460,8 @@ void blk_cleanup_queue(struct request_queue *q)
if (q->elevator)
elevator_exit(q->elevator);
blk_throtl_exit(q);
blk_put_queue(q);
}
EXPORT_SYMBOL(blk_cleanup_queue);

@@ -515,6 +518,11 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
return NULL;
}
if (blk_throtl_init(q)) {
kmem_cache_free(blk_requestq_cachep, q);
return NULL;
}
setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
laptop_mode_timer_fn, (unsigned long) q);
init_timer(&q->unplug_timer);

@@ -1522,6 +1530,15 @@ static inline void __generic_make_request(struct bio *bio)
goto end_io;
}
blk_throtl_bio(q, &bio);
/*
* If bio is NULL, the bio has been throttled and will be submitted
* later.
*/
if (!bio)
break;
trace_block_bio_queue(q, bio);
ret = q->make_request_fn(q, bio);
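The hunk above encodes the calling convention of blk_throtl_bio(): the hook takes a struct bio ** and, when it queues the bio for later dispatch, sets the caller's pointer to NULL so the submission loop stops touching it. A runnable user-space sketch of that ownership-transfer pattern (all names besides the pattern itself are made up):

```c
/*
 * User-space sketch of the *bio contract, not the patch's code: the
 * throttle hook either lets the caller submit the bio, or takes
 * ownership of it and signals that by NULLing the caller's pointer.
 */
#include <stddef.h>
#include <stdio.h>

struct fake_bio { unsigned int size; };

/* Stand-in for the budget check; always "over budget" here. */
static int over_budget(const struct fake_bio *bio)
{
	return bio->size > 0;
}

static void queue_for_later(struct fake_bio *bio)
{
	printf("queued bio of %u bytes for later dispatch\n", bio->size);
}

static void throtl_hook(struct fake_bio **bio_ptr)
{
	if (!over_budget(*bio_ptr))
		return;                 /* caller submits it immediately */
	queue_for_later(*bio_ptr);      /* we own the bio now */
	*bio_ptr = NULL;                /* caller must not touch it again */
}

int main(void)
{
	struct fake_bio b = { 4096 };
	struct fake_bio *bio = &b;

	throtl_hook(&bio);
	if (!bio)
		printf("bio was throttled; submission deferred\n");
	return 0;
}
```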
@@ -2580,6 +2597,13 @@ int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
}
EXPORT_SYMBOL(kblockd_schedule_work);
int kblockd_schedule_delayed_work(struct request_queue *q,
struct delayed_work *dwork, unsigned long delay)
{
return queue_delayed_work(kblockd_workqueue, dwork, delay);
}
EXPORT_SYMBOL(kblockd_schedule_delayed_work);
int __init blk_dev_init(void)
{
BUILD_BUG_ON(__REQ_NR_BITS > 8 *
...
This diff has been collapsed. (Given the Makefile hunk above, this is presumably the new block/blk-throttle.c implementation.)
@@ -130,6 +130,8 @@ enum rq_flag_bits {
/* bio only flags */
__REQ_UNPLUG, /* unplug immediately after submission */
__REQ_RAHEAD, /* read ahead, can fail anytime */
__REQ_THROTTLED, /* This bio has already been subjected to
* throttling rules. Don't do it again. */
/* request only flags */
__REQ_SORTED, /* elevator knows about this request */

@@ -172,6 +174,7 @@ enum rq_flag_bits {
#define REQ_UNPLUG (1 << __REQ_UNPLUG)
#define REQ_RAHEAD (1 << __REQ_RAHEAD)
#define REQ_THROTTLED (1 << __REQ_THROTTLED)
#define REQ_SORTED (1 << __REQ_SORTED)
#define REQ_SOFTBARRIER (1 << __REQ_SOFTBARRIER)
...
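The REQ_THROTTLED comment above implies a two-pass lifecycle: the throttle layer marks a bio when it charges and dispatches it, and the mark tells the hook not to charge the same bio again when it re-enters the submission path. A self-contained sketch of that guard (only the REQ_THROTTLED idea comes from the patch; everything else here is hypothetical):

```c
/*
 * Sketch of the REQ_THROTTLED guard, not the patch's code: a bio the
 * throttle layer has already charged and re-submitted must not be
 * charged a second time when it passes through the hook again.
 */
#include <stdio.h>

#define SK_REQ_THROTTLED (1u << 0) /* stand-in for REQ_THROTTLED */

struct fake_bio { unsigned int rw_flags; };

static void throtl_hook(struct fake_bio *bio)
{
	if (bio->rw_flags & SK_REQ_THROTTLED) {
		/* Second pass: already throttled once; clear and let through. */
		bio->rw_flags &= ~SK_REQ_THROTTLED;
		printf("pass through, no double charge\n");
		return;
	}
	/* First pass: charge the group and mark the bio. */
	bio->rw_flags |= SK_REQ_THROTTLED;
	printf("charged and marked\n");
}

int main(void)
{
	struct fake_bio b = { 0 };

	throtl_hook(&b); /* first pass: charged and marked */
	throtl_hook(&b); /* second pass: passes through untouched */
	return 0;
}
```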
@@ -371,6 +371,11 @@ struct request_queue
#if defined(CONFIG_BLK_DEV_BSG)
struct bsg_class_device bsg_dev;
#endif
#ifdef CONFIG_BLK_DEV_THROTTLING
/* Throttle data */
struct throtl_data *td;
#endif
};

#define QUEUE_FLAG_CLUSTER 0 /* cluster several segments into 1 */

@@ -1131,6 +1136,7 @@ static inline void put_dev_sector(Sector p)
struct work_struct;
int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
int kblockd_schedule_delayed_work(struct request_queue *q, struct delayed_work *dwork, unsigned long delay);
#ifdef CONFIG_BLK_CGROUP
/*

@@ -1174,6 +1180,24 @@ static inline uint64_t rq_io_start_time_ns(struct request *req)
}
#endif
#ifdef CONFIG_BLK_DEV_THROTTLING
extern int blk_throtl_init(struct request_queue *q);
extern void blk_throtl_exit(struct request_queue *q);
extern int blk_throtl_bio(struct request_queue *q, struct bio **bio);
extern void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay);
extern void throtl_shutdown_timer_wq(struct request_queue *q);
#else /* CONFIG_BLK_DEV_THROTTLING */
static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio)
{
return 0;
}
static inline int blk_throtl_init(struct request_queue *q) { return 0; }
static inline void blk_throtl_exit(struct request_queue *q) {}
static inline void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay) {}
static inline void throtl_shutdown_timer_wq(struct request_queue *q) {}
#endif /* CONFIG_BLK_DEV_THROTTLING */
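The no-op inline stubs above let blk-core.c call the throttle hooks unconditionally, as in the blk_sync_queue, blk_cleanup_queue, and blk_alloc_queue_node hunks earlier, keeping #ifdef CONFIG_BLK_DEV_THROTTLING out of the block-layer hot paths.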
#define MODULE_ALIAS_BLOCKDEV(major,minor) \
MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
...

@@ -634,11 +634,14 @@ config BLK_CGROUP
Currently, CFQ IO scheduler uses it to recognize task groups and
control disk bandwidth allocation (proportional time slice allocation)
to such task groups. It is also used by bio throttling logic in
block layer to implement upper limit in IO rates on a device.

This option only enables generic Block IO controller infrastructure.
One needs to also enable actual IO controlling logic/policy. For
enabling proportional weight division of disk bandwidth in CFQ set
CONFIG_CFQ_GROUP_IOSCHED=y and for enabling throttling policy set
CONFIG_BLK_DEV_THROTTLING=y.
See Documentation/cgroups/blkio-controller.txt for more information.
...