Commit 0d167518 authored by Linus Torvalds

Merge branch 'for-3.5/core' of git://git.kernel.dk/linux-block

Merge block/IO core bits from Jens Axboe:
 "This is a bit bigger on the core side than usual, but that is purely
  because we decided to hold off on parts of Tejun's submission on 3.4
  to give it a bit more time to simmer.  As a consequence, it's seen a
  long cycle in for-next.

  It contains:

   - Bug fix from Dan, wrong locking type.
   - Relax splice gifting restriction from Eric.
   - A ton of updates from Tejun, primarily for blkcg.  This improves
     the code a lot, making the API nicer and cleaner, and also includes
     fixes for how we handle and tie policies and re-activate on
     switches.  The changes also include generic bug fixes.
   - A simple fix from Vivek, along with a fix for doing proper delayed
     allocation of the blkcg stats."

Fix up annoying conflict just due to different merge resolution in
Documentation/feature-removal-schedule.txt

* 'for-3.5/core' of git://git.kernel.dk/linux-block: (92 commits)
  blkcg: tg_stats_alloc_lock is an irq lock
  vmsplice: relax alignement requirements for SPLICE_F_GIFT
  blkcg: use radix tree to index blkgs from blkcg
  blkcg: fix blkcg->css ref leak in __blkg_lookup_create()
  block: fix elvpriv allocation failure handling
  block: collapse blk_alloc_request() into get_request()
  blkcg: collapse blkcg_policy_ops into blkcg_policy
  blkcg: embed struct blkg_policy_data in policy specific data
  blkcg: mass rename of blkcg API
  blkcg: style cleanups for blk-cgroup.h
  blkcg: remove blkio_group->path[]
  blkcg: blkg_rwstat_read() was missing inline
  blkcg: shoot down blkgs if all policies are deactivated
  blkcg: drop stuff unused after per-queue policy activation update
  blkcg: implement per-queue policy activation
  blkcg: add request_queue->root_blkg
  blkcg: make request_queue bypassing on allocation
  blkcg: make sure blkg_lookup() returns %NULL if @q is bypassing
  blkcg: make blkg_conf_prep() take @pol and return with queue lock held
  blkcg: remove static policy ID enums
  ...
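For orientation, the overall shape of the reworked blkcg interface referenced in the shortlog above ("mass rename of blkcg API", "implement per-queue policy activation") is roughly the following. This is a hedged sketch, not code from the merge: blkcg_policy_register(), blkcg_activate_policy() and blkcg_deactivate_policy() come from this series, but the foo_* names and the elided policy-descriptor fields are purely illustrative.

#include "blk-cgroup.h"

static struct blkcg_policy blkcg_policy_foo;	/* pd_size, cftypes, pd_*_fn ... */

/* register the policy once, at elevator/module init time */
static int __init foo_iosched_init(void)
{
	return blkcg_policy_register(&blkcg_policy_foo);
}

/* enable/disable the policy per request_queue; the queue is bypassed
 * and drained while this happens */
static int foo_enable_on_queue(struct request_queue *q)
{
	return blkcg_activate_policy(q, &blkcg_policy_foo);
}

static void foo_disable_on_queue(struct request_queue *q)
{
	blkcg_deactivate_policy(q, &blkcg_policy_foo);
}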
block/Kconfig.iosched:

@@ -23,8 +23,6 @@ config IOSCHED_DEADLINE

 config IOSCHED_CFQ
 	tristate "CFQ I/O scheduler"
-	# If BLK_CGROUP is a module, CFQ has to be built as module.
-	depends on (BLK_CGROUP=m && m) || !BLK_CGROUP || BLK_CGROUP=y
 	default y
 	---help---
 	  The CFQ I/O scheduler tries to distribute bandwidth equally
@@ -34,8 +32,6 @@ config IOSCHED_CFQ

 	  This is the default I/O scheduler.

-	  Note: If BLK_CGROUP=m, then CFQ can be built only as module.
-
 config CFQ_GROUP_IOSCHED
 	bool "CFQ Group Scheduling support"
 	depends on IOSCHED_CFQ && BLK_CGROUP
......
This diff is collapsed.
This diff is collapsed.
block/blk-core.c:

@@ -29,11 +29,13 @@
 #include <linux/fault-inject.h>
 #include <linux/list_sort.h>
 #include <linux/delay.h>
+#include <linux/ratelimit.h>

 #define CREATE_TRACE_POINTS
 #include <trace/events/block.h>

 #include "blk.h"
+#include "blk-cgroup.h"

 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
@@ -280,7 +282,7 @@ EXPORT_SYMBOL(blk_stop_queue);
  *
  * This function does not cancel any asynchronous activity arising
  * out of elevator or throttling code. That would require elevaotor_exit()
- * and blk_throtl_exit() to be called with queue lock initialized.
+ * and blkcg_exit_queue() to be called with queue lock initialized.
  *
  */
 void blk_sync_queue(struct request_queue *q)
@@ -365,17 +367,23 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)

 		spin_lock_irq(q->queue_lock);

-		elv_drain_elevator(q);
-		if (drain_all)
-			blk_throtl_drain(q);
+		/*
+		 * The caller might be trying to drain @q before its
+		 * elevator is initialized.
+		 */
+		if (q->elevator)
+			elv_drain_elevator(q);
+
+		blkcg_drain_queue(q);

 		/*
 		 * This function might be called on a queue which failed
-		 * driver init after queue creation.  Some drivers
-		 * (e.g. fd) get unhappy in such cases.  Kick queue iff
-		 * dispatch queue has something on it.
+		 * driver init after queue creation or is not yet fully
+		 * active yet.  Some drivers (e.g. fd and loop) get unhappy
+		 * in such cases.  Kick queue iff dispatch queue has
+		 * something on it and @q has request_fn set.
 		 */
-		if (!list_empty(&q->queue_head))
+		if (!list_empty(&q->queue_head) && q->request_fn)
 			__blk_run_queue(q);

 		drain |= q->rq.elvpriv;
@@ -402,6 +410,49 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
 	}
 }

+/**
+ * blk_queue_bypass_start - enter queue bypass mode
+ * @q: queue of interest
+ *
+ * In bypass mode, only the dispatch FIFO queue of @q is used.  This
+ * function makes @q enter bypass mode and drains all requests which were
+ * throttled or issued before.  On return, it's guaranteed that no request
+ * is being throttled or has ELVPRIV set and blk_queue_bypass() %true
+ * inside queue or RCU read lock.
+ */
+void blk_queue_bypass_start(struct request_queue *q)
+{
+	bool drain;
+
+	spin_lock_irq(q->queue_lock);
+	drain = !q->bypass_depth++;
+	queue_flag_set(QUEUE_FLAG_BYPASS, q);
+	spin_unlock_irq(q->queue_lock);
+
+	if (drain) {
+		blk_drain_queue(q, false);
+		/* ensure blk_queue_bypass() is %true inside RCU read lock */
+		synchronize_rcu();
+	}
+}
+EXPORT_SYMBOL_GPL(blk_queue_bypass_start);
+
+/**
+ * blk_queue_bypass_end - leave queue bypass mode
+ * @q: queue of interest
+ *
+ * Leave bypass mode and restore the normal queueing behavior.
+ */
+void blk_queue_bypass_end(struct request_queue *q)
+{
+	spin_lock_irq(q->queue_lock);
+	if (!--q->bypass_depth)
+		queue_flag_clear(QUEUE_FLAG_BYPASS, q);
+	WARN_ON_ONCE(q->bypass_depth < 0);
+	spin_unlock_irq(q->queue_lock);
+}
+EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
+
 /**
  * blk_cleanup_queue - shutdown a request queue
  * @q: request queue to shutdown
@@ -418,6 +469,19 @@ void blk_cleanup_queue(struct request_queue *q)
 	queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);

 	spin_lock_irq(lock);
+
+	/*
+	 * Dead queue is permanently in bypass mode till released.  Note
+	 * that, unlike blk_queue_bypass_start(), we aren't performing
+	 * synchronize_rcu() after entering bypass mode to avoid the delay
+	 * as some drivers create and destroy a lot of queues while
+	 * probing.  This is still safe because blk_release_queue() will be
+	 * called only after the queue refcnt drops to zero and nothing,
+	 * RCU or not, would be traversing the queue by then.
+	 */
+	q->bypass_depth++;
+	queue_flag_set(QUEUE_FLAG_BYPASS, q);
+
 	queue_flag_set(QUEUE_FLAG_NOMERGES, q);
 	queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
 	queue_flag_set(QUEUE_FLAG_DEAD, q);
@@ -428,13 +492,8 @@ void blk_cleanup_queue(struct request_queue *q)
 	spin_unlock_irq(lock);
 	mutex_unlock(&q->sysfs_lock);

-	/*
-	 * Drain all requests queued before DEAD marking. The caller might
-	 * be trying to tear down @q before its elevator is initialized, in
-	 * which case we don't want to call into draining.
-	 */
-	if (q->elevator)
-		blk_drain_queue(q, true);
+	/* drain all requests queued before DEAD marking */
+	blk_drain_queue(q, true);

 	/* @q won't process any more request, flush async actions */
 	del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
@@ -498,14 +557,15 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 	if (err)
 		goto fail_id;

-	if (blk_throtl_init(q))
-		goto fail_id;
-
 	setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
 		    laptop_mode_timer_fn, (unsigned long) q);
 	setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
+	INIT_LIST_HEAD(&q->queue_head);
 	INIT_LIST_HEAD(&q->timeout_list);
 	INIT_LIST_HEAD(&q->icq_list);
+#ifdef CONFIG_BLK_CGROUP
+	INIT_LIST_HEAD(&q->blkg_list);
+#endif
 	INIT_LIST_HEAD(&q->flush_queue[0]);
 	INIT_LIST_HEAD(&q->flush_queue[1]);
 	INIT_LIST_HEAD(&q->flush_data_in_flight);
@@ -522,6 +582,18 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 	 */
 	q->queue_lock = &q->__queue_lock;

+	/*
+	 * A queue starts its life with bypass turned on to avoid
+	 * unnecessary bypass on/off overhead and nasty surprises during
+	 * init.  The initial bypass will be finished at the end of
+	 * blk_init_allocated_queue().
+	 */
+	q->bypass_depth = 1;
+	__set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
+
+	if (blkcg_init_queue(q))
+		goto fail_id;
+
 	return q;

 fail_id:
@@ -614,15 +686,15 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,

 	q->sg_reserved_size = INT_MAX;

-	/*
-	 * all done
-	 */
-	if (!elevator_init(q, NULL)) {
-		blk_queue_congestion_threshold(q);
-		return q;
-	}
+	/* init elevator */
+	if (elevator_init(q, NULL))
+		return NULL;
+
+	blk_queue_congestion_threshold(q);

-	return NULL;
+	/* all done, end the initial bypass */
+	blk_queue_bypass_end(q);
+	return q;
 }
 EXPORT_SYMBOL(blk_init_allocated_queue);
@@ -648,33 +720,6 @@ static inline void blk_free_request(struct request_queue *q, struct request *rq)
 	mempool_free(rq, q->rq.rq_pool);
 }

-static struct request *
-blk_alloc_request(struct request_queue *q, struct io_cq *icq,
-		  unsigned int flags, gfp_t gfp_mask)
-{
-	struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
-
-	if (!rq)
-		return NULL;
-
-	blk_rq_init(q, rq);
-
-	rq->cmd_flags = flags | REQ_ALLOCED;
-
-	if (flags & REQ_ELVPRIV) {
-		rq->elv.icq = icq;
-		if (unlikely(elv_set_request(q, rq, gfp_mask))) {
-			mempool_free(rq, q->rq.rq_pool);
-			return NULL;
-		}
-		/* @rq->elv.icq holds on to io_context until @rq is freed */
-		if (icq)
-			get_io_context(icq->ioc);
-	}
-
-	return rq;
-}
-
 /*
  * ioc_batching returns true if the ioc is a valid batching request and
  * should be given priority access to a request.
@@ -762,6 +807,22 @@ static bool blk_rq_should_init_elevator(struct bio *bio)
 	return true;
 }

+/**
+ * rq_ioc - determine io_context for request allocation
+ * @bio: request being allocated is for this bio (can be %NULL)
+ *
+ * Determine io_context to use for request allocation for @bio.  May return
+ * %NULL if %current->io_context doesn't exist.
+ */
+static struct io_context *rq_ioc(struct bio *bio)
+{
+#ifdef CONFIG_BLK_CGROUP
+	if (bio && bio->bi_ioc)
+		return bio->bi_ioc;
+#endif
+	return current->io_context;
+}
+
 /**
  * get_request - get a free request
  * @q: request_queue to allocate request from
@@ -779,7 +840,7 @@ static bool blk_rq_should_init_elevator(struct bio *bio)
 static struct request *get_request(struct request_queue *q, int rw_flags,
 				   struct bio *bio, gfp_t gfp_mask)
 {
-	struct request *rq = NULL;
+	struct request *rq;
 	struct request_list *rl = &q->rq;
 	struct elevator_type *et;
 	struct io_context *ioc;
@@ -789,7 +850,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 	int may_queue;
 retry:
 	et = q->elevator->type;
-	ioc = current->io_context;
+	ioc = rq_ioc(bio);

 	if (unlikely(blk_queue_dead(q)))
 		return NULL;
@@ -808,7 +869,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 			 */
 			if (!ioc && !retried) {
 				spin_unlock_irq(q->queue_lock);
-				create_io_context(current, gfp_mask, q->node);
+				create_io_context(gfp_mask, q->node);
 				spin_lock_irq(q->queue_lock);
 				retried = true;
 				goto retry;
@@ -831,7 +892,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 					 * process is not a "batcher", and not
 					 * exempted by the IO scheduler
 					 */
-					goto out;
+					return NULL;
 				}
 			}
 		}
@@ -844,7 +905,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 	 * allocated with any setting of ->nr_requests
 	 */
 	if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
-		goto out;
+		return NULL;

 	rl->count[is_sync]++;
 	rl->starved[is_sync] = 0;
@@ -859,8 +920,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 	 * Also, lookup icq while holding queue_lock.  If it doesn't exist,
 	 * it will be created after releasing queue_lock.
 	 */
-	if (blk_rq_should_init_elevator(bio) &&
-	    !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags)) {
+	if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) {
 		rw_flags |= REQ_ELVPRIV;
 		rl->elvpriv++;
 		if (et->icq_cache && ioc)
@@ -871,41 +931,36 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 		rw_flags |= REQ_IO_STAT;
 	spin_unlock_irq(q->queue_lock);

-	/* create icq if missing */
-	if ((rw_flags & REQ_ELVPRIV) && unlikely(et->icq_cache && !icq)) {
-		icq = ioc_create_icq(q, gfp_mask);
-		if (!icq)
-			goto fail_icq;
-	}
-
-	rq = blk_alloc_request(q, icq, rw_flags, gfp_mask);
+	/* allocate and init request */
+	rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
+	if (!rq)
+		goto fail_alloc;

-fail_icq:
-	if (unlikely(!rq)) {
-		/*
-		 * Allocation failed presumably due to memory. Undo anything
-		 * we might have messed up.
-		 *
-		 * Allocating task should really be put onto the front of the
-		 * wait queue, but this is pretty rare.
-		 */
-		spin_lock_irq(q->queue_lock);
-		freed_request(q, rw_flags);
+	blk_rq_init(q, rq);
+	rq->cmd_flags = rw_flags | REQ_ALLOCED;
+
+	/* init elvpriv */
+	if (rw_flags & REQ_ELVPRIV) {
+		if (unlikely(et->icq_cache && !icq)) {
+			create_io_context(gfp_mask, q->node);
+			ioc = rq_ioc(bio);
+			if (!ioc)
+				goto fail_elvpriv;
+
+			icq = ioc_create_icq(ioc, q, gfp_mask);
+			if (!icq)
+				goto fail_elvpriv;
+		}

-		/*
-		 * in the very unlikely event that allocation failed and no
-		 * requests for this direction was pending, mark us starved
-		 * so that freeing of a request in the other direction will
-		 * notice us. another possible fix would be to split the
-		 * rq mempool into READ and WRITE
-		 */
-rq_starved:
-		if (unlikely(rl->count[is_sync] == 0))
-			rl->starved[is_sync] = 1;
+		rq->elv.icq = icq;
+		if (unlikely(elv_set_request(q, rq, bio, gfp_mask)))
+			goto fail_elvpriv;

-		goto out;
+		/* @rq->elv.icq holds io_context until @rq is freed */
+		if (icq)
+			get_io_context(icq->ioc);
 	}
-
+out:
 	/*
 	 * ioc may be NULL here, and ioc_batching will be false. That's
 	 * OK, if the queue is under the request limit then requests need
@@ -916,8 +971,48 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 		ioc->nr_batch_requests--;

 	trace_block_getrq(q, bio, rw_flags & 1);
-out:
 	return rq;
+
+fail_elvpriv:
+	/*
+	 * elvpriv init failed.  ioc, icq and elvpriv aren't mempool backed
+	 * and may fail indefinitely under memory pressure and thus
+	 * shouldn't stall IO.  Treat this request as !elvpriv.  This will
+	 * disturb iosched and blkcg but weird is bettern than dead.
+	 */
+	printk_ratelimited(KERN_WARNING "%s: request aux data allocation failed, iosched may be disturbed\n",
+			   dev_name(q->backing_dev_info.dev));
+
+	rq->cmd_flags &= ~REQ_ELVPRIV;
+	rq->elv.icq = NULL;
+
+	spin_lock_irq(q->queue_lock);
+	rl->elvpriv--;
+	spin_unlock_irq(q->queue_lock);
+	goto out;
+
+fail_alloc:
+	/*
+	 * Allocation failed presumably due to memory. Undo anything we
+	 * might have messed up.
+	 *
+	 * Allocating task should really be put onto the front of the wait
+	 * queue, but this is pretty rare.
+	 */
+	spin_lock_irq(q->queue_lock);
+	freed_request(q, rw_flags);
+
+	/*
+	 * in the very unlikely event that allocation failed and no
+	 * requests for this direction was pending, mark us starved so that
+	 * freeing of a request in the other direction will notice
+	 * us. another possible fix would be to split the rq mempool into
+	 * READ and WRITE
+	 */
+rq_starved:
+	if (unlikely(rl->count[is_sync] == 0))
+		rl->starved[is_sync] = 1;
+	return NULL;
 }

 /**
@@ -961,7 +1056,7 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
 		 * up to a big batch of them for a small period time.
 		 * See ioc_batching, ioc_set_batching
 		 */
-		create_io_context(current, GFP_NOIO, q->node);
+		create_io_context(GFP_NOIO, q->node);
 		ioc_set_batching(q, current->io_context);

 		spin_lock_irq(q->queue_lock);
......
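A usage note on the blk_queue_bypass_start()/blk_queue_bypass_end() pair added in the hunks above: code that needs the queue quiesced (an elevator switch, enabling a blkcg policy on a live queue) brackets its work with the two calls. A minimal sketch, with update_queue_policy() standing in for the caller's own (hypothetical) work:

static int update_queue_policy_bypassed(struct request_queue *q)
{
	int ret;

	/* enter bypass: only the dispatch FIFO is used; throttled and
	 * ELVPRIV requests are drained before this returns */
	blk_queue_bypass_start(q);

	ret = update_queue_policy(q);	/* hypothetical caller-specific work */

	/* leave bypass and restore normal queueing */
	blk_queue_bypass_end(q);
	return ret;
}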
block/blk-ioc.c:

@@ -155,20 +155,20 @@ void put_io_context(struct io_context *ioc)
 }
 EXPORT_SYMBOL(put_io_context);

-/* Called by the exiting task */
-void exit_io_context(struct task_struct *task)
+/**
+ * put_io_context_active - put active reference on ioc
+ * @ioc: ioc of interest
+ *
+ * Undo get_io_context_active().  If active reference reaches zero after
+ * put, @ioc can never issue further IOs and ioscheds are notified.
+ */
+void put_io_context_active(struct io_context *ioc)
 {
-	struct io_context *ioc;
-	struct io_cq *icq;
 	struct hlist_node *n;
 	unsigned long flags;
+	struct io_cq *icq;

-	task_lock(task);
-	ioc = task->io_context;
-	task->io_context = NULL;
-	task_unlock(task);
-
-	if (!atomic_dec_and_test(&ioc->nr_tasks)) {
+	if (!atomic_dec_and_test(&ioc->active_ref)) {
 		put_io_context(ioc);
 		return;
 	}
@@ -197,6 +197,20 @@ void exit_io_context(struct task_struct *task)
 	put_io_context(ioc);
 }

+/* Called by the exiting task */
+void exit_io_context(struct task_struct *task)
+{
+	struct io_context *ioc;
+
+	task_lock(task);
+	ioc = task->io_context;
+	task->io_context = NULL;
+	task_unlock(task);
+
+	atomic_dec(&ioc->nr_tasks);
+	put_io_context_active(ioc);
+}
+
 /**
  * ioc_clear_queue - break any ioc association with the specified queue
  * @q: request_queue being cleared
@@ -218,19 +232,18 @@ void ioc_clear_queue(struct request_queue *q)
 	}
 }

-void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_flags,
-				int node)
+int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
 {
 	struct io_context *ioc;

 	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
 				    node);
 	if (unlikely(!ioc))
-		return;
+		return -ENOMEM;

 	/* initialize */
 	atomic_long_set(&ioc->refcount, 1);
-	atomic_set(&ioc->nr_tasks, 1);
+	atomic_set(&ioc->active_ref, 1);
 	spin_lock_init(&ioc->lock);
 	INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC | __GFP_HIGH);
 	INIT_HLIST_HEAD(&ioc->icq_list);
@@ -250,6 +263,8 @@ void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_flags,
 	else
 		kmem_cache_free(iocontext_cachep, ioc);
 	task_unlock(task);
+
+	return 0;
 }

 /**
@@ -281,7 +296,7 @@ struct io_context *get_task_io_context(struct task_struct *task,
 			return ioc;
 		}
 		task_unlock(task);
-	} while (create_io_context(task, gfp_flags, node));
+	} while (!create_task_io_context(task, gfp_flags, node));

 	return NULL;
 }
@@ -325,26 +340,23 @@ EXPORT_SYMBOL(ioc_lookup_icq);

 /**
  * ioc_create_icq - create and link io_cq
+ * @ioc: io_context of interest
  * @q: request_queue of interest
  * @gfp_mask: allocation mask
  *
- * Make sure io_cq linking %current->io_context and @q exists.  If either
- * io_context and/or icq don't exist, they will be created using @gfp_mask.
+ * Make sure io_cq linking @ioc and @q exists.  If icq doesn't exist, they
+ * will be created using @gfp_mask.
  *
  * The caller is responsible for ensuring @ioc won't go away and @q is
  * alive and will stay alive until this function returns.
  */
-struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask)
+struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
+			     gfp_t gfp_mask)
 {
 	struct elevator_type *et = q->elevator->type;
-	struct io_context *ioc;
 	struct io_cq *icq;

 	/* allocate stuff */
-	ioc = create_io_context(current, gfp_mask, q->node);
-	if (!ioc)
-		return NULL;
-
 	icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
 				    q->node);
 	if (!icq)
@@ -382,74 +394,6 @@ struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask)
 	return icq;
 }

-void ioc_set_icq_flags(struct io_context *ioc, unsigned int flags)
-{
-	struct io_cq *icq;
-	struct hlist_node *n;
-
-	hlist_for_each_entry(icq, n, &ioc->icq_list, ioc_node)
-		icq->flags |= flags;
-}
-
-/**
- * ioc_ioprio_changed - notify ioprio change
- * @ioc: io_context of interest
- * @ioprio: new ioprio
- *
- * @ioc's ioprio has changed to @ioprio.  Set %ICQ_IOPRIO_CHANGED for all
- * icq's.  iosched is responsible for checking the bit and applying it on
- * request issue path.
- */
-void ioc_ioprio_changed(struct io_context *ioc, int ioprio)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&ioc->lock, flags);
-	ioc->ioprio = ioprio;
-	ioc_set_icq_flags(ioc, ICQ_IOPRIO_CHANGED);
-	spin_unlock_irqrestore(&ioc->lock, flags);
-}
-
-/**
- * ioc_cgroup_changed - notify cgroup change
- * @ioc: io_context of interest
- *
- * @ioc's cgroup has changed.  Set %ICQ_CGROUP_CHANGED for all icq's.
- * iosched is responsible for checking the bit and applying it on request
- * issue path.
- */
-void ioc_cgroup_changed(struct io_context *ioc)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&ioc->lock, flags);
-	ioc_set_icq_flags(ioc, ICQ_CGROUP_CHANGED);
-	spin_unlock_irqrestore(&ioc->lock, flags);
-}
-EXPORT_SYMBOL(ioc_cgroup_changed);
-
-/**
- * icq_get_changed - fetch and clear icq changed mask
- * @icq: icq of interest
- *
- * Fetch and clear ICQ_*_CHANGED bits from @icq.  Grabs and releases
- * @icq->ioc->lock.
- */
-unsigned icq_get_changed(struct io_cq *icq)
-{
-	unsigned int changed = 0;
-	unsigned long flags;
-
-	if (unlikely(icq->flags & ICQ_CHANGED_MASK)) {
-		spin_lock_irqsave(&icq->ioc->lock, flags);
-		changed = icq->flags & ICQ_CHANGED_MASK;
-		icq->flags &= ~ICQ_CHANGED_MASK;
-		spin_unlock_irqrestore(&icq->ioc->lock, flags);
-	}
-	return changed;
-}
-EXPORT_SYMBOL(icq_get_changed);
-
 static int __init blk_ioc_init(void)
 {
 	iocontext_cachep = kmem_cache_create("blkdev_ioc",
......
block/blk-sysfs.c:

@@ -9,6 +9,7 @@
 #include <linux/blktrace_api.h>

 #include "blk.h"
+#include "blk-cgroup.h"

 struct queue_sysfs_entry {
 	struct attribute attr;
@@ -479,6 +480,8 @@ static void blk_release_queue(struct kobject *kobj)

 	blk_sync_queue(q);

+	blkcg_exit_queue(q);
+
 	if (q->elevator) {
 		spin_lock_irq(q->queue_lock);
 		ioc_clear_queue(q);
@@ -486,15 +489,12 @@ static void blk_release_queue(struct kobject *kobj)
 		elevator_exit(q->elevator);
 	}

-	blk_throtl_exit(q);
-
 	if (rl->rq_pool)
 		mempool_destroy(rl->rq_pool);

 	if (q->queue_tags)
 		__blk_queue_free_tags(q);

-	blk_throtl_release(q);
 	blk_trace_shutdown(q);

 	bdi_destroy(&q->backing_dev_info);
......
This diff is collapsed.
block/blk.h:

@@ -23,7 +23,8 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 		      struct bio *bio);
 int blk_rq_append_bio(struct request_queue *q, struct request *rq,
 		      struct bio *bio);
-void blk_drain_queue(struct request_queue *q, bool drain_all);
+void blk_queue_bypass_start(struct request_queue *q);
+void blk_queue_bypass_end(struct request_queue *q);
 void blk_dequeue_request(struct request *rq);
 void __blk_queue_free_tags(struct request_queue *q);
 bool __blk_end_bidi_request(struct request *rq, int error,
@@ -144,9 +145,6 @@ void blk_queue_congestion_threshold(struct request_queue *q);

 int blk_dev_init(void);

-void elv_quiesce_start(struct request_queue *q);
-void elv_quiesce_end(struct request_queue *q);
-
 /*
  * Return the threshold (number of used requests) at which the queue is
@@ -186,32 +184,30 @@ static inline int blk_do_io_stat(struct request *rq)
  */
 void get_io_context(struct io_context *ioc);
 struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
-struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask);
+struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
+			     gfp_t gfp_mask);
 void ioc_clear_queue(struct request_queue *q);
-void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_mask,
-				int node);
+int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);

 /**
  * create_io_context - try to create task->io_context
- * @task: target task
  * @gfp_mask: allocation mask
  * @node: allocation node
  *
- * If @task->io_context is %NULL, allocate a new io_context and install it.
- * Returns the current @task->io_context which may be %NULL if allocation
- * failed.
+ * If %current->io_context is %NULL, allocate a new io_context and install
+ * it.  Returns the current %current->io_context which may be %NULL if
+ * allocation failed.
  *
  * Note that this function can't be called with IRQ disabled because
- * task_lock which protects @task->io_context is IRQ-unsafe.
+ * task_lock which protects %current->io_context is IRQ-unsafe.
  */
-static inline struct io_context *create_io_context(struct task_struct *task,
-						   gfp_t gfp_mask, int node)
+static inline struct io_context *create_io_context(gfp_t gfp_mask, int node)
 {
 	WARN_ON_ONCE(irqs_disabled());
-	if (unlikely(!task->io_context))
-		create_io_context_slowpath(task, gfp_mask, node);
-	return task->io_context;
+	if (unlikely(!current->io_context))
+		create_task_io_context(current, gfp_mask, node);
+	return current->io_context;
 }

 /*
@@ -222,7 +218,6 @@ extern bool blk_throtl_bio(struct request_queue *q, struct bio *bio);
 extern void blk_throtl_drain(struct request_queue *q);
 extern int blk_throtl_init(struct request_queue *q);
 extern void blk_throtl_exit(struct request_queue *q);
-extern void blk_throtl_release(struct request_queue *q);
 #else /* CONFIG_BLK_DEV_THROTTLING */
 static inline bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
 {
@@ -231,7 +226,6 @@ static inline bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
 static inline void blk_throtl_drain(struct request_queue *q) { }
 static inline int blk_throtl_init(struct request_queue *q) { return 0; }
 static inline void blk_throtl_exit(struct request_queue *q) { }
-static inline void blk_throtl_release(struct request_queue *q) { }
 #endif	/* CONFIG_BLK_DEV_THROTTLING */

 #endif	/* BLK_INTERNAL_H */
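A small usage note on the reworked create_io_context() above: it now always operates on %current, takes only (gfp_mask, node), and may fail silently, so callers re-check %current->io_context afterwards, exactly as get_request_wait() does in the blk-core.c hunks. A minimal sketch (the wrapper name is hypothetical; ioc_set_batching() is the blk-core helper shown earlier in the diff):

static void ensure_current_ioc_for_batching(struct request_queue *q)
{
	/* may allocate and sleep; must not be called with IRQs disabled */
	create_io_context(GFP_NOIO, q->node);

	if (current->io_context)
		ioc_set_batching(q, current->io_context);
}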
This diff is collapsed.
#ifndef _CFQ_H
#define _CFQ_H
#include "blk-cgroup.h"
#ifdef CONFIG_CFQ_GROUP_IOSCHED
static inline void cfq_blkiocg_update_io_add_stats(struct blkio_group *blkg,
struct blkio_group *curr_blkg, bool direction, bool sync)
{
blkiocg_update_io_add_stats(blkg, curr_blkg, direction, sync);
}
static inline void cfq_blkiocg_update_dequeue_stats(struct blkio_group *blkg,
unsigned long dequeue)
{
blkiocg_update_dequeue_stats(blkg, dequeue);
}
static inline void cfq_blkiocg_update_timeslice_used(struct blkio_group *blkg,
unsigned long time, unsigned long unaccounted_time)
{
blkiocg_update_timeslice_used(blkg, time, unaccounted_time);
}
static inline void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg)
{
blkiocg_set_start_empty_time(blkg);
}
static inline void cfq_blkiocg_update_io_remove_stats(struct blkio_group *blkg,
bool direction, bool sync)
{
blkiocg_update_io_remove_stats(blkg, direction, sync);
}
static inline void cfq_blkiocg_update_io_merged_stats(struct blkio_group *blkg,
bool direction, bool sync)
{
blkiocg_update_io_merged_stats(blkg, direction, sync);
}
static inline void cfq_blkiocg_update_idle_time_stats(struct blkio_group *blkg)
{
blkiocg_update_idle_time_stats(blkg);
}
static inline void
cfq_blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
{
blkiocg_update_avg_queue_size_stats(blkg);
}
static inline void
cfq_blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
{
blkiocg_update_set_idle_time_stats(blkg);
}
static inline void cfq_blkiocg_update_dispatch_stats(struct blkio_group *blkg,
uint64_t bytes, bool direction, bool sync)
{
blkiocg_update_dispatch_stats(blkg, bytes, direction, sync);
}
static inline void cfq_blkiocg_update_completion_stats(struct blkio_group *blkg, uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
{
blkiocg_update_completion_stats(blkg, start_time, io_start_time,
direction, sync);
}
static inline void cfq_blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
struct blkio_group *blkg, void *key, dev_t dev) {
blkiocg_add_blkio_group(blkcg, blkg, key, dev, BLKIO_POLICY_PROP);
}
static inline int cfq_blkiocg_del_blkio_group(struct blkio_group *blkg)
{
return blkiocg_del_blkio_group(blkg);
}
#else /* CFQ_GROUP_IOSCHED */
static inline void cfq_blkiocg_update_io_add_stats(struct blkio_group *blkg,
struct blkio_group *curr_blkg, bool direction, bool sync) {}
static inline void cfq_blkiocg_update_dequeue_stats(struct blkio_group *blkg,
unsigned long dequeue) {}
static inline void cfq_blkiocg_update_timeslice_used(struct blkio_group *blkg,
unsigned long time, unsigned long unaccounted_time) {}
static inline void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg) {}
static inline void cfq_blkiocg_update_io_remove_stats(struct blkio_group *blkg,
bool direction, bool sync) {}
static inline void cfq_blkiocg_update_io_merged_stats(struct blkio_group *blkg,
bool direction, bool sync) {}
static inline void cfq_blkiocg_update_idle_time_stats(struct blkio_group *blkg)
{
}
static inline void
cfq_blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg) {}
static inline void
cfq_blkiocg_update_set_idle_time_stats(struct blkio_group *blkg) {}
static inline void cfq_blkiocg_update_dispatch_stats(struct blkio_group *blkg,
uint64_t bytes, bool direction, bool sync) {}
static inline void cfq_blkiocg_update_completion_stats(struct blkio_group *blkg, uint64_t start_time, uint64_t io_start_time, bool direction, bool sync) {}
static inline void cfq_blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
struct blkio_group *blkg, void *key, dev_t dev) {}
static inline int cfq_blkiocg_del_blkio_group(struct blkio_group *blkg)
{
return 0;
}
#endif /* CFQ_GROUP_IOSCHED */
#endif
block/deadline-iosched.c:

@@ -337,13 +337,13 @@ static void deadline_exit_queue(struct elevator_queue *e)
 /*
  * initialize elevator private data (deadline_data).
  */
-static void *deadline_init_queue(struct request_queue *q)
+static int deadline_init_queue(struct request_queue *q)
 {
 	struct deadline_data *dd;

 	dd = kmalloc_node(sizeof(*dd), GFP_KERNEL | __GFP_ZERO, q->node);
 	if (!dd)
-		return NULL;
+		return -ENOMEM;

 	INIT_LIST_HEAD(&dd->fifo_list[READ]);
 	INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
@@ -354,7 +354,9 @@ static void *deadline_init_queue(struct request_queue *q)
 	dd->writes_starved = writes_starved;
 	dd->front_merges = 1;
 	dd->fifo_batch = fifo_batch;
-	return dd;
+
+	q->elevator->elevator_data = dd;
+	return 0;
 }

 /*
......
This diff is collapsed.
block/noop-iosched.c:

@@ -59,15 +59,17 @@ noop_latter_request(struct request_queue *q, struct request *rq)
 	return list_entry(rq->queuelist.next, struct request, queuelist);
 }

-static void *noop_init_queue(struct request_queue *q)
+static int noop_init_queue(struct request_queue *q)
 {
 	struct noop_data *nd;

 	nd = kmalloc_node(sizeof(*nd), GFP_KERNEL, q->node);
 	if (!nd)
-		return NULL;
+		return -ENOMEM;

 	INIT_LIST_HEAD(&nd->queue);
-	return nd;
+
+	q->elevator->elevator_data = nd;
+	return 0;
 }

 static void noop_exit_queue(struct elevator_queue *e)
......
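The deadline and noop hunks above both show the new elevator init convention: ->elevator_init_fn() now returns 0 or -errno and stores its private data in q->elevator->elevator_data itself instead of returning it. A scheduler following the same pattern would look roughly like this (foo_data and foo_init_queue are hypothetical):

static int foo_init_queue(struct request_queue *q)
{
	struct foo_data *fd;

	fd = kmalloc_node(sizeof(*fd), GFP_KERNEL | __GFP_ZERO, q->node);
	if (!fd)
		return -ENOMEM;

	INIT_LIST_HEAD(&fd->queue);

	/* the init function now publishes its own private data */
	q->elevator->elevator_data = fd;
	return 0;
}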
fs/bio.c:

@@ -19,12 +19,14 @@
 #include <linux/swap.h>
 #include <linux/bio.h>
 #include <linux/blkdev.h>
+#include <linux/iocontext.h>
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/export.h>
 #include <linux/mempool.h>
 #include <linux/workqueue.h>
+#include <linux/cgroup.h>
 #include <scsi/sg.h>		/* for struct sg_iovec */

 #include <trace/events/block.h>
@@ -418,6 +420,7 @@ void bio_put(struct bio *bio)
 	 * last put frees it
 	 */
 	if (atomic_dec_and_test(&bio->bi_cnt)) {
+		bio_disassociate_task(bio);
 		bio->bi_next = NULL;
 		bio->bi_destructor(bio);
 	}
@@ -1646,6 +1649,64 @@ struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad)
 }
 EXPORT_SYMBOL(bioset_create);

+#ifdef CONFIG_BLK_CGROUP
+/**
+ * bio_associate_current - associate a bio with %current
+ * @bio: target bio
+ *
+ * Associate @bio with %current if it hasn't been associated yet.  Block
+ * layer will treat @bio as if it were issued by %current no matter which
+ * task actually issues it.
+ *
+ * This function takes an extra reference of @task's io_context and blkcg
+ * which will be put when @bio is released.  The caller must own @bio,
+ * ensure %current->io_context exists, and is responsible for synchronizing
+ * calls to this function.
+ */
+int bio_associate_current(struct bio *bio)
+{
+	struct io_context *ioc;
+	struct cgroup_subsys_state *css;
+
+	if (bio->bi_ioc)
+		return -EBUSY;
+
+	ioc = current->io_context;
+	if (!ioc)
+		return -ENOENT;
+
+	/* acquire active ref on @ioc and associate */
+	get_io_context_active(ioc);
+	bio->bi_ioc = ioc;
+
+	/* associate blkcg if exists */
+	rcu_read_lock();
+	css = task_subsys_state(current, blkio_subsys_id);
+	if (css && css_tryget(css))
+		bio->bi_css = css;
+	rcu_read_unlock();
+
+	return 0;
+}
+
+/**
+ * bio_disassociate_task - undo bio_associate_current()
+ * @bio: target bio
+ */
+void bio_disassociate_task(struct bio *bio)
+{
+	if (bio->bi_ioc) {
+		put_io_context(bio->bi_ioc);
+		bio->bi_ioc = NULL;
+	}
+	if (bio->bi_css) {
+		css_put(bio->bi_css);
+		bio->bi_css = NULL;
+	}
+}
+
+#endif /* CONFIG_BLK_CGROUP */
+
 static void __init biovec_init_slabs(void)
 {
 	int i;
......
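On the bio_associate_current()/bio_disassociate_task() pair added above: the intended pattern is for a driver or stacking layer that punts bios to a worker to tag each bio with the submitting task first, so the IO keeps being charged to the right io_context and blkcg even though another task ends up issuing it. A rough sketch (foo_device and foo_defer_bio() are hypothetical):

static void foo_submit(struct foo_device *dev, struct bio *bio)
{
	/*
	 * Tag @bio with the submitting task.  -EBUSY (already associated)
	 * and -ENOENT (no io_context) are both non-fatal here; in those
	 * cases the bio is simply attributed to whoever issues it.
	 */
	bio_associate_current(bio);

	/* hand off to the worker; the ioc/css references taken above are
	 * dropped by bio_disassociate_task() when the bio is put */
	foo_defer_bio(dev, bio);
}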
fs/ioprio.c:

@@ -50,7 +50,7 @@ int set_task_ioprio(struct task_struct *task, int ioprio)

 	ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
 	if (ioc) {
-		ioc_ioprio_changed(ioc, ioprio);
+		ioc->ioprio = ioprio;
 		put_io_context(ioc);
 	}
......
fs/splice.c:

@@ -1388,7 +1388,7 @@ static long do_splice(struct file *in, loff_t __user *off_in,
  */
 static int get_iovec_page_array(const struct iovec __user *iov,
 				unsigned int nr_vecs, struct page **pages,
-				struct partial_page *partial, int aligned,
+				struct partial_page *partial, bool aligned,
 				unsigned int pipe_buffers)
 {
 	int buffers = 0, error = 0;
@@ -1626,7 +1626,7 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
 		return -ENOMEM;

 	spd.nr_pages = get_iovec_page_array(iov, nr_segs, spd.pages,
-					    spd.partial, flags & SPLICE_F_GIFT,
+					    spd.partial, false,
 					    pipe->buffers);
 	if (spd.nr_pages <= 0)
 		ret = spd.nr_pages;
......
include/linux/bio.h:

@@ -269,6 +269,14 @@ extern struct bio_vec *bvec_alloc_bs(gfp_t, int, unsigned long *, struct bio_set *);
 extern void bvec_free_bs(struct bio_set *, struct bio_vec *, unsigned int);
 extern unsigned int bvec_nr_vecs(unsigned short idx);

+#ifdef CONFIG_BLK_CGROUP
+int bio_associate_current(struct bio *bio);
+void bio_disassociate_task(struct bio *bio);
+#else	/* CONFIG_BLK_CGROUP */
+static inline int bio_associate_current(struct bio *bio) { return -ENOENT; }
+static inline void bio_disassociate_task(struct bio *bio) { }
+#endif	/* CONFIG_BLK_CGROUP */
+
 /*
  * bio_set is used to allow other portions of the IO system to
  * allocate their own private memory pools for bio and iovec structures.
......
include/linux/blk_types.h:

@@ -14,6 +14,8 @@ struct bio;
 struct bio_integrity_payload;
 struct page;
 struct block_device;
+struct io_context;
+struct cgroup_subsys_state;

 typedef void (bio_end_io_t) (struct bio *, int);
 typedef void (bio_destructor_t) (struct bio *);
@@ -66,6 +68,14 @@ struct bio {
 	bio_end_io_t		*bi_end_io;

 	void			*bi_private;
+#ifdef CONFIG_BLK_CGROUP
+	/*
+	 * Optional ioc and css associated with this bio.  Put on bio
+	 * release.  Read comment on top of bio_associate_current().
+	 */
+	struct io_context	*bi_ioc;
+	struct cgroup_subsys_state *bi_css;
+#endif
 #if defined(CONFIG_BLK_DEV_INTEGRITY)
 	struct bio_integrity_payload *bi_integrity;  /* data integrity */
 #endif
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
init/Kconfig:

@@ -803,7 +803,7 @@ config RT_GROUP_SCHED
 endif #CGROUP_SCHED

 config BLK_CGROUP
-	tristate "Block IO controller"
+	bool "Block IO controller"
 	depends on BLOCK
 	default n
 	---help---
......
This diff is collapsed.