Commit 933f01d4 authored by Milan Broz, committed by Alasdair G Kergon

dm crypt: avoid unnecessary wait when splitting bio

Don't wait between submitting crypt requests for a bio unless
we are short of memory.

There are two situations when we must split an encrypted bio:
  1) there are no free pages;
  2) the new bio would violate underlying device restrictions
(e.g. max hw segments).

In case (2) we do not need to wait.

Add an output variable to crypt_alloc_buffer() to distinguish between
these cases.
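
As an illustration only (not part of the patch), the resulting write loop
reduces to roughly the following control flow; error handling, fragment
submission and reference counting are elided, and the comments are mine:

	while (remaining) {
		clone = crypt_alloc_buffer(io, remaining, &out_of_pages);
		/* ... encrypt and submit this fragment,
		       remaining -= clone->bi_size ... */

		/* Case 1: short of pages -> back off briefly. */
		if (unlikely(out_of_pages))
			congestion_wait(WRITE, HZ/100);

		/* Before starting the next fragment, drain requests
		   still pending against the shared io->ctx. */
		if (unlikely(remaining))
			wait_event(cc->writeq, !atomic_read(&io->ctx.pending));
	}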
Signed-off-by: Milan Broz <mbroz@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Parent c8081618
@@ -457,9 +457,11 @@ static void dm_crypt_bio_destructor(struct bio *bio)
 /*
  * Generate a new unfragmented bio with the given size
  * This should never violate the device limitations
- * May return a smaller bio when running out of pages
+ * May return a smaller bio when running out of pages, indicated by
+ * *out_of_pages set to 1.
  */
-static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
+static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
+				      unsigned *out_of_pages)
 {
 	struct crypt_config *cc = io->target->private;
 	struct bio *clone;
@@ -473,11 +475,14 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
 		return NULL;
 
 	clone_init(io, clone);
+	*out_of_pages = 0;
 
 	for (i = 0; i < nr_iovecs; i++) {
 		page = mempool_alloc(cc->page_pool, gfp_mask);
-		if (!page)
+		if (!page) {
+			*out_of_pages = 1;
 			break;
+		}
 
 		/*
 		 * if additional pages cannot be allocated without waiting,
@@ -696,6 +701,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 	struct crypt_config *cc = io->target->private;
 	struct bio *clone;
 	int crypt_finished;
+	unsigned out_of_pages = 0;
 	unsigned remaining = io->base_bio->bi_size;
 	int r;
 
@@ -710,7 +716,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 	 * so repeat the whole process until all the data can be handled.
 	 */
 	while (remaining) {
-		clone = crypt_alloc_buffer(io, remaining);
+		clone = crypt_alloc_buffer(io, remaining, &out_of_pages);
 		if (unlikely(!clone)) {
 			io->error = -ENOMEM;
 			break;
@@ -737,11 +743,15 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 			break;
 		}
 
-		/* out of memory -> run queues */
-		if (unlikely(remaining)) {
-			wait_event(cc->writeq, !atomic_read(&io->ctx.pending));
+		/*
+		 * Out of memory -> run queues
+		 * But don't wait if split was due to the io size restriction
+		 */
+		if (unlikely(out_of_pages))
 			congestion_wait(WRITE, HZ/100);
-		}
+
+		if (unlikely(remaining))
+			wait_event(cc->writeq, !atomic_read(&io->ctx.pending));
 	}
 
 	crypt_dec_pending(io);
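
Design note: congestion_wait(WRITE, HZ/100) sleeps for up to roughly 10 ms
(HZ/100 jiffies) or until write congestion clears. That is sensible
back-pressure when the page mempool is exhausted, but pure added latency per
fragment when the split was forced only by device limits such as max hw
segments. The wait_event() on cc->writeq still runs whenever fragments
remain, since io->ctx is shared across fragments and its pending requests
must drain before the next fragment starts.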