Commit f97380bc authored by Olaf Kirch, committed by Linus Torvalds

dm crypt: use smaller bvecs in clones

Allocate smaller clones

With the previous dm-crypt fixes, there is no need for the clone bios to have
the same bvec size as the original - we just need to make them big enough for
the remaining number of pages.  The only requirement is that we clear the
"out" index in convert_context, so that crypt_convert starts storing data at
the right position within the clone bio.
Signed-off-by: Olaf Kirch <olaf.kirch@oracle.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Cc: Jens Axboe <jens.axboe@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent 2f9941b6
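To make the new contract concrete, here is a minimal sketch of process_write() as it behaves after this patch, paraphrased from the hunks below. Error paths are abridged, the crypt_convert_init() setup of ctx is omitted, and this is an illustration rather than the verbatim kernel function:

/*
 * Sketch only: abridged from process_write() as modified below.
 * Real code also initializes ctx via crypt_convert_init() and
 * frees the clone's pages on conversion failure.
 */
static void process_write(struct crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct bio *base_bio = io->base_bio;
	struct bio *clone;
	struct convert_context ctx;
	unsigned remaining = base_bio->bi_size;
	sector_t sector = base_bio->bi_sector - io->target->begin;

	atomic_inc(&io->pending);

	while (remaining) {
		/* The clone only needs to cover the bytes still unwritten. */
		clone = crypt_alloc_buffer(io, remaining);
		if (unlikely(!clone)) {
			dec_pending(io, -ENOMEM);
			return;
		}

		ctx.bio_out = clone;
		ctx.idx_out = 0;	/* fresh clone: store ciphertext from bvec 0 */

		if (unlikely(crypt_convert(cc, &ctx) < 0)) {
			/* abridged: real code frees the clone's pages first */
			dec_pending(io, -EIO);
			return;
		}

		/* crypt_convert must have consumed every bvec we allocated */
		BUG_ON(ctx.idx_out < clone->bi_vcnt);

		clone->bi_sector = cc->start + sector;
		remaining -= clone->bi_size;
		sector += bio_sectors(clone);
		generic_make_request(clone);
	}
}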
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -379,8 +379,7 @@ static int crypt_convert(struct crypt_config *cc,
  * This should never violate the device limitations
  * May return a smaller bio when running out of pages
  */
-static struct bio *crypt_alloc_buffer(struct crypt_io *io, unsigned int size,
-				      unsigned int *bio_vec_idx)
+static struct bio *crypt_alloc_buffer(struct crypt_io *io, unsigned int size)
 {
 	struct crypt_config *cc = io->target->private;
 	struct bio *clone;
@@ -394,16 +393,7 @@ static struct bio *crypt_alloc_buffer(struct crypt_io *io, unsigned int size,
 
 	clone_init(io, clone);
 
-	/* if the last bio was not complete, continue where that one ended */
-	clone->bi_idx = *bio_vec_idx;
-	clone->bi_vcnt = *bio_vec_idx;
-	clone->bi_size = 0;
-	clone->bi_flags &= ~(1 << BIO_SEG_VALID);
-
-	/* clone->bi_idx pages have already been allocated */
-	size -= clone->bi_idx * PAGE_SIZE;
-
-	for (i = clone->bi_idx; i < nr_iovecs; i++) {
+	for (i = 0; i < nr_iovecs; i++) {
 		struct bio_vec *bv = bio_iovec_idx(clone, i);
 
 		bv->bv_page = mempool_alloc(cc->page_pool, gfp_mask);
@@ -415,7 +405,7 @@ static struct bio *crypt_alloc_buffer(struct crypt_io *io, unsigned int size,
 		 * return a partially allocated bio, the caller will then try
 		 * to allocate additional bios while submitting this partial bio
 		 */
-		if ((i - clone->bi_idx) == (MIN_BIO_PAGES - 1))
+		if (i == (MIN_BIO_PAGES - 1))
 			gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;
 
 		bv->bv_offset = 0;
@@ -434,12 +424,6 @@ static struct bio *crypt_alloc_buffer(struct crypt_io *io, unsigned int size,
 		return NULL;
 	}
 
-	/*
-	 * Remember the last bio_vec allocated to be able
-	 * to correctly continue after the splitting.
-	 */
-	*bio_vec_idx = clone->bi_vcnt;
-
 	return clone;
 }
 
@@ -597,7 +581,6 @@ static void process_write(struct crypt_io *io)
 	struct convert_context ctx;
 	unsigned remaining = base_bio->bi_size;
 	sector_t sector = base_bio->bi_sector - io->target->begin;
-	unsigned bvec_idx = 0;
 
 	atomic_inc(&io->pending);
 
@@ -608,13 +591,14 @@ static void process_write(struct crypt_io *io)
 	 * so repeat the whole process until all the data can be handled.
 	 */
 	while (remaining) {
-		clone = crypt_alloc_buffer(io, base_bio->bi_size, &bvec_idx);
+		clone = crypt_alloc_buffer(io, remaining);
 		if (unlikely(!clone)) {
 			dec_pending(io, -ENOMEM);
 			return;
 		}
 
 		ctx.bio_out = clone;
+		ctx.idx_out = 0;
 
 		if (unlikely(crypt_convert(cc, &ctx) < 0)) {
 			crypt_free_buffer_pages(cc, clone, clone->bi_size);
@@ -623,6 +607,9 @@ static void process_write(struct crypt_io *io)
 			return;
 		}
 
+		/* crypt_convert should have filled the clone bio */
+		BUG_ON(ctx.idx_out < clone->bi_vcnt);
+
 		clone->bi_sector = cc->start + sector;
 		remaining -= clone->bi_size;
 		sector += bio_sectors(clone);