提交 b35f8caa 编写于 作者: M Milan Broz 提交者: Alasdair G Kergon

dm crypt: wait for endio to complete before destruction

The following oops has been reported when dm-crypt runs over a loop device.

...
[   70.381058] Process loop0 (pid: 4268, ti=cf3b2000 task=cf1cc1f0 task.ti=cf3b2000)
...
[   70.381058] Call Trace:
[   70.381058]  [<d0d76601>] ? crypt_dec_pending+0x5e/0x62 [dm_crypt]
[   70.381058]  [<d0d767b8>] ? crypt_endio+0xa2/0xaa [dm_crypt]
[   70.381058]  [<d0d76716>] ? crypt_endio+0x0/0xaa [dm_crypt]
[   70.381058]  [<c01a2f24>] ? bio_endio+0x2b/0x2e
[   70.381058]  [<d0806530>] ? dec_pending+0x224/0x23b [dm_mod]
[   70.381058]  [<d08066e4>] ? clone_endio+0x79/0xa4 [dm_mod]
[   70.381058]  [<d080666b>] ? clone_endio+0x0/0xa4 [dm_mod]
[   70.381058]  [<c01a2f24>] ? bio_endio+0x2b/0x2e
[   70.381058]  [<c02bad86>] ? loop_thread+0x380/0x3b7
[   70.381058]  [<c02ba8a1>] ? do_lo_send_aops+0x0/0x165
[   70.381058]  [<c013754f>] ? autoremove_wake_function+0x0/0x33
[   70.381058]  [<c02baa06>] ? loop_thread+0x0/0x3b7

When a table is being replaced, dm waits for in-flight I/O to complete
before destroying the mempool, but the endio function did not call
mempool_free() until after completing the bio, so the mempool could be
destroyed while the io structure was still in use.

Fix it by swapping the order of those two operations: free the io
structure back to the mempool first, then complete the bio.

The same problem occurs in dm.c with md referenced after dec_pending.
Again, we swap the order.

Cc: stable@kernel.org
Signed-off-by: Milan Broz <mbroz@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
上级 b2174eeb
...@@ -568,19 +568,22 @@ static void crypt_inc_pending(struct dm_crypt_io *io) ...@@ -568,19 +568,22 @@ static void crypt_inc_pending(struct dm_crypt_io *io)
static void crypt_dec_pending(struct dm_crypt_io *io) static void crypt_dec_pending(struct dm_crypt_io *io)
{ {
struct crypt_config *cc = io->target->private; struct crypt_config *cc = io->target->private;
struct bio *base_bio = io->base_bio;
struct dm_crypt_io *base_io = io->base_io;
int error = io->error;
if (!atomic_dec_and_test(&io->pending)) if (!atomic_dec_and_test(&io->pending))
return; return;
if (likely(!io->base_io)) mempool_free(io, cc->io_pool);
bio_endio(io->base_bio, io->error);
if (likely(!base_io))
bio_endio(base_bio, error);
else { else {
if (io->error && !io->base_io->error) if (error && !base_io->error)
io->base_io->error = io->error; base_io->error = error;
crypt_dec_pending(io->base_io); crypt_dec_pending(base_io);
} }
mempool_free(io, cc->io_pool);
} }
/* /*
......
...@@ -525,9 +525,12 @@ static int __noflush_suspending(struct mapped_device *md) ...@@ -525,9 +525,12 @@ static int __noflush_suspending(struct mapped_device *md)
static void dec_pending(struct dm_io *io, int error) static void dec_pending(struct dm_io *io, int error)
{ {
unsigned long flags; unsigned long flags;
int io_error;
struct bio *bio;
struct mapped_device *md = io->md;
/* Push-back supersedes any I/O errors */ /* Push-back supersedes any I/O errors */
if (error && !(io->error > 0 && __noflush_suspending(io->md))) if (error && !(io->error > 0 && __noflush_suspending(md)))
io->error = error; io->error = error;
if (atomic_dec_and_test(&io->io_count)) { if (atomic_dec_and_test(&io->io_count)) {
...@@ -537,24 +540,27 @@ static void dec_pending(struct dm_io *io, int error) ...@@ -537,24 +540,27 @@ static void dec_pending(struct dm_io *io, int error)
* This must be handled before the sleeper on * This must be handled before the sleeper on
* suspend queue merges the pushback list. * suspend queue merges the pushback list.
*/ */
spin_lock_irqsave(&io->md->pushback_lock, flags); spin_lock_irqsave(&md->pushback_lock, flags);
if (__noflush_suspending(io->md)) if (__noflush_suspending(md))
bio_list_add(&io->md->pushback, io->bio); bio_list_add(&md->pushback, io->bio);
else else
/* noflush suspend was interrupted. */ /* noflush suspend was interrupted. */
io->error = -EIO; io->error = -EIO;
spin_unlock_irqrestore(&io->md->pushback_lock, flags); spin_unlock_irqrestore(&md->pushback_lock, flags);
} }
end_io_acct(io); end_io_acct(io);
if (io->error != DM_ENDIO_REQUEUE) { io_error = io->error;
trace_block_bio_complete(io->md->queue, io->bio); bio = io->bio;
bio_endio(io->bio, io->error); free_io(md, io);
}
if (io_error != DM_ENDIO_REQUEUE) {
trace_block_bio_complete(md->queue, bio);
free_io(io->md, io); bio_endio(bio, io_error);
}
} }
} }
...@@ -562,6 +568,7 @@ static void clone_endio(struct bio *bio, int error) ...@@ -562,6 +568,7 @@ static void clone_endio(struct bio *bio, int error)
{ {
int r = 0; int r = 0;
struct dm_target_io *tio = bio->bi_private; struct dm_target_io *tio = bio->bi_private;
struct dm_io *io = tio->io;
struct mapped_device *md = tio->io->md; struct mapped_device *md = tio->io->md;
dm_endio_fn endio = tio->ti->type->end_io; dm_endio_fn endio = tio->ti->type->end_io;
...@@ -585,15 +592,14 @@ static void clone_endio(struct bio *bio, int error) ...@@ -585,15 +592,14 @@ static void clone_endio(struct bio *bio, int error)
} }
} }
dec_pending(tio->io, error);
/* /*
* Store md for cleanup instead of tio which is about to get freed. * Store md for cleanup instead of tio which is about to get freed.
*/ */
bio->bi_private = md->bs; bio->bi_private = md->bs;
bio_put(bio);
free_tio(md, tio); free_tio(md, tio);
bio_put(bio);
dec_pending(io, error);
} }
static sector_t max_io_len(struct mapped_device *md, static sector_t max_io_len(struct mapped_device *md,
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册