Commit 7afafc8a authored by Adrian Hunter, committed by Jens Axboe

block: Fix secure erase

Commit 288dab8a ("block: add a separate operation type for secure
erase") split REQ_OP_SECURE_ERASE from REQ_OP_DISCARD without considering
all the places REQ_OP_DISCARD was being used to mean either. Fix those.
Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Fixes: 288dab8a ("block: add a separate operation type for secure erase")
Signed-off-by: Jens Axboe <axboe@fb.com>
Parent f6b6a28e
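The recurring bug pattern is the same at every site below: the code tested for REQ_OP_DISCARD alone, but after the split a secure erase is a distinct op that must be treated like a discard. A minimal sketch of the predicate the patch open-codes at each call site (the helper name is hypothetical; the patch itself does not add it):

#include <linux/blk_types.h>

/* Hypothetical helper, for illustration only: the condition this patch
 * open-codes at each call site. A REQ_OP_SECURE_ERASE bio behaves like
 * a discard (no data payload), except that only queues advertising
 * secure-erase support may receive one. */
static inline bool op_is_discard_like(unsigned int op)
{
	return op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE;
}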
@@ -667,18 +667,19 @@ struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
 	bio->bi_iter.bi_sector	= bio_src->bi_iter.bi_sector;
 	bio->bi_iter.bi_size	= bio_src->bi_iter.bi_size;
 
-	if (bio_op(bio) == REQ_OP_DISCARD)
-		goto integrity_clone;
-
-	if (bio_op(bio) == REQ_OP_WRITE_SAME) {
+	switch (bio_op(bio)) {
+	case REQ_OP_DISCARD:
+	case REQ_OP_SECURE_ERASE:
+		break;
+	case REQ_OP_WRITE_SAME:
 		bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
-		goto integrity_clone;
+		break;
+	default:
+		bio_for_each_segment(bv, bio_src, iter)
+			bio->bi_io_vec[bio->bi_vcnt++] = bv;
+		break;
 	}
 
-	bio_for_each_segment(bv, bio_src, iter)
-		bio->bi_io_vec[bio->bi_vcnt++] = bv;
-
-integrity_clone:
 	if (bio_integrity(bio_src)) {
 		int ret;
 
@@ -1788,7 +1789,7 @@ struct bio *bio_split(struct bio *bio, int sectors,
 	 * Discards need a mutable bio_vec to accommodate the payload
 	 * required by the DSM TRIM and UNMAP commands.
 	 */
-	if (bio_op(bio) == REQ_OP_DISCARD)
+	if (bio_op(bio) == REQ_OP_DISCARD || bio_op(bio) == REQ_OP_SECURE_ERASE)
 		split = bio_clone_bioset(bio, gfp, bs);
 	else
 		split = bio_clone_fast(bio, gfp, bs);
......
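For context, secure-erase bios reach bio_split() and bio_clone_bioset() through the same submission path as discards. A sketch of the kernel-internal entry point as of this kernel version (error handling elided; the wrapper name is made up for illustration):

#include <linux/blkdev.h>

/* With the BLKDEV_DISCARD_SECURE flag, blkdev_issue_discard() builds
 * REQ_OP_SECURE_ERASE bios instead of REQ_OP_DISCARD ones, provided
 * the queue advertises secure-erase support. */
static int erase_range(struct block_device *bdev, sector_t sector,
		       sector_t nr_sects)
{
	return blkdev_issue_discard(bdev, sector, nr_sects, GFP_KERNEL,
				    BLKDEV_DISCARD_SECURE);
}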
@@ -172,12 +172,18 @@ void blk_queue_split(struct request_queue *q, struct bio **bio,
 	struct bio *split, *res;
 	unsigned nsegs;
 
-	if (bio_op(*bio) == REQ_OP_DISCARD)
+	switch (bio_op(*bio)) {
+	case REQ_OP_DISCARD:
+	case REQ_OP_SECURE_ERASE:
 		split = blk_bio_discard_split(q, *bio, bs, &nsegs);
-	else if (bio_op(*bio) == REQ_OP_WRITE_SAME)
+		break;
+	case REQ_OP_WRITE_SAME:
 		split = blk_bio_write_same_split(q, *bio, bs, &nsegs);
-	else
+		break;
+	default:
 		split = blk_bio_segment_split(q, *bio, q->bio_split, &nsegs);
+		break;
+	}
 
 	/* physical segments can be figured out during splitting */
 	res = split ? split : *bio;
@@ -213,7 +219,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 	 * This should probably be returning 0, but blk_add_request_payload()
 	 * (Christoph!!!!)
 	 */
-	if (bio_op(bio) == REQ_OP_DISCARD)
+	if (bio_op(bio) == REQ_OP_DISCARD || bio_op(bio) == REQ_OP_SECURE_ERASE)
 		return 1;
 
 	if (bio_op(bio) == REQ_OP_WRITE_SAME)
@@ -385,7 +391,9 @@ static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
 	nsegs = 0;
 	cluster = blk_queue_cluster(q);
 
-	if (bio_op(bio) == REQ_OP_DISCARD) {
+	switch (bio_op(bio)) {
+	case REQ_OP_DISCARD:
+	case REQ_OP_SECURE_ERASE:
 		/*
 		 * This is a hack - drivers should be neither modifying the
 		 * biovec, nor relying on bi_vcnt - but because of
@@ -393,19 +401,16 @@ static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
 		 * a payload we need to set up here (thank you Christoph) and
 		 * bi_vcnt is really the only way of telling if we need to.
 		 */
-
-		if (bio->bi_vcnt)
-			goto single_segment;
-
-		return 0;
-	}
-
-	if (bio_op(bio) == REQ_OP_WRITE_SAME) {
-single_segment:
+		if (!bio->bi_vcnt)
+			return 0;
+		/* Fall through */
+	case REQ_OP_WRITE_SAME:
 		*sg = sglist;
 		bvec = bio_iovec(bio);
 		sg_set_page(*sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
 		return 1;
+	default:
+		break;
 	}
 
 	for_each_bio(bio)
......
@@ -366,7 +366,7 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
 	list_for_each_prev(entry, &q->queue_head) {
 		struct request *pos = list_entry_rq(entry);
 
-		if ((req_op(rq) == REQ_OP_DISCARD) != (req_op(pos) == REQ_OP_DISCARD))
+		if (req_op(rq) != req_op(pos))
 			break;
 		if (rq_data_dir(rq) != rq_data_dir(pos))
 			break;
......
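The old elevator test only distinguished discard from non-discard, so once secure erase became its own op a REQ_OP_SECURE_ERASE request could be sorted in among ordinary reads and writes. A self-contained sketch of the two predicates (the enum values here are simplified stand-ins for the kernel's req_op values):

#include <stdbool.h>
#include <stdio.h>

enum req_op { REQ_OP_READ, REQ_OP_WRITE, REQ_OP_DISCARD, REQ_OP_SECURE_ERASE };

int main(void)
{
	enum req_op rq = REQ_OP_SECURE_ERASE, pos = REQ_OP_WRITE;

	/* Old test: both sides are false, so no break - the secure erase
	 * would be sorted among ordinary writes. */
	bool old_break = (rq == REQ_OP_DISCARD) != (pos == REQ_OP_DISCARD);

	/* New test: any op mismatch stops the scan. */
	bool new_break = (rq != pos);

	printf("old_break=%d new_break=%d\n", old_break, new_break);
	return 0;
}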
@@ -1726,6 +1726,7 @@ static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
 			break;
 
 		if (req_op(next) == REQ_OP_DISCARD ||
+		    req_op(next) == REQ_OP_SECURE_ERASE ||
 		    req_op(next) == REQ_OP_FLUSH)
 			break;
 
......
@@ -33,7 +33,8 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)
 	/*
 	 * We only like normal block requests and discards.
 	 */
-	if (req->cmd_type != REQ_TYPE_FS && req_op(req) != REQ_OP_DISCARD) {
+	if (req->cmd_type != REQ_TYPE_FS && req_op(req) != REQ_OP_DISCARD &&
+	    req_op(req) != REQ_OP_SECURE_ERASE) {
 		blk_dump_rq_flags(req, "MMC bad request");
 		return BLKPREP_KILL;
 	}
......
@@ -4,7 +4,9 @@
 static inline bool mmc_req_is_special(struct request *req)
 {
 	return req &&
-		(req_op(req) == REQ_OP_FLUSH || req_op(req) == REQ_OP_DISCARD);
+		(req_op(req) == REQ_OP_FLUSH ||
+		 req_op(req) == REQ_OP_DISCARD ||
+		 req_op(req) == REQ_OP_SECURE_ERASE);
 }
 
 struct request;
......
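The MMC driver sees these requests when userspace asks for a secure discard. One way to exercise this path from userspace (illustrative; error handling elided):

#include <fcntl.h>
#include <linux/fs.h>
#include <stdint.h>
#include <sys/ioctl.h>

/* BLKSECDISCARD securely erases [start, start + len) bytes of the open
 * block device; the kernel turns it into REQ_OP_SECURE_ERASE requests.
 * It fails with EOPNOTSUPP if the device lacks secure-erase support. */
static int secure_discard(int fd, uint64_t start, uint64_t len)
{
	uint64_t range[2] = { start, len };

	return ioctl(fd, BLKSECDISCARD, range);
}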
@@ -71,7 +71,8 @@ static inline bool bio_has_data(struct bio *bio)
 {
 	if (bio &&
 	    bio->bi_iter.bi_size &&
-	    bio_op(bio) != REQ_OP_DISCARD)
+	    bio_op(bio) != REQ_OP_DISCARD &&
+	    bio_op(bio) != REQ_OP_SECURE_ERASE)
 		return true;
 
 	return false;
@@ -79,7 +80,9 @@ static inline bool bio_has_data(struct bio *bio)
 
 static inline bool bio_no_advance_iter(struct bio *bio)
 {
-	return bio_op(bio) == REQ_OP_DISCARD || bio_op(bio) == REQ_OP_WRITE_SAME;
+	return bio_op(bio) == REQ_OP_DISCARD ||
+	       bio_op(bio) == REQ_OP_SECURE_ERASE ||
+	       bio_op(bio) == REQ_OP_WRITE_SAME;
 }
 
 static inline bool bio_is_rw(struct bio *bio)
@@ -199,6 +202,9 @@ static inline unsigned bio_segments(struct bio *bio)
 	if (bio_op(bio) == REQ_OP_DISCARD)
 		return 1;
 
+	if (bio_op(bio) == REQ_OP_SECURE_ERASE)
+		return 1;
+
 	if (bio_op(bio) == REQ_OP_WRITE_SAME)
 		return 1;
 
......
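These bio.h predicates matter because a secure-erase bio, like a discard, has a nonzero bi_size (the byte range to erase) but carries no data pages: bio_has_data() must say no, and the iterator must not be advanced by bi_size. A simplified standalone model of the fixed predicates (not kernel code; field values passed in directly for illustration):

#include <stdbool.h>

enum req_op { REQ_OP_READ, REQ_OP_WRITE, REQ_OP_DISCARD,
	      REQ_OP_SECURE_ERASE, REQ_OP_WRITE_SAME };

/* Model of bio_has_data(): e.g. a 1 MiB secure erase has bi_size =
 * 1048576 but no payload, so this must return false. */
static bool has_data(enum req_op op, unsigned int bi_size)
{
	return bi_size &&
	       op != REQ_OP_DISCARD &&
	       op != REQ_OP_SECURE_ERASE;
}

/* Model of bio_no_advance_iter(): ops whose bi_size does not describe
 * biovec data, so advancing the iterator must not walk the biovec. */
static bool no_advance_iter(enum req_op op)
{
	return op == REQ_OP_DISCARD ||
	       op == REQ_OP_SECURE_ERASE ||
	       op == REQ_OP_WRITE_SAME;
}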
@@ -882,7 +882,7 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
 static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
 						     int op)
 {
-	if (unlikely(op == REQ_OP_DISCARD))
+	if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
 		return min(q->limits.max_discard_sectors, UINT_MAX >> 9);
 
 	if (unlikely(op == REQ_OP_WRITE_SAME))
@@ -913,7 +913,9 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
 	if (unlikely(rq->cmd_type != REQ_TYPE_FS))
 		return q->limits.max_hw_sectors;
 
-	if (!q->limits.chunk_sectors || (req_op(rq) == REQ_OP_DISCARD))
+	if (!q->limits.chunk_sectors ||
+	    req_op(rq) == REQ_OP_DISCARD ||
+	    req_op(rq) == REQ_OP_SECURE_ERASE)
 		return blk_queue_get_max_sectors(q, req_op(rq));
 
 	return min(blk_max_size_offset(q, offset),
......
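A note on the limit that secure erase now shares with discard in blk_queue_get_max_sectors(): bio sizes are tracked in bytes in a 32-bit field, so max_discard_sectors is clamped to the largest sector count whose byte length still fits. The arithmetic, spelled out (our reading of the clamp, not text from the patch):

/* Why min(..., UINT_MAX >> 9): a sector is 512 (1 << 9) bytes, so
 *
 *	UINT_MAX >> 9 = 4294967295 / 512 = 8388607 sectors
 *	8388607 * 512 = 4294966784 bytes <= UINT_MAX
 *
 * Secure erase now gets this discard cap instead of falling through
 * to the ordinary max_sectors limit. */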
@@ -223,7 +223,7 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
 	what |= MASK_TC_BIT(op_flags, META);
 	what |= MASK_TC_BIT(op_flags, PREFLUSH);
 	what |= MASK_TC_BIT(op_flags, FUA);
-	if (op == REQ_OP_DISCARD)
+	if (op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE)
 		what |= BLK_TC_ACT(BLK_TC_DISCARD);
 	if (op == REQ_OP_FLUSH)
 		what |= BLK_TC_ACT(BLK_TC_FLUSH);
......