Commit a3ad0a9d authored by Jan Kara, committed by Jan Kara

block: Remove forced page bouncing under IO

The JBD layer wrote back data buffers without setting the PageWriteback bit,
so the standard mechanism for guaranteeing stable pages under IO did not
work. Since JBD is gone now and there is no other user of this
functionality, just remove it.
Acked-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Jan Kara <jack@suse.cz>
Parent c290ea01
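For context, a minimal sketch (not part of this commit) of the standard stable-page mechanism the message refers to: the writeback path marks a page with PageWriteback before its IO is submitted, and writers that would re-dirty the page call wait_for_stable_page(), which blocks on devices that require stable pages until the bit clears. set_page_writeback(), end_page_writeback() and wait_for_stable_page() are the ordinary mm helpers; example_writepage() and example_page_mkwrite() are hypothetical filesystem hooks used only for illustration.

/*
 * Sketch only, not from this commit: how stable pages are normally
 * guaranteed. example_writepage()/example_page_mkwrite() are
 * hypothetical callbacks; the mm helpers are the standard ones.
 */
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>

static int example_writepage(struct page *page, struct writeback_control *wbc)
{
	/* Mark the page as under IO before its buffers are submitted. */
	set_page_writeback(page);

	/* ... build and submit the bio for this page here ... */

	/* Normally cleared from the bio's end_io handler once IO completes. */
	end_page_writeback(page);
	return 0;
}

static int example_page_mkwrite(struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	/*
	 * On devices that require stable pages this blocks while
	 * PageWriteback is set, so in-flight IO never sees the page
	 * change. JBD submitted buffers without set_page_writeback(),
	 * which is what the forced bouncing removed below worked around.
	 */
	wait_for_stable_page(vmf->page);
	return 0;
}

With PageWriteback honored end to end, the block layer no longer needs to snapshot (bounce) pages itself, which is what the hunks below delete.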
--- a/block/bounce.c
+++ b/block/bounce.c
@@ -176,26 +176,8 @@ static void bounce_end_io_read_isa(struct bio *bio, int err)
 	__bounce_end_io_read(bio, isa_page_pool, err);
 }
 
-#ifdef CONFIG_NEED_BOUNCE_POOL
-static int must_snapshot_stable_pages(struct request_queue *q, struct bio *bio)
-{
-	if (bio_data_dir(bio) != WRITE)
-		return 0;
-
-	if (!bdi_cap_stable_pages_required(&q->backing_dev_info))
-		return 0;
-
-	return test_bit(BIO_SNAP_STABLE, &bio->bi_flags);
-}
-#else
-static int must_snapshot_stable_pages(struct request_queue *q, struct bio *bio)
-{
-	return 0;
-}
-#endif /* CONFIG_NEED_BOUNCE_POOL */
-
 static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
-			       mempool_t *pool, int force)
+			       mempool_t *pool)
 {
 	struct bio *bio;
 	int rw = bio_data_dir(*bio_orig);
@@ -203,8 +185,6 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
 	struct bvec_iter iter;
 	unsigned i;
 
-	if (force)
-		goto bounce;
 	bio_for_each_segment(from, *bio_orig, iter)
 		if (page_to_pfn(from.bv_page) > queue_bounce_pfn(q))
 			goto bounce;
@@ -216,7 +196,7 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
 	bio_for_each_segment_all(to, bio, i) {
 		struct page *page = to->bv_page;
 
-		if (page_to_pfn(page) <= queue_bounce_pfn(q) && !force)
+		if (page_to_pfn(page) <= queue_bounce_pfn(q))
 			continue;
 
 		to->bv_page = mempool_alloc(pool, q->bounce_gfp);
@@ -254,7 +234,6 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
 
 void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
 {
-	int must_bounce;
 	mempool_t *pool;
 
 	/*
@@ -263,15 +242,13 @@ void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
 	if (!bio_has_data(*bio_orig))
 		return;
 
-	must_bounce = must_snapshot_stable_pages(q, *bio_orig);
-
 	/*
 	 * for non-isa bounce case, just check if the bounce pfn is equal
 	 * to or bigger than the highest pfn in the system -- in that case,
 	 * don't waste time iterating over bio segments
 	 */
 	if (!(q->bounce_gfp & GFP_DMA)) {
-		if (queue_bounce_pfn(q) >= blk_max_pfn && !must_bounce)
+		if (queue_bounce_pfn(q) >= blk_max_pfn)
 			return;
 		pool = page_pool;
 	} else {
@@ -282,7 +259,7 @@ void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
 	/*
 	 * slow path
 	 */
-	__blk_queue_bounce(q, bio_orig, pool, must_bounce);
+	__blk_queue_bounce(q, bio_orig, pool);
 }
 
 EXPORT_SYMBOL(blk_queue_bounce);
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -118,9 +118,8 @@ struct bio {
 #define BIO_USER_MAPPED 4	/* contains user pages */
 #define BIO_NULL_MAPPED 5	/* contains invalid user pages */
 #define BIO_QUIET	6	/* Make BIO Quiet */
-#define BIO_SNAP_STABLE	7	/* bio data must be snapshotted during write */
-#define BIO_CHAIN	8	/* chained bio, ->bi_remaining in effect */
-#define BIO_REFFED	9	/* bio has elevated ->bi_cnt */
+#define BIO_CHAIN	7	/* chained bio, ->bi_remaining in effect */
+#define BIO_REFFED	8	/* bio has elevated ->bi_cnt */
 
 /*
  * Flags starting here get preserved by bio_reset() - this includes