Commit 07888c66 authored by Christoph Hellwig, committed by Jens Axboe

block: pass a block_device and opf to bio_alloc

Pass the block_device and operation that we plan to use this bio for to
bio_alloc to optimize the assignment.  NULL/0 can be passed, both for the
passthrough case on a raw request_queue and to temporarily avoid
refactoring some nasty code.

Also move the gfp_mask argument after the nr_vecs argument for a much
more logical calling convention matching what most of the kernel does.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Link: https://lore.kernel.org/r/20220124091107.642561-18-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Parent: b77c88c2
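At its core, every call site moves from the old three-step pattern to a single call, with gfp_mask now the last argument (illustrative sketch only; bdev, nr_vecs and REQ_OP_READ stand in for whatever a given caller uses):

        /* before: allocate, then set the device and operation separately */
        bio = bio_alloc(gfp_mask, nr_vecs);
        bio_set_dev(bio, bdev);
        bio->bi_opf = REQ_OP_READ;

        /* after: pass both up front; gfp_mask moves to the end */
        bio = bio_alloc(bdev, nr_vecs, REQ_OP_READ, gfp_mask);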
@@ -347,10 +347,7 @@ EXPORT_SYMBOL(bio_chain);
 struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
                 unsigned int nr_pages, unsigned int opf, gfp_t gfp)
 {
-        struct bio *new = bio_alloc(gfp, nr_pages);
-
-        bio_set_dev(new, bdev);
-        new->bi_opf = opf;
+        struct bio *new = bio_alloc(bdev, nr_pages, opf, gfp);
 
         if (bio) {
                 bio_chain(bio, new);
...
@@ -256,9 +256,7 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
                 }
                 atomic_inc(&dio->ref);
                 submit_bio(bio);
-                bio = bio_alloc(GFP_KERNEL, nr_pages);
-                bio_set_dev(bio, bdev);
-                bio->bi_opf = opf;
+                bio = bio_alloc(bdev, nr_pages, opf, GFP_KERNEL);
         }
 
         blk_finish_plug(&plug);
...
@@ -1279,7 +1279,8 @@ static void one_flush_endio(struct bio *bio)
 
 static void submit_one_flush(struct drbd_device *device, struct issue_flush_context *ctx)
 {
-        struct bio *bio = bio_alloc(GFP_NOIO, 0);
+        struct bio *bio = bio_alloc(device->ldev->backing_bdev, 0,
+                                    REQ_OP_FLUSH | REQ_PREFLUSH, GFP_NOIO);
         struct one_flush_context *octx = kmalloc(sizeof(*octx), GFP_NOIO);
 
         if (!octx) {
@@ -1297,10 +1298,8 @@ static void submit_one_flush(struct drbd_device *device, struct issue_flush_cont
         octx->device = device;
         octx->ctx = ctx;
-        bio_set_dev(bio, device->ldev->backing_bdev);
         bio->bi_private = octx;
         bio->bi_end_io = one_flush_endio;
-        bio->bi_opf = REQ_OP_FLUSH | REQ_PREFLUSH;
 
         device->flush_jif = jiffies;
         set_bit(FLUSH_PENDING, &device->flags);
@@ -1685,11 +1684,10 @@ int drbd_submit_peer_request(struct drbd_device *device,
          * generated bio, but a bio allocated on behalf of the peer.
          */
 next_bio:
-        bio = bio_alloc(GFP_NOIO, nr_pages);
+        bio = bio_alloc(device->ldev->backing_bdev, nr_pages, op | op_flags,
+                        GFP_NOIO);
         /* > peer_req->i.sector, unless this is the first bio */
         bio->bi_iter.bi_sector = sector;
-        bio_set_dev(bio, device->ldev->backing_bdev);
-        bio_set_op_attrs(bio, op, op_flags);
         bio->bi_private = peer_req;
         bio->bi_end_io = drbd_peer_request_endio;
...
@@ -149,7 +149,8 @@ static int process_rdma(struct rnbd_srv_session *srv_sess,
         priv->sess_dev = sess_dev;
         priv->id = id;
 
-        bio = bio_alloc(GFP_KERNEL, 1);
+        bio = bio_alloc(sess_dev->rnbd_dev->bdev, 1,
+                        rnbd_to_bio_flags(le32_to_cpu(msg->rw)), GFP_KERNEL);
         if (bio_add_page(bio, virt_to_page(data), datalen,
                         offset_in_page(data)) != datalen) {
                 rnbd_srv_err(sess_dev, "Failed to map data to bio\n");
@@ -159,13 +160,11 @@ static int process_rdma(struct rnbd_srv_session *srv_sess,
 
         bio->bi_end_io = rnbd_dev_bi_end_io;
         bio->bi_private = priv;
-        bio->bi_opf = rnbd_to_bio_flags(le32_to_cpu(msg->rw));
         bio->bi_iter.bi_sector = le64_to_cpu(msg->sector);
         bio->bi_iter.bi_size = le32_to_cpu(msg->bi_size);
         prio = srv_sess->ver < RNBD_PROTO_VER_MAJOR ||
                usrlen < sizeof(*msg) ? 0 : le16_to_cpu(msg->prio);
         bio_set_prio(bio, prio);
-        bio_set_dev(bio, sess_dev->rnbd_dev->bdev);
 
         submit_bio(bio);
...
@@ -1326,13 +1326,13 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
                                      pages[i]->page,
                                      seg[i].nsec << 9,
                                      seg[i].offset) == 0)) {
-                        bio = bio_alloc(GFP_KERNEL, bio_max_segs(nseg - i));
+                        bio = bio_alloc(preq.bdev, bio_max_segs(nseg - i),
+                                        operation | operation_flags,
+                                        GFP_KERNEL);
                         biolist[nbio++] = bio;
-                        bio_set_dev(bio, preq.bdev);
                         bio->bi_private = pending_req;
                         bio->bi_end_io  = end_block_io_op;
                         bio->bi_iter.bi_sector  = preq.sector_number;
-                        bio_set_op_attrs(bio, operation, operation_flags);
                 }
 
                 preq.sector_number += seg[i].nsec;
@@ -1342,12 +1342,11 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
         if (!bio) {
                 BUG_ON(operation_flags != REQ_PREFLUSH);
 
-                bio = bio_alloc(GFP_KERNEL, 0);
+                bio = bio_alloc(preq.bdev, 0, operation | operation_flags,
+                                GFP_KERNEL);
                 biolist[nbio++] = bio;
-                bio_set_dev(bio, preq.bdev);
                 bio->bi_private = pending_req;
                 bio->bi_end_io  = end_block_io_op;
-                bio_set_op_attrs(bio, operation, operation_flags);
         }
 
         atomic_set(&pending_req->pendcnt, nbio);
...
@@ -616,24 +616,21 @@ static int read_from_bdev_async(struct zram *zram, struct bio_vec *bvec,
 {
         struct bio *bio;
 
-        bio = bio_alloc(GFP_NOIO, 1);
+        bio = bio_alloc(zram->bdev, 1, parent ? parent->bi_opf : REQ_OP_READ,
+                        GFP_NOIO);
         if (!bio)
                 return -ENOMEM;
 
         bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9);
-        bio_set_dev(bio, zram->bdev);
         if (!bio_add_page(bio, bvec->bv_page, bvec->bv_len, bvec->bv_offset)) {
                 bio_put(bio);
                 return -EIO;
         }
 
-        if (!parent) {
-                bio->bi_opf = REQ_OP_READ;
+        if (!parent)
                 bio->bi_end_io = zram_page_end_io;
-        } else {
-                bio->bi_opf = parent->bi_opf;
+        else
                 bio_chain(bio, parent);
-        }
 
         submit_bio(bio);
         return 1;
...
@@ -217,14 +217,12 @@ static int write_metadata(struct log_writes_c *lc, void *entry,
         void *ptr;
         size_t ret;
 
-        bio = bio_alloc(GFP_KERNEL, 1);
+        bio = bio_alloc(lc->logdev->bdev, 1, REQ_OP_WRITE, GFP_KERNEL);
         bio->bi_iter.bi_size = 0;
         bio->bi_iter.bi_sector = sector;
-        bio_set_dev(bio, lc->logdev->bdev);
         bio->bi_end_io = (sector == WRITE_LOG_SUPER_SECTOR) ?
                           log_end_super : log_end_io;
         bio->bi_private = lc;
-        bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
 
         page = alloc_page(GFP_KERNEL);
         if (!page) {
@@ -271,13 +269,12 @@ static int write_inline_data(struct log_writes_c *lc, void *entry,
                 atomic_inc(&lc->io_blocks);
 
-                bio = bio_alloc(GFP_KERNEL, bio_pages);
+                bio = bio_alloc(lc->logdev->bdev, bio_pages, REQ_OP_WRITE,
+                                GFP_KERNEL);
                 bio->bi_iter.bi_size = 0;
                 bio->bi_iter.bi_sector = sector;
-                bio_set_dev(bio, lc->logdev->bdev);
                 bio->bi_end_io = log_end_io;
                 bio->bi_private = lc;
-                bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
 
                 for (i = 0; i < bio_pages; i++) {
                         pg_datalen = min_t(int, datalen, PAGE_SIZE);
@@ -353,13 +350,12 @@ static int log_one_block(struct log_writes_c *lc,
                 goto out;
         atomic_inc(&lc->io_blocks);
 
-        bio = bio_alloc(GFP_KERNEL, bio_max_segs(block->vec_cnt));
+        bio = bio_alloc(lc->logdev->bdev, bio_max_segs(block->vec_cnt),
+                        REQ_OP_WRITE, GFP_KERNEL);
         bio->bi_iter.bi_size = 0;
         bio->bi_iter.bi_sector = sector;
-        bio_set_dev(bio, lc->logdev->bdev);
         bio->bi_end_io = log_end_io;
         bio->bi_private = lc;
-        bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
 
         for (i = 0; i < block->vec_cnt; i++) {
                 /*
@@ -371,14 +367,13 @@ static int log_one_block(struct log_writes_c *lc,
                 if (ret != block->vecs[i].bv_len) {
                         atomic_inc(&lc->io_blocks);
                         submit_bio(bio);
-                        bio = bio_alloc(GFP_KERNEL,
-                                        bio_max_segs(block->vec_cnt - i));
+                        bio = bio_alloc(lc->logdev->bdev,
+                                        bio_max_segs(block->vec_cnt - i),
+                                        REQ_OP_WRITE, GFP_KERNEL);
                         bio->bi_iter.bi_size = 0;
                         bio->bi_iter.bi_sector = sector;
-                        bio_set_dev(bio, lc->logdev->bdev);
                         bio->bi_end_io = log_end_io;
                         bio->bi_private = lc;
-                        bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
                         ret = bio_add_page(bio, block->vecs[i].bv_page,
                                            block->vecs[i].bv_len, 0);
...
@@ -1177,13 +1177,12 @@ static void process_prepared_discard_passdown_pt1(struct dm_thin_new_mapping *m)
                 return;
         }
 
-        discard_parent = bio_alloc(GFP_NOIO, 1);
+        discard_parent = bio_alloc(NULL, 1, 0, GFP_NOIO);
         discard_parent->bi_end_io = passdown_endio;
         discard_parent->bi_private = m;
-
         if (m->maybe_shared)
                 passdown_double_checking_shared_status(m, discard_parent);
         else {
                 struct discard_op op;
 
                 begin_discard(&op, tc, discard_parent);
...
@@ -550,7 +550,8 @@ static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd,
         if (!mblk)
                 return ERR_PTR(-ENOMEM);
 
-        bio = bio_alloc(GFP_NOIO, 1);
+        bio = bio_alloc(dev->bdev, 1, REQ_OP_READ | REQ_META | REQ_PRIO,
+                        GFP_NOIO);
 
         spin_lock(&zmd->mblk_lock);
@@ -574,10 +575,8 @@ static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd,
 
         /* Submit read BIO */
         bio->bi_iter.bi_sector = dmz_blk2sect(block);
-        bio_set_dev(bio, dev->bdev);
         bio->bi_private = mblk;
         bio->bi_end_io = dmz_mblock_bio_end_io;
-        bio_set_op_attrs(bio, REQ_OP_READ, REQ_META | REQ_PRIO);
         bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
         submit_bio(bio);
@@ -721,15 +720,14 @@ static int dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
         if (dmz_bdev_is_dying(dev))
                 return -EIO;
 
-        bio = bio_alloc(GFP_NOIO, 1);
+        bio = bio_alloc(dev->bdev, 1, REQ_OP_WRITE | REQ_META | REQ_PRIO,
+                        GFP_NOIO);
 
         set_bit(DMZ_META_WRITING, &mblk->state);
 
         bio->bi_iter.bi_sector = dmz_blk2sect(block);
-        bio_set_dev(bio, dev->bdev);
         bio->bi_private = mblk;
         bio->bi_end_io = dmz_mblock_bio_end_io;
-        bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_META | REQ_PRIO);
         bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
         submit_bio(bio);
@@ -751,10 +749,9 @@ static int dmz_rdwr_block(struct dmz_dev *dev, int op,
         if (dmz_bdev_is_dying(dev))
                 return -EIO;
 
-        bio = bio_alloc(GFP_NOIO, 1);
+        bio = bio_alloc(dev->bdev, 1, op | REQ_SYNC | REQ_META | REQ_PRIO,
+                        GFP_NOIO);
         bio->bi_iter.bi_sector = dmz_blk2sect(block);
-        bio_set_dev(bio, dev->bdev);
-        bio_set_op_attrs(bio, op, REQ_SYNC | REQ_META | REQ_PRIO);
         bio_add_page(bio, page, DMZ_BLOCK_SIZE, 0);
         ret = submit_bio_wait(bio);
         bio_put(bio);
...
@@ -105,12 +105,12 @@ int async_pmem_flush(struct nd_region *nd_region, struct bio *bio)
          * parent bio. Otherwise directly call nd_region flush.
          */
         if (bio && bio->bi_iter.bi_sector != -1) {
-                struct bio *child = bio_alloc(GFP_ATOMIC, 0);
+                struct bio *child = bio_alloc(bio->bi_bdev, 0, REQ_PREFLUSH,
+                                              GFP_ATOMIC);
 
                 if (!child)
                         return -ENOMEM;
-                bio_copy_dev(child, bio);
-                child->bi_opf = REQ_PREFLUSH;
+                bio_clone_blkg_association(child, bio);
                 child->bi_iter.bi_sector = -1;
                 bio_chain(child, bio);
                 submit_bio(child);
...
@@ -268,14 +268,15 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
         if (nvmet_use_inline_bvec(req)) {
                 bio = &req->b.inline_bio;
                 bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
+                bio_set_dev(bio, req->ns->bdev);
+                bio->bi_opf = op;
         } else {
-                bio = bio_alloc(GFP_KERNEL, bio_max_segs(sg_cnt));
+                bio = bio_alloc(req->ns->bdev, bio_max_segs(sg_cnt), op,
+                                GFP_KERNEL);
         }
-        bio_set_dev(bio, req->ns->bdev);
         bio->bi_iter.bi_sector = sector;
         bio->bi_private = req;
         bio->bi_end_io = nvmet_bio_done;
-        bio->bi_opf = op;
 
         blk_start_plug(&plug);
         if (req->metadata_len)
@@ -296,10 +297,9 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
                         }
                 }
 
-                bio = bio_alloc(GFP_KERNEL, bio_max_segs(sg_cnt));
-                bio_set_dev(bio, req->ns->bdev);
+                bio = bio_alloc(req->ns->bdev, bio_max_segs(sg_cnt),
+                                op, GFP_KERNEL);
                 bio->bi_iter.bi_sector = sector;
-                bio->bi_opf = op;
 
                 bio_chain(bio, prev);
                 submit_bio(prev);
...
@@ -207,11 +207,12 @@ static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
         if (nvmet_use_inline_bvec(req)) {
                 bio = &req->p.inline_bio;
                 bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
+                bio->bi_opf = req_op(rq);
         } else {
-                bio = bio_alloc(GFP_KERNEL, bio_max_segs(req->sg_cnt));
+                bio = bio_alloc(NULL, bio_max_segs(req->sg_cnt), req_op(rq),
+                                GFP_KERNEL);
                 bio->bi_end_io = bio_put;
         }
-        bio->bi_opf = req_op(rq);
 
         for_each_sg(req->sg, sg, req->sg_cnt, i) {
                 if (bio_add_pc_page(rq->q, bio, sg_page(sg), sg->length,
...
@@ -522,6 +522,7 @@ static void nvmet_bdev_zone_append_bio_done(struct bio *bio)
 void nvmet_bdev_execute_zone_append(struct nvmet_req *req)
 {
         sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);
+        const unsigned int op = REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE;
         u16 status = NVME_SC_SUCCESS;
         unsigned int total_len = 0;
         struct scatterlist *sg;
@@ -552,13 +553,12 @@ void nvmet_bdev_execute_zone_append(struct nvmet_req *req)
         if (nvmet_use_inline_bvec(req)) {
                 bio = &req->z.inline_bio;
                 bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
+                bio->bi_opf = op;
         } else {
-                bio = bio_alloc(GFP_KERNEL, req->sg_cnt);
+                bio = bio_alloc(req->ns->bdev, req->sg_cnt, op, GFP_KERNEL);
         }
 
-        bio->bi_opf = REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE;
         bio->bi_end_io = nvmet_bdev_zone_append_bio_done;
-        bio_set_dev(bio, req->ns->bdev);
         bio->bi_iter.bi_sector = sect;
         bio->bi_private = req;
         if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
...
@@ -494,7 +494,7 @@ static struct ufshpb_req *ufshpb_get_map_req(struct ufshpb_lu *hpb,
         if (!map_req)
                 return NULL;
 
-        bio = bio_alloc(GFP_KERNEL, hpb->pages_per_srgn);
+        bio = bio_alloc(NULL, hpb->pages_per_srgn, 0, GFP_KERNEL);
         if (!bio) {
                 ufshpb_put_req(hpb, map_req);
                 return NULL;
@@ -2050,7 +2050,7 @@ static int ufshpb_pre_req_mempool_init(struct ufshpb_lu *hpb)
                 INIT_LIST_HEAD(&pre_req->list_req);
                 pre_req->req = NULL;
 
-                pre_req->bio = bio_alloc(GFP_KERNEL, 1);
+                pre_req->bio = bio_alloc(NULL, 1, 0, GFP_KERNEL);
                 if (!pre_req->bio)
                         goto release_mem;
...
@@ -415,10 +415,9 @@ iblock_execute_sync_cache(struct se_cmd *cmd)
         if (immed)
                 target_complete_cmd(cmd, SAM_STAT_GOOD);
 
-        bio = bio_alloc(GFP_KERNEL, 0);
+        bio = bio_alloc(ib_dev->ibd_bd, 0, REQ_OP_WRITE | REQ_PREFLUSH,
+                        GFP_KERNEL);
         bio->bi_end_io = iblock_end_io_flush;
-        bio_set_dev(bio, ib_dev->ibd_bd);
-        bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
         if (!immed)
                 bio->bi_private = cmd;
         submit_bio(bio);
...
@@ -4029,8 +4029,9 @@ static int write_dev_supers(struct btrfs_device *device,
                  * to do I/O, so we don't lose the ability to do integrity
                  * checking.
                  */
-                bio = bio_alloc(GFP_NOFS, 1);
-                bio_set_dev(bio, device->bdev);
+                bio = bio_alloc(device->bdev, 1,
+                                REQ_OP_WRITE | REQ_SYNC | REQ_META | REQ_PRIO,
+                                GFP_NOFS);
                 bio->bi_iter.bi_sector = bytenr >> SECTOR_SHIFT;
                 bio->bi_private = device;
                 bio->bi_end_io = btrfs_end_super_write;
@@ -4042,7 +4043,6 @@ static int write_dev_supers(struct btrfs_device *device,
                  * go down lazy and there's a short window where the on-disk
                  * copies might still contain the older version.
                  */
-                bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_META | REQ_PRIO;
                 if (i == 0 && !btrfs_test_opt(device->fs_info, NOBARRIER))
                         bio->bi_opf |= REQ_FUA;
...
@@ -3024,12 +3024,16 @@ static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
         if (test_set_buffer_req(bh) && (op == REQ_OP_WRITE))
                 clear_buffer_write_io_error(bh);
 
-        bio = bio_alloc(GFP_NOIO, 1);
+        if (buffer_meta(bh))
+                op_flags |= REQ_META;
+        if (buffer_prio(bh))
+                op_flags |= REQ_PRIO;
+
+        bio = bio_alloc(bh->b_bdev, 1, op | op_flags, GFP_NOIO);
 
         fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
 
         bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
-        bio_set_dev(bio, bh->b_bdev);
         bio->bi_write_hint = write_hint;
 
         bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
@@ -3038,12 +3042,6 @@ static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
         bio->bi_end_io = end_bio_bh_io_sync;
         bio->bi_private = bh;
 
-        if (buffer_meta(bh))
-                op_flags |= REQ_META;
-        if (buffer_prio(bh))
-                op_flags |= REQ_PRIO;
-        bio_set_op_attrs(bio, op, op_flags);
-
         /* Take care of bh's that straddle the end of the device */
         guard_bio_eod(bio);
...
@@ -54,7 +54,8 @@ static int fscrypt_zeroout_range_inline_crypt(const struct inode *inode,
         int num_pages = 0;
 
         /* This always succeeds since __GFP_DIRECT_RECLAIM is set. */
-        bio = bio_alloc(GFP_NOFS, BIO_MAX_VECS);
+        bio = bio_alloc(inode->i_sb->s_bdev, BIO_MAX_VECS, REQ_OP_WRITE,
+                        GFP_NOFS);
 
         while (len) {
                 unsigned int blocks_this_page = min(len, blocks_per_page);
@@ -62,10 +63,8 @@ static int fscrypt_zeroout_range_inline_crypt(const struct inode *inode,
                 if (num_pages == 0) {
                         fscrypt_set_bio_crypt_ctx(bio, inode, lblk, GFP_NOFS);
-                        bio_set_dev(bio, inode->i_sb->s_bdev);
                         bio->bi_iter.bi_sector =
                                         pblk << (blockbits - SECTOR_SHIFT);
-                        bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
                 }
                 ret = bio_add_page(bio, ZERO_PAGE(0), bytes_this_page, 0);
                 if (WARN_ON(ret != bytes_this_page)) {
@@ -82,6 +81,8 @@ static int fscrypt_zeroout_range_inline_crypt(const struct inode *inode,
                         if (err)
                                 goto out;
                         bio_reset(bio);
+                        bio_set_dev(bio, inode->i_sb->s_bdev);
+                        bio->bi_opf = REQ_OP_WRITE;
                         num_pages = 0;
                 }
         }
@@ -150,12 +151,10 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
                 return -EINVAL;
 
         /* This always succeeds since __GFP_DIRECT_RECLAIM is set. */
-        bio = bio_alloc(GFP_NOFS, nr_pages);
+        bio = bio_alloc(inode->i_sb->s_bdev, nr_pages, REQ_OP_WRITE, GFP_NOFS);
 
         do {
-                bio_set_dev(bio, inode->i_sb->s_bdev);
                 bio->bi_iter.bi_sector = pblk << (blockbits - 9);
-                bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
 
                 i = 0;
                 offset = 0;
@@ -183,6 +182,8 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
                 if (err)
                         goto out;
                 bio_reset(bio);
+                bio_set_dev(bio, inode->i_sb->s_bdev);
+                bio->bi_opf = REQ_OP_WRITE;
         } while (len != 0);
         err = 0;
 out:
...
@@ -396,11 +396,8 @@ dio_bio_alloc(struct dio *dio, struct dio_submit *sdio,
          * bio_alloc() is guaranteed to return a bio when allowed to sleep and
          * we request a valid number of vectors.
          */
-        bio = bio_alloc(GFP_KERNEL, nr_vecs);
-
-        bio_set_dev(bio, bdev);
+        bio = bio_alloc(bdev, nr_vecs, dio->op | dio->op_flags, GFP_KERNEL);
         bio->bi_iter.bi_sector = first_sector;
-        bio_set_op_attrs(bio, dio->op, dio->op_flags);
         if (dio->is_async)
                 bio->bi_end_io = dio_bio_end_aio;
         else
...
@@ -1371,15 +1371,14 @@ static void z_erofs_submit_queue(struct super_block *sb,
                 }
 
                 if (!bio) {
-                        bio = bio_alloc(GFP_NOIO, BIO_MAX_VECS);
+                        bio = bio_alloc(mdev.m_bdev, BIO_MAX_VECS,
+                                        REQ_OP_READ, GFP_NOIO);
                         bio->bi_end_io = z_erofs_decompressqueue_endio;
-                        bio_set_dev(bio, mdev.m_bdev);
                         last_bdev = mdev.m_bdev;
                         bio->bi_iter.bi_sector = (sector_t)cur <<
                                 LOG_SECTORS_PER_BLOCK;
                         bio->bi_private = bi_private;
-                        bio->bi_opf = REQ_OP_READ;
                         if (f->readahead)
                                 bio->bi_opf |= REQ_RAHEAD;
                         ++nr_bios;
...
@@ -398,10 +398,9 @@ static void io_submit_init_bio(struct ext4_io_submit *io,
          * bio_alloc will _always_ be able to allocate a bio if
          * __GFP_DIRECT_RECLAIM is set, see comments for bio_alloc_bioset().
          */
-        bio = bio_alloc(GFP_NOIO, BIO_MAX_VECS);
+        bio = bio_alloc(bh->b_bdev, BIO_MAX_VECS, 0, GFP_NOIO);
         fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
         bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
-        bio_set_dev(bio, bh->b_bdev);
         bio->bi_end_io = ext4_end_bio;
         bio->bi_private = ext4_get_io_end(io->io_end);
         io->io_bio = bio;
...
@@ -365,15 +365,15 @@ int ext4_mpage_readpages(struct inode *inode,
                          * bio_alloc will _always_ be able to allocate a bio if
                          * __GFP_DIRECT_RECLAIM is set, see bio_alloc_bioset().
                          */
-                        bio = bio_alloc(GFP_KERNEL, bio_max_segs(nr_pages));
+                        bio = bio_alloc(bdev, bio_max_segs(nr_pages),
+                                        REQ_OP_READ, GFP_KERNEL);
                         fscrypt_set_bio_crypt_ctx(bio, inode, next_block,
                                                   GFP_KERNEL);
                         ext4_set_bio_post_read_ctx(bio, inode, page->index);
-                        bio_set_dev(bio, bdev);
                         bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
                         bio->bi_end_io = mpage_end_io;
-                        bio_set_op_attrs(bio, REQ_OP_READ,
-                                         rac ? REQ_RAHEAD : 0);
+                        if (rac)
+                                bio->bi_opf |= REQ_RAHEAD;
                 }
 
                 length = first_hole << blkbits;
...
@@ -265,10 +265,9 @@ static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno,
                 bio_end_io_t *end_io)
 {
         struct super_block *sb = sdp->sd_vfs;
-        struct bio *bio = bio_alloc(GFP_NOIO, BIO_MAX_VECS);
+        struct bio *bio = bio_alloc(sb->s_bdev, BIO_MAX_VECS, 0, GFP_NOIO);
 
         bio->bi_iter.bi_sector = blkno << sdp->sd_fsb2bb_shift;
-        bio_set_dev(bio, sb->s_bdev);
         bio->bi_end_io = end_io;
         bio->bi_private = sdp;
@@ -489,10 +488,9 @@ static struct bio *gfs2_chain_bio(struct bio *prev, unsigned int nr_iovecs)
 {
         struct bio *new;
 
-        new = bio_alloc(GFP_NOIO, nr_iovecs);
-        bio_copy_dev(new, prev);
+        new = bio_alloc(prev->bi_bdev, nr_iovecs, prev->bi_opf, GFP_NOIO);
+        bio_clone_blkg_association(new, prev);
         new->bi_iter.bi_sector = bio_end_sector(prev);
-        new->bi_opf = prev->bi_opf;
         new->bi_write_hint = prev->bi_write_hint;
         bio_chain(new, prev);
         submit_bio(prev);
...
@@ -222,9 +222,8 @@ static void gfs2_submit_bhs(int op, int op_flags, struct buffer_head *bhs[],
                 struct buffer_head *bh = *bhs;
                 struct bio *bio;
 
-                bio = bio_alloc(GFP_NOIO, num);
+                bio = bio_alloc(bh->b_bdev, num, op | op_flags, GFP_NOIO);
                 bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
-                bio_set_dev(bio, bh->b_bdev);
                 while (num > 0) {
                         bh = *bhs;
                         if (!bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh))) {
@@ -235,7 +234,6 @@ static void gfs2_submit_bhs(int op, int op_flags, struct buffer_head *bhs[],
                         num--;
                 }
                 bio->bi_end_io = gfs2_meta_read_endio;
-                bio_set_op_attrs(bio, op, op_flags);
                 submit_bio(bio);
         }
 }
...
@@ -251,14 +251,12 @@ static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector, int silent)
         ClearPageDirty(page);
         lock_page(page);
 
-        bio = bio_alloc(GFP_NOFS, 1);
+        bio = bio_alloc(sb->s_bdev, 1, REQ_OP_READ | REQ_META, GFP_NOFS);
         bio->bi_iter.bi_sector = sector * (sb->s_blocksize >> 9);
-        bio_set_dev(bio, sb->s_bdev);
         bio_add_page(bio, page, PAGE_SIZE, 0);
 
         bio->bi_end_io = end_bio_io_page;
         bio->bi_private = page;
-        bio_set_op_attrs(bio, REQ_OP_READ, REQ_META);
         submit_bio(bio);
         wait_on_page_locked(page);
         bio_put(bio);
...
@@ -63,10 +63,8 @@ int hfsplus_submit_bio(struct super_block *sb, sector_t sector,
         offset = start & (io_size - 1);
         sector &= ~((io_size >> HFSPLUS_SECTOR_SHIFT) - 1);
 
-        bio = bio_alloc(GFP_NOIO, 1);
+        bio = bio_alloc(sb->s_bdev, 1, op | op_flags, GFP_NOIO);
         bio->bi_iter.bi_sector = sector;
-        bio_set_dev(bio, sb->s_bdev);
-        bio_set_op_attrs(bio, op, op_flags);
 
         if (op != WRITE && data)
                 *data = (u8 *)buf + offset;
...
@@ -290,19 +290,20 @@ static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
                 if (ctx->rac) /* same as readahead_gfp_mask */
                         gfp |= __GFP_NORETRY | __GFP_NOWARN;
 
-                ctx->bio = bio_alloc(gfp, bio_max_segs(nr_vecs));
+                ctx->bio = bio_alloc(iomap->bdev, bio_max_segs(nr_vecs),
+                                     REQ_OP_READ, gfp);
                 /*
                  * If the bio_alloc fails, try it again for a single page to
                  * avoid having to deal with partial page reads.  This emulates
                  * what do_mpage_readpage does.
                  */
-                if (!ctx->bio)
-                        ctx->bio = bio_alloc(orig_gfp, 1);
-                ctx->bio->bi_opf = REQ_OP_READ;
+                if (!ctx->bio) {
+                        ctx->bio = bio_alloc(iomap->bdev, 1, REQ_OP_READ,
+                                             orig_gfp);
+                }
                 if (ctx->rac)
                         ctx->bio->bi_opf |= REQ_RAHEAD;
                 ctx->bio->bi_iter.bi_sector = sector;
-                bio_set_dev(ctx->bio, iomap->bdev);
                 ctx->bio->bi_end_io = iomap_read_end_io;
                 bio_add_folio(ctx->bio, folio, plen, poff);
         }
@@ -1226,10 +1227,9 @@ iomap_chain_bio(struct bio *prev)
 {
         struct bio *new;
 
-        new = bio_alloc(GFP_NOFS, BIO_MAX_VECS);
-        bio_copy_dev(new, prev);/* also copies over blkcg information */
+        new = bio_alloc(prev->bi_bdev, BIO_MAX_VECS, prev->bi_opf, GFP_NOFS);
+        bio_clone_blkg_association(new, prev);
         new->bi_iter.bi_sector = bio_end_sector(prev);
-        new->bi_opf = prev->bi_opf;
         new->bi_write_hint = prev->bi_write_hint;
 
         bio_chain(prev, new);
...
@@ -183,15 +183,13 @@ static void iomap_dio_zero(const struct iomap_iter *iter, struct iomap_dio *dio,
         int flags = REQ_SYNC | REQ_IDLE;
         struct bio *bio;
 
-        bio = bio_alloc(GFP_KERNEL, 1);
-        bio_set_dev(bio, iter->iomap.bdev);
+        bio = bio_alloc(iter->iomap.bdev, 1, REQ_OP_WRITE | flags, GFP_KERNEL);
         bio->bi_iter.bi_sector = iomap_sector(&iter->iomap, pos);
         bio->bi_private = dio;
         bio->bi_end_io = iomap_dio_bio_end_io;
 
         get_page(page);
         __bio_add_page(bio, page, len, 0);
-        bio_set_op_attrs(bio, REQ_OP_WRITE, flags);
         iomap_dio_submit_bio(iter, dio, bio, pos);
 }
@@ -309,14 +307,12 @@ static loff_t iomap_dio_bio_iter(const struct iomap_iter *iter,
                         goto out;
                 }
 
-                bio = bio_alloc(GFP_KERNEL, nr_pages);
-                bio_set_dev(bio, iomap->bdev);
+                bio = bio_alloc(iomap->bdev, nr_pages, bio_opf, GFP_KERNEL);
                 bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
                 bio->bi_write_hint = dio->iocb->ki_hint;
                 bio->bi_ioprio = dio->iocb->ki_ioprio;
                 bio->bi_private = dio;
                 bio->bi_end_io = iomap_dio_bio_end_io;
-                bio->bi_opf = bio_opf;
 
                 ret = bio_iov_iter_get_pages(bio, dio->submit.iter);
                 if (unlikely(ret)) {
...
@@ -1980,17 +1980,13 @@ static int lbmRead(struct jfs_log * log, int pn, struct lbuf ** bpp)
 
         bp->l_flag |= lbmREAD;
 
-        bio = bio_alloc(GFP_NOFS, 1);
-
+        bio = bio_alloc(log->bdev, 1, REQ_OP_READ, GFP_NOFS);
         bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9);
-        bio_set_dev(bio, log->bdev);
         bio_add_page(bio, bp->l_page, LOGPSIZE, bp->l_offset);
         BUG_ON(bio->bi_iter.bi_size != LOGPSIZE);
 
         bio->bi_end_io = lbmIODone;
         bio->bi_private = bp;
-        bio->bi_opf = REQ_OP_READ;
-
         /*check if journaling to disk has been disabled*/
         if (log->no_integrity) {
                 bio->bi_iter.bi_size = 0;
@@ -2125,16 +2121,13 @@ static void lbmStartIO(struct lbuf * bp)
         jfs_info("lbmStartIO");
 
-        bio = bio_alloc(GFP_NOFS, 1);
+        bio = bio_alloc(log->bdev, 1, REQ_OP_WRITE | REQ_SYNC, GFP_NOFS);
         bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9);
-        bio_set_dev(bio, log->bdev);
         bio_add_page(bio, bp->l_page, LOGPSIZE, bp->l_offset);
         BUG_ON(bio->bi_iter.bi_size != LOGPSIZE);
 
         bio->bi_end_io = lbmIODone;
         bio->bi_private = bp;
-        bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
-
         /* check if journaling to disk has been disabled */
         if (log->no_integrity) {
...
@@ -417,12 +417,10 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc)
                         }
                         len = min(xlen, (int)JFS_SBI(inode->i_sb)->nbperpage);
 
-                        bio = bio_alloc(GFP_NOFS, 1);
-                        bio_set_dev(bio, inode->i_sb->s_bdev);
+                        bio = bio_alloc(inode->i_sb->s_bdev, 1, REQ_OP_WRITE,
+                                        GFP_NOFS);
                         bio->bi_iter.bi_sector = pblock << (inode->i_blkbits - 9);
                         bio->bi_end_io = metapage_write_end_io;
                         bio->bi_private = page;
-                        bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
 
                         /* Don't call bio_add_page yet, we may add to this vec */
                         bio_offset = offset;
@@ -497,13 +495,12 @@ static int metapage_readpage(struct file *fp, struct page *page)
                         if (bio)
                                 submit_bio(bio);
-                        bio = bio_alloc(GFP_NOFS, 1);
-                        bio_set_dev(bio, inode->i_sb->s_bdev);
+                        bio = bio_alloc(inode->i_sb->s_bdev, 1, REQ_OP_READ,
+                                        GFP_NOFS);
                         bio->bi_iter.bi_sector =
                                 pblock << (inode->i_blkbits - 9);
                         bio->bi_end_io = metapage_read_end_io;
                         bio->bi_private = page;
-                        bio_set_op_attrs(bio, REQ_OP_READ, 0);
                         len = xlen << inode->i_blkbits;
                         offset = block_offset << inode->i_blkbits;
                         if (bio_add_page(bio, page, len, offset) < len)
...
@@ -273,10 +273,10 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
                                                                 page))
                                 goto out;
                 }
-                args->bio = bio_alloc(gfp, bio_max_segs(args->nr_pages));
+                args->bio = bio_alloc(bdev, bio_max_segs(args->nr_pages), 0,
+                                      gfp);
                 if (args->bio == NULL)
                         goto confused;
-                bio_set_dev(args->bio, bdev);
                 args->bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
         }
@@ -586,8 +586,7 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
                                                                 page, wbc))
                                 goto out;
                 }
-                bio = bio_alloc(GFP_NOFS, BIO_MAX_VECS);
-                bio_set_dev(bio, bdev);
+                bio = bio_alloc(bdev, BIO_MAX_VECS, 0, GFP_NOFS);
                 bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
                 wbc_init_bio(wbc, bio);
...
@@ -154,12 +154,10 @@ do_add_page_to_bio(struct bio *bio, int npg, int rw, sector_t isect,
 retry:
         if (!bio) {
-                bio = bio_alloc(GFP_NOIO, bio_max_segs(npg));
+                bio = bio_alloc(map->bdev, bio_max_segs(npg), rw, GFP_NOIO);
                 bio->bi_iter.bi_sector = disk_addr >> SECTOR_SHIFT;
-                bio_set_dev(bio, map->bdev);
                 bio->bi_end_io = end_io;
                 bio->bi_private = par;
-                bio_set_op_attrs(bio, rw, 0);
         }
         if (bio_add_page(bio, page, *len, offset) < *len) {
                 bio = bl_submit_bio(bio);
...
@@ -391,8 +391,8 @@ static int nilfs_segbuf_submit_bh(struct nilfs_segment_buffer *segbuf,
         BUG_ON(wi->nr_vecs <= 0);
 repeat:
         if (!wi->bio) {
-                wi->bio = bio_alloc(GFP_NOIO, wi->nr_vecs);
-                bio_set_dev(wi->bio, wi->nilfs->ns_bdev);
+                wi->bio = bio_alloc(wi->nilfs->ns_bdev, wi->nr_vecs, 0,
+                                    GFP_NOIO);
                 wi->bio->bi_iter.bi_sector = (wi->blocknr + wi->end) <<
                         (wi->nilfs->ns_blocksize_bits - 9);
         }
...
@@ -1485,15 +1485,13 @@ int ntfs_bio_pages(struct ntfs_sb_info *sbi, const struct runs_tree *run,
                 lbo = ((u64)lcn << cluster_bits) + off;
                 len = ((u64)clen << cluster_bits) - off;
 new_bio:
-                new = bio_alloc(GFP_NOFS, nr_pages - page_idx);
+                new = bio_alloc(bdev, nr_pages - page_idx, op, GFP_NOFS);
                 if (bio) {
                         bio_chain(bio, new);
                         submit_bio(bio);
                 }
                 bio = new;
-                bio_set_dev(bio, bdev);
                 bio->bi_iter.bi_sector = lbo >> 9;
-                bio->bi_opf = op;
 
                 while (len) {
                         off = vbo & (PAGE_SIZE - 1);
@@ -1584,14 +1582,12 @@ int ntfs_bio_fill_1(struct ntfs_sb_info *sbi, const struct runs_tree *run)
                 lbo = (u64)lcn << cluster_bits;
                 len = (u64)clen << cluster_bits;
 new_bio:
-                new = bio_alloc(GFP_NOFS, BIO_MAX_VECS);
+                new = bio_alloc(bdev, BIO_MAX_VECS, REQ_OP_WRITE, GFP_NOFS);
                 if (bio) {
                         bio_chain(bio, new);
                         submit_bio(bio);
                 }
                 bio = new;
-                bio_set_dev(bio, bdev);
-                bio->bi_opf = REQ_OP_WRITE;
                 bio->bi_iter.bi_sector = lbo >> 9;
 
                 for (;;) {
...
@@ -518,7 +518,7 @@ static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg,
          * GFP_KERNEL that the local node can get fenced. It would be
          * nicest if we could pre-allocate these bios and avoid this
          * all together. */
-        bio = bio_alloc(GFP_ATOMIC, 16);
+        bio = bio_alloc(reg->hr_bdev, 16, op | op_flags, GFP_ATOMIC);
         if (!bio) {
                 mlog(ML_ERROR, "Could not alloc slots BIO!\n");
                 bio = ERR_PTR(-ENOMEM);
@@ -527,10 +527,8 @@ static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg,
         /* Must put everything in 512 byte sectors for the bio... */
         bio->bi_iter.bi_sector = (reg->hr_start_block + cs) << (bits - 9);
-        bio_set_dev(bio, reg->hr_bdev);
         bio->bi_private = wc;
         bio->bi_end_io = o2hb_bio_end_io;
-        bio_set_op_attrs(bio, op, op_flags);
 
         vec_start = (cs << bits) % PAGE_SIZE;
         while(cs < max_slots) {
...
@@ -86,16 +86,17 @@ static int squashfs_bio_read(struct super_block *sb, u64 index, int length,
         int error, i;
         struct bio *bio;
 
-        if (page_count <= BIO_MAX_VECS)
-                bio = bio_alloc(GFP_NOIO, page_count);
-        else
+        if (page_count <= BIO_MAX_VECS) {
+                bio = bio_alloc(sb->s_bdev, page_count, REQ_OP_READ, GFP_NOIO);
+        } else {
                 bio = bio_kmalloc(GFP_NOIO, page_count);
+                bio_set_dev(bio, sb->s_bdev);
+                bio->bi_opf = REQ_OP_READ;
+        }
         if (!bio)
                 return -ENOMEM;
 
-        bio_set_dev(bio, sb->s_bdev);
-        bio->bi_opf = READ;
         bio->bi_iter.bi_sector = block * (msblk->devblksize >> SECTOR_SHIFT);
 
         for (i = 0; i < page_count; ++i) {
...
@@ -61,10 +61,9 @@ xfs_rw_bdev(
         if (is_vmalloc && op == REQ_OP_WRITE)
                 flush_kernel_vmap_range(data, count);
 
-        bio = bio_alloc(GFP_KERNEL, bio_max_vecs(left));
-        bio_set_dev(bio, bdev);
+        bio = bio_alloc(bdev, bio_max_vecs(left), op | REQ_META | REQ_SYNC,
+                        GFP_KERNEL);
         bio->bi_iter.bi_sector = sector;
-        bio->bi_opf = op | REQ_META | REQ_SYNC;
 
         do {
                 struct page *page = kmem_to_page(data);
@@ -74,10 +73,9 @@ xfs_rw_bdev(
                 while (bio_add_page(bio, page, len, off) != len) {
                         struct bio *prev = bio;
 
-                        bio = bio_alloc(GFP_KERNEL, bio_max_vecs(left));
-                        bio_copy_dev(bio, prev);
+                        bio = bio_alloc(prev->bi_bdev, bio_max_vecs(left),
+                                        prev->bi_opf, GFP_KERNEL);
                         bio->bi_iter.bi_sector = bio_end_sector(prev);
-                        bio->bi_opf = prev->bi_opf;
 
                         bio_chain(prev, bio);
                         submit_bio(prev);
...
@@ -1440,12 +1440,10 @@ xfs_buf_ioapply_map(
         atomic_inc(&bp->b_io_remaining);
         nr_pages = bio_max_segs(total_nr_pages);
 
-        bio = bio_alloc(GFP_NOIO, nr_pages);
-        bio_set_dev(bio, bp->b_target->bt_bdev);
+        bio = bio_alloc(bp->b_target->bt_bdev, nr_pages, op, GFP_NOIO);
         bio->bi_iter.bi_sector = sector;
         bio->bi_end_io = xfs_buf_bio_end_io;
         bio->bi_private = bp;
-        bio->bi_opf = op;
 
         for (; size && nr_pages; nr_pages--, page_index++) {
                 int rbytes, nbytes = PAGE_SIZE - offset;
...
@@ -692,12 +692,11 @@ static ssize_t zonefs_file_dio_append(struct kiocb *iocb, struct iov_iter *from)
         if (!nr_pages)
                 return 0;
 
-        bio = bio_alloc(GFP_NOFS, nr_pages);
-        bio_set_dev(bio, bdev);
+        bio = bio_alloc(bdev, nr_pages,
+                        REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE, GFP_NOFS);
         bio->bi_iter.bi_sector = zi->i_zsector;
         bio->bi_write_hint = iocb->ki_hint;
         bio->bi_ioprio = iocb->ki_ioprio;
-        bio->bi_opf = REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE;
         if (iocb->ki_flags & IOCB_DSYNC)
                 bio->bi_opf |= REQ_FUA;
...
@@ -418,9 +418,10 @@ extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);
 
 extern struct bio_set fs_bio_set;
 
-static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned short nr_iovecs)
+static inline struct bio *bio_alloc(struct block_device *bdev,
+                unsigned short nr_vecs, unsigned int opf, gfp_t gfp_mask)
 {
-        return bio_alloc_bioset(NULL, nr_iovecs, 0, gfp_mask, &fs_bio_set);
+        return bio_alloc_bioset(bdev, nr_vecs, opf, gfp_mask, &fs_bio_set);
 }
 
 void submit_bio(struct bio *bio);
...
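Callers that do not yet know the target device or operation at allocation time keep working: as the commit message notes, NULL and 0 can be passed, and the dm-thin and ufshpb hunks above do exactly that, deferring the assignment. A sketch of that deferred pattern (not tied to any one caller):

        /* no bdev/opf known yet, so pass NULL and 0 */
        bio = bio_alloc(NULL, nr_vecs, 0, GFP_NOIO);
        /* ... later, once the target and operation are known ... */
        bio_set_dev(bio, bdev);
        bio->bi_opf = REQ_OP_WRITE;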
@@ -276,10 +276,9 @@ static int hib_submit_io(int op, int op_flags, pgoff_t page_off, void *addr,
         struct bio *bio;
         int error = 0;
 
-        bio = bio_alloc(GFP_NOIO | __GFP_HIGH, 1);
+        bio = bio_alloc(hib_resume_bdev, 1, op | op_flags,
+                        GFP_NOIO | __GFP_HIGH);
         bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9);
-        bio_set_dev(bio, hib_resume_bdev);
-        bio_set_op_attrs(bio, op, op_flags);
 
         if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
                 pr_err("Adding page to bio failed at %llu\n",
...
@@ -338,10 +338,10 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
                 return 0;
         }
 
-        bio = bio_alloc(GFP_NOIO, 1);
-        bio_set_dev(bio, sis->bdev);
+        bio = bio_alloc(sis->bdev, 1,
+                        REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc),
+                        GFP_NOIO);
         bio->bi_iter.bi_sector = swap_page_sector(page);
-        bio->bi_opf = REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc);
         bio->bi_end_io = end_write_func;
         bio_add_page(bio, page, thp_size(page), 0);
@@ -403,9 +403,7 @@ int swap_readpage(struct page *page, bool synchronous)
         }
 
         ret = 0;
-        bio = bio_alloc(GFP_KERNEL, 1);
-        bio_set_dev(bio, sis->bdev);
-        bio->bi_opf = REQ_OP_READ;
+        bio = bio_alloc(sis->bdev, 1, REQ_OP_READ, GFP_KERNEL);
         bio->bi_iter.bi_sector = swap_page_sector(page);
         bio->bi_end_io = end_swap_bio_read;
         bio_add_page(bio, page, thp_size(page), 0);
...