Commit 1cc15701 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "A few fixes that should go into this series:

   - Regression fix for ide-cd, ensuring that a request is fully
     initialized. From Hongxu.

   - Ditto fix for virtio_blk, from Bart.

   - NVMe fix from Keith, ensuring that we set the right block size on
     revalidation. If the block size changed, we'd be in trouble without
     it.

   - NVMe rdma fix from Sagi, fixing a potential hang while the
     controller is being removed"

* 'for-linus' of git://git.kernel.dk/linux-block:
  ide:ide-cd: fix kernel panic resulting from missing scsi_req_init
  nvme: Fix setting logical block format when revalidating
  virtio_blk: Fix an SG_IO regression
  nvme-rdma: fix possible hang when issuing commands during ctrl removal
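
Both the ide-cd and virtio_blk hunks below add a missing scsi_req_init() call so that the scsi_request embedded in the driver's per-request data starts from a known state before the command is built. As a rough standalone sketch of that "reinitialize before reuse" pattern, with all names hypothetical (fake_* stand-ins, not the kernel API):

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-ins for the kernel structures involved. */
struct fake_scsi_request {
        unsigned char cmd[16];          /* command bytes */
        unsigned int cmd_len;           /* number of valid command bytes */
};

struct fake_driver_request {
        struct fake_scsi_request sreq;  /* embedded, reused across requests */
};

/* Analogue of scsi_req_init(): put the embedded request into a known
 * state so nothing from a previous use of the memory leaks through. */
static void fake_scsi_req_init(struct fake_scsi_request *req)
{
        memset(req, 0, sizeof(*req));
}

/* Analogue of the prep paths patched below: initialize first, then
 * build the command; leaving out the initialization step is the bug
 * class these fixes address. */
static void fake_prep_request(struct fake_driver_request *drq)
{
        fake_scsi_req_init(&drq->sreq);
        drq->sreq.cmd[0] = 0x28;        /* e.g. a READ(10) opcode */
        drq->sreq.cmd_len = 10;
}

int main(void)
{
        struct fake_driver_request drq;

        fake_prep_request(&drq);
        printf("cmd_len=%u opcode=0x%02x\n", drq.sreq.cmd_len, drq.sreq.cmd[0]);
        return 0;
}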
drivers/block/virtio_blk.c
@@ -593,10 +593,22 @@ static int virtblk_map_queues(struct blk_mq_tag_set *set)
 	return blk_mq_virtio_map_queues(set, vblk->vdev, 0);
 }
 
+#ifdef CONFIG_VIRTIO_BLK_SCSI
+static void virtblk_initialize_rq(struct request *req)
+{
+	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
+
+	scsi_req_init(&vbr->sreq);
+}
+#endif
+
 static const struct blk_mq_ops virtio_mq_ops = {
 	.queue_rq	= virtio_queue_rq,
 	.complete	= virtblk_request_done,
 	.init_request	= virtblk_init_request,
+#ifdef CONFIG_VIRTIO_BLK_SCSI
+	.initialize_rq_fn = virtblk_initialize_rq,
+#endif
 	.map_queues	= virtblk_map_queues,
 };
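
The hunk above wires up blk-mq's optional per-request initialization callback (.initialize_rq_fn in struct blk_mq_ops) so the driver's SCSI passthrough state is set up when a request is handed out. A minimal standalone sketch of that callback-table pattern, using hypothetical fake_* names rather than the real blk-mq API:

#include <stdio.h>
#include <stdlib.h>

struct fake_request {
        int initialized;        /* stands in for driver-private request state */
};

/* Hypothetical analogue of struct blk_mq_ops: a table of callbacks the
 * core invokes, with the init hook being optional (may be NULL). */
struct fake_ops {
        void (*initialize_rq_fn)(struct fake_request *rq);
};

/* Analogue of the core handing out a request: if the driver registered
 * an init hook, it runs before the request is used. */
static struct fake_request *fake_get_request(const struct fake_ops *ops)
{
        struct fake_request *rq = calloc(1, sizeof(*rq));

        if (rq && ops->initialize_rq_fn)
                ops->initialize_rq_fn(rq);
        return rq;
}

static void fake_driver_initialize_rq(struct fake_request *rq)
{
        rq->initialized = 1;    /* driver-specific setup, cf. virtblk_initialize_rq() */
}

static const struct fake_ops fake_driver_ops = {
        .initialize_rq_fn = fake_driver_initialize_rq,
};

int main(void)
{
        struct fake_request *rq = fake_get_request(&fake_driver_ops);

        printf("initialized=%d\n", rq ? rq->initialized : -1);
        free(rq);
        return 0;
}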
drivers/ide/ide-cd.c
@@ -1328,6 +1328,7 @@ static int ide_cdrom_prep_fs(struct request_queue *q, struct request *rq)
 	unsigned long blocks = blk_rq_sectors(rq) / (hard_sect >> 9);
 	struct scsi_request *req = scsi_req(rq);
 
+	scsi_req_init(req);
 	memset(req->cmd, 0, BLK_MAX_CDB);
 
 	if (rq_data_dir(rq) == READ)
drivers/nvme/host/core.c
@@ -1249,6 +1249,7 @@ static int nvme_revalidate_disk(struct gendisk *disk)
 		goto out;
 	}
 
+	__nvme_revalidate_disk(disk, id);
 	nvme_report_ns_ids(ctrl, ns->ns_id, id, eui64, nguid, &uuid);
 	if (!uuid_equal(&ns->uuid, &uuid) ||
 	    memcmp(&ns->nguid, &nguid, sizeof(ns->nguid)) ||
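
The hunk above calls __nvme_revalidate_disk() before the namespace identifiers are compared, so a changed logical block format from the freshly read Identify data actually takes effect. A rough standalone sketch of that "apply the new format, then verify identity" ordering, with hypothetical fake_* names in place of the real NVMe structures:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-ins; the real code works on struct nvme_ns and the
 * Identify Namespace data returned by the controller. */
struct fake_id_data {
        uint32_t lba_shift;     /* logical block size as a power of two */
        uint8_t uuid[16];       /* namespace identifier */
};

struct fake_namespace {
        uint32_t lba_shift;
        uint8_t uuid[16];
};

/* Analogue of __nvme_revalidate_disk(): push the freshly read block
 * format into the cached namespace so a changed block size takes effect. */
static void fake_apply_format(struct fake_namespace *ns,
                              const struct fake_id_data *id)
{
        ns->lba_shift = id->lba_shift;
}

/* Analogue of the revalidate path: apply the new format first, then
 * verify the identifiers still match the namespace we think we have. */
static bool fake_revalidate(struct fake_namespace *ns,
                            const struct fake_id_data *id)
{
        fake_apply_format(ns, id);
        return memcmp(ns->uuid, id->uuid, sizeof(ns->uuid)) == 0;
}

int main(void)
{
        struct fake_namespace ns = { .lba_shift = 9 };
        struct fake_id_data id = { .lba_shift = 12 };   /* 4K blocks now */

        printf("same ns: %d, new shift: %u\n", fake_revalidate(&ns, &id), ns.lba_shift);
        return 0;
}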
drivers/nvme/host/rdma.c
@@ -1614,12 +1614,15 @@ nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue, struct request *rq)
 		/*
 		 * reconnecting state means transport disruption, which
 		 * can take a long time and even might fail permanently,
-		 * so we can't let incoming I/O be requeued forever.
-		 * fail it fast to allow upper layers a chance to
-		 * failover.
+		 * fail fast to give upper layers a chance to failover.
+		 * deleting state means that the ctrl will never accept
+		 * commands again, fail it permanently.
 		 */
-		if (queue->ctrl->ctrl.state == NVME_CTRL_RECONNECTING)
+		if (queue->ctrl->ctrl.state == NVME_CTRL_RECONNECTING ||
+		    queue->ctrl->ctrl.state == NVME_CTRL_DELETING) {
+			nvme_req(rq)->status = NVME_SC_ABORT_REQ;
 			return BLK_STS_IOERR;
+		}
 		return BLK_STS_RESOURCE; /* try again later */
 	}
 }
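
The hunk above makes dispatch on a not-yet-live queue fail fast not only while the controller is reconnecting but also while it is being deleted, setting an abort status so upper layers stop retrying instead of hanging. A small standalone sketch of that state-based disposition, with hypothetical fake_* names in place of the NVMe and block-layer enums:

#include <stdio.h>

/* Hypothetical stand-ins for the controller states and block status codes. */
enum fake_ctrl_state { FAKE_LIVE, FAKE_RECONNECTING, FAKE_DELETING };
enum fake_blk_status { FAKE_STS_IOERR, FAKE_STS_RESOURCE };

/* Analogue of the queue_is_ready() logic above: while the controller is
 * reconnecting or being deleted, fail the command immediately so upper
 * layers can fail over or give up; otherwise ask the block layer to
 * requeue and retry later. */
static enum fake_blk_status fake_unready_queue_disposition(enum fake_ctrl_state state)
{
        if (state == FAKE_RECONNECTING || state == FAKE_DELETING)
                return FAKE_STS_IOERR;          /* fail fast, no endless requeue */
        return FAKE_STS_RESOURCE;               /* transient, try again later */
}

int main(void)
{
        printf("deleting -> %s\n",
               fake_unready_queue_disposition(FAKE_DELETING) == FAKE_STS_IOERR ?
               "IOERR" : "RESOURCE");
        return 0;
}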