Commit acda24c4 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending

Pull SCSI target fixes from Nicholas Bellinger:
 "Here are the outstanding target pending fixes for v3.12-rc7.

  This includes a number of EXTENDED_COPY related fixes as a result of
  Thomas and Doug's continuing testing and feedback.

  Also included is an important vhost/scsi fix that addresses a long
  standing issue where the 'write' parameter for get_user_pages_fast()
  was incorrectly set for virtio-scsi WRITEs -> DMA_TO_DEVICE, and not
  for virtio-scsi READs -> DMA_FROM_DEVICE.

  This resulted in random userspace segfaults and other unpleasantness
  on KVM host, and unfortunately has been an issue since the initial
  merge of vhost/scsi in v3.6.  This patch is CC'ed to stable, along
  with two other less critical items"
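For context on the vhost/scsi item: the sketch below is a minimal, hypothetical helper (pin_data_pages() is not part of the patch or the kernel tree) illustrating the get_user_pages_fast() semantics behind the fix. The 'write' argument must be non-zero exactly when the kernel will write into the userspace pages, i.e. for DMA_FROM_DEVICE (a virtio-scsi READ), which is the condition the vhost/scsi hunk below switches to.

    /* Hypothetical helper, not from the patch: shows how the 'write'
     * argument to get_user_pages_fast() follows from the DMA direction.
     * v3.12-era signature: get_user_pages_fast(start, nr_pages, write, pages).
     */
    #include <linux/mm.h>
    #include <linux/dma-direction.h>

    static int pin_data_pages(unsigned long uaddr, int nr_pages,
                              enum dma_data_direction dir,
                              struct page **pages)
    {
            /* The pages are written to only when data flows from the
             * device into guest/user memory, i.e. a virtio-scsi READ.
             */
            int write = (dir == DMA_FROM_DEVICE);

            return get_user_pages_fast(uaddr, nr_pages, write, pages);
    }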

* git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending:
  vhost/scsi: Fix incorrect usage of get_user_pages_fast write parameter
  target/pscsi: fix return value check
  target: Fail XCOPY for non matching source + destination block_size
  target: Generate failure for XCOPY I/O with non-zero scsi_status
  target: Add missing XCOPY I/O operation sense_buffer
  iser-target: check device before dereferencing its variable
  target: Return an error for WRITE SAME with ANCHOR==1
  target: Fix assignment of LUN in tracepoints
  target: Reject EXTENDED_COPY when emulate_3pc is disabled
  target: Allow non zero ListID in EXTENDED_COPY parameter list
  target: Make target_do_xcopy failures return INVALID_PARAMETER_LIST
...@@ -594,7 +594,7 @@ isert_connect_release(struct isert_conn *isert_conn)
 	pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
-	if (device->use_frwr)
+	if (device && device->use_frwr)
 		isert_conn_free_frwr_pool(isert_conn);
 	if (isert_conn->conn_qp) {
...
...@@ -134,10 +134,10 @@ static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag)
 	 * pSCSI Host ID and enable for phba mode
 	 */
 	sh = scsi_host_lookup(phv->phv_host_id);
-	if (IS_ERR(sh)) {
+	if (!sh) {
 		pr_err("pSCSI: Unable to locate SCSI Host for"
 			" phv_host_id: %d\n", phv->phv_host_id);
-		return PTR_ERR(sh);
+		return -EINVAL;
 	}
 	phv->phv_lld_host = sh;
...@@ -515,10 +515,10 @@ static int pscsi_configure_device(struct se_device *dev)
 			sh = phv->phv_lld_host;
 		} else {
 			sh = scsi_host_lookup(pdv->pdv_host_id);
-			if (IS_ERR(sh)) {
+			if (!sh) {
 				pr_err("pSCSI: Unable to locate"
 					" pdv_host_id: %d\n", pdv->pdv_host_id);
-				return PTR_ERR(sh);
+				return -EINVAL;
 			}
 		}
 	} else {
...
...@@ -263,6 +263,11 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
 			sectors, cmd->se_dev->dev_attrib.max_write_same_len);
 		return TCM_INVALID_CDB_FIELD;
 	}
+	/* We always have ANC_SUP == 0 so setting ANCHOR is always an error */
+	if (flags[0] & 0x10) {
+		pr_warn("WRITE SAME with ANCHOR not supported\n");
+		return TCM_INVALID_CDB_FIELD;
+	}
 	/*
 	 * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting
 	 * translated into block discard requests within backend code.
...
...@@ -82,6 +82,9 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op
 	mutex_lock(&g_device_mutex);
 	list_for_each_entry(se_dev, &g_device_list, g_dev_node) {
+		if (!se_dev->dev_attrib.emulate_3pc)
+			continue;
+
 		memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
 		target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]);
...@@ -357,6 +360,7 @@ struct xcopy_pt_cmd {
 	struct se_cmd se_cmd;
 	struct xcopy_op *xcopy_op;
 	struct completion xpt_passthrough_sem;
+	unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];
 };
 static struct se_port xcopy_pt_port;
...@@ -675,7 +679,8 @@ static int target_xcopy_issue_pt_cmd(struct xcopy_pt_cmd *xpt_cmd)
 	pr_debug("target_xcopy_issue_pt_cmd(): SCSI status: 0x%02x\n",
 			se_cmd->scsi_status);
-	return 0;
+
+	return (se_cmd->scsi_status) ? -EINVAL : 0;
 }
 static int target_xcopy_read_source(
...@@ -708,7 +713,7 @@ static int target_xcopy_read_source(
 		(unsigned long long)src_lba, src_sectors, length);
 	transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length,
-				DMA_FROM_DEVICE, 0, NULL);
+				DMA_FROM_DEVICE, 0, &xpt_cmd->sense_buffer[0]);
 	xop->src_pt_cmd = xpt_cmd;
 	rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, src_dev, &cdb[0],
...@@ -768,7 +773,7 @@ static int target_xcopy_write_destination(
 		(unsigned long long)dst_lba, dst_sectors, length);
 	transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length,
-				DMA_TO_DEVICE, 0, NULL);
+				DMA_TO_DEVICE, 0, &xpt_cmd->sense_buffer[0]);
 	xop->dst_pt_cmd = xpt_cmd;
 	rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, dst_dev, &cdb[0],
...@@ -884,30 +889,42 @@ static void target_xcopy_do_work(struct work_struct *work)
 sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
 {
+	struct se_device *dev = se_cmd->se_dev;
 	struct xcopy_op *xop = NULL;
 	unsigned char *p = NULL, *seg_desc;
 	unsigned int list_id, list_id_usage, sdll, inline_dl, sa;
+	sense_reason_t ret = TCM_INVALID_PARAMETER_LIST;
 	int rc;
 	unsigned short tdll;
+	if (!dev->dev_attrib.emulate_3pc) {
+		pr_err("EXTENDED_COPY operation explicitly disabled\n");
+		return TCM_UNSUPPORTED_SCSI_OPCODE;
+	}
 	sa = se_cmd->t_task_cdb[1] & 0x1f;
 	if (sa != 0x00) {
 		pr_err("EXTENDED_COPY(LID4) not supported\n");
 		return TCM_UNSUPPORTED_SCSI_OPCODE;
 	}
+	xop = kzalloc(sizeof(struct xcopy_op), GFP_KERNEL);
+	if (!xop) {
+		pr_err("Unable to allocate xcopy_op\n");
+		return TCM_OUT_OF_RESOURCES;
+	}
+	xop->xop_se_cmd = se_cmd;
 	p = transport_kmap_data_sg(se_cmd);
 	if (!p) {
 		pr_err("transport_kmap_data_sg() failed in target_do_xcopy\n");
+		kfree(xop);
 		return TCM_OUT_OF_RESOURCES;
 	}
 	list_id = p[0];
-	if (list_id != 0x00) {
-		pr_err("XCOPY with non zero list_id: 0x%02x\n", list_id);
-		goto out;
-	}
-	list_id_usage = (p[1] & 0x18);
+	list_id_usage = (p[1] & 0x18) >> 3;
 	/*
	 * Determine TARGET DESCRIPTOR LIST LENGTH + SEGMENT DESCRIPTOR LIST LENGTH
	 */
...@@ -920,13 +937,6 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
 		goto out;
 	}
-	xop = kzalloc(sizeof(struct xcopy_op), GFP_KERNEL);
-	if (!xop) {
-		pr_err("Unable to allocate xcopy_op\n");
-		goto out;
-	}
-	xop->xop_se_cmd = se_cmd;
 	pr_debug("Processing XCOPY with list_id: 0x%02x list_id_usage: 0x%02x"
 		" tdll: %hu sdll: %u inline_dl: %u\n", list_id, list_id_usage,
 		tdll, sdll, inline_dl);
...@@ -935,6 +945,17 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
 	if (rc <= 0)
 		goto out;
+	if (xop->src_dev->dev_attrib.block_size !=
+	    xop->dst_dev->dev_attrib.block_size) {
+		pr_err("XCOPY: Non matching src_dev block_size: %u + dst_dev"
+		       " block_size: %u currently unsupported\n",
+		       xop->src_dev->dev_attrib.block_size,
+		       xop->dst_dev->dev_attrib.block_size);
+		xcopy_pt_undepend_remotedev(xop);
+		ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		goto out;
+	}
 	pr_debug("XCOPY: Processed %d target descriptors, length: %u\n", rc,
 				rc * XCOPY_TARGET_DESC_LEN);
 	seg_desc = &p[16];
...@@ -957,7 +978,7 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
 	if (p)
 		transport_kunmap_data_sg(se_cmd);
 	kfree(xop);
-	return TCM_INVALID_CDB_FIELD;
+	return ret;
 }
 static sense_reason_t target_rcr_operating_parameters(struct se_cmd *se_cmd)
...
...@@ -1056,7 +1056,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 		if (data_direction != DMA_NONE) {
 			ret = vhost_scsi_map_iov_to_sgl(cmd,
 					&vq->iov[data_first], data_num,
-					data_direction == DMA_TO_DEVICE);
+					data_direction == DMA_FROM_DEVICE);
 			if (unlikely(ret)) {
 				vq_err(vq, "Failed to map iov to sgl\n");
 				goto err_free;
...
...@@ -144,7 +144,7 @@ TRACE_EVENT(target_sequencer_start,
 	),
 	TP_fast_assign(
-		__entry->unpacked_lun = cmd->se_lun->unpacked_lun;
+		__entry->unpacked_lun = cmd->orig_fe_lun;
 		__entry->opcode = cmd->t_task_cdb[0];
 		__entry->data_length = cmd->data_length;
 		__entry->task_attribute = cmd->sam_task_attr;
...@@ -182,7 +182,7 @@ TRACE_EVENT(target_cmd_complete,
 	),
 	TP_fast_assign(
-		__entry->unpacked_lun = cmd->se_lun->unpacked_lun;
+		__entry->unpacked_lun = cmd->orig_fe_lun;
 		__entry->opcode = cmd->t_task_cdb[0];
 		__entry->data_length = cmd->data_length;
 		__entry->task_attribute = cmd->sam_task_attr;
...