Commit cb47c183 authored by Linus Torvalds

Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending

Pull target updates from Nicholas Bellinger:
 "There have been lots of work in a number of areas this past round.
  The highlights include:

   - Break out target_core_cdb.c emulation into SPC/SBC ops (hch)
   - Add a parse_cdb method to target backend drivers (hch)
   - Move sync_cache + write_same + unmap into spc_ops (hch)
   - Use target_execute_cmd for WRITEs in iscsi_target + srpt (hch)
   - Offload WRITE I/O backend submission in tcm_qla2xxx + tcm_fc (hch +
     nab)
   - Refactor core_update_device_list_for_node() into enable/disable
     funcs (agrover)
   - Replace the TCM processing thread with a TMR work queue (hch)
   - Fix regression in transport_add_device_to_core_hba from TMR
     conversion (DanC)
   - Remove racy, now-redundant check of sess_tearing_down with qla2xxx
     (roland)
   - Add range checking, fix reading of data len + possible underflow in
     UNMAP (roland)
   - Allow for target_submit_cmd() returning errors + convert fabrics
     (roland + nab)
   - Drop bogus struct file usage for iSCSI/SCTP (viro)"
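
Editor's note: the parse_cdb/spc_ops refactor called out above changes how backend drivers plug into the core. As a minimal sketch (hypothetical foo_* names; the real wiring for the fileio and iblock backends appears in the diff below), a backend now fills in an spc_ops table with its I/O callbacks and delegates CDB parsing to sbc_parse_cdb():

    static struct spc_ops foo_spc_ops = {
            .execute_rw             = foo_execute_rw,          /* READ/WRITE path */
            .execute_sync_cache     = foo_execute_sync_cache,  /* SYNCHRONIZE CACHE */
    };

    /* Replaces the old per-backend ->execute_cmd / ->do_sync_cache hooks */
    static int foo_parse_cdb(struct se_cmd *cmd)
    {
            return sbc_parse_cdb(cmd, &foo_spc_ops);
    }

    static struct se_subsystem_api foo_template = {
            .name           = "foo",
            .parse_cdb      = foo_parse_cdb,
            /* ... */
    };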

* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending: (54 commits)
  iscsi-target: Drop bogus struct file usage for iSCSI/SCTP
  target: NULL dereference on error path
  target: Allow for target_submit_cmd() returning errors
  target: Check number of unmap descriptors against our limit
  target: Fix possible integer underflow in UNMAP emulation
  target: Fix reading of data length fields for UNMAP commands
  target: Add range checking to UNMAP emulation
  target: Add generation of LOGICAL BLOCK ADDRESS OUT OF RANGE
  target: Make unnecessarily global se_dev_align_max_sectors() static
  target: Remove se_session.sess_wait_list
  qla2xxx: Remove racy, now-redundant check of sess_tearing_down
  target: Check sess_tearing_down in target_get_sess_cmd()
  sbp-target: Consolidate duplicated error path code in sbp_handle_command()
  target: Un-export target_get_sess_cmd()
  qla2xxx: Get rid of redundant qla_tgt_sess.tearing_down
  target: Make core_disable_device_list_for_node use pre-refactoring lock ordering
  target: refactor core_update_device_list_for_node()
  target: Eliminate else using boolean logic
  target: Misc retval cleanups
  target: Remove hba param from core_dev_add_lun
  ...
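
Editor's note: one highlight worth calling out before the diff is that target_submit_cmd() can now fail, so fabric drivers are expected to check its return value and terminate the command themselves rather than assuming the core took ownership. A hedged sketch of the calling convention (hypothetical err label and buffers; the sbp-target and tcm_qla2xxx hunks below show the real conversions):

    if (target_submit_cmd(&cmd->se_cmd, sess->se_sess, cmd->cmd_buf,
                          cmd->sense_buf, unpacked_lun, data_length,
                          MSG_SIMPLE_TAG, data_dir, 0))
            goto err;       /* fabric sends its own error status and frees the cmd */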
@@ -1377,10 +1377,14 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
 		break;
 	case SRPT_STATE_NEED_DATA:
 		/* DMA_TO_DEVICE (write) - RDMA read error. */
+
+		/* XXX(hch): this is a horrible layering violation.. */
 		spin_lock_irqsave(&ioctx->cmd.t_state_lock, flags);
 		ioctx->cmd.transport_state |= CMD_T_LUN_STOP;
+		ioctx->cmd.transport_state &= ~CMD_T_ACTIVE;
 		spin_unlock_irqrestore(&ioctx->cmd.t_state_lock, flags);
-		transport_generic_handle_data(&ioctx->cmd);
+
+		complete(&ioctx->cmd.transport_lun_stop_comp);
 		break;
 	case SRPT_STATE_CMD_RSP_SENT:
 		/*
@@ -1463,9 +1467,10 @@ static void srpt_handle_send_comp(struct srpt_rdma_ch *ch,
 /**
  * srpt_handle_rdma_comp() - Process an IB RDMA completion notification.
  *
- * Note: transport_generic_handle_data() is asynchronous so unmapping the
- * data that has been transferred via IB RDMA must be postponed until the
- * check_stop_free() callback.
+ * XXX: what is now target_execute_cmd used to be asynchronous, and unmapping
+ * the data that has been transferred via IB RDMA had to be postponed until the
+ * check_stop_free() callback. None of this is nessecary anymore and needs to
+ * be cleaned up.
  */
 static void srpt_handle_rdma_comp(struct srpt_rdma_ch *ch,
 				  struct srpt_send_ioctx *ioctx,
@@ -1477,7 +1482,7 @@ static void srpt_handle_rdma_comp(struct srpt_rdma_ch *ch,
 	if (opcode == SRPT_RDMA_READ_LAST) {
 		if (srpt_test_and_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA,
 						SRPT_STATE_DATA_IN))
-			transport_generic_handle_data(&ioctx->cmd);
+			target_execute_cmd(&ioctx->cmd);
 		else
 			printk(KERN_ERR "%s[%d]: wrong state = %d\n", __func__,
 				__LINE__, srpt_get_cmd_state(ioctx));
...
@@ -2643,19 +2643,9 @@ static void qlt_do_work(struct work_struct *work)
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
 					atio->u.isp24.fcp_hdr.s_id);
-	if (sess) {
-		if (unlikely(sess->tearing_down)) {
-			sess = NULL;
-			spin_unlock_irqrestore(&ha->hardware_lock, flags);
-			goto out_term;
-		} else {
-			/*
-			 * Do the extra kref_get() before dropping
-			 * qla_hw_data->hardware_lock.
-			 */
-			kref_get(&sess->se_sess->sess_kref);
-		}
-	}
+	/* Do kref_get() before dropping qla_hw_data->hardware_lock. */
+	if (sess)
+		kref_get(&sess->se_sess->sess_kref);
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);

 	if (unlikely(!sess)) {
...
@@ -639,7 +639,7 @@ struct qla_tgt_func_tmpl {
 	int (*handle_cmd)(struct scsi_qla_host *, struct qla_tgt_cmd *,
 			unsigned char *, uint32_t, int, int, int);
-	int (*handle_data)(struct qla_tgt_cmd *);
+	void (*handle_data)(struct qla_tgt_cmd *);
 	int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, uint32_t, uint8_t,
 			uint32_t);
 	void (*free_cmd)(struct qla_tgt_cmd *);
@@ -813,7 +813,6 @@ struct qla_tgt_sess {
 	unsigned int conf_compl_supported:1;
 	unsigned int deleted:1;
 	unsigned int local:1;
-	unsigned int tearing_down:1;
 	struct se_session *se_sess;
 	struct scsi_qla_host *vha;
...
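Editor's note: the handle_data() signature change above ties into the WRITE I/O offload mentioned in the pull summary; the callback no longer returns a status because backend submission is punted to a workqueue. A rough sketch of the pattern (hypothetical foo_* names and foo_wq; tcm_qla2xxx adopts exactly this shape in the next file):

    static void foo_handle_data_work(struct work_struct *work)
    {
            struct foo_cmd *cmd = container_of(work, struct foo_cmd, work);

            target_execute_cmd(&cmd->se_cmd);   /* backend submission off the interrupt path */
    }

    static void foo_handle_data(struct foo_cmd *cmd)
    {
            INIT_WORK(&cmd->work, foo_handle_data_work);
            queue_work(foo_wq, &cmd->work);
    }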
@@ -38,8 +38,6 @@
 #include <linux/string.h>
 #include <linux/configfs.h>
 #include <linux/ctype.h>
-#include <linux/string.h>
-#include <linux/ctype.h>
 #include <asm/unaligned.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_host.h>
@@ -466,8 +464,7 @@ static int tcm_qla2xxx_shutdown_session(struct se_session *se_sess)
 	vha = sess->vha;

 	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
-	sess->tearing_down = 1;
-	target_splice_sess_cmd_list(se_sess);
+	target_sess_cmd_list_set_waiting(se_sess);
 	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

 	return 1;
@@ -600,28 +597,15 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
 		return -EINVAL;
 	}

-	target_submit_cmd(se_cmd, se_sess, cdb, &cmd->sense_buffer[0],
+	return target_submit_cmd(se_cmd, se_sess, cdb, &cmd->sense_buffer[0],
 			cmd->unpacked_lun, data_length, fcp_task_attr,
 			data_dir, flags);
-	return 0;
 }

-static void tcm_qla2xxx_do_rsp(struct work_struct *work)
+static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
 {
 	struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
-	/*
-	 * Dispatch ->queue_status from workqueue process context
-	 */
-	transport_generic_request_failure(&cmd->se_cmd);
-}
-
-/*
- * Called from qla_target.c:qlt_do_ctio_completion()
- */
-static int tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd)
-{
-	struct se_cmd *se_cmd = &cmd->se_cmd;
-	unsigned long flags;

 	/*
 	 * Ensure that the complete FCP WRITE payload has been received.
 	 * Otherwise return an exception via CHECK_CONDITION status.
@@ -631,24 +615,26 @@ static int tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd)
 	 * Check if se_cmd has already been aborted via LUN_RESET, and
 	 * waiting upon completion in tcm_qla2xxx_write_pending_status()
 	 */
-	spin_lock_irqsave(&se_cmd->t_state_lock, flags);
-	if (se_cmd->transport_state & CMD_T_ABORTED) {
-		spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
-		complete(&se_cmd->t_transport_stop_comp);
-		return 0;
+	if (cmd->se_cmd.transport_state & CMD_T_ABORTED) {
+		complete(&cmd->se_cmd.t_transport_stop_comp);
+		return;
 	}
-	spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);

-	se_cmd->scsi_sense_reason = TCM_CHECK_CONDITION_ABORT_CMD;
-	INIT_WORK(&cmd->work, tcm_qla2xxx_do_rsp);
-	queue_work(tcm_qla2xxx_free_wq, &cmd->work);
-	return 0;
+	cmd->se_cmd.scsi_sense_reason = TCM_CHECK_CONDITION_ABORT_CMD;
+	transport_generic_request_failure(&cmd->se_cmd);
+	return;
 	}

-	/*
-	 * We now tell TCM to queue this WRITE CDB with TRANSPORT_PROCESS_WRITE
-	 * status to the backstore processing thread.
-	 */
-	return transport_generic_handle_data(&cmd->se_cmd);
+	return target_execute_cmd(&cmd->se_cmd);
+}
+
+/*
+ * Called from qla_target.c:qlt_do_ctio_completion()
+ */
+static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd)
+{
+	INIT_WORK(&cmd->work, tcm_qla2xxx_handle_data_work);
+	queue_work(tcm_qla2xxx_free_wq, &cmd->work);
 }
@@ -1690,7 +1676,6 @@ static struct target_core_fabric_ops tcm_qla2xxx_ops = {
 	.tpg_alloc_fabric_acl	= tcm_qla2xxx_alloc_fabric_acl,
 	.tpg_release_fabric_acl	= tcm_qla2xxx_release_fabric_acl,
 	.tpg_get_inst_index	= tcm_qla2xxx_tpg_get_inst_index,
-	.new_cmd_map		= NULL,
 	.check_stop_free	= tcm_qla2xxx_check_stop_free,
 	.release_cmd		= tcm_qla2xxx_release_cmd,
 	.put_session		= tcm_qla2xxx_put_session,
...
@@ -9,7 +9,8 @@ target_core_mod-y := target_core_configfs.o \
 				   target_core_tmr.o \
 				   target_core_tpg.o \
 				   target_core_transport.o \
-				   target_core_cdb.o \
+				   target_core_sbc.o \
+				   target_core_spc.o \
 				   target_core_ua.o \
 				   target_core_rd.o \
 				   target_core_stat.o
...
@@ -429,19 +429,8 @@ int iscsit_reset_np_thread(
 int iscsit_del_np_comm(struct iscsi_np *np)
 {
-	if (!np->np_socket)
-		return 0;
-
-	/*
-	 * Some network transports allocate their own struct sock->file,
-	 * see if we need to free any additional allocated resources.
-	 */
-	if (np->np_flags & NPF_SCTP_STRUCT_FILE) {
-		kfree(np->np_socket->file);
-		np->np_socket->file = NULL;
-	}
-
-	sock_release(np->np_socket);
+	if (np->np_socket)
+		sock_release(np->np_socket);
 	return 0;
 }
@@ -1413,8 +1402,10 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
 			spin_unlock_bh(&cmd->istate_lock);
 			iscsit_stop_dataout_timer(cmd);
-			return (!ooo_cmdsn) ? transport_generic_handle_data(
-						&cmd->se_cmd) : 0;
+			if (ooo_cmdsn)
+				return 0;
+			target_execute_cmd(&cmd->se_cmd);
+			return 0;
 		} else /* DATAOUT_CANNOT_RECOVER */
 			return -1;
@@ -2683,7 +2674,7 @@ static int iscsit_send_logout_response(
 		 */
 		logout_conn = iscsit_get_conn_from_cid_rcfr(sess,
 				cmd->logout_cid);
-		if ((logout_conn)) {
+		if (logout_conn) {
 			iscsit_connection_reinstatement_rcfr(logout_conn);
 			iscsit_dec_conn_usage_count(logout_conn);
 		}
@@ -4077,13 +4068,8 @@ int iscsit_close_connection(
 	kfree(conn->conn_ops);
 	conn->conn_ops = NULL;

-	if (conn->sock) {
-		if (conn->conn_flags & CONNFLAG_SCTP_STRUCT_FILE) {
-			kfree(conn->sock->file);
-			conn->sock->file = NULL;
-		}
-
+	if (conn->sock)
 		sock_release(conn->sock);
-	}

 	conn->thread_set = NULL;

 	pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
...
@@ -47,28 +47,6 @@ struct lio_target_configfs_attribute {
 	ssize_t (*store)(void *, const char *, size_t);
 };

-struct iscsi_portal_group *lio_get_tpg_from_tpg_item(
-	struct config_item *item,
-	struct iscsi_tiqn **tiqn_out)
-{
-	struct se_portal_group *se_tpg = container_of(to_config_group(item),
-					struct se_portal_group, tpg_group);
-	struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
-	int ret;
-
-	if (!tpg) {
-		pr_err("Unable to locate struct iscsi_portal_group "
-			"pointer\n");
-		return NULL;
-	}
-	ret = iscsit_get_tpg(tpg);
-	if (ret < 0)
-		return NULL;
-
-	*tiqn_out = tpg->tpg_tiqn;
-	return tpg;
-}
-
 /* Start items for lio_target_portal_cit */

 static ssize_t lio_target_np_show_sctp(
...
@@ -224,7 +224,6 @@ enum iscsi_timer_flags_table {
 /* Used for struct iscsi_np->np_flags */
 enum np_flags_table {
 	NPF_IP_NETWORK		= 0x00,
-	NPF_SCTP_STRUCT_FILE	= 0x01 /* Bugfix */
 };

 /* Used for struct iscsi_np->np_thread_state */
@@ -481,6 +480,7 @@ struct iscsi_tmr_req {
 	bool			task_reassign:1;
 	u32			ref_cmd_sn;
 	u32			exp_data_sn;
+	struct iscsi_cmd	*ref_cmd;
 	struct iscsi_conn_recovery *conn_recovery;
 	struct se_tmr_req	*se_tmr_req;
 };
@@ -503,7 +503,6 @@ struct iscsi_conn {
 	u16			local_port;
 	int			net_size;
 	u32			auth_id;
-#define CONNFLAG_SCTP_STRUCT_FILE	0x01
 	u32			conn_flags;
 	/* Used for iscsi_tx_login_rsp() */
 	u32			login_itt;
...
@@ -965,8 +965,8 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
 		if (cmd->immediate_data) {
 			if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) {
 				spin_unlock_bh(&cmd->istate_lock);
-				return transport_generic_handle_data(
-						&cmd->se_cmd);
+				target_execute_cmd(&cmd->se_cmd);
+				return 0;
 			}
 			spin_unlock_bh(&cmd->istate_lock);
...
@@ -518,7 +518,7 @@ int iscsi_login_post_auth_non_zero_tsih(
 	 * initiator and release the new connection.
 	 */
 	conn_ptr = iscsit_get_conn_from_cid_rcfr(sess, cid);
-	if ((conn_ptr)) {
+	if (conn_ptr) {
 		pr_err("Connection exists with CID %hu for %s,"
 			" performing connection reinstatement.\n",
 			conn_ptr->cid, sess->sess_ops->InitiatorName);
@@ -539,7 +539,7 @@ int iscsi_login_post_auth_non_zero_tsih(
 	if (sess->sess_ops->ErrorRecoveryLevel == 2) {
 		cr = iscsit_get_inactive_connection_recovery_entry(
 				sess, cid);
-		if ((cr)) {
+		if (cr) {
 			pr_debug("Performing implicit logout"
 				" for connection recovery on CID: %hu\n",
 				conn->cid);
@@ -794,22 +794,6 @@ int iscsi_target_setup_login_socket(
 		return ret;
 	}
 	np->np_socket = sock;
-	/*
-	 * The SCTP stack needs struct socket->file.
-	 */
-	if ((np->np_network_transport == ISCSI_SCTP_TCP) ||
-	    (np->np_network_transport == ISCSI_SCTP_UDP)) {
-		if (!sock->file) {
-			sock->file = kzalloc(sizeof(struct file), GFP_KERNEL);
-			if (!sock->file) {
-				pr_err("Unable to allocate struct"
-						" file for SCTP\n");
-				ret = -ENOMEM;
-				goto fail;
-			}
-			np->np_flags |= NPF_SCTP_STRUCT_FILE;
-		}
-	}
 	/*
 	 * Setup the np->np_sockaddr from the passed sockaddr setup
 	 * in iscsi_target_configfs.c code..
@@ -869,21 +853,15 @@ int iscsi_target_setup_login_socket(
 fail:
 	np->np_socket = NULL;
-	if (sock) {
-		if (np->np_flags & NPF_SCTP_STRUCT_FILE) {
-			kfree(sock->file);
-			sock->file = NULL;
-		}
-
+	if (sock)
 		sock_release(sock);
-	}
 	return ret;
 }

 static int __iscsi_target_login_thread(struct iscsi_np *np)
 {
 	u8 buffer[ISCSI_HDR_LEN], iscsi_opcode, zero_tsih = 0;
-	int err, ret = 0, set_sctp_conn_flag, stop;
+	int err, ret = 0, stop;
 	struct iscsi_conn *conn = NULL;
 	struct iscsi_login *login;
 	struct iscsi_portal_group *tpg = NULL;
@@ -894,7 +872,6 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
 	struct sockaddr_in6 sock_in6;

 	flush_signals(current);
-	set_sctp_conn_flag = 0;
 	sock = np->np_socket;

 	spin_lock_bh(&np->np_thread_lock);
@@ -917,35 +894,12 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
 		spin_unlock_bh(&np->np_thread_lock);
 		goto out;
 	}
-	/*
-	 * The SCTP stack needs struct socket->file.
-	 */
-	if ((np->np_network_transport == ISCSI_SCTP_TCP) ||
-	    (np->np_network_transport == ISCSI_SCTP_UDP)) {
-		if (!new_sock->file) {
-			new_sock->file = kzalloc(
-					sizeof(struct file), GFP_KERNEL);
-			if (!new_sock->file) {
-				pr_err("Unable to allocate struct"
-						" file for SCTP\n");
-				sock_release(new_sock);
-				/* Get another socket */
-				return 1;
-			}
-			set_sctp_conn_flag = 1;
-		}
-	}

 	iscsi_start_login_thread_timer(np);

 	conn = kzalloc(sizeof(struct iscsi_conn), GFP_KERNEL);
 	if (!conn) {
 		pr_err("Could not allocate memory for"
 			" new connection\n");
-		if (set_sctp_conn_flag) {
-			kfree(new_sock->file);
-			new_sock->file = NULL;
-		}
 		sock_release(new_sock);
 		/* Get another socket */
 		return 1;
@@ -955,9 +909,6 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
 	conn->conn_state = TARG_CONN_STATE_FREE;
 	conn->sock = new_sock;

-	if (set_sctp_conn_flag)
-		conn->conn_flags |= CONNFLAG_SCTP_STRUCT_FILE;
-
 	pr_debug("Moving to TARG_CONN_STATE_XPT_UP.\n");
 	conn->conn_state = TARG_CONN_STATE_XPT_UP;
@@ -1081,7 +1032,7 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
 		goto new_sess_out;

 	zero_tsih = (pdu->tsih == 0x0000);
-	if ((zero_tsih)) {
+	if (zero_tsih) {
 		/*
 		 * This is the leading connection of a new session.
 		 * We wait until after authentication to check for
@@ -1205,13 +1156,8 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
 		iscsi_release_param_list(conn->param_list);
 		conn->param_list = NULL;
 	}
-	if (conn->sock) {
-		if (conn->conn_flags & CONNFLAG_SCTP_STRUCT_FILE) {
-			kfree(conn->sock->file);
-			conn->sock->file = NULL;
-		}
-
+	if (conn->sock)
 		sock_release(conn->sock);
-	}
 	kfree(conn);

 	if (tpg) {
...
@@ -681,7 +681,7 @@ int iscsi_update_param_value(struct iscsi_param *param, char *value)
 	param->value = kzalloc(strlen(value) + 1, GFP_KERNEL);
 	if (!param->value) {
 		pr_err("Unable to allocate memory for value.\n");
-		return -1;
+		return -ENOMEM;
 	}
 	memcpy(param->value, value, strlen(value));
...
@@ -19,6 +19,7 @@
  ******************************************************************************/

 #include <asm/unaligned.h>
+#include <scsi/scsi_device.h>
 #include <scsi/iscsi_proto.h>
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
@@ -61,7 +62,7 @@ u8 iscsit_tmr_abort_task(
 	}

 	se_tmr->ref_task_tag = hdr->rtt;
-	se_tmr->ref_cmd = &ref_cmd->se_cmd;
+	tmr_req->ref_cmd = ref_cmd;
 	tmr_req->ref_cmd_sn = hdr->refcmdsn;
 	tmr_req->exp_data_sn = hdr->exp_datasn;
@@ -121,7 +122,7 @@ u8 iscsit_tmr_task_reassign(
 	struct iscsi_tmr_req *tmr_req = cmd->tmr_req;
 	struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
 	struct iscsi_tm *hdr = (struct iscsi_tm *) buf;
-	int ret;
+	int ret, ref_lun;

 	pr_debug("Got TASK_REASSIGN TMR ITT: 0x%08x,"
 		" RefTaskTag: 0x%08x, ExpDataSN: 0x%08x, CID: %hu\n",
@@ -155,9 +156,16 @@ u8 iscsit_tmr_task_reassign(
 		return ISCSI_TMF_RSP_REJECTED;
 	}

+	ref_lun = scsilun_to_int(&hdr->lun);
+	if (ref_lun != ref_cmd->se_cmd.orig_fe_lun) {
+		pr_err("Unable to perform connection recovery for"
+			" differing ref_lun: %d ref_cmd orig_fe_lun: %u\n",
+			ref_lun, ref_cmd->se_cmd.orig_fe_lun);
+		return ISCSI_TMF_RSP_REJECTED;
+	}
+
 	se_tmr->ref_task_tag = hdr->rtt;
-	se_tmr->ref_cmd = &ref_cmd->se_cmd;
-	se_tmr->ref_task_lun = get_unaligned_le64(&hdr->lun);
+	tmr_req->ref_cmd = ref_cmd;
 	tmr_req->ref_cmd_sn = hdr->refcmdsn;
 	tmr_req->exp_data_sn = hdr->exp_datasn;
 	tmr_req->conn_recovery = cr;
@@ -191,9 +199,7 @@ static int iscsit_task_reassign_complete_nop_out(
 	struct iscsi_tmr_req *tmr_req,
 	struct iscsi_conn *conn)
 {
-	struct se_tmr_req *se_tmr = tmr_req->se_tmr_req;
-	struct se_cmd *se_cmd = se_tmr->ref_cmd;
-	struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+	struct iscsi_cmd *cmd = tmr_req->ref_cmd;
 	struct iscsi_conn_recovery *cr;

 	if (!cmd->cr) {
@@ -251,7 +257,8 @@ static int iscsit_task_reassign_complete_write(
 		pr_debug("WRITE ITT: 0x%08x: t_state: %d"
 			" never sent to transport\n",
 			cmd->init_task_tag, cmd->se_cmd.t_state);
-		return transport_generic_handle_data(se_cmd);
+		target_execute_cmd(se_cmd);
+		return 0;
 	}

 	cmd->i_state = ISTATE_SEND_STATUS;
@@ -360,9 +367,7 @@ static int iscsit_task_reassign_complete_scsi_cmnd(
 	struct iscsi_tmr_req *tmr_req,
 	struct iscsi_conn *conn)
 {
-	struct se_tmr_req *se_tmr = tmr_req->se_tmr_req;
-	struct se_cmd *se_cmd = se_tmr->ref_cmd;
-	struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+	struct iscsi_cmd *cmd = tmr_req->ref_cmd;
 	struct iscsi_conn_recovery *cr;

 	if (!cmd->cr) {
@@ -385,7 +390,7 @@ static int iscsit_task_reassign_complete_scsi_cmnd(
 	list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
 	spin_unlock_bh(&conn->cmd_lock);

-	if (se_cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
+	if (cmd->se_cmd.se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
 		cmd->i_state = ISTATE_SEND_STATUS;
 		iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
 		return 0;
@@ -411,17 +416,14 @@ static int iscsit_task_reassign_complete(
 	struct iscsi_tmr_req *tmr_req,
 	struct iscsi_conn *conn)
 {
-	struct se_tmr_req *se_tmr = tmr_req->se_tmr_req;
-	struct se_cmd *se_cmd;
 	struct iscsi_cmd *cmd;
 	int ret = 0;

-	if (!se_tmr->ref_cmd) {
+	if (!tmr_req->ref_cmd) {
 		pr_err("TMR Request is missing a RefCmd struct iscsi_cmd.\n");
 		return -1;
 	}
-	se_cmd = se_tmr->ref_cmd;
-	cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+	cmd = tmr_req->ref_cmd;

 	cmd->conn = conn;
@@ -547,9 +549,7 @@ int iscsit_task_reassign_prepare_write(
 	struct iscsi_tmr_req *tmr_req,
 	struct iscsi_conn *conn)
 {
-	struct se_tmr_req *se_tmr = tmr_req->se_tmr_req;
-	struct se_cmd *se_cmd = se_tmr->ref_cmd;
-	struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+	struct iscsi_cmd *cmd = tmr_req->ref_cmd;
 	struct iscsi_pdu *pdu = NULL;
 	struct iscsi_r2t *r2t = NULL, *r2t_tmp;
 	int first_incomplete_r2t = 1, i = 0;
@@ -782,14 +782,12 @@ int iscsit_check_task_reassign_expdatasn(
 	struct iscsi_tmr_req *tmr_req,
 	struct iscsi_conn *conn)
 {
-	struct se_tmr_req *se_tmr = tmr_req->se_tmr_req;
-	struct se_cmd *se_cmd = se_tmr->ref_cmd;
-	struct iscsi_cmd *ref_cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+	struct iscsi_cmd *ref_cmd = tmr_req->ref_cmd;

 	if (ref_cmd->iscsi_opcode != ISCSI_OP_SCSI_CMD)
 		return 0;

-	if (se_cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION)
+	if (ref_cmd->se_cmd.se_cmd_flags & SCF_SENT_CHECK_CONDITION)
 		return 0;

 	if (ref_cmd->data_direction == DMA_NONE)
...
@@ -303,6 +303,7 @@ int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *tpg)
 {
 	struct iscsi_param *param;
 	struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
+	int ret;

 	spin_lock(&tpg->tpg_state_lock);
 	if (tpg->tpg_state == TPG_STATE_ACTIVE) {
@@ -319,19 +320,19 @@ int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *tpg)
 	param = iscsi_find_param_from_key(AUTHMETHOD, tpg->param_list);
 	if (!param) {
 		spin_unlock(&tpg->tpg_state_lock);
-		return -ENOMEM;
+		return -EINVAL;
 	}

 	if (ISCSI_TPG_ATTRIB(tpg)->authentication) {
-		if (!strcmp(param->value, NONE))
-			if (iscsi_update_param_value(param, CHAP) < 0) {
-				spin_unlock(&tpg->tpg_state_lock);
-				return -ENOMEM;
-			}
-		if (iscsit_ta_authentication(tpg, 1) < 0) {
-			spin_unlock(&tpg->tpg_state_lock);
-			return -ENOMEM;
+		if (!strcmp(param->value, NONE)) {
+			ret = iscsi_update_param_value(param, CHAP);
+			if (ret)
+				goto err;
 		}
+
+		ret = iscsit_ta_authentication(tpg, 1);
+		if (ret < 0)
+			goto err;
 	}

 	tpg->tpg_state = TPG_STATE_ACTIVE;
@@ -344,6 +345,10 @@ int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *tpg)
 	spin_unlock(&tiqn->tiqn_tpg_lock);

 	return 0;
+
+err:
+	spin_unlock(&tpg->tpg_state_lock);
+	return ret;
 }

 int iscsit_tpg_disable_portal_group(struct iscsi_portal_group *tpg, int force)
@@ -558,7 +563,7 @@ int iscsit_ta_authentication(struct iscsi_portal_group *tpg, u32 authentication)
 	if ((authentication != 1) && (authentication != 0)) {
 		pr_err("Illegal value for authentication parameter:"
 			" %u, ignoring request.\n", authentication);
-		return -1;
+		return -EINVAL;
 	}

 	memset(buf1, 0, sizeof(buf1));
@@ -593,7 +598,7 @@ int iscsit_ta_authentication(struct iscsi_portal_group *tpg, u32 authentication)
 	} else {
 		snprintf(buf1, sizeof(buf1), "%s", param->value);
 		none = strstr(buf1, NONE);
-		if ((none))
+		if (none)
 			goto out;
 		strncat(buf1, ",", strlen(","));
 		strncat(buf1, NONE, strlen(NONE));
...
@@ -211,12 +211,11 @@ static void tcm_loop_submission_work(struct work_struct *work)
 	/*
 	 * Because some userspace code via scsi-generic do not memset their
 	 * associated read buffers, go ahead and do that here for type
-	 * SCF_SCSI_CONTROL_SG_IO_CDB. Also note that this is currently
-	 * guaranteed to be a single SGL for SCF_SCSI_CONTROL_SG_IO_CDB
-	 * by target core in target_setup_cmd_from_cdb() ->
-	 * transport_generic_cmd_sequencer().
+	 * non-data CDBs. Also note that this is currently guaranteed to be a
+	 * single SGL for this case by target core in
+	 * target_setup_cmd_from_cdb() -> transport_generic_cmd_sequencer().
 	 */
-	if (se_cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB &&
+	if (!(se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) &&
 	    se_cmd->data_direction == DMA_FROM_DEVICE) {
 		struct scatterlist *sg = scsi_sglist(sc);
 		unsigned char *buf = kmap(sg_page(sg)) + sg->offset;
@@ -779,7 +778,7 @@ static int tcm_loop_write_pending(struct se_cmd *se_cmd)
 	 * We now tell TCM to add this WRITE CDB directly into the TCM storage
 	 * object execution queue.
 	 */
-	transport_generic_process_write(se_cmd);
+	target_execute_cmd(se_cmd);
 	return 0;
 }
...
@@ -1219,28 +1219,14 @@ static void sbp_handle_command(struct sbp_target_request *req)
 	ret = sbp_fetch_command(req);
 	if (ret) {
 		pr_debug("sbp_handle_command: fetch command failed: %d\n", ret);
-		req->status.status |= cpu_to_be32(
-			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
-			STATUS_BLOCK_DEAD(0) |
-			STATUS_BLOCK_LEN(1) |
-			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
-		sbp_send_status(req);
-		sbp_free_request(req);
-		return;
+		goto err;
 	}

 	ret = sbp_fetch_page_table(req);
 	if (ret) {
 		pr_debug("sbp_handle_command: fetch page table failed: %d\n",
 			ret);
-		req->status.status |= cpu_to_be32(
-			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
-			STATUS_BLOCK_DEAD(0) |
-			STATUS_BLOCK_LEN(1) |
-			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
-		sbp_send_status(req);
-		sbp_free_request(req);
-		return;
+		goto err;
 	}

 	unpacked_lun = req->login->lun->unpacked_lun;
@@ -1249,9 +1235,21 @@ static void sbp_handle_command(struct sbp_target_request *req)
 	pr_debug("sbp_handle_command ORB:0x%llx unpacked_lun:%d data_len:%d data_dir:%d\n",
 			req->orb_pointer, unpacked_lun, data_length, data_dir);

-	target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf,
+	if (target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf,
 			req->sense_buf, unpacked_lun, data_length,
-			MSG_SIMPLE_TAG, data_dir, 0);
+			MSG_SIMPLE_TAG, data_dir, 0))
+		goto err;
+
+	return;
+
+err:
+	req->status.status |= cpu_to_be32(
+		STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
+		STATUS_BLOCK_DEAD(0) |
+		STATUS_BLOCK_LEN(1) |
+		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
+	sbp_send_status(req);
+	sbp_free_request(req);
 }

 /*
@@ -1784,8 +1782,7 @@ static int sbp_write_pending(struct se_cmd *se_cmd)
 		return ret;
 	}

-	transport_generic_process_write(se_cmd);
-
+	target_execute_cmd(se_cmd);
 	return 0;
 }
...
@@ -300,8 +300,8 @@ int core_free_device_list_for_node(
 		lun = deve->se_lun;
 		spin_unlock_irq(&nacl->device_list_lock);

-		core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
-			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
+		core_disable_device_list_for_node(lun, NULL, deve->mapped_lun,
+			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);

 		spin_lock_irq(&nacl->device_list_lock);
 	}
 	spin_unlock_irq(&nacl->device_list_lock);
...@@ -342,72 +342,46 @@ void core_update_device_list_access( ...@@ -342,72 +342,46 @@ void core_update_device_list_access(
spin_unlock_irq(&nacl->device_list_lock); spin_unlock_irq(&nacl->device_list_lock);
} }
/* core_update_device_list_for_node(): /* core_enable_device_list_for_node():
* *
* *
*/ */
int core_update_device_list_for_node( int core_enable_device_list_for_node(
struct se_lun *lun, struct se_lun *lun,
struct se_lun_acl *lun_acl, struct se_lun_acl *lun_acl,
u32 mapped_lun, u32 mapped_lun,
u32 lun_access, u32 lun_access,
struct se_node_acl *nacl, struct se_node_acl *nacl,
struct se_portal_group *tpg, struct se_portal_group *tpg)
int enable)
{ {
struct se_port *port = lun->lun_sep; struct se_port *port = lun->lun_sep;
struct se_dev_entry *deve = nacl->device_list[mapped_lun]; struct se_dev_entry *deve;
int trans = 0;
/*
* If the MappedLUN entry is being disabled, the entry in
* port->sep_alua_list must be removed now before clearing the
* struct se_dev_entry pointers below as logic in
* core_alua_do_transition_tg_pt() depends on these being present.
*/
if (!enable) {
/*
* deve->se_lun_acl will be NULL for demo-mode created LUNs
* that have not been explicitly concerted to MappedLUNs ->
* struct se_lun_acl, but we remove deve->alua_port_list from
* port->sep_alua_list. This also means that active UAs and
* NodeACL context specific PR metadata for demo-mode
* MappedLUN *deve will be released below..
*/
spin_lock_bh(&port->sep_alua_lock);
list_del(&deve->alua_port_list);
spin_unlock_bh(&port->sep_alua_lock);
}
spin_lock_irq(&nacl->device_list_lock); spin_lock_irq(&nacl->device_list_lock);
if (enable) {
/* deve = nacl->device_list[mapped_lun];
* Check if the call is handling demo mode -> explict LUN ACL
* transition. This transition must be for the same struct se_lun /*
* + mapped_lun that was setup in demo mode.. * Check if the call is handling demo mode -> explict LUN ACL
*/ * transition. This transition must be for the same struct se_lun
if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { * + mapped_lun that was setup in demo mode..
if (deve->se_lun_acl != NULL) { */
pr_err("struct se_dev_entry->se_lun_acl" if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
" already set for demo mode -> explict" if (deve->se_lun_acl != NULL) {
" LUN ACL transition\n"); pr_err("struct se_dev_entry->se_lun_acl"
spin_unlock_irq(&nacl->device_list_lock); " already set for demo mode -> explict"
return -EINVAL; " LUN ACL transition\n");
} spin_unlock_irq(&nacl->device_list_lock);
if (deve->se_lun != lun) { return -EINVAL;
pr_err("struct se_dev_entry->se_lun does"
" match passed struct se_lun for demo mode"
" -> explict LUN ACL transition\n");
spin_unlock_irq(&nacl->device_list_lock);
return -EINVAL;
}
deve->se_lun_acl = lun_acl;
trans = 1;
} else {
deve->se_lun = lun;
deve->se_lun_acl = lun_acl;
deve->mapped_lun = mapped_lun;
deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS;
} }
if (deve->se_lun != lun) {
pr_err("struct se_dev_entry->se_lun does"
" match passed struct se_lun for demo mode"
" -> explict LUN ACL transition\n");
spin_unlock_irq(&nacl->device_list_lock);
return -EINVAL;
}
deve->se_lun_acl = lun_acl;
if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) { if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY; deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
...@@ -417,27 +391,72 @@ int core_update_device_list_for_node( ...@@ -417,27 +391,72 @@ int core_update_device_list_for_node(
deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY; deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
} }
if (trans) {
spin_unlock_irq(&nacl->device_list_lock);
return 0;
}
deve->creation_time = get_jiffies_64();
deve->attach_count++;
spin_unlock_irq(&nacl->device_list_lock); spin_unlock_irq(&nacl->device_list_lock);
return 0;
}
spin_lock_bh(&port->sep_alua_lock); deve->se_lun = lun;
list_add_tail(&deve->alua_port_list, &port->sep_alua_list); deve->se_lun_acl = lun_acl;
spin_unlock_bh(&port->sep_alua_lock); deve->mapped_lun = mapped_lun;
deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS;
return 0; if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
} else {
deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
} }
deve->creation_time = get_jiffies_64();
deve->attach_count++;
spin_unlock_irq(&nacl->device_list_lock);
spin_lock_bh(&port->sep_alua_lock);
list_add_tail(&deve->alua_port_list, &port->sep_alua_list);
spin_unlock_bh(&port->sep_alua_lock);
return 0;
}
/* core_disable_device_list_for_node():
*
*
*/
int core_disable_device_list_for_node(
struct se_lun *lun,
struct se_lun_acl *lun_acl,
u32 mapped_lun,
u32 lun_access,
struct se_node_acl *nacl,
struct se_portal_group *tpg)
{
struct se_port *port = lun->lun_sep;
struct se_dev_entry *deve = nacl->device_list[mapped_lun];
/*
* If the MappedLUN entry is being disabled, the entry in
* port->sep_alua_list must be removed now before clearing the
* struct se_dev_entry pointers below as logic in
* core_alua_do_transition_tg_pt() depends on these being present.
*
* deve->se_lun_acl will be NULL for demo-mode created LUNs
* that have not been explicitly converted to MappedLUNs ->
* struct se_lun_acl, but we remove deve->alua_port_list from
* port->sep_alua_list. This also means that active UAs and
* NodeACL context specific PR metadata for demo-mode
* MappedLUN *deve will be released below..
*/
spin_lock_bh(&port->sep_alua_lock);
list_del(&deve->alua_port_list);
spin_unlock_bh(&port->sep_alua_lock);
/* /*
* Wait for any in process SPEC_I_PT=1 or REGISTER_AND_MOVE * Wait for any in process SPEC_I_PT=1 or REGISTER_AND_MOVE
* PR operation to complete. * PR operation to complete.
*/ */
spin_unlock_irq(&nacl->device_list_lock);
while (atomic_read(&deve->pr_ref_count) != 0) while (atomic_read(&deve->pr_ref_count) != 0)
cpu_relax(); cpu_relax();
spin_lock_irq(&nacl->device_list_lock); spin_lock_irq(&nacl->device_list_lock);
/* /*
* Disable struct se_dev_entry LUN ACL mapping * Disable struct se_dev_entry LUN ACL mapping
@@ -475,9 +494,9 @@ void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
 			continue;
 		spin_unlock_irq(&nacl->device_list_lock);

-		core_update_device_list_for_node(lun, NULL,
+		core_disable_device_list_for_node(lun, NULL,
 			deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS,
-			nacl, tpg, 0);
+			nacl, tpg);

 		spin_lock_irq(&nacl->device_list_lock);
 	}
@@ -715,7 +734,7 @@ void se_release_device_for_hba(struct se_device *dev)
 		se_dev_stop(dev);

 	if (dev->dev_ptr) {
-		kthread_stop(dev->process_thread);
+		destroy_workqueue(dev->tmr_wq);
 		if (dev->transport->free_device)
 			dev->transport->free_device(dev->dev_ptr);
 	}
@@ -822,7 +841,7 @@ int se_dev_check_shutdown(struct se_device *dev)
 	return ret;
 }

-u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
+static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
 {
 	u32 tmp, aligned_max_sectors;
 	/*
@@ -1273,7 +1292,6 @@ int se_dev_set_block_size(struct se_device *dev, u32 block_size)
 struct se_lun *core_dev_add_lun(
 	struct se_portal_group *tpg,
-	struct se_hba *hba,
 	struct se_device *dev,
 	u32 lun)
 {
@@ -1298,7 +1316,7 @@ struct se_lun *core_dev_add_lun(
 	pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
 		" CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
 		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun_p->unpacked_lun,
-		tpg->se_tpg_tfo->get_fabric_name(), hba->hba_id);
+		tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id);
 	/*
 	 * Update LUN maps for dynamically added initiators when
 	 * generate_node_acl is enabled.
@@ -1470,8 +1488,8 @@ int core_dev_add_initiator_node_lun_acl(
 	lacl->se_lun = lun;

-	if (core_update_device_list_for_node(lun, lacl, lacl->mapped_lun,
-			lun_access, nacl, tpg, 1) < 0)
+	if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
+			lun_access, nacl, tpg) < 0)
 		return -EINVAL;

 	spin_lock(&lun->lun_acl_lock);
@@ -1514,8 +1532,8 @@ int core_dev_del_initiator_node_lun_acl(
 	smp_mb__after_atomic_dec();
 	spin_unlock(&lun->lun_acl_lock);

-	core_update_device_list_for_node(lun, NULL, lacl->mapped_lun,
-		TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
+	core_disable_device_list_for_node(lun, NULL, lacl->mapped_lun,
+		TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);

 	lacl->se_lun = NULL;
@@ -764,8 +764,7 @@ static int target_fabric_port_link(
 		goto out;
 	}

-	lun_p = core_dev_add_lun(se_tpg, dev->se_hba, dev,
-				lun->unpacked_lun);
+	lun_p = core_dev_add_lun(se_tpg, dev, lun->unpacked_lun);
 	if (IS_ERR(lun_p)) {
 		pr_err("core_dev_add_lun() failed\n");
 		ret = PTR_ERR(lun_p);
...
@@ -331,7 +331,7 @@ static int fd_do_writev(struct se_cmd *cmd, struct scatterlist *sgl,
 	return 1;
 }

-static void fd_emulate_sync_cache(struct se_cmd *cmd)
+static int fd_execute_sync_cache(struct se_cmd *cmd)
 {
 	struct se_device *dev = cmd->se_dev;
 	struct fd_dev *fd_dev = dev->dev_ptr;
@@ -365,7 +365,7 @@ static void fd_emulate_sync_cache(struct se_cmd *cmd)
 		pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);

 	if (immed)
-		return;
+		return 0;

 	if (ret) {
 		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -373,11 +373,15 @@ static void fd_emulate_sync_cache(struct se_cmd *cmd)
 	} else {
 		target_complete_cmd(cmd, SAM_STAT_GOOD);
 	}
+
+	return 0;
 }

-static int fd_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
-		u32 sgl_nents, enum dma_data_direction data_direction)
+static int fd_execute_rw(struct se_cmd *cmd)
 {
+	struct scatterlist *sgl = cmd->t_data_sg;
+	u32 sgl_nents = cmd->t_data_nents;
+	enum dma_data_direction data_direction = cmd->data_direction;
 	struct se_device *dev = cmd->se_dev;
 	int ret = 0;
@@ -550,6 +554,16 @@ static sector_t fd_get_blocks(struct se_device *dev)
 	return div_u64(dev_size, dev->se_sub_dev->se_dev_attrib.block_size);
 }

+static struct spc_ops fd_spc_ops = {
+	.execute_rw		= fd_execute_rw,
+	.execute_sync_cache	= fd_execute_sync_cache,
+};
+
+static int fd_parse_cdb(struct se_cmd *cmd)
+{
+	return sbc_parse_cdb(cmd, &fd_spc_ops);
+}
+
 static struct se_subsystem_api fileio_template = {
 	.name		= "fileio",
 	.owner		= THIS_MODULE,
@@ -561,8 +575,7 @@ static struct se_subsystem_api fileio_template = {
 	.allocate_virtdevice	= fd_allocate_virtdevice,
 	.create_virtdevice	= fd_create_virtdevice,
 	.free_device		= fd_free_device,
-	.execute_cmd		= fd_execute_cmd,
-	.do_sync_cache		= fd_emulate_sync_cache,
+	.parse_cdb		= fd_parse_cdb,
 	.check_configfs_dev_params = fd_check_configfs_dev_params,
 	.set_configfs_dev_params = fd_set_configfs_dev_params,
 	.show_configfs_dev_params = fd_show_configfs_dev_params,
...
...@@ -40,6 +40,7 @@ ...@@ -40,6 +40,7 @@
#include <linux/module.h> #include <linux/module.h>
#include <scsi/scsi.h> #include <scsi/scsi.h>
#include <scsi/scsi_host.h> #include <scsi/scsi_host.h>
#include <asm/unaligned.h>
#include <target/target_core_base.h> #include <target/target_core_base.h>
#include <target/target_core_backend.h> #include <target/target_core_backend.h>
...@@ -96,6 +97,7 @@ static struct se_device *iblock_create_virtdevice( ...@@ -96,6 +97,7 @@ static struct se_device *iblock_create_virtdevice(
struct request_queue *q; struct request_queue *q;
struct queue_limits *limits; struct queue_limits *limits;
u32 dev_flags = 0; u32 dev_flags = 0;
fmode_t mode;
int ret = -EINVAL; int ret = -EINVAL;
if (!ib_dev) { if (!ib_dev) {
...@@ -117,8 +119,11 @@ static struct se_device *iblock_create_virtdevice( ...@@ -117,8 +119,11 @@ static struct se_device *iblock_create_virtdevice(
pr_debug( "IBLOCK: Claiming struct block_device: %s\n", pr_debug( "IBLOCK: Claiming struct block_device: %s\n",
ib_dev->ibd_udev_path); ib_dev->ibd_udev_path);
bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode = FMODE_READ|FMODE_EXCL;
FMODE_WRITE|FMODE_READ|FMODE_EXCL, ib_dev); if (!ib_dev->ibd_readonly)
mode |= FMODE_WRITE;
bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev);
if (IS_ERR(bd)) { if (IS_ERR(bd)) {
ret = PTR_ERR(bd); ret = PTR_ERR(bd);
goto failed; goto failed;
...@@ -292,7 +297,7 @@ static void iblock_end_io_flush(struct bio *bio, int err) ...@@ -292,7 +297,7 @@ static void iblock_end_io_flush(struct bio *bio, int err)
* Implement SYCHRONIZE CACHE. Note that we can't handle lba ranges and must * Implement SYCHRONIZE CACHE. Note that we can't handle lba ranges and must
* always flush the whole cache. * always flush the whole cache.
*/ */
static void iblock_emulate_sync_cache(struct se_cmd *cmd) static int iblock_execute_sync_cache(struct se_cmd *cmd)
{ {
struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr; struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
int immed = (cmd->t_task_cdb[1] & 0x2); int immed = (cmd->t_task_cdb[1] & 0x2);
...@@ -311,23 +316,98 @@ static void iblock_emulate_sync_cache(struct se_cmd *cmd) ...@@ -311,23 +316,98 @@ static void iblock_emulate_sync_cache(struct se_cmd *cmd)
if (!immed) if (!immed)
bio->bi_private = cmd; bio->bi_private = cmd;
submit_bio(WRITE_FLUSH, bio); submit_bio(WRITE_FLUSH, bio);
return 0;
} }
static int iblock_do_discard(struct se_device *dev, sector_t lba, u32 range) static int iblock_execute_unmap(struct se_cmd *cmd)
{ {
struct se_device *dev = cmd->se_dev;
struct iblock_dev *ibd = dev->dev_ptr; struct iblock_dev *ibd = dev->dev_ptr;
struct block_device *bd = ibd->ibd_bd; unsigned char *buf, *ptr = NULL;
int barrier = 0; sector_t lba;
int size = cmd->data_length;
u32 range;
int ret = 0;
int dl, bd_dl;
buf = transport_kmap_data_sg(cmd);
dl = get_unaligned_be16(&buf[0]);
bd_dl = get_unaligned_be16(&buf[2]);
size = min(size - 8, bd_dl);
if (size / 16 > dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
ret = -EINVAL;
goto err;
}
/* First UNMAP block descriptor starts at 8 byte offset */
ptr = &buf[8];
pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u"
" ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);
while (size >= 16) {
lba = get_unaligned_be64(&ptr[0]);
range = get_unaligned_be32(&ptr[8]);
pr_debug("UNMAP: Using lba: %llu and range: %u\n",
(unsigned long long)lba, range);
if (range > dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count) {
cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
ret = -EINVAL;
goto err;
}
if (lba + range > dev->transport->get_blocks(dev) + 1) {
cmd->scsi_sense_reason = TCM_ADDRESS_OUT_OF_RANGE;
ret = -EINVAL;
goto err;
}
return blkdev_issue_discard(bd, lba, range, GFP_KERNEL, barrier); ret = blkdev_issue_discard(ibd->ibd_bd, lba, range,
GFP_KERNEL, 0);
if (ret < 0) {
pr_err("blkdev_issue_discard() failed: %d\n",
ret);
goto err;
}
ptr += 16;
size -= 16;
}
err:
transport_kunmap_data_sg(cmd);
if (!ret)
target_complete_cmd(cmd, GOOD);
return ret;
}
static int iblock_execute_write_same(struct se_cmd *cmd)
{
struct iblock_dev *ibd = cmd->se_dev->dev_ptr;
int ret;
ret = blkdev_issue_discard(ibd->ibd_bd, cmd->t_task_lba,
spc_get_write_same_sectors(cmd), GFP_KERNEL,
0);
if (ret < 0) {
pr_debug("blkdev_issue_discard() failed for WRITE_SAME\n");
return ret;
}
target_complete_cmd(cmd, GOOD);
return 0;
} }
enum { enum {
Opt_udev_path, Opt_force, Opt_err Opt_udev_path, Opt_readonly, Opt_force, Opt_err
}; };
static match_table_t tokens = { static match_table_t tokens = {
{Opt_udev_path, "udev_path=%s"}, {Opt_udev_path, "udev_path=%s"},
{Opt_readonly, "readonly=%d"},
{Opt_force, "force=%d"}, {Opt_force, "force=%d"},
{Opt_err, NULL} {Opt_err, NULL}
}; };
...@@ -340,6 +420,7 @@ static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba, ...@@ -340,6 +420,7 @@ static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
char *orig, *ptr, *arg_p, *opts; char *orig, *ptr, *arg_p, *opts;
substring_t args[MAX_OPT_ARGS]; substring_t args[MAX_OPT_ARGS];
int ret = 0, token; int ret = 0, token;
unsigned long tmp_readonly;
opts = kstrdup(page, GFP_KERNEL); opts = kstrdup(page, GFP_KERNEL);
if (!opts) if (!opts)
...@@ -372,6 +453,22 @@ static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba, ...@@ -372,6 +453,22 @@ static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
ib_dev->ibd_udev_path); ib_dev->ibd_udev_path);
ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH; ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
break; break;
case Opt_readonly:
arg_p = match_strdup(&args[0]);
if (!arg_p) {
ret = -ENOMEM;
break;
}
ret = strict_strtoul(arg_p, 0, &tmp_readonly);
kfree(arg_p);
if (ret < 0) {
pr_err("strict_strtoul() failed for"
" readonly=\n");
goto out;
}
ib_dev->ibd_readonly = tmp_readonly;
pr_debug("IBLOCK: readonly: %d\n", ib_dev->ibd_readonly);
break;
case Opt_force: case Opt_force:
break; break;
default: default:
...@@ -411,11 +508,10 @@ static ssize_t iblock_show_configfs_dev_params( ...@@ -411,11 +508,10 @@ static ssize_t iblock_show_configfs_dev_params(
if (bd) if (bd)
bl += sprintf(b + bl, "iBlock device: %s", bl += sprintf(b + bl, "iBlock device: %s",
bdevname(bd, buf)); bdevname(bd, buf));
if (ibd->ibd_flags & IBDF_HAS_UDEV_PATH) { if (ibd->ibd_flags & IBDF_HAS_UDEV_PATH)
bl += sprintf(b + bl, " UDEV PATH: %s\n", bl += sprintf(b + bl, " UDEV PATH: %s",
ibd->ibd_udev_path); ibd->ibd_udev_path);
} else bl += sprintf(b + bl, " readonly: %d\n", ibd->ibd_readonly);
bl += sprintf(b + bl, "\n");
bl += sprintf(b + bl, " "); bl += sprintf(b + bl, " ");
if (bd) { if (bd) {
...@@ -493,9 +589,11 @@ static void iblock_submit_bios(struct bio_list *list, int rw) ...@@ -493,9 +589,11 @@ static void iblock_submit_bios(struct bio_list *list, int rw)
blk_finish_plug(&plug); blk_finish_plug(&plug);
} }
static int iblock_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl, static int iblock_execute_rw(struct se_cmd *cmd)
u32 sgl_nents, enum dma_data_direction data_direction)
{ {
struct scatterlist *sgl = cmd->t_data_sg;
u32 sgl_nents = cmd->t_data_nents;
enum dma_data_direction data_direction = cmd->data_direction;
struct se_device *dev = cmd->se_dev; struct se_device *dev = cmd->se_dev;
struct iblock_req *ibr; struct iblock_req *ibr;
struct bio *bio; struct bio *bio;
...@@ -642,6 +740,18 @@ static void iblock_bio_done(struct bio *bio, int err) ...@@ -642,6 +740,18 @@ static void iblock_bio_done(struct bio *bio, int err)
iblock_complete_cmd(cmd); iblock_complete_cmd(cmd);
} }
static struct spc_ops iblock_spc_ops = {
.execute_rw = iblock_execute_rw,
.execute_sync_cache = iblock_execute_sync_cache,
.execute_write_same = iblock_execute_write_same,
.execute_unmap = iblock_execute_unmap,
};
static int iblock_parse_cdb(struct se_cmd *cmd)
{
return sbc_parse_cdb(cmd, &iblock_spc_ops);
}
static struct se_subsystem_api iblock_template = { static struct se_subsystem_api iblock_template = {
.name = "iblock", .name = "iblock",
.owner = THIS_MODULE, .owner = THIS_MODULE,
...@@ -653,9 +763,7 @@ static struct se_subsystem_api iblock_template = { ...@@ -653,9 +763,7 @@ static struct se_subsystem_api iblock_template = {
.allocate_virtdevice = iblock_allocate_virtdevice, .allocate_virtdevice = iblock_allocate_virtdevice,
.create_virtdevice = iblock_create_virtdevice, .create_virtdevice = iblock_create_virtdevice,
.free_device = iblock_free_device, .free_device = iblock_free_device,
.execute_cmd = iblock_execute_cmd, .parse_cdb = iblock_parse_cdb,
.do_discard = iblock_do_discard,
.do_sync_cache = iblock_emulate_sync_cache,
.check_configfs_dev_params = iblock_check_configfs_dev_params, .check_configfs_dev_params = iblock_check_configfs_dev_params,
.set_configfs_dev_params = iblock_set_configfs_dev_params, .set_configfs_dev_params = iblock_set_configfs_dev_params,
.show_configfs_dev_params = iblock_show_configfs_dev_params, .show_configfs_dev_params = iblock_show_configfs_dev_params,
......
...@@ -18,6 +18,7 @@ struct iblock_dev { ...@@ -18,6 +18,7 @@ struct iblock_dev {
u32 ibd_flags; u32 ibd_flags;
struct bio_set *ibd_bio_set; struct bio_set *ibd_bio_set;
struct block_device *ibd_bd; struct block_device *ibd_bd;
bool ibd_readonly;
} ____cacheline_aligned; } ____cacheline_aligned;
#endif /* TARGET_CORE_IBLOCK_H */ #endif /* TARGET_CORE_IBLOCK_H */
...@@ -4,25 +4,16 @@ ...@@ -4,25 +4,16 @@
/* target_core_alua.c */ /* target_core_alua.c */
extern struct t10_alua_lu_gp *default_lu_gp; extern struct t10_alua_lu_gp *default_lu_gp;
/* target_core_cdb.c */
int target_emulate_inquiry(struct se_cmd *cmd);
int target_emulate_readcapacity(struct se_cmd *cmd);
int target_emulate_readcapacity_16(struct se_cmd *cmd);
int target_emulate_modesense(struct se_cmd *cmd);
int target_emulate_request_sense(struct se_cmd *cmd);
int target_emulate_unmap(struct se_cmd *cmd);
int target_emulate_write_same(struct se_cmd *cmd);
int target_emulate_synchronize_cache(struct se_cmd *cmd);
int target_emulate_noop(struct se_cmd *cmd);
/* target_core_device.c */ /* target_core_device.c */
struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16); struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16);
int core_free_device_list_for_node(struct se_node_acl *, int core_free_device_list_for_node(struct se_node_acl *,
struct se_portal_group *); struct se_portal_group *);
void core_dec_lacl_count(struct se_node_acl *, struct se_cmd *); void core_dec_lacl_count(struct se_node_acl *, struct se_cmd *);
void core_update_device_list_access(u32, u32, struct se_node_acl *); void core_update_device_list_access(u32, u32, struct se_node_acl *);
int core_update_device_list_for_node(struct se_lun *, struct se_lun_acl *, int core_enable_device_list_for_node(struct se_lun *, struct se_lun_acl *,
u32, u32, struct se_node_acl *, struct se_portal_group *, int); u32, u32, struct se_node_acl *, struct se_portal_group *);
int core_disable_device_list_for_node(struct se_lun *, struct se_lun_acl *,
u32, u32, struct se_node_acl *, struct se_portal_group *);
void core_clear_lun_from_tpg(struct se_lun *, struct se_portal_group *); void core_clear_lun_from_tpg(struct se_lun *, struct se_portal_group *);
int core_dev_export(struct se_device *, struct se_portal_group *, int core_dev_export(struct se_device *, struct se_portal_group *,
struct se_lun *); struct se_lun *);
...@@ -56,8 +47,7 @@ int se_dev_set_max_sectors(struct se_device *, u32); ...@@ -56,8 +47,7 @@ int se_dev_set_max_sectors(struct se_device *, u32);
int se_dev_set_fabric_max_sectors(struct se_device *, u32); int se_dev_set_fabric_max_sectors(struct se_device *, u32);
int se_dev_set_optimal_sectors(struct se_device *, u32); int se_dev_set_optimal_sectors(struct se_device *, u32);
int se_dev_set_block_size(struct se_device *, u32); int se_dev_set_block_size(struct se_device *, u32);
struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_hba *, struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_device *, u32);
struct se_device *, u32);
int core_dev_del_lun(struct se_portal_group *, u32); int core_dev_del_lun(struct se_portal_group *, u32);
struct se_lun *core_get_lun_from_tpg(struct se_portal_group *, u32); struct se_lun *core_get_lun_from_tpg(struct se_portal_group *, u32);
struct se_lun_acl *core_dev_init_initiator_node_lun_acl(struct se_portal_group *, struct se_lun_acl *core_dev_init_initiator_node_lun_acl(struct se_portal_group *,
...@@ -104,7 +94,6 @@ void release_se_kmem_caches(void); ...@@ -104,7 +94,6 @@ void release_se_kmem_caches(void);
u32 scsi_get_new_index(scsi_index_t); u32 scsi_get_new_index(scsi_index_t);
void transport_subsystem_check_init(void); void transport_subsystem_check_init(void);
void transport_cmd_finish_abort(struct se_cmd *, int); void transport_cmd_finish_abort(struct se_cmd *, int);
void __target_remove_from_execute_list(struct se_cmd *);
unsigned char *transport_dump_cmd_direction(struct se_cmd *); unsigned char *transport_dump_cmd_direction(struct se_cmd *);
void transport_dump_dev_state(struct se_device *, char *, int *); void transport_dump_dev_state(struct se_device *, char *, int *);
void transport_dump_dev_info(struct se_device *, struct se_lun *, void transport_dump_dev_info(struct se_device *, struct se_lun *,
...@@ -116,6 +105,7 @@ int transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int); ...@@ -116,6 +105,7 @@ int transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int);
bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags); bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags);
int transport_clear_lun_from_sessions(struct se_lun *); int transport_clear_lun_from_sessions(struct se_lun *);
void transport_send_task_abort(struct se_cmd *); void transport_send_task_abort(struct se_cmd *);
int target_cmd_size_check(struct se_cmd *cmd, unsigned int size);
/* target_core_stat.c */ /* target_core_stat.c */
void target_stat_setup_dev_default_groups(struct se_subsystem_dev *); void target_stat_setup_dev_default_groups(struct se_subsystem_dev *);
......
...@@ -507,7 +507,7 @@ static int core_scsi3_pr_seq_non_holder( ...@@ -507,7 +507,7 @@ static int core_scsi3_pr_seq_non_holder(
* Check if write exclusive initiator ports *NOT* holding the * Check if write exclusive initiator ports *NOT* holding the
* WRITE_EXCLUSIVE_* reservation. * WRITE_EXCLUSIVE_* reservation.
*/ */
if ((we) && !(registered_nexus)) { if (we && !registered_nexus) {
if (cmd->data_direction == DMA_TO_DEVICE) { if (cmd->data_direction == DMA_TO_DEVICE) {
/* /*
* Conflict for write exclusive * Conflict for write exclusive
...@@ -2486,7 +2486,7 @@ static int core_scsi3_pro_reserve( ...@@ -2486,7 +2486,7 @@ static int core_scsi3_pro_reserve(
*/ */
spin_lock(&dev->dev_reservation_lock); spin_lock(&dev->dev_reservation_lock);
pr_res_holder = dev->dev_pr_res_holder; pr_res_holder = dev->dev_pr_res_holder;
if ((pr_res_holder)) { if (pr_res_holder) {
/* /*
* From spc4r17 Section 5.7.9: Reserving: * From spc4r17 Section 5.7.9: Reserving:
* *
...@@ -4030,7 +4030,7 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd) ...@@ -4030,7 +4030,7 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
spin_lock(&se_dev->dev_reservation_lock); spin_lock(&se_dev->dev_reservation_lock);
pr_reg = se_dev->dev_pr_res_holder; pr_reg = se_dev->dev_pr_res_holder;
if ((pr_reg)) { if (pr_reg) {
/* /*
* Set the hardcoded Additional Length * Set the hardcoded Additional Length
*/ */
......
...@@ -35,8 +35,10 @@ ...@@ -35,8 +35,10 @@
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <linux/genhd.h> #include <linux/genhd.h>
#include <linux/cdrom.h> #include <linux/cdrom.h>
#include <linux/file.h> #include <linux/ratelimit.h>
#include <linux/module.h> #include <linux/module.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h> #include <scsi/scsi.h>
#include <scsi/scsi_device.h> #include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h> #include <scsi/scsi_cmnd.h>
...@@ -46,12 +48,14 @@ ...@@ -46,12 +48,14 @@
#include <target/target_core_base.h> #include <target/target_core_base.h>
#include <target/target_core_backend.h> #include <target/target_core_backend.h>
#include "target_core_alua.h"
#include "target_core_pscsi.h" #include "target_core_pscsi.h"
#define ISPRINT(a) ((a >= ' ') && (a <= '~')) #define ISPRINT(a) ((a >= ' ') && (a <= '~'))
static struct se_subsystem_api pscsi_template; static struct se_subsystem_api pscsi_template;
static int pscsi_execute_cmd(struct se_cmd *cmd);
static void pscsi_req_done(struct request *, int); static void pscsi_req_done(struct request *, int);
/* pscsi_attach_hba(): /* pscsi_attach_hba():
...@@ -1019,9 +1023,79 @@ static int pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, ...@@ -1019,9 +1023,79 @@ static int pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl,
return -ENOMEM; return -ENOMEM;
} }
static int pscsi_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl, /*
u32 sgl_nents, enum dma_data_direction data_direction) * Clear a lun set in the cdb if the initiator talking to us spoke
* an old standards version, as we can't assume the underlying device
* won't choke up on it.
*/
static inline void pscsi_clear_cdb_lun(unsigned char *cdb)
{
switch (cdb[0]) {
case READ_10: /* SBC - RDProtect */
case READ_12: /* SBC - RDProtect */
case READ_16: /* SBC - RDProtect */
case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
case VERIFY: /* SBC - VRProtect */
case VERIFY_16: /* SBC - VRProtect */
case WRITE_VERIFY: /* SBC - VRProtect */
case WRITE_VERIFY_12: /* SBC - VRProtect */
case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */
break;
default:
cdb[1] &= 0x1f; /* clear logical unit number */
break;
}
}
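For background on the mask above: pre-SCSI-3 initiators could encode the LUN in the top three bits of CDB byte 1, so keeping only the low five bits strips a stale LUN while preserving the command-specific fields. A stand-alone illustration with an invented CDB:
#include <assert.h>
#include <stdio.h>
int main(void)
{
	/* Hypothetical TEST UNIT READY CDB from an old initiator: LUN 2 in byte 1 bits 5-7 */
	unsigned char cdb[6] = { 0x00, 2 << 5, 0x00, 0x00, 0x00, 0x00 };
	cdb[1] &= 0x1f;		/* the same mask pscsi_clear_cdb_lun() applies */
	assert(cdb[1] == 0);
	printf("byte 1 after clearing the legacy LUN field: 0x%02x\n", cdb[1]);
	return 0;
}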
static int pscsi_parse_cdb(struct se_cmd *cmd)
{
unsigned char *cdb = cmd->t_task_cdb;
unsigned int dummy_size;
int ret;
if (cmd->se_cmd_flags & SCF_BIDI) {
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
return -EINVAL;
}
pscsi_clear_cdb_lun(cdb);
/*
* For REPORT LUNS we always need to emulate the response; for everything
* else the default for pSCSI is to pass the command to the underlying
* LLD / physical hardware.
*/
switch (cdb[0]) {
case REPORT_LUNS:
ret = spc_parse_cdb(cmd, &dummy_size);
if (ret)
return ret;
break;
case READ_6:
case READ_10:
case READ_12:
case READ_16:
case WRITE_6:
case WRITE_10:
case WRITE_12:
case WRITE_16:
case WRITE_VERIFY:
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
/* FALLTHROUGH */
default:
cmd->execute_cmd = pscsi_execute_cmd;
break;
}
return 0;
}
static int pscsi_execute_cmd(struct se_cmd *cmd)
{ {
struct scatterlist *sgl = cmd->t_data_sg;
u32 sgl_nents = cmd->t_data_nents;
enum dma_data_direction data_direction = cmd->data_direction;
struct pscsi_dev_virt *pdv = cmd->se_dev->dev_ptr; struct pscsi_dev_virt *pdv = cmd->se_dev->dev_ptr;
struct pscsi_plugin_task *pt; struct pscsi_plugin_task *pt;
struct request *req; struct request *req;
...@@ -1042,7 +1116,7 @@ static int pscsi_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl, ...@@ -1042,7 +1116,7 @@ static int pscsi_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
memcpy(pt->pscsi_cdb, cmd->t_task_cdb, memcpy(pt->pscsi_cdb, cmd->t_task_cdb,
scsi_command_size(cmd->t_task_cdb)); scsi_command_size(cmd->t_task_cdb));
if (cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) { if (!sgl) {
req = blk_get_request(pdv->pdv_sd->request_queue, req = blk_get_request(pdv->pdv_sd->request_queue,
(data_direction == DMA_TO_DEVICE), (data_direction == DMA_TO_DEVICE),
GFP_KERNEL); GFP_KERNEL);
...@@ -1188,7 +1262,7 @@ static struct se_subsystem_api pscsi_template = { ...@@ -1188,7 +1262,7 @@ static struct se_subsystem_api pscsi_template = {
.create_virtdevice = pscsi_create_virtdevice, .create_virtdevice = pscsi_create_virtdevice,
.free_device = pscsi_free_device, .free_device = pscsi_free_device,
.transport_complete = pscsi_transport_complete, .transport_complete = pscsi_transport_complete,
.execute_cmd = pscsi_execute_cmd, .parse_cdb = pscsi_parse_cdb,
.check_configfs_dev_params = pscsi_check_configfs_dev_params, .check_configfs_dev_params = pscsi_check_configfs_dev_params,
.set_configfs_dev_params = pscsi_set_configfs_dev_params, .set_configfs_dev_params = pscsi_set_configfs_dev_params,
.show_configfs_dev_params = pscsi_show_configfs_dev_params, .show_configfs_dev_params = pscsi_show_configfs_dev_params,
......
...@@ -284,9 +284,11 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page) ...@@ -284,9 +284,11 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
return NULL; return NULL;
} }
static int rd_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl, static int rd_execute_rw(struct se_cmd *cmd)
u32 sgl_nents, enum dma_data_direction data_direction)
{ {
struct scatterlist *sgl = cmd->t_data_sg;
u32 sgl_nents = cmd->t_data_nents;
enum dma_data_direction data_direction = cmd->data_direction;
struct se_device *se_dev = cmd->se_dev; struct se_device *se_dev = cmd->se_dev;
struct rd_dev *dev = se_dev->dev_ptr; struct rd_dev *dev = se_dev->dev_ptr;
struct rd_dev_sg_table *table; struct rd_dev_sg_table *table;
...@@ -460,6 +462,15 @@ static sector_t rd_get_blocks(struct se_device *dev) ...@@ -460,6 +462,15 @@ static sector_t rd_get_blocks(struct se_device *dev)
return blocks_long; return blocks_long;
} }
static struct spc_ops rd_spc_ops = {
.execute_rw = rd_execute_rw,
};
static int rd_parse_cdb(struct se_cmd *cmd)
{
return sbc_parse_cdb(cmd, &rd_spc_ops);
}
static struct se_subsystem_api rd_mcp_template = { static struct se_subsystem_api rd_mcp_template = {
.name = "rd_mcp", .name = "rd_mcp",
.transport_type = TRANSPORT_PLUGIN_VHBA_VDEV, .transport_type = TRANSPORT_PLUGIN_VHBA_VDEV,
...@@ -468,7 +479,7 @@ static struct se_subsystem_api rd_mcp_template = { ...@@ -468,7 +479,7 @@ static struct se_subsystem_api rd_mcp_template = {
.allocate_virtdevice = rd_allocate_virtdevice, .allocate_virtdevice = rd_allocate_virtdevice,
.create_virtdevice = rd_create_virtdevice, .create_virtdevice = rd_create_virtdevice,
.free_device = rd_free_device, .free_device = rd_free_device,
.execute_cmd = rd_execute_cmd, .parse_cdb = rd_parse_cdb,
.check_configfs_dev_params = rd_check_configfs_dev_params, .check_configfs_dev_params = rd_check_configfs_dev_params,
.set_configfs_dev_params = rd_set_configfs_dev_params, .set_configfs_dev_params = rd_set_configfs_dev_params,
.show_configfs_dev_params = rd_show_configfs_dev_params, .show_configfs_dev_params = rd_show_configfs_dev_params,
......
/*
* SCSI Block Commands (SBC) parsing and emulation.
*
* Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
* Copyright (c) 2005, 2006, 2007 SBE, Inc.
* Copyright (c) 2007-2010 Rising Tide Systems
* Copyright (c) 2008-2010 Linux-iSCSI.org
*
* Nicholas A. Bellinger <nab@kernel.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include "target_core_internal.h"
#include "target_core_ua.h"
static int sbc_emulate_readcapacity(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
unsigned char *buf;
unsigned long long blocks_long = dev->transport->get_blocks(dev);
u32 blocks;
if (blocks_long >= 0x00000000ffffffff)
blocks = 0xffffffff;
else
blocks = (u32)blocks_long;
buf = transport_kmap_data_sg(cmd);
buf[0] = (blocks >> 24) & 0xff;
buf[1] = (blocks >> 16) & 0xff;
buf[2] = (blocks >> 8) & 0xff;
buf[3] = blocks & 0xff;
buf[4] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff;
buf[5] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff;
buf[6] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff;
buf[7] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff;
transport_kunmap_data_sg(cmd);
target_complete_cmd(cmd, GOOD);
return 0;
}
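A hedged user-space view of the 8-byte READ CAPACITY (10) payload built above: bytes 0-3 carry the LBA of the last logical block and bytes 4-7 the block length, so the usable capacity is (last LBA + 1) * block size. The sample values are invented.
#include <stdint.h>
#include <stdio.h>
int main(void)
{
	/* Example payload: last LBA 0x000fffff, block size 512 */
	uint8_t buf[8] = { 0x00, 0x0f, 0xff, 0xff, 0x00, 0x00, 0x02, 0x00 };
	uint32_t last_lba = ((uint32_t)buf[0] << 24) | (buf[1] << 16) |
			    (buf[2] << 8) | buf[3];
	uint32_t block_size = ((uint32_t)buf[4] << 24) | (buf[5] << 16) |
			      (buf[6] << 8) | buf[7];
	printf("capacity: %llu bytes\n",
	       ((unsigned long long)last_lba + 1) * block_size);
	return 0;
}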
static int sbc_emulate_readcapacity_16(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
unsigned char *buf;
unsigned long long blocks = dev->transport->get_blocks(dev);
buf = transport_kmap_data_sg(cmd);
buf[0] = (blocks >> 56) & 0xff;
buf[1] = (blocks >> 48) & 0xff;
buf[2] = (blocks >> 40) & 0xff;
buf[3] = (blocks >> 32) & 0xff;
buf[4] = (blocks >> 24) & 0xff;
buf[5] = (blocks >> 16) & 0xff;
buf[6] = (blocks >> 8) & 0xff;
buf[7] = blocks & 0xff;
buf[8] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff;
buf[9] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff;
buf[10] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff;
buf[11] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff;
/*
* Set Thin Provisioning Enable bit following sbc3r22 in section
* READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.
*/
if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws)
buf[14] = 0x80;
transport_kunmap_data_sg(cmd);
target_complete_cmd(cmd, GOOD);
return 0;
}
int spc_get_write_same_sectors(struct se_cmd *cmd)
{
u32 num_blocks;
if (cmd->t_task_cdb[0] == WRITE_SAME)
num_blocks = get_unaligned_be16(&cmd->t_task_cdb[7]);
else if (cmd->t_task_cdb[0] == WRITE_SAME_16)
num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]);
else /* WRITE_SAME_32 via VARIABLE_LENGTH_CMD */
num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]);
/*
* Use the explicit range when a non-zero value is supplied; otherwise calculate
* the remaining range based on ->get_blocks() - starting LBA.
*/
if (num_blocks)
return num_blocks;
return cmd->se_dev->transport->get_blocks(cmd->se_dev) -
cmd->t_task_lba + 1;
}
EXPORT_SYMBOL(spc_get_write_same_sectors);
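A small sketch of the fallback rule above for WRITE SAME: a zero NUMBER OF LOGICAL BLOCKS means "from the starting LBA to the last block of the device". The helper and device size below are invented for illustration.
#include <stdint.h>
#include <stdio.h>
/* Mirrors spc_get_write_same_sectors(): zero means "to the end of the device". */
static uint64_t write_same_sectors(uint32_t num_blocks, uint64_t last_lba,
				   uint64_t start_lba)
{
	return num_blocks ? num_blocks : last_lba - start_lba + 1;
}
int main(void)
{
	uint64_t last_lba = 2097151;	/* hypothetical 1 GiB device, 512-byte blocks */
	printf("explicit count: %llu\n",
	       (unsigned long long)write_same_sectors(16, last_lba, 1024));
	printf("zero count:     %llu\n",
	       (unsigned long long)write_same_sectors(0, last_lba, 1024));
	return 0;
}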
static int sbc_emulate_verify(struct se_cmd *cmd)
{
target_complete_cmd(cmd, GOOD);
return 0;
}
static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors)
{
return cmd->se_dev->se_sub_dev->se_dev_attrib.block_size * sectors;
}
static int sbc_check_valid_sectors(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
unsigned long long end_lba;
u32 sectors;
sectors = cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size;
end_lba = dev->transport->get_blocks(dev) + 1;
if (cmd->t_task_lba + sectors > end_lba) {
pr_err("target: lba %llu, sectors %u exceeds end lba %llu\n",
cmd->t_task_lba, sectors, end_lba);
return -EINVAL;
}
return 0;
}
static inline u32 transport_get_sectors_6(unsigned char *cdb)
{
/*
* Use 8-bit sector value. SBC-3 says:
*
* A TRANSFER LENGTH field set to zero specifies that 256
* logical blocks shall be written. Any other value
* specifies the number of logical blocks that shall be
* written.
*/
return cdb[4] ? : 256;
}
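The cdb[4] ? : 256 expression above uses the GCC omitted-middle-operand form of the conditional; a stand-alone equivalent of the SBC rule that a zero TRANSFER LENGTH in a 6-byte CDB means 256 blocks (CDB bytes invented):
#include <assert.h>
static unsigned int sectors_from_cdb6(const unsigned char *cdb)
{
	/* A TRANSFER LENGTH of zero in a 6-byte CDB means 256 blocks. */
	return cdb[4] ? cdb[4] : 256;
}
int main(void)
{
	unsigned char read6_zero[6] = { 0x08, 0, 0, 0, 0, 0 };		/* READ(6), length 0 */
	unsigned char read6_some[6] = { 0x08, 0, 0, 0, 16, 0 };	/* READ(6), length 16 */
	assert(sectors_from_cdb6(read6_zero) == 256);
	assert(sectors_from_cdb6(read6_some) == 16);
	return 0;
}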
static inline u32 transport_get_sectors_10(unsigned char *cdb)
{
return (u32)(cdb[7] << 8) + cdb[8];
}
static inline u32 transport_get_sectors_12(unsigned char *cdb)
{
return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
}
static inline u32 transport_get_sectors_16(unsigned char *cdb)
{
return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
(cdb[12] << 8) + cdb[13];
}
/*
* Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
*/
static inline u32 transport_get_sectors_32(unsigned char *cdb)
{
return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
(cdb[30] << 8) + cdb[31];
}
static inline u32 transport_lba_21(unsigned char *cdb)
{
return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
}
static inline u32 transport_lba_32(unsigned char *cdb)
{
return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
}
static inline unsigned long long transport_lba_64(unsigned char *cdb)
{
unsigned int __v1, __v2;
__v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
__v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}
/*
* For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
*/
static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
{
unsigned int __v1, __v2;
__v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
__v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];
return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}
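A quick check of the byte arithmetic in transport_lba_64(): bytes 2-9 of a 16-byte CDB hold the LBA big-endian, so combining the two 32-bit halves has to reproduce the value that was stored. The CDB contents are invented.
#include <assert.h>
#include <stdint.h>
static uint64_t lba_from_cdb16(const unsigned char *cdb)
{
	uint32_t hi = ((uint32_t)cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
	uint32_t lo = ((uint32_t)cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
	return ((uint64_t)hi << 32) | lo;	/* same combination as transport_lba_64() */
}
int main(void)
{
	unsigned char cdb[16] = { 0 };
	uint64_t lba = 0x0000000123456789ULL;
	int i;
	/* store the LBA big-endian in bytes 2..9 */
	for (i = 0; i < 8; i++)
		cdb[2 + i] = (unsigned char)(lba >> (8 * (7 - i)));
	assert(lba_from_cdb16(cdb) == lba);
	return 0;
}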
static int sbc_write_same_supported(struct se_device *dev,
unsigned char *flags)
{
if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
pr_err("WRITE_SAME PBDATA and LBDATA"
" bits not supported for Block Discard"
" Emulation\n");
return -ENOSYS;
}
/*
* Currently for the emulated case we only accept
* tpws with the UNMAP=1 bit set.
*/
if (!(flags[0] & 0x08)) {
pr_err("WRITE_SAME w/o UNMAP bit not"
" supported for Block Discard Emulation\n");
return -ENOSYS;
}
return 0;
}
static void xdreadwrite_callback(struct se_cmd *cmd)
{
unsigned char *buf, *addr;
struct scatterlist *sg;
unsigned int offset;
int i;
int count;
/*
* From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
*
* 1) read the specified logical block(s);
* 2) transfer logical blocks from the data-out buffer;
* 3) XOR the logical blocks transferred from the data-out buffer with
* the logical blocks read, storing the resulting XOR data in a buffer;
* 4) if the DISABLE WRITE bit is set to zero, then write the logical
* blocks transferred from the data-out buffer; and
* 5) transfer the resulting XOR data to the data-in buffer.
*/
buf = kmalloc(cmd->data_length, GFP_KERNEL);
if (!buf) {
pr_err("Unable to allocate xor_callback buf\n");
return;
}
/*
* Copy the scatterlist WRITE buffer located at cmd->t_data_sg
* into the locally allocated *buf
*/
sg_copy_to_buffer(cmd->t_data_sg,
cmd->t_data_nents,
buf,
cmd->data_length);
/*
* Now perform the XOR against the BIDI read memory located at
* cmd->t_bidi_data_sg
*/
offset = 0;
for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
addr = kmap_atomic(sg_page(sg));
if (!addr)
goto out;
for (i = 0; i < sg->length; i++)
*(addr + sg->offset + i) ^= *(buf + offset + i);
offset += sg->length;
kunmap_atomic(addr);
}
out:
kfree(buf);
}
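A stand-alone sketch of the XOR step xdreadwrite_callback() performs: the data the initiator transferred out is XORed with the blocks read from the media, and the result is returned in the data-in buffer. Plain arrays stand in for the scatterlists; all values are invented.
#include <stdio.h>
int main(void)
{
	unsigned char write_buf[8] = { 0xaa, 0x55, 0x00, 0xff, 0x12, 0x34, 0x56, 0x78 };
	unsigned char read_buf[8]  = { 0x0f, 0xf0, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66 };
	unsigned char xor_out[8];
	int i;
	/* XOR the data-out buffer with the blocks read; the result goes to data-in */
	for (i = 0; i < (int)sizeof(xor_out); i++)
		xor_out[i] = write_buf[i] ^ read_buf[i];
	for (i = 0; i < (int)sizeof(xor_out); i++)
		printf("%02x ", xor_out[i]);
	printf("\n");
	return 0;
}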
int sbc_parse_cdb(struct se_cmd *cmd, struct spc_ops *ops)
{
struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
struct se_device *dev = cmd->se_dev;
unsigned char *cdb = cmd->t_task_cdb;
unsigned int size;
u32 sectors = 0;
int ret;
switch (cdb[0]) {
case READ_6:
sectors = transport_get_sectors_6(cdb);
cmd->t_task_lba = transport_lba_21(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
cmd->execute_cmd = ops->execute_rw;
break;
case READ_10:
sectors = transport_get_sectors_10(cdb);
cmd->t_task_lba = transport_lba_32(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
cmd->execute_cmd = ops->execute_rw;
break;
case READ_12:
sectors = transport_get_sectors_12(cdb);
cmd->t_task_lba = transport_lba_32(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
cmd->execute_cmd = ops->execute_rw;
break;
case READ_16:
sectors = transport_get_sectors_16(cdb);
cmd->t_task_lba = transport_lba_64(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
cmd->execute_cmd = ops->execute_rw;
break;
case WRITE_6:
sectors = transport_get_sectors_6(cdb);
cmd->t_task_lba = transport_lba_21(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
cmd->execute_cmd = ops->execute_rw;
break;
case WRITE_10:
case WRITE_VERIFY:
sectors = transport_get_sectors_10(cdb);
cmd->t_task_lba = transport_lba_32(cdb);
if (cdb[1] & 0x8)
cmd->se_cmd_flags |= SCF_FUA;
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
cmd->execute_cmd = ops->execute_rw;
break;
case WRITE_12:
sectors = transport_get_sectors_12(cdb);
cmd->t_task_lba = transport_lba_32(cdb);
if (cdb[1] & 0x8)
cmd->se_cmd_flags |= SCF_FUA;
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
cmd->execute_cmd = ops->execute_rw;
break;
case WRITE_16:
sectors = transport_get_sectors_16(cdb);
cmd->t_task_lba = transport_lba_64(cdb);
if (cdb[1] & 0x8)
cmd->se_cmd_flags |= SCF_FUA;
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
cmd->execute_cmd = ops->execute_rw;
break;
case XDWRITEREAD_10:
if ((cmd->data_direction != DMA_TO_DEVICE) ||
!(cmd->se_cmd_flags & SCF_BIDI))
goto out_invalid_cdb_field;
sectors = transport_get_sectors_10(cdb);
cmd->t_task_lba = transport_lba_32(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
/*
* Setup BIDI XOR callback to be run after I/O completion.
*/
cmd->execute_cmd = ops->execute_rw;
cmd->transport_complete_callback = &xdreadwrite_callback;
if (cdb[1] & 0x8)
cmd->se_cmd_flags |= SCF_FUA;
break;
case VARIABLE_LENGTH_CMD:
{
u16 service_action = get_unaligned_be16(&cdb[8]);
switch (service_action) {
case XDWRITEREAD_32:
sectors = transport_get_sectors_32(cdb);
/*
* Use WRITE_32 and READ_32 opcodes for the emulated
* XDWRITE_READ_32 logic.
*/
cmd->t_task_lba = transport_lba_64_ext(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
/*
* Setup BIDI XOR callback to be run after I/O
* completion.
*/
cmd->execute_cmd = ops->execute_rw;
cmd->transport_complete_callback = &xdreadwrite_callback;
if (cdb[1] & 0x8)
cmd->se_cmd_flags |= SCF_FUA;
break;
case WRITE_SAME_32:
if (!ops->execute_write_same)
goto out_unsupported_cdb;
sectors = transport_get_sectors_32(cdb);
if (!sectors) {
pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
" supported\n");
goto out_invalid_cdb_field;
}
size = sbc_get_size(cmd, 1);
cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
if (sbc_write_same_supported(dev, &cdb[10]) < 0)
goto out_unsupported_cdb;
cmd->execute_cmd = ops->execute_write_same;
break;
default:
pr_err("VARIABLE_LENGTH_CMD service action"
" 0x%04x not supported\n", service_action);
goto out_unsupported_cdb;
}
break;
}
case READ_CAPACITY:
size = READ_CAP_LEN;
cmd->execute_cmd = sbc_emulate_readcapacity;
break;
case SERVICE_ACTION_IN:
switch (cmd->t_task_cdb[1] & 0x1f) {
case SAI_READ_CAPACITY_16:
cmd->execute_cmd = sbc_emulate_readcapacity_16;
break;
default:
pr_err("Unsupported SA: 0x%02x\n",
cmd->t_task_cdb[1] & 0x1f);
goto out_invalid_cdb_field;
}
size = (cdb[10] << 24) | (cdb[11] << 16) |
(cdb[12] << 8) | cdb[13];
break;
case SYNCHRONIZE_CACHE:
case SYNCHRONIZE_CACHE_16:
if (!ops->execute_sync_cache)
goto out_unsupported_cdb;
/*
* Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
*/
if (cdb[0] == SYNCHRONIZE_CACHE) {
sectors = transport_get_sectors_10(cdb);
cmd->t_task_lba = transport_lba_32(cdb);
} else {
sectors = transport_get_sectors_16(cdb);
cmd->t_task_lba = transport_lba_64(cdb);
}
size = sbc_get_size(cmd, sectors);
/*
* Check to ensure that LBA + Range does not extend past the end of
* the device for IBLOCK and FILEIO ->do_sync_cache() backend calls
*/
if (cmd->t_task_lba || sectors) {
if (sbc_check_valid_sectors(cmd) < 0)
goto out_invalid_cdb_field;
}
cmd->execute_cmd = ops->execute_sync_cache;
break;
case UNMAP:
if (!ops->execute_unmap)
goto out_unsupported_cdb;
size = get_unaligned_be16(&cdb[7]);
cmd->execute_cmd = ops->execute_unmap;
break;
case WRITE_SAME_16:
if (!ops->execute_write_same)
goto out_unsupported_cdb;
sectors = transport_get_sectors_16(cdb);
if (!sectors) {
pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
goto out_invalid_cdb_field;
}
size = sbc_get_size(cmd, 1);
cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
if (sbc_write_same_supported(dev, &cdb[1]) < 0)
goto out_unsupported_cdb;
cmd->execute_cmd = ops->execute_write_same;
break;
case WRITE_SAME:
if (!ops->execute_write_same)
goto out_unsupported_cdb;
sectors = transport_get_sectors_10(cdb);
if (!sectors) {
pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
goto out_invalid_cdb_field;
}
size = sbc_get_size(cmd, 1);
cmd->t_task_lba = get_unaligned_be32(&cdb[2]);
/*
* Follow sbcr26 with WRITE_SAME (10) and check for the existence
* of byte 1 bit 3 UNMAP instead of the original reserved field
*/
if (sbc_write_same_supported(dev, &cdb[1]) < 0)
goto out_unsupported_cdb;
cmd->execute_cmd = ops->execute_write_same;
break;
case VERIFY:
size = 0;
cmd->execute_cmd = sbc_emulate_verify;
break;
default:
ret = spc_parse_cdb(cmd, &size);
if (ret)
return ret;
}
/* reject any command that we don't have a handler for */
if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && !cmd->execute_cmd)
goto out_unsupported_cdb;
if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
unsigned long long end_lba;
if (sectors > su_dev->se_dev_attrib.fabric_max_sectors) {
printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
" big sectors %u exceeds fabric_max_sectors:"
" %u\n", cdb[0], sectors,
su_dev->se_dev_attrib.fabric_max_sectors);
goto out_invalid_cdb_field;
}
if (sectors > su_dev->se_dev_attrib.hw_max_sectors) {
printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
" big sectors %u exceeds backend hw_max_sectors:"
" %u\n", cdb[0], sectors,
su_dev->se_dev_attrib.hw_max_sectors);
goto out_invalid_cdb_field;
}
end_lba = dev->transport->get_blocks(dev) + 1;
if (cmd->t_task_lba + sectors > end_lba) {
pr_err("cmd exceeds last lba %llu "
"(lba %llu, sectors %u)\n",
end_lba, cmd->t_task_lba, sectors);
goto out_invalid_cdb_field;
}
size = sbc_get_size(cmd, sectors);
}
ret = target_cmd_size_check(cmd, size);
if (ret < 0)
return ret;
return 0;
out_unsupported_cdb:
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
return -EINVAL;
out_invalid_cdb_field:
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
return -EINVAL;
}
EXPORT_SYMBOL(sbc_parse_cdb);
/* /*
* CDB emulation for non-READ/WRITE commands. * SCSI Primary Commands (SPC) parsing and emulation.
* *
* Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc. * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
* Copyright (c) 2005, 2006, 2007 SBE, Inc. * Copyright (c) 2005, 2006, 2007 SBE, Inc.
...@@ -26,17 +26,21 @@ ...@@ -26,17 +26,21 @@
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/module.h> #include <linux/module.h>
#include <asm/unaligned.h> #include <asm/unaligned.h>
#include <scsi/scsi.h> #include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h> #include <target/target_core_base.h>
#include <target/target_core_backend.h> #include <target/target_core_backend.h>
#include <target/target_core_fabric.h> #include <target/target_core_fabric.h>
#include "target_core_internal.h" #include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h" #include "target_core_ua.h"
static void
target_fill_alua_data(struct se_port *port, unsigned char *buf) static void spc_fill_alua_data(struct se_port *port, unsigned char *buf)
{ {
struct t10_alua_tg_pt_gp *tg_pt_gp; struct t10_alua_tg_pt_gp *tg_pt_gp;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
...@@ -65,8 +69,7 @@ target_fill_alua_data(struct se_port *port, unsigned char *buf) ...@@ -65,8 +69,7 @@ target_fill_alua_data(struct se_port *port, unsigned char *buf)
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
} }
static int static int spc_emulate_inquiry_std(struct se_cmd *cmd, char *buf)
target_emulate_inquiry_std(struct se_cmd *cmd, char *buf)
{ {
struct se_lun *lun = cmd->se_lun; struct se_lun *lun = cmd->se_lun;
struct se_device *dev = cmd->se_dev; struct se_device *dev = cmd->se_dev;
...@@ -93,7 +96,7 @@ target_emulate_inquiry_std(struct se_cmd *cmd, char *buf) ...@@ -93,7 +96,7 @@ target_emulate_inquiry_std(struct se_cmd *cmd, char *buf)
* Enable SCCS and TPGS fields for Emulated ALUA * Enable SCCS and TPGS fields for Emulated ALUA
*/ */
if (dev->se_sub_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) if (dev->se_sub_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED)
target_fill_alua_data(lun->lun_sep, buf); spc_fill_alua_data(lun->lun_sep, buf);
buf[7] = 0x2; /* CmdQue=1 */ buf[7] = 0x2; /* CmdQue=1 */
...@@ -106,8 +109,7 @@ target_emulate_inquiry_std(struct se_cmd *cmd, char *buf) ...@@ -106,8 +109,7 @@ target_emulate_inquiry_std(struct se_cmd *cmd, char *buf)
} }
/* unit serial number */ /* unit serial number */
static int static int spc_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
{ {
struct se_device *dev = cmd->se_dev; struct se_device *dev = cmd->se_dev;
u16 len = 0; u16 len = 0;
...@@ -127,8 +129,8 @@ target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf) ...@@ -127,8 +129,8 @@ target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
return 0; return 0;
} }
static void static void spc_parse_naa_6h_vendor_specific(struct se_device *dev,
target_parse_naa_6h_vendor_specific(struct se_device *dev, unsigned char *buf) unsigned char *buf)
{ {
unsigned char *p = &dev->se_sub_dev->t10_wwn.unit_serial[0]; unsigned char *p = &dev->se_sub_dev->t10_wwn.unit_serial[0];
int cnt; int cnt;
...@@ -162,8 +164,7 @@ target_parse_naa_6h_vendor_specific(struct se_device *dev, unsigned char *buf) ...@@ -162,8 +164,7 @@ target_parse_naa_6h_vendor_specific(struct se_device *dev, unsigned char *buf)
* Device identification VPD, for a complete list of * Device identification VPD, for a complete list of
* DESIGNATOR TYPEs see spc4r17 Table 459. * DESIGNATOR TYPEs see spc4r17 Table 459.
*/ */
static int static int spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
{ {
struct se_device *dev = cmd->se_dev; struct se_device *dev = cmd->se_dev;
struct se_lun *lun = cmd->se_lun; struct se_lun *lun = cmd->se_lun;
...@@ -220,7 +221,7 @@ target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf) ...@@ -220,7 +221,7 @@ target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
* VENDOR_SPECIFIC_IDENTIFIER and * VENDOR_SPECIFIC_IDENTIFIER and
* VENDOR_SPECIFIC_IDENTIFIER_EXTENTION * VENDOR_SPECIFIC_IDENTIFIER_EXTENTION
*/ */
target_parse_naa_6h_vendor_specific(dev, &buf[off]); spc_parse_naa_6h_vendor_specific(dev, &buf[off]);
len = 20; len = 20;
off = (len + 4); off = (len + 4);
...@@ -414,8 +415,7 @@ target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf) ...@@ -414,8 +415,7 @@ target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
} }
/* Extended INQUIRY Data VPD Page */ /* Extended INQUIRY Data VPD Page */
static int static int spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
{ {
buf[3] = 0x3c; buf[3] = 0x3c;
/* Set HEADSUP, ORDSUP, SIMPSUP */ /* Set HEADSUP, ORDSUP, SIMPSUP */
...@@ -428,15 +428,14 @@ target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf) ...@@ -428,15 +428,14 @@ target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
} }
/* Block Limits VPD page */ /* Block Limits VPD page */
static int static int spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
{ {
struct se_device *dev = cmd->se_dev; struct se_device *dev = cmd->se_dev;
u32 max_sectors; u32 max_sectors;
int have_tp = 0; int have_tp = 0;
/* /*
* Following sbc3r22 section 6.5.3 Block Limits VPD page, when * Following spc3r22 section 6.5.3 Block Limits VPD page, when
* emulate_tpu=1 or emulate_tpws=1 we will be expect a * emulate_tpu=1 or emulate_tpws=1 we will be expect a
* different page length for Thin Provisioning. * different page length for Thin Provisioning.
*/ */
...@@ -500,8 +499,7 @@ target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) ...@@ -500,8 +499,7 @@ target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
} }
/* Block Device Characteristics VPD page */ /* Block Device Characteristics VPD page */
static int static int spc_emulate_evpd_b1(struct se_cmd *cmd, unsigned char *buf)
target_emulate_evpd_b1(struct se_cmd *cmd, unsigned char *buf)
{ {
struct se_device *dev = cmd->se_dev; struct se_device *dev = cmd->se_dev;
...@@ -513,13 +511,12 @@ target_emulate_evpd_b1(struct se_cmd *cmd, unsigned char *buf) ...@@ -513,13 +511,12 @@ target_emulate_evpd_b1(struct se_cmd *cmd, unsigned char *buf)
} }
/* Thin Provisioning VPD */ /* Thin Provisioning VPD */
static int static int spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
{ {
struct se_device *dev = cmd->se_dev; struct se_device *dev = cmd->se_dev;
/* /*
* From sbc3r22 section 6.5.4 Thin Provisioning VPD page: * From spc3r22 section 6.5.4 Thin Provisioning VPD page:
* *
* The PAGE LENGTH field is defined in SPC-4. If the DP bit is set to * The PAGE LENGTH field is defined in SPC-4. If the DP bit is set to
* zero, then the page length shall be set to 0004h. If the DP bit * zero, then the page length shall be set to 0004h. If the DP bit
...@@ -564,25 +561,23 @@ target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf) ...@@ -564,25 +561,23 @@ target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
return 0; return 0;
} }
static int static int spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf);
target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf);
static struct { static struct {
uint8_t page; uint8_t page;
int (*emulate)(struct se_cmd *, unsigned char *); int (*emulate)(struct se_cmd *, unsigned char *);
} evpd_handlers[] = { } evpd_handlers[] = {
{ .page = 0x00, .emulate = target_emulate_evpd_00 }, { .page = 0x00, .emulate = spc_emulate_evpd_00 },
{ .page = 0x80, .emulate = target_emulate_evpd_80 }, { .page = 0x80, .emulate = spc_emulate_evpd_80 },
{ .page = 0x83, .emulate = target_emulate_evpd_83 }, { .page = 0x83, .emulate = spc_emulate_evpd_83 },
{ .page = 0x86, .emulate = target_emulate_evpd_86 }, { .page = 0x86, .emulate = spc_emulate_evpd_86 },
{ .page = 0xb0, .emulate = target_emulate_evpd_b0 }, { .page = 0xb0, .emulate = spc_emulate_evpd_b0 },
{ .page = 0xb1, .emulate = target_emulate_evpd_b1 }, { .page = 0xb1, .emulate = spc_emulate_evpd_b1 },
{ .page = 0xb2, .emulate = target_emulate_evpd_b2 }, { .page = 0xb2, .emulate = spc_emulate_evpd_b2 },
}; };
/* supported vital product data pages */ /* supported vital product data pages */
static int static int spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
{ {
int p; int p;
...@@ -601,7 +596,7 @@ target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf) ...@@ -601,7 +596,7 @@ target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
return 0; return 0;
} }
int target_emulate_inquiry(struct se_cmd *cmd) static int spc_emulate_inquiry(struct se_cmd *cmd)
{ {
struct se_device *dev = cmd->se_dev; struct se_device *dev = cmd->se_dev;
struct se_portal_group *tpg = cmd->se_lun->lun_sep->sep_tpg; struct se_portal_group *tpg = cmd->se_lun->lun_sep->sep_tpg;
...@@ -643,7 +638,7 @@ int target_emulate_inquiry(struct se_cmd *cmd) ...@@ -643,7 +638,7 @@ int target_emulate_inquiry(struct se_cmd *cmd)
goto out; goto out;
} }
ret = target_emulate_inquiry_std(cmd, buf); ret = spc_emulate_inquiry_std(cmd, buf);
goto out; goto out;
} }
...@@ -671,70 +666,7 @@ int target_emulate_inquiry(struct se_cmd *cmd) ...@@ -671,70 +666,7 @@ int target_emulate_inquiry(struct se_cmd *cmd)
return ret; return ret;
} }
int target_emulate_readcapacity(struct se_cmd *cmd) static int spc_modesense_rwrecovery(unsigned char *p)
{
struct se_device *dev = cmd->se_dev;
unsigned char *buf;
unsigned long long blocks_long = dev->transport->get_blocks(dev);
u32 blocks;
if (blocks_long >= 0x00000000ffffffff)
blocks = 0xffffffff;
else
blocks = (u32)blocks_long;
buf = transport_kmap_data_sg(cmd);
buf[0] = (blocks >> 24) & 0xff;
buf[1] = (blocks >> 16) & 0xff;
buf[2] = (blocks >> 8) & 0xff;
buf[3] = blocks & 0xff;
buf[4] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff;
buf[5] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff;
buf[6] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff;
buf[7] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff;
transport_kunmap_data_sg(cmd);
target_complete_cmd(cmd, GOOD);
return 0;
}
int target_emulate_readcapacity_16(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
unsigned char *buf;
unsigned long long blocks = dev->transport->get_blocks(dev);
buf = transport_kmap_data_sg(cmd);
buf[0] = (blocks >> 56) & 0xff;
buf[1] = (blocks >> 48) & 0xff;
buf[2] = (blocks >> 40) & 0xff;
buf[3] = (blocks >> 32) & 0xff;
buf[4] = (blocks >> 24) & 0xff;
buf[5] = (blocks >> 16) & 0xff;
buf[6] = (blocks >> 8) & 0xff;
buf[7] = blocks & 0xff;
buf[8] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff;
buf[9] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff;
buf[10] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff;
buf[11] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff;
/*
* Set Thin Provisioning Enable bit following sbc3r22 in section
* READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.
*/
if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws)
buf[14] = 0x80;
transport_kunmap_data_sg(cmd);
target_complete_cmd(cmd, GOOD);
return 0;
}
static int
target_modesense_rwrecovery(unsigned char *p)
{ {
p[0] = 0x01; p[0] = 0x01;
p[1] = 0x0a; p[1] = 0x0a;
...@@ -742,8 +674,7 @@ target_modesense_rwrecovery(unsigned char *p) ...@@ -742,8 +674,7 @@ target_modesense_rwrecovery(unsigned char *p)
return 12; return 12;
} }
static int static int spc_modesense_control(struct se_device *dev, unsigned char *p)
target_modesense_control(struct se_device *dev, unsigned char *p)
{ {
p[0] = 0x0a; p[0] = 0x0a;
p[1] = 0x0a; p[1] = 0x0a;
...@@ -828,8 +759,7 @@ target_modesense_control(struct se_device *dev, unsigned char *p) ...@@ -828,8 +759,7 @@ target_modesense_control(struct se_device *dev, unsigned char *p)
return 12; return 12;
} }
static int static int spc_modesense_caching(struct se_device *dev, unsigned char *p)
target_modesense_caching(struct se_device *dev, unsigned char *p)
{ {
p[0] = 0x08; p[0] = 0x08;
p[1] = 0x12; p[1] = 0x12;
...@@ -840,8 +770,7 @@ target_modesense_caching(struct se_device *dev, unsigned char *p) ...@@ -840,8 +770,7 @@ target_modesense_caching(struct se_device *dev, unsigned char *p)
return 20; return 20;
} }
static void static void spc_modesense_write_protect(unsigned char *buf, int type)
target_modesense_write_protect(unsigned char *buf, int type)
{ {
/* /*
* I believe that the WP bit (bit 7) in the mode header is the same for * I believe that the WP bit (bit 7) in the mode header is the same for
...@@ -856,8 +785,7 @@ target_modesense_write_protect(unsigned char *buf, int type) ...@@ -856,8 +785,7 @@ target_modesense_write_protect(unsigned char *buf, int type)
} }
} }
static void static void spc_modesense_dpofua(unsigned char *buf, int type)
target_modesense_dpofua(unsigned char *buf, int type)
{ {
switch (type) { switch (type) {
case TYPE_DISK: case TYPE_DISK:
...@@ -868,7 +796,7 @@ target_modesense_dpofua(unsigned char *buf, int type) ...@@ -868,7 +796,7 @@ target_modesense_dpofua(unsigned char *buf, int type)
} }
} }
int target_emulate_modesense(struct se_cmd *cmd) static int spc_emulate_modesense(struct se_cmd *cmd)
{ {
struct se_device *dev = cmd->se_dev; struct se_device *dev = cmd->se_dev;
char *cdb = cmd->t_task_cdb; char *cdb = cmd->t_task_cdb;
...@@ -883,18 +811,18 @@ int target_emulate_modesense(struct se_cmd *cmd) ...@@ -883,18 +811,18 @@ int target_emulate_modesense(struct se_cmd *cmd)
switch (cdb[2] & 0x3f) { switch (cdb[2] & 0x3f) {
case 0x01: case 0x01:
length = target_modesense_rwrecovery(&buf[offset]); length = spc_modesense_rwrecovery(&buf[offset]);
break; break;
case 0x08: case 0x08:
length = target_modesense_caching(dev, &buf[offset]); length = spc_modesense_caching(dev, &buf[offset]);
break; break;
case 0x0a: case 0x0a:
length = target_modesense_control(dev, &buf[offset]); length = spc_modesense_control(dev, &buf[offset]);
break; break;
case 0x3f: case 0x3f:
length = target_modesense_rwrecovery(&buf[offset]); length = spc_modesense_rwrecovery(&buf[offset]);
length += target_modesense_caching(dev, &buf[offset+length]); length += spc_modesense_caching(dev, &buf[offset+length]);
length += target_modesense_control(dev, &buf[offset+length]); length += spc_modesense_control(dev, &buf[offset+length]);
break; break;
default: default:
pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n", pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n",
...@@ -912,11 +840,11 @@ int target_emulate_modesense(struct se_cmd *cmd) ...@@ -912,11 +840,11 @@ int target_emulate_modesense(struct se_cmd *cmd)
if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) || if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
(cmd->se_deve && (cmd->se_deve &&
(cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY))) (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
target_modesense_write_protect(&buf[3], type); spc_modesense_write_protect(&buf[3], type);
if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) && if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) &&
(dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0)) (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0))
target_modesense_dpofua(&buf[3], type); spc_modesense_dpofua(&buf[3], type);
if ((offset + 2) > cmd->data_length) if ((offset + 2) > cmd->data_length)
offset = cmd->data_length; offset = cmd->data_length;
...@@ -928,11 +856,11 @@ int target_emulate_modesense(struct se_cmd *cmd) ...@@ -928,11 +856,11 @@ int target_emulate_modesense(struct se_cmd *cmd)
if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) || if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
(cmd->se_deve && (cmd->se_deve &&
(cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY))) (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
target_modesense_write_protect(&buf[2], type); spc_modesense_write_protect(&buf[2], type);
if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) && if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) &&
(dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0)) (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0))
target_modesense_dpofua(&buf[2], type); spc_modesense_dpofua(&buf[2], type);
if ((offset + 1) > cmd->data_length) if ((offset + 1) > cmd->data_length)
offset = cmd->data_length; offset = cmd->data_length;
...@@ -946,7 +874,7 @@ int target_emulate_modesense(struct se_cmd *cmd) ...@@ -946,7 +874,7 @@ int target_emulate_modesense(struct se_cmd *cmd)
return 0; return 0;
} }
int target_emulate_request_sense(struct se_cmd *cmd) static int spc_emulate_request_sense(struct se_cmd *cmd)
{ {
unsigned char *cdb = cmd->t_task_cdb; unsigned char *cdb = cmd->t_task_cdb;
unsigned char *buf; unsigned char *buf;
...@@ -1005,126 +933,172 @@ int target_emulate_request_sense(struct se_cmd *cmd) ...@@ -1005,126 +933,172 @@ int target_emulate_request_sense(struct se_cmd *cmd)
return 0; return 0;
} }
/* static int spc_emulate_testunitready(struct se_cmd *cmd)
* Used for TCM/IBLOCK and TCM/FILEIO for block/blk-lib.c level discard support.
* Note this is not used for TCM/pSCSI passthrough
*/
int target_emulate_unmap(struct se_cmd *cmd)
{ {
struct se_device *dev = cmd->se_dev; target_complete_cmd(cmd, GOOD);
unsigned char *buf, *ptr = NULL; return 0;
unsigned char *cdb = &cmd->t_task_cdb[0];
sector_t lba;
unsigned int size = cmd->data_length, range;
int ret = 0, offset;
unsigned short dl, bd_dl;
if (!dev->transport->do_discard) {
pr_err("UNMAP emulation not supported for: %s\n",
dev->transport->name);
cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
return -ENOSYS;
}
/* First UNMAP block descriptor starts at 8 byte offset */
offset = 8;
size -= 8;
dl = get_unaligned_be16(&cdb[0]);
bd_dl = get_unaligned_be16(&cdb[2]);
buf = transport_kmap_data_sg(cmd);
ptr = &buf[offset];
pr_debug("UNMAP: Sub: %s Using dl: %hu bd_dl: %hu size: %hu"
" ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);
while (size) {
lba = get_unaligned_be64(&ptr[0]);
range = get_unaligned_be32(&ptr[8]);
pr_debug("UNMAP: Using lba: %llu and range: %u\n",
(unsigned long long)lba, range);
ret = dev->transport->do_discard(dev, lba, range);
if (ret < 0) {
pr_err("blkdev_issue_discard() failed: %d\n",
ret);
goto err;
}
ptr += 16;
size -= 16;
}
err:
transport_kunmap_data_sg(cmd);
if (!ret)
target_complete_cmd(cmd, GOOD);
return ret;
} }
/* int spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
* Used for TCM/IBLOCK and TCM/FILEIO for block/blk-lib.c level discard support.
* Note this is not used for TCM/pSCSI passthrough
*/
int target_emulate_write_same(struct se_cmd *cmd)
{ {
struct se_device *dev = cmd->se_dev; struct se_device *dev = cmd->se_dev;
sector_t range; struct se_subsystem_dev *su_dev = dev->se_sub_dev;
sector_t lba = cmd->t_task_lba; unsigned char *cdb = cmd->t_task_cdb;
u32 num_blocks;
int ret;
if (!dev->transport->do_discard) {
pr_err("WRITE_SAME emulation not supported"
" for: %s\n", dev->transport->name);
cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
return -ENOSYS;
}
if (cmd->t_task_cdb[0] == WRITE_SAME)
num_blocks = get_unaligned_be16(&cmd->t_task_cdb[7]);
else if (cmd->t_task_cdb[0] == WRITE_SAME_16)
num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]);
else /* WRITE_SAME_32 via VARIABLE_LENGTH_CMD */
num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]);
/*
* Use the explicit range when non zero is supplied, otherwise calculate
* the remaining range based on ->get_blocks() - starting LBA.
*/
if (num_blocks != 0)
range = num_blocks;
else
range = (dev->transport->get_blocks(dev) - lba) + 1;
pr_debug("WRITE_SAME UNMAP: LBA: %llu Range: %llu\n",
(unsigned long long)lba, (unsigned long long)range);
ret = dev->transport->do_discard(dev, lba, range); switch (cdb[0]) {
if (ret < 0) { case MODE_SELECT:
pr_debug("blkdev_issue_discard() failed for WRITE_SAME\n"); *size = cdb[4];
return ret; break;
} case MODE_SELECT_10:
*size = (cdb[7] << 8) + cdb[8];
break;
case MODE_SENSE:
*size = cdb[4];
cmd->execute_cmd = spc_emulate_modesense;
break;
case MODE_SENSE_10:
*size = (cdb[7] << 8) + cdb[8];
cmd->execute_cmd = spc_emulate_modesense;
break;
case LOG_SELECT:
case LOG_SENSE:
*size = (cdb[7] << 8) + cdb[8];
break;
case PERSISTENT_RESERVE_IN:
if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
cmd->execute_cmd = target_scsi3_emulate_pr_in;
*size = (cdb[7] << 8) + cdb[8];
break;
case PERSISTENT_RESERVE_OUT:
if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
cmd->execute_cmd = target_scsi3_emulate_pr_out;
*size = (cdb[7] << 8) + cdb[8];
break;
case RELEASE:
case RELEASE_10:
if (cdb[0] == RELEASE_10)
*size = (cdb[7] << 8) | cdb[8];
else
*size = cmd->data_length;
if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
cmd->execute_cmd = target_scsi2_reservation_release;
break;
case RESERVE:
case RESERVE_10:
/*
* The SPC-2 RESERVE does not contain a size in the SCSI CDB.
* Assume the passthrough or $FABRIC_MOD will tell us about it.
*/
if (cdb[0] == RESERVE_10)
*size = (cdb[7] << 8) | cdb[8];
else
*size = cmd->data_length;
target_complete_cmd(cmd, GOOD); /*
return 0; * Setup the legacy emulated handler for SPC-2 and
} * >= SPC-3 compatible reservation handling (CRH=1)
* Otherwise, we assume the underlying SCSI logic is
* running in SPC_PASSTHROUGH, and wants reservations
* emulation disabled.
*/
if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
cmd->execute_cmd = target_scsi2_reservation_reserve;
break;
case REQUEST_SENSE:
*size = cdb[4];
cmd->execute_cmd = spc_emulate_request_sense;
break;
case INQUIRY:
*size = (cdb[3] << 8) + cdb[4];
int target_emulate_synchronize_cache(struct se_cmd *cmd) /*
{ * Do implicit HEAD_OF_QUEUE processing for INQUIRY.
if (!cmd->se_dev->transport->do_sync_cache) { * See spc4r17 section 5.3
pr_err("SYNCHRONIZE_CACHE emulation not supported" */
" for: %s\n", cmd->se_dev->transport->name); if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
cmd->sam_task_attr = MSG_HEAD_TAG;
cmd->execute_cmd = spc_emulate_inquiry;
break;
case SECURITY_PROTOCOL_IN:
case SECURITY_PROTOCOL_OUT:
*size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
break;
case EXTENDED_COPY:
case READ_ATTRIBUTE:
case RECEIVE_COPY_RESULTS:
case WRITE_ATTRIBUTE:
*size = (cdb[10] << 24) | (cdb[11] << 16) |
(cdb[12] << 8) | cdb[13];
break;
case RECEIVE_DIAGNOSTIC:
case SEND_DIAGNOSTIC:
*size = (cdb[3] << 8) | cdb[4];
break;
case WRITE_BUFFER:
*size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
break;
case REPORT_LUNS:
cmd->execute_cmd = target_report_luns;
*size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
/*
* Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
* See spc4r17 section 5.3
*/
if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
cmd->sam_task_attr = MSG_HEAD_TAG;
break;
case TEST_UNIT_READY:
cmd->execute_cmd = spc_emulate_testunitready;
*size = 0;
break;
case MAINTENANCE_IN:
if (dev->transport->get_device_type(dev) != TYPE_ROM) {
/*
* MAINTENANCE_IN from SCC-2
* Check for emulated MI_REPORT_TARGET_PGS
*/
if ((cdb[1] & 0x1f) == MI_REPORT_TARGET_PGS &&
su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
cmd->execute_cmd =
target_emulate_report_target_port_groups;
}
*size = get_unaligned_be32(&cdb[6]);
} else {
/*
* GPCMD_SEND_KEY from multi media commands
*/
*size = get_unaligned_be16(&cdb[8]);
}
break;
case MAINTENANCE_OUT:
if (dev->transport->get_device_type(dev) != TYPE_ROM) {
/*
* MAINTENANCE_OUT from SCC-2
* Check for emulated MO_SET_TARGET_PGS.
*/
if (cdb[1] == MO_SET_TARGET_PGS &&
su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
cmd->execute_cmd =
target_emulate_set_target_port_groups;
}
*size = get_unaligned_be32(&cdb[6]);
} else {
/*
* GPCMD_SEND_KEY from multi media commands
*/
*size = get_unaligned_be16(&cdb[8]);
}
break;
default:
pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode"
" 0x%02x, sending CHECK_CONDITION.\n",
cmd->se_tfo->get_fabric_name(), cdb[0]);
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
return -ENOSYS; return -EINVAL;
} }
cmd->se_dev->transport->do_sync_cache(cmd);
return 0;
}
int target_emulate_noop(struct se_cmd *cmd)
{
target_complete_cmd(cmd, GOOD);
return 0; return 0;
} }
EXPORT_SYMBOL(spc_parse_cdb);
...@@ -295,9 +295,6 @@ static void core_tmr_drain_state_list( ...@@ -295,9 +295,6 @@ static void core_tmr_drain_state_list(
list_move_tail(&cmd->state_list, &drain_task_list); list_move_tail(&cmd->state_list, &drain_task_list);
cmd->state_active = false; cmd->state_active = false;
if (!list_empty(&cmd->execute_list))
__target_remove_from_execute_list(cmd);
} }
spin_unlock_irqrestore(&dev->execute_task_lock, flags); spin_unlock_irqrestore(&dev->execute_task_lock, flags);
...@@ -354,57 +351,6 @@ static void core_tmr_drain_state_list( ...@@ -354,57 +351,6 @@ static void core_tmr_drain_state_list(
} }
} }
static void core_tmr_drain_cmd_list(
struct se_device *dev,
struct se_cmd *prout_cmd,
struct se_node_acl *tmr_nacl,
int tas,
struct list_head *preempt_and_abort_list)
{
LIST_HEAD(drain_cmd_list);
struct se_queue_obj *qobj = &dev->dev_queue_obj;
struct se_cmd *cmd, *tcmd;
unsigned long flags;
/*
* Release all commands remaining in the per-device command queue.
*
* This follows the same logic as above for the state list.
*/
spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
list_for_each_entry_safe(cmd, tcmd, &qobj->qobj_list, se_queue_node) {
/*
* For PREEMPT_AND_ABORT usage, only process commands
* with a matching reservation key.
*/
if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
continue;
/*
* Not aborting PROUT PREEMPT_AND_ABORT CDB..
*/
if (prout_cmd == cmd)
continue;
cmd->transport_state &= ~CMD_T_QUEUED;
atomic_dec(&qobj->queue_cnt);
list_move_tail(&cmd->se_queue_node, &drain_cmd_list);
}
spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
while (!list_empty(&drain_cmd_list)) {
cmd = list_entry(drain_cmd_list.next, struct se_cmd, se_queue_node);
list_del_init(&cmd->se_queue_node);
pr_debug("LUN_RESET: %s from Device Queue: cmd: %p t_state:"
" %d t_fe_count: %d\n", (preempt_and_abort_list) ?
"Preempt" : "", cmd, cmd->t_state,
atomic_read(&cmd->t_fe_count));
core_tmr_handle_tas_abort(tmr_nacl, cmd, tas,
atomic_read(&cmd->t_fe_count));
}
}
int core_tmr_lun_reset( int core_tmr_lun_reset(
struct se_device *dev, struct se_device *dev,
struct se_tmr_req *tmr, struct se_tmr_req *tmr,
...@@ -447,8 +393,7 @@ int core_tmr_lun_reset( ...@@ -447,8 +393,7 @@ int core_tmr_lun_reset(
core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list); core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list);
core_tmr_drain_state_list(dev, prout_cmd, tmr_nacl, tas, core_tmr_drain_state_list(dev, prout_cmd, tmr_nacl, tas,
preempt_and_abort_list); preempt_and_abort_list);
core_tmr_drain_cmd_list(dev, prout_cmd, tmr_nacl, tas,
preempt_and_abort_list);
/* /*
* Clear any legacy SPC-2 reservation when called during * Clear any legacy SPC-2 reservation when called during
* LOGICAL UNIT RESET * LOGICAL UNIT RESET
......
...@@ -77,8 +77,8 @@ static void core_clear_initiator_node_from_tpg( ...@@ -77,8 +77,8 @@ static void core_clear_initiator_node_from_tpg(
lun = deve->se_lun; lun = deve->se_lun;
spin_unlock_irq(&nacl->device_list_lock); spin_unlock_irq(&nacl->device_list_lock);
core_update_device_list_for_node(lun, NULL, deve->mapped_lun, core_disable_device_list_for_node(lun, NULL, deve->mapped_lun,
TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0); TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);
spin_lock_irq(&nacl->device_list_lock); spin_lock_irq(&nacl->device_list_lock);
} }
...@@ -172,8 +172,8 @@ void core_tpg_add_node_to_devs( ...@@ -172,8 +172,8 @@ void core_tpg_add_node_to_devs(
(lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ? (lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
"READ-WRITE" : "READ-ONLY"); "READ-WRITE" : "READ-ONLY");
core_update_device_list_for_node(lun, NULL, lun->unpacked_lun, core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun,
lun_access, acl, tpg, 1); lun_access, acl, tpg);
spin_lock(&tpg->tpg_lun_lock); spin_lock(&tpg->tpg_lun_lock);
} }
spin_unlock(&tpg->tpg_lun_lock); spin_unlock(&tpg->tpg_lun_lock);
...@@ -306,10 +306,8 @@ struct se_node_acl *core_tpg_check_initiator_node_acl( ...@@ -306,10 +306,8 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
* TPG LUNs if the fabric is not explicitly asking for
* tpg_check_demo_mode_login_only() == 1.
*/
if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only != NULL) && if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
(tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) == 1)) (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
do { ; } while (0);
else
core_tpg_add_node_to_devs(acl, tpg); core_tpg_add_node_to_devs(acl, tpg);
spin_lock_irq(&tpg->acl_node_lock); spin_lock_irq(&tpg->acl_node_lock);
......
...@@ -66,15 +66,12 @@ struct kmem_cache *t10_alua_lu_gp_mem_cache; ...@@ -66,15 +66,12 @@ struct kmem_cache *t10_alua_lu_gp_mem_cache;
struct kmem_cache *t10_alua_tg_pt_gp_cache; struct kmem_cache *t10_alua_tg_pt_gp_cache;
struct kmem_cache *t10_alua_tg_pt_gp_mem_cache; struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
static int transport_generic_write_pending(struct se_cmd *);
static int transport_processing_thread(void *param);
static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *);
static void transport_complete_task_attr(struct se_cmd *cmd); static void transport_complete_task_attr(struct se_cmd *cmd);
static void transport_handle_queue_full(struct se_cmd *cmd, static void transport_handle_queue_full(struct se_cmd *cmd,
struct se_device *dev); struct se_device *dev);
static int transport_generic_get_mem(struct se_cmd *cmd); static int transport_generic_get_mem(struct se_cmd *cmd);
static int target_get_sess_cmd(struct se_session *, struct se_cmd *, bool);
static void transport_put_cmd(struct se_cmd *cmd); static void transport_put_cmd(struct se_cmd *cmd);
static void transport_remove_cmd_from_queue(struct se_cmd *cmd);
static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq); static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
static void target_complete_ok_work(struct work_struct *work); static void target_complete_ok_work(struct work_struct *work);
...@@ -195,14 +192,6 @@ u32 scsi_get_new_index(scsi_index_t type) ...@@ -195,14 +192,6 @@ u32 scsi_get_new_index(scsi_index_t type)
return new_index; return new_index;
} }
static void transport_init_queue_obj(struct se_queue_obj *qobj)
{
atomic_set(&qobj->queue_cnt, 0);
INIT_LIST_HEAD(&qobj->qobj_list);
init_waitqueue_head(&qobj->thread_wq);
spin_lock_init(&qobj->cmd_queue_lock);
}
void transport_subsystem_check_init(void) void transport_subsystem_check_init(void)
{ {
int ret; int ret;
...@@ -243,7 +232,6 @@ struct se_session *transport_init_session(void) ...@@ -243,7 +232,6 @@ struct se_session *transport_init_session(void)
INIT_LIST_HEAD(&se_sess->sess_list); INIT_LIST_HEAD(&se_sess->sess_list);
INIT_LIST_HEAD(&se_sess->sess_acl_list); INIT_LIST_HEAD(&se_sess->sess_acl_list);
INIT_LIST_HEAD(&se_sess->sess_cmd_list); INIT_LIST_HEAD(&se_sess->sess_cmd_list);
INIT_LIST_HEAD(&se_sess->sess_wait_list);
spin_lock_init(&se_sess->sess_cmd_lock); spin_lock_init(&se_sess->sess_cmd_lock);
kref_init(&se_sess->sess_kref); kref_init(&se_sess->sess_kref);
...@@ -468,18 +456,7 @@ static void target_remove_from_state_list(struct se_cmd *cmd) ...@@ -468,18 +456,7 @@ static void target_remove_from_state_list(struct se_cmd *cmd)
spin_unlock_irqrestore(&dev->execute_task_lock, flags); spin_unlock_irqrestore(&dev->execute_task_lock, flags);
} }
/* transport_cmd_check_stop(): static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists)
*
* 'transport_off = 1' determines if CMD_T_ACTIVE should be cleared.
* 'transport_off = 2' determines if task_dev_state should be removed.
*
* A non-zero u8 t_state sets cmd->t_state.
* Returns 1 when command is stopped, else 0.
*/
static int transport_cmd_check_stop(
struct se_cmd *cmd,
int transport_off,
u8 t_state)
{ {
unsigned long flags; unsigned long flags;
...@@ -493,13 +470,23 @@ static int transport_cmd_check_stop( ...@@ -493,13 +470,23 @@ static int transport_cmd_check_stop(
__func__, __LINE__, cmd->se_tfo->get_task_tag(cmd)); __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd));
cmd->transport_state &= ~CMD_T_ACTIVE; cmd->transport_state &= ~CMD_T_ACTIVE;
if (transport_off == 2) if (remove_from_lists)
target_remove_from_state_list(cmd); target_remove_from_state_list(cmd);
spin_unlock_irqrestore(&cmd->t_state_lock, flags); spin_unlock_irqrestore(&cmd->t_state_lock, flags);
complete(&cmd->transport_lun_stop_comp); complete(&cmd->transport_lun_stop_comp);
return 1; return 1;
} }
if (remove_from_lists) {
target_remove_from_state_list(cmd);
/*
* Clear struct se_cmd->se_lun before the handoff to FE.
*/
cmd->se_lun = NULL;
}
/* /*
* Determine if frontend context caller is requesting the stopping of * Determine if frontend context caller is requesting the stopping of
* this command for frontend exceptions. * this command for frontend exceptions.
...@@ -509,58 +496,36 @@ static int transport_cmd_check_stop( ...@@ -509,58 +496,36 @@ static int transport_cmd_check_stop(
__func__, __LINE__, __func__, __LINE__,
cmd->se_tfo->get_task_tag(cmd)); cmd->se_tfo->get_task_tag(cmd));
if (transport_off == 2)
target_remove_from_state_list(cmd);
/*
* Clear struct se_cmd->se_lun before the transport_off == 2 handoff
* to FE.
*/
if (transport_off == 2)
cmd->se_lun = NULL;
spin_unlock_irqrestore(&cmd->t_state_lock, flags); spin_unlock_irqrestore(&cmd->t_state_lock, flags);
complete(&cmd->t_transport_stop_comp); complete(&cmd->t_transport_stop_comp);
return 1; return 1;
} }
cmd->transport_state &= ~CMD_T_ACTIVE;
if (remove_from_lists) {
/*
 * Some fabric modules like tcm_loop can release
 * their internally allocated I/O reference and
 * struct se_cmd now.
 *
 * Fabric modules are expected to return '1' here if the
 * se_cmd being passed is released at this point,
 * or zero if not being released.
 */
if (cmd->se_tfo->check_stop_free != NULL) {
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return cmd->se_tfo->check_stop_free(cmd);
}
}

spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return 0;
}
static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd) static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
{ {
return transport_cmd_check_stop(cmd, 2, 0); return transport_cmd_check_stop(cmd, true);
} }
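For reference, a minimal sketch of the ->check_stop_free() contract described in the comment above, assuming a fabric that embeds struct se_cmd in its own descriptor and that target_put_sess_cmd() reports whether the final reference was dropped; the function name is hypothetical.

static int fabric_check_stop_free(struct se_cmd *se_cmd)
{
	/* Return 1 once the command has actually been released, else 0. */
	return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
}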
static void transport_lun_remove_cmd(struct se_cmd *cmd) static void transport_lun_remove_cmd(struct se_cmd *cmd)
...@@ -591,79 +556,8 @@ void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) ...@@ -591,79 +556,8 @@ void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
if (transport_cmd_check_stop_to_fabric(cmd)) if (transport_cmd_check_stop_to_fabric(cmd))
return; return;
if (remove) { if (remove)
transport_remove_cmd_from_queue(cmd);
transport_put_cmd(cmd); transport_put_cmd(cmd);
}
}
static void transport_add_cmd_to_queue(struct se_cmd *cmd, int t_state,
bool at_head)
{
struct se_device *dev = cmd->se_dev;
struct se_queue_obj *qobj = &dev->dev_queue_obj;
unsigned long flags;
if (t_state) {
spin_lock_irqsave(&cmd->t_state_lock, flags);
cmd->t_state = t_state;
cmd->transport_state |= CMD_T_ACTIVE;
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}
spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
/* If the cmd is already on the list, remove it before we add it */
if (!list_empty(&cmd->se_queue_node))
list_del(&cmd->se_queue_node);
else
atomic_inc(&qobj->queue_cnt);
if (at_head)
list_add(&cmd->se_queue_node, &qobj->qobj_list);
else
list_add_tail(&cmd->se_queue_node, &qobj->qobj_list);
cmd->transport_state |= CMD_T_QUEUED;
spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
wake_up_interruptible(&qobj->thread_wq);
}
static struct se_cmd *
transport_get_cmd_from_queue(struct se_queue_obj *qobj)
{
struct se_cmd *cmd;
unsigned long flags;
spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
if (list_empty(&qobj->qobj_list)) {
spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
return NULL;
}
cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node);
cmd->transport_state &= ~CMD_T_QUEUED;
list_del_init(&cmd->se_queue_node);
atomic_dec(&qobj->queue_cnt);
spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
return cmd;
}
static void transport_remove_cmd_from_queue(struct se_cmd *cmd)
{
struct se_queue_obj *qobj = &cmd->se_dev->dev_queue_obj;
unsigned long flags;
spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
if (!(cmd->transport_state & CMD_T_QUEUED)) {
spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
return;
}
cmd->transport_state &= ~CMD_T_QUEUED;
atomic_dec(&qobj->queue_cnt);
list_del_init(&cmd->se_queue_node);
spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
} }
static void target_complete_failure_work(struct work_struct *work) static void target_complete_failure_work(struct work_struct *work)
...@@ -742,68 +636,11 @@ static void target_add_to_state_list(struct se_cmd *cmd) ...@@ -742,68 +636,11 @@ static void target_add_to_state_list(struct se_cmd *cmd)
spin_unlock_irqrestore(&dev->execute_task_lock, flags); spin_unlock_irqrestore(&dev->execute_task_lock, flags);
} }
static void __target_add_to_execute_list(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
bool head_of_queue = false;
if (!list_empty(&cmd->execute_list))
return;
if (dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED &&
cmd->sam_task_attr == MSG_HEAD_TAG)
head_of_queue = true;
if (head_of_queue)
list_add(&cmd->execute_list, &dev->execute_list);
else
list_add_tail(&cmd->execute_list, &dev->execute_list);
atomic_inc(&dev->execute_tasks);
if (cmd->state_active)
return;
if (head_of_queue)
list_add(&cmd->state_list, &dev->state_list);
else
list_add_tail(&cmd->state_list, &dev->state_list);
cmd->state_active = true;
}
static void target_add_to_execute_list(struct se_cmd *cmd)
{
unsigned long flags;
struct se_device *dev = cmd->se_dev;
spin_lock_irqsave(&dev->execute_task_lock, flags);
__target_add_to_execute_list(cmd);
spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}
void __target_remove_from_execute_list(struct se_cmd *cmd)
{
list_del_init(&cmd->execute_list);
atomic_dec(&cmd->se_dev->execute_tasks);
}
static void target_remove_from_execute_list(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
unsigned long flags;
if (WARN_ON(list_empty(&cmd->execute_list)))
return;
spin_lock_irqsave(&dev->execute_task_lock, flags);
__target_remove_from_execute_list(cmd);
spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}
/* /*
* Handle QUEUE_FULL / -EAGAIN and -ENOMEM status * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status
*/ */
static void transport_write_pending_qf(struct se_cmd *cmd);
static void transport_complete_qf(struct se_cmd *cmd);
static void target_qf_do_work(struct work_struct *work) static void target_qf_do_work(struct work_struct *work)
{ {
...@@ -827,7 +664,10 @@ static void target_qf_do_work(struct work_struct *work) ...@@ -827,7 +664,10 @@ static void target_qf_do_work(struct work_struct *work)
(cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING" (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
: "UNKNOWN"); : "UNKNOWN");
transport_add_cmd_to_queue(cmd, cmd->t_state, true); if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP)
transport_write_pending_qf(cmd);
else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK)
transport_complete_qf(cmd);
} }
} }
...@@ -874,8 +714,7 @@ void transport_dump_dev_state( ...@@ -874,8 +714,7 @@ void transport_dump_dev_state(
break; break;
} }
*bl += sprintf(b + *bl, " Execute/Max Queue Depth: %d/%d", *bl += sprintf(b + *bl, " Max Queue Depth: %d", dev->queue_depth);
atomic_read(&dev->execute_tasks), dev->queue_depth);
*bl += sprintf(b + *bl, " SectorSize: %u HwMaxSectors: %u\n", *bl += sprintf(b + *bl, " SectorSize: %u HwMaxSectors: %u\n",
dev->se_sub_dev->se_dev_attrib.block_size, dev->se_sub_dev->se_dev_attrib.block_size,
dev->se_sub_dev->se_dev_attrib.hw_max_sectors); dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
...@@ -1212,7 +1051,6 @@ struct se_device *transport_add_device_to_core_hba( ...@@ -1212,7 +1051,6 @@ struct se_device *transport_add_device_to_core_hba(
return NULL; return NULL;
} }
transport_init_queue_obj(&dev->dev_queue_obj);
dev->dev_flags = device_flags; dev->dev_flags = device_flags;
dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED; dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
dev->dev_ptr = transport_dev; dev->dev_ptr = transport_dev;
...@@ -1222,7 +1060,6 @@ struct se_device *transport_add_device_to_core_hba( ...@@ -1222,7 +1060,6 @@ struct se_device *transport_add_device_to_core_hba(
INIT_LIST_HEAD(&dev->dev_list); INIT_LIST_HEAD(&dev->dev_list);
INIT_LIST_HEAD(&dev->dev_sep_list); INIT_LIST_HEAD(&dev->dev_sep_list);
INIT_LIST_HEAD(&dev->dev_tmr_list); INIT_LIST_HEAD(&dev->dev_tmr_list);
INIT_LIST_HEAD(&dev->execute_list);
INIT_LIST_HEAD(&dev->delayed_cmd_list); INIT_LIST_HEAD(&dev->delayed_cmd_list);
INIT_LIST_HEAD(&dev->state_list); INIT_LIST_HEAD(&dev->state_list);
INIT_LIST_HEAD(&dev->qf_cmd_list); INIT_LIST_HEAD(&dev->qf_cmd_list);
...@@ -1261,17 +1098,17 @@ struct se_device *transport_add_device_to_core_hba( ...@@ -1261,17 +1098,17 @@ struct se_device *transport_add_device_to_core_hba(
* Setup the Asymmetric Logical Unit Assignment for struct se_device * Setup the Asymmetric Logical Unit Assignment for struct se_device
*/ */
if (core_setup_alua(dev, force_pt) < 0) if (core_setup_alua(dev, force_pt) < 0)
goto out; goto err_dev_list;
/* /*
* Startup the struct se_device processing thread * Startup the struct se_device processing thread
*/ */
dev->process_thread = kthread_run(transport_processing_thread, dev, dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
"LIO_%s", dev->transport->name); dev->transport->name);
if (IS_ERR(dev->process_thread)) { if (!dev->tmr_wq) {
pr_err("Unable to create kthread: LIO_%s\n", pr_err("Unable to create tmr workqueue for %s\n",
dev->transport->name); dev->transport->name);
goto out; goto err_dev_list;
} }
/* /*
* Setup work_queue for QUEUE_FULL * Setup work_queue for QUEUE_FULL
...@@ -1289,7 +1126,7 @@ struct se_device *transport_add_device_to_core_hba( ...@@ -1289,7 +1126,7 @@ struct se_device *transport_add_device_to_core_hba(
if (!inquiry_prod || !inquiry_rev) { if (!inquiry_prod || !inquiry_rev) {
pr_err("All non TCM/pSCSI plugins require" pr_err("All non TCM/pSCSI plugins require"
" INQUIRY consts\n"); " INQUIRY consts\n");
goto out; goto err_wq;
} }
strncpy(&dev->se_sub_dev->t10_wwn.vendor[0], "LIO-ORG", 8); strncpy(&dev->se_sub_dev->t10_wwn.vendor[0], "LIO-ORG", 8);
...@@ -1299,9 +1136,10 @@ struct se_device *transport_add_device_to_core_hba( ...@@ -1299,9 +1136,10 @@ struct se_device *transport_add_device_to_core_hba(
scsi_dump_inquiry(dev); scsi_dump_inquiry(dev);
return dev; return dev;
out:
kthread_stop(dev->process_thread);
err_wq:
destroy_workqueue(dev->tmr_wq);
err_dev_list:
spin_lock(&hba->device_lock); spin_lock(&hba->device_lock);
list_del(&dev->dev_list); list_del(&dev->dev_list);
hba->dev_count--; hba->dev_count--;
...@@ -1315,35 +1153,54 @@ struct se_device *transport_add_device_to_core_hba( ...@@ -1315,35 +1153,54 @@ struct se_device *transport_add_device_to_core_hba(
} }
EXPORT_SYMBOL(transport_add_device_to_core_hba); EXPORT_SYMBOL(transport_add_device_to_core_hba);
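With the per-device processing thread gone, dev->tmr_wq created above is the only remaining per-device execution context. Below is a hedged sketch of how a TMR is expected to reach it from target core's TMR submission path; the helper and the target_tmr_work handler name are assumptions based on the surrounding series, not code from this hunk.

static void queue_tmr_to_workqueue(struct se_cmd *cmd)	/* illustrative helper */
{
	INIT_WORK(&cmd->work, target_tmr_work);		/* handler name assumed */
	queue_work(cmd->se_dev->tmr_wq, &cmd->work);
}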
/* transport_generic_prepare_cdb():
 *
 * Since the Initiator sees iSCSI devices as LUNs, the SCSI CDB will
 * contain the iSCSI LUN in bits 7-5 of byte 1 as per SAM-2.
 * The point of this is that, since we are mapping iSCSI LUNs to
 * SCSI Target IDs, having a non-zero LUN in the CDB will throw the
 * devices and HBAs for a loop.
 */
static inline void transport_generic_prepare_cdb(
unsigned char *cdb)
{
switch (cdb[0]) {
case READ_10: /* SBC - RDProtect */
case READ_12: /* SBC - RDProtect */
case READ_16: /* SBC - RDProtect */
case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
case VERIFY: /* SBC - VRProtect */
case VERIFY_16: /* SBC - VRProtect */
case WRITE_VERIFY: /* SBC - VRProtect */
case WRITE_VERIFY_12: /* SBC - VRProtect */
case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */
break;
default:
cdb[1] &= 0x1f; /* clear logical unit number */
break;
}
}

static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *);

int target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
{
struct se_device *dev = cmd->se_dev;

if (cmd->unknown_data_length) {
cmd->data_length = size;
} else if (size != cmd->data_length) {
pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
" %u does not match SCSI CDB Length: %u for SAM Opcode:"
" 0x%02x\n", cmd->se_tfo->get_fabric_name(),
cmd->data_length, size, cmd->t_task_cdb[0]);

cmd->cmd_spdtl = size;

if (cmd->data_direction == DMA_TO_DEVICE) {
pr_err("Rejecting underflow/overflow"
" WRITE data\n");
goto out_invalid_cdb_field;
}
/*
 * Reject READ_* or WRITE_* with overflow/underflow for
 * type SCF_SCSI_DATA_CDB.
 */
if (dev->se_sub_dev->se_dev_attrib.block_size != 512) {
pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
" CDB on non 512-byte sector setup subsystem"
" plugin: %s\n", dev->transport->name);
/* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
goto out_invalid_cdb_field;
}
if (size > cmd->data_length) {
cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
cmd->residual_count = (size - cmd->data_length);
} else {
cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
cmd->residual_count = (cmd->data_length - size);
}
cmd->data_length = size;
}

return 0;
out_invalid_cdb_field:
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
return -EINVAL;
}
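A quick worked example of the residual handling in target_cmd_size_check() above; the numbers are illustrative only.

/*
 * Example: the fabric queued 4096 bytes of data, but the CDB's allocation
 * length decodes to 512.  Since size (512) < data_length (4096), the code
 * above sets SCF_UNDERFLOW_BIT, reports residual_count = 4096 - 512 = 3584,
 * and truncates cmd->data_length to 512 before the command is executed.
 */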
/* /*
* Used by fabric modules containing a local struct se_cmd within their * Used by fabric modules containing a local struct se_cmd within their
...@@ -1361,9 +1218,7 @@ void transport_init_se_cmd( ...@@ -1361,9 +1218,7 @@ void transport_init_se_cmd(
INIT_LIST_HEAD(&cmd->se_lun_node); INIT_LIST_HEAD(&cmd->se_lun_node);
INIT_LIST_HEAD(&cmd->se_delayed_node); INIT_LIST_HEAD(&cmd->se_delayed_node);
INIT_LIST_HEAD(&cmd->se_qf_node); INIT_LIST_HEAD(&cmd->se_qf_node);
INIT_LIST_HEAD(&cmd->se_queue_node);
INIT_LIST_HEAD(&cmd->se_cmd_list); INIT_LIST_HEAD(&cmd->se_cmd_list);
INIT_LIST_HEAD(&cmd->execute_list);
INIT_LIST_HEAD(&cmd->state_list); INIT_LIST_HEAD(&cmd->state_list);
init_completion(&cmd->transport_lun_fe_stop_comp); init_completion(&cmd->transport_lun_fe_stop_comp);
init_completion(&cmd->transport_lun_stop_comp); init_completion(&cmd->transport_lun_stop_comp);
...@@ -1418,9 +1273,12 @@ int target_setup_cmd_from_cdb( ...@@ -1418,9 +1273,12 @@ int target_setup_cmd_from_cdb(
struct se_cmd *cmd, struct se_cmd *cmd,
unsigned char *cdb) unsigned char *cdb)
{ {
struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
u32 pr_reg_type = 0;
u8 alua_ascq = 0;
unsigned long flags;
int ret; int ret;
transport_generic_prepare_cdb(cdb);
/* /*
* Ensure that the received CDB is less than the max (252 + 8) bytes * Ensure that the received CDB is less than the max (252 + 8) bytes
* for VARIABLE_LENGTH_CMD * for VARIABLE_LENGTH_CMD
...@@ -1457,15 +1315,66 @@ int target_setup_cmd_from_cdb( ...@@ -1457,15 +1315,66 @@ int target_setup_cmd_from_cdb(
* Copy the original CDB into cmd-> * Copy the original CDB into cmd->
*/ */
memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb)); memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));
/*
* Check for an existing UNIT ATTENTION condition
*/
if (core_scsi3_ua_check(cmd, cdb) < 0) {
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION;
return -EINVAL;
}
ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq);
if (ret != 0) {
/*
* Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
* The ALUA additional sense code qualifier (ASCQ) is determined
* by the ALUA primary or secondary access state..
*/
if (ret > 0) {
pr_debug("[%s]: ALUA TG Port not available, "
"SenseKey: NOT_READY, ASC/ASCQ: "
"0x04/0x%02x\n",
cmd->se_tfo->get_fabric_name(), alua_ascq);
transport_set_sense_codes(cmd, 0x04, alua_ascq);
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY;
return -EINVAL;
}
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
return -EINVAL;
}
/*
 * Check status for SPC-3 Persistent Reservations
 */
if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type)) {
if (su_dev->t10_pr.pr_ops.t10_seq_non_holder(
cmd, cdb, pr_reg_type) != 0) {
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
return -EBUSY;
}
/*
* This means the CDB is allowed for the SCSI Initiator port
* when said port is *NOT* holding the legacy SPC-2 or
* SPC-3 Persistent Reservation.
*/
}
ret = cmd->se_dev->transport->parse_cdb(cmd);
if (ret < 0) if (ret < 0)
return ret; return ret;
spin_lock_irqsave(&cmd->t_state_lock, flags);
cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
/* /*
* Check for SAM Task Attribute Emulation * Check for SAM Task Attribute Emulation
*/ */
...@@ -1503,10 +1412,9 @@ int transport_handle_cdb_direct( ...@@ -1503,10 +1412,9 @@ int transport_handle_cdb_direct(
return -EINVAL; return -EINVAL;
} }
/* /*
* Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE to ensure that
* outstanding descriptors are handled correctly during shutdown via
* transport_wait_for_tasks()
* *
* Also, we don't take cmd->t_state_lock here as we only expect * Also, we don't take cmd->t_state_lock here as we only expect
* this to be called for initial descriptor submission. * this to be called for initial descriptor submission.
...@@ -1540,10 +1448,14 @@ EXPORT_SYMBOL(transport_handle_cdb_direct); ...@@ -1540,10 +1448,14 @@ EXPORT_SYMBOL(transport_handle_cdb_direct);
* @data_dir: DMA data direction * @data_dir: DMA data direction
* @flags: flags for command submission from target_sc_flags_tables * @flags: flags for command submission from target_sc_flags_tables
* *
* Returns non zero to signal active I/O shutdown failure. All other
* setup exceptions will be returned as a SCSI CHECK_CONDITION response,
* but still return zero here.
*
* This may only be called from process context, and also currently * This may only be called from process context, and also currently
* assumes internal allocation of fabric payload buffer by target-core. * assumes internal allocation of fabric payload buffer by target-core.
**/ **/
void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess, int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
unsigned char *cdb, unsigned char *sense, u32 unpacked_lun, unsigned char *cdb, unsigned char *sense, u32 unpacked_lun,
u32 data_length, int task_attr, int data_dir, int flags) u32 data_length, int task_attr, int data_dir, int flags)
{ {
...@@ -1569,7 +1481,9 @@ void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess, ...@@ -1569,7 +1481,9 @@ void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
* for fabrics using TARGET_SCF_ACK_KREF that expect a second * for fabrics using TARGET_SCF_ACK_KREF that expect a second
* kref_put() to happen during fabric packet acknowledgement. * kref_put() to happen during fabric packet acknowledgement.
*/ */
target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF)); rc = target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
if (rc)
return rc;
/* /*
* Signal bidirectional data payloads to target-core * Signal bidirectional data payloads to target-core
*/ */
...@@ -1582,16 +1496,13 @@ void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess, ...@@ -1582,16 +1496,13 @@ void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
transport_send_check_condition_and_sense(se_cmd, transport_send_check_condition_and_sense(se_cmd,
se_cmd->scsi_sense_reason, 0); se_cmd->scsi_sense_reason, 0);
target_put_sess_cmd(se_sess, se_cmd); target_put_sess_cmd(se_sess, se_cmd);
return; return 0;
} }
/*
* Sanitize CDBs via transport_generic_cmd_sequencer() and
* allocate the necessary tasks to complete the received CDB+data
*/
rc = target_setup_cmd_from_cdb(se_cmd, cdb); rc = target_setup_cmd_from_cdb(se_cmd, cdb);
if (rc != 0) { if (rc != 0) {
transport_generic_request_failure(se_cmd); transport_generic_request_failure(se_cmd);
return; return 0;
} }
/* /*
...@@ -1600,14 +1511,8 @@ void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess, ...@@ -1600,14 +1511,8 @@ void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
*/ */
core_alua_check_nonop_delay(se_cmd); core_alua_check_nonop_delay(se_cmd);
/*
* Dispatch se_cmd descriptor to se_lun->lun_se_dev backend
* for immediate execution of READs, otherwise wait for
* transport_generic_handle_data() to be called for WRITEs
* when fabric has filled the incoming buffer.
*/
transport_handle_cdb_direct(se_cmd); transport_handle_cdb_direct(se_cmd);
return; return 0;
} }
EXPORT_SYMBOL(target_submit_cmd); EXPORT_SYMBOL(target_submit_cmd);
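Given the new return-code semantics documented above, a fabric caller now looks roughly like the sketch below. The fabric-side descriptor type and cleanup helper are hypothetical; only the target_submit_cmd() arguments follow this patch.

static void fabric_submit_scsi_cmd(struct fabric_ioctx *ioctx)	/* hypothetical type */
{
	int rc;

	rc = target_submit_cmd(&ioctx->se_cmd, ioctx->se_sess, ioctx->cdb,
			       ioctx->sense, ioctx->unpacked_lun,
			       ioctx->data_length, MSG_SIMPLE_TAG,
			       ioctx->dir, TARGET_SCF_ACK_KREF);
	if (rc != 0) {
		/* Session is being torn down; target core did not take the cmd. */
		fabric_free_ioctx(ioctx);	/* hypothetical cleanup */
	}
}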
...@@ -1662,7 +1567,11 @@ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess, ...@@ -1662,7 +1567,11 @@ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
se_cmd->se_tmr_req->ref_task_tag = tag; se_cmd->se_tmr_req->ref_task_tag = tag;
/* See target_submit_cmd for commentary */ /* See target_submit_cmd for commentary */
target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF)); ret = target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
if (ret) {
core_tmr_release_req(se_cmd->se_tmr_req);
return ret;
}
ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun); ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun);
if (ret) { if (ret) {
...@@ -1679,67 +1588,6 @@ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess, ...@@ -1679,67 +1588,6 @@ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
} }
EXPORT_SYMBOL(target_submit_tmr); EXPORT_SYMBOL(target_submit_tmr);
/*
* Used by fabric module frontends defining a TFO->new_cmd_map() caller
* to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to
* complete setup in TCM process context w/ TFO->new_cmd_map().
*/
int transport_generic_handle_cdb_map(
struct se_cmd *cmd)
{
if (!cmd->se_lun) {
dump_stack();
pr_err("cmd->se_lun is NULL\n");
return -EINVAL;
}
transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP, false);
return 0;
}
EXPORT_SYMBOL(transport_generic_handle_cdb_map);
/* transport_generic_handle_data():
*
*
*/
int transport_generic_handle_data(
struct se_cmd *cmd)
{
/*
* For the software fabric case, then we assume the nexus is being
* failed/shutdown when signals are pending from the kthread context
* caller, so we return a failure. For the HW target mode case running
* in interrupt code, the signal_pending() check is skipped.
*/
if (!in_interrupt() && signal_pending(current))
return -EPERM;
/*
* If the received CDB has already been ABORTED by the generic
* target engine, we now call transport_check_aborted_status()
* to queue any delayed TASK_ABORTED status for the received CDB to the
* fabric module as we are expecting no further incoming DATA OUT
* sequences at this point.
*/
if (transport_check_aborted_status(cmd, 1) != 0)
return 0;
transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE, false);
return 0;
}
EXPORT_SYMBOL(transport_generic_handle_data);
/* transport_generic_handle_tmr():
*
*
*/
int transport_generic_handle_tmr(
struct se_cmd *cmd)
{
transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR, false);
return 0;
}
EXPORT_SYMBOL(transport_generic_handle_tmr);
/* /*
* If the cmd is active, request it to be stopped and sleep until it * If the cmd is active, request it to be stopped and sleep until it
* has completed. * has completed.
...@@ -1797,6 +1645,7 @@ void transport_generic_request_failure(struct se_cmd *cmd) ...@@ -1797,6 +1645,7 @@ void transport_generic_request_failure(struct se_cmd *cmd)
case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE: case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
case TCM_UNKNOWN_MODE_PAGE: case TCM_UNKNOWN_MODE_PAGE:
case TCM_WRITE_PROTECTED: case TCM_WRITE_PROTECTED:
case TCM_ADDRESS_OUT_OF_RANGE:
case TCM_CHECK_CONDITION_ABORT_CMD: case TCM_CHECK_CONDITION_ABORT_CMD:
case TCM_CHECK_CONDITION_UNIT_ATTENTION: case TCM_CHECK_CONDITION_UNIT_ATTENTION:
case TCM_CHECK_CONDITION_NOT_READY: case TCM_CHECK_CONDITION_NOT_READY:
...@@ -1832,13 +1681,7 @@ void transport_generic_request_failure(struct se_cmd *cmd) ...@@ -1832,13 +1681,7 @@ void transport_generic_request_failure(struct se_cmd *cmd)
cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
break; break;
} }
/*
* If a fabric does not define a cmd->se_tfo->new_cmd_map caller,
* make the call to transport_send_check_condition_and_sense()
* directly. Otherwise expect the fabric to make the call to
* transport_send_check_condition_and_sense() after handling
* possible unsolicited write data payloads.
*/
ret = transport_send_check_condition_and_sense(cmd, ret = transport_send_check_condition_and_sense(cmd,
cmd->scsi_sense_reason, 0); cmd->scsi_sense_reason, 0);
if (ret == -EAGAIN || ret == -ENOMEM) if (ret == -EAGAIN || ret == -ENOMEM)
...@@ -1856,1193 +1699,204 @@ void transport_generic_request_failure(struct se_cmd *cmd) ...@@ -1856,1193 +1699,204 @@ void transport_generic_request_failure(struct se_cmd *cmd)
} }
EXPORT_SYMBOL(transport_generic_request_failure); EXPORT_SYMBOL(transport_generic_request_failure);
static inline u32 transport_lba_21(unsigned char *cdb) static void __target_execute_cmd(struct se_cmd *cmd)
{
return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
}
static inline u32 transport_lba_32(unsigned char *cdb)
{
return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
}
static inline unsigned long long transport_lba_64(unsigned char *cdb)
{
unsigned int __v1, __v2;
__v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
__v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}
/*
* For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
*/
static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
{
unsigned int __v1, __v2;
__v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
__v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];
return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}
static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd)
{
unsigned long flags;
spin_lock_irqsave(&se_cmd->t_state_lock, flags);
se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
}
/*
* Called from Fabric Module context from transport_execute_tasks()
*
* The return of this function determines if the tasks from struct se_cmd
* get added to the execution queue in transport_execute_tasks(),
* or are added to the delayed or ordered lists here.
*/
static inline int transport_execute_task_attr(struct se_cmd *cmd)
{ {
if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) int error = 0;
return 1;
/*
* Check for the existence of HEAD_OF_QUEUE, and if true return 1
* to allow the passed struct se_cmd list of tasks to the front of the list.
*/
if (cmd->sam_task_attr == MSG_HEAD_TAG) {
pr_debug("Added HEAD_OF_QUEUE for CDB:"
" 0x%02x, se_ordered_id: %u\n",
cmd->t_task_cdb[0],
cmd->se_ordered_id);
return 1;
} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
atomic_inc(&cmd->se_dev->dev_ordered_sync);
smp_mb__after_atomic_inc();
pr_debug("Added ORDERED for CDB: 0x%02x to ordered" spin_lock_irq(&cmd->t_state_lock);
" list, se_ordered_id: %u\n", cmd->transport_state |= (CMD_T_BUSY|CMD_T_SENT);
cmd->t_task_cdb[0], spin_unlock_irq(&cmd->t_state_lock);
cmd->se_ordered_id);
/*
* Add ORDERED command to tail of execution queue if
* no other older commands exist that need to be
* completed first.
*/
if (!atomic_read(&cmd->se_dev->simple_cmds))
return 1;
} else {
/*
* For SIMPLE and UNTAGGED Task Attribute commands
*/
atomic_inc(&cmd->se_dev->simple_cmds);
smp_mb__after_atomic_inc();
}
/*
* Otherwise if one or more outstanding ORDERED task attribute exist,
* add the dormant task(s) built for the passed struct se_cmd to the
* execution queue and become in Active state for this struct se_device.
*/
if (atomic_read(&cmd->se_dev->dev_ordered_sync) != 0) {
/*
* Otherwise, add cmd w/ tasks to delayed cmd queue that
* will be drained upon completion of HEAD_OF_QUEUE task.
*/
spin_lock(&cmd->se_dev->delayed_cmd_lock);
cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR;
list_add_tail(&cmd->se_delayed_node,
&cmd->se_dev->delayed_cmd_list);
spin_unlock(&cmd->se_dev->delayed_cmd_lock);
pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to"
" delayed CMD list, se_ordered_id: %u\n",
cmd->t_task_cdb[0], cmd->sam_task_attr,
cmd->se_ordered_id);
/*
* Return zero to let transport_execute_tasks() know
* not to add the delayed tasks to the execution list.
*/
return 0;
}
/*
* Otherwise, no ORDERED task attributes exist..
*/
return 1;
}
/*
* Called from fabric module context in transport_generic_new_cmd() and
* transport_generic_process_write()
*/
static void transport_execute_tasks(struct se_cmd *cmd)
{
int add_tasks;
struct se_device *se_dev = cmd->se_dev;
/*
* Call transport_cmd_check_stop() to see if a fabric exception
* has occurred that prevents execution.
*/
if (!transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING)) {
/*
* Check for SAM Task Attribute emulation and HEAD_OF_QUEUE
* attribute for the tasks of the received struct se_cmd CDB
*/
add_tasks = transport_execute_task_attr(cmd);
if (add_tasks) {
__transport_execute_tasks(se_dev, cmd);
return;
}
}
__transport_execute_tasks(se_dev, NULL);
}
static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *new_cmd)
{
int error;
struct se_cmd *cmd = NULL;
unsigned long flags;
check_depth:
spin_lock_irq(&dev->execute_task_lock);
if (new_cmd != NULL)
__target_add_to_execute_list(new_cmd);
if (list_empty(&dev->execute_list)) {
spin_unlock_irq(&dev->execute_task_lock);
return 0;
}
cmd = list_first_entry(&dev->execute_list, struct se_cmd, execute_list);
__target_remove_from_execute_list(cmd);
spin_unlock_irq(&dev->execute_task_lock);
spin_lock_irqsave(&cmd->t_state_lock, flags);
cmd->transport_state |= CMD_T_BUSY;
cmd->transport_state |= CMD_T_SENT;
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
if (cmd->execute_cmd) if (cmd->execute_cmd)
error = cmd->execute_cmd(cmd); error = cmd->execute_cmd(cmd);
else {
error = dev->transport->execute_cmd(cmd, cmd->t_data_sg,
cmd->t_data_nents, cmd->data_direction);
}
if (error != 0) { if (error) {
spin_lock_irqsave(&cmd->t_state_lock, flags); spin_lock_irq(&cmd->t_state_lock);
cmd->transport_state &= ~CMD_T_BUSY; cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
cmd->transport_state &= ~CMD_T_SENT; spin_unlock_irq(&cmd->t_state_lock);
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
transport_generic_request_failure(cmd); transport_generic_request_failure(cmd);
} }
new_cmd = NULL;
goto check_depth;
return 0;
} }
static inline u32 transport_get_sectors_6( void target_execute_cmd(struct se_cmd *cmd)
unsigned char *cdb,
struct se_cmd *cmd,
int *ret)
{ {
struct se_device *dev = cmd->se_dev; struct se_device *dev = cmd->se_dev;
/* /*
* Assume TYPE_DISK for non struct se_device objects. * If the received CDB has already been aborted, stop processing it here.
* Use 8-bit sector value.
*/
if (!dev)
goto type_disk;
/*
* Use 24-bit allocation length for TYPE_TAPE.
*/
if (dev->transport->get_device_type(dev) == TYPE_TAPE)
return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4];
/*
* Everything else assume TYPE_DISK Sector CDB location.
* Use 8-bit sector value. SBC-3 says:
*
* A TRANSFER LENGTH field set to zero specifies that 256
* logical blocks shall be written. Any other value
* specifies the number of logical blocks that shall be
* written.
*/ */
type_disk: if (transport_check_aborted_status(cmd, 1))
return cdb[4] ? : 256; return;
}
static inline u32 transport_get_sectors_10(
unsigned char *cdb,
struct se_cmd *cmd,
int *ret)
{
struct se_device *dev = cmd->se_dev;
/* /*
* Assume TYPE_DISK for non struct se_device objects. * Determine if IOCTL context caller is requesting the stopping of this
* Use 16-bit sector value. * command for LUN shutdown purposes.
*/ */
if (!dev) spin_lock_irq(&cmd->t_state_lock);
goto type_disk; if (cmd->transport_state & CMD_T_LUN_STOP) {
pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n",
__func__, __LINE__, cmd->se_tfo->get_task_tag(cmd));
/* cmd->transport_state &= ~CMD_T_ACTIVE;
* XXX_10 is not defined in SSC, throw an exception spin_unlock_irq(&cmd->t_state_lock);
*/ complete(&cmd->transport_lun_stop_comp);
if (dev->transport->get_device_type(dev) == TYPE_TAPE) { return;
*ret = -EINVAL;
return 0;
} }
/*
* Everything else assume TYPE_DISK Sector CDB location.
* Use 16-bit sector value.
*/
type_disk:
return (u32)(cdb[7] << 8) + cdb[8];
}
static inline u32 transport_get_sectors_12(
unsigned char *cdb,
struct se_cmd *cmd,
int *ret)
{
struct se_device *dev = cmd->se_dev;
/* /*
* Assume TYPE_DISK for non struct se_device objects. * Determine if frontend context caller is requesting the stopping of
* Use 32-bit sector value. * this command for frontend exceptions.
*/ */
if (!dev) if (cmd->transport_state & CMD_T_STOP) {
goto type_disk; pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n",
__func__, __LINE__,
cmd->se_tfo->get_task_tag(cmd));
/* spin_unlock_irq(&cmd->t_state_lock);
* XXX_12 is not defined in SSC, throw an exception complete(&cmd->t_transport_stop_comp);
*/ return;
if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
*ret = -EINVAL;
return 0;
} }
/* cmd->t_state = TRANSPORT_PROCESSING;
* Everything else assume TYPE_DISK Sector CDB location. spin_unlock_irq(&cmd->t_state_lock);
* Use 32-bit sector value.
*/
type_disk:
return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
}
static inline u32 transport_get_sectors_16( if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
unsigned char *cdb, goto execute;
struct se_cmd *cmd,
int *ret)
{
struct se_device *dev = cmd->se_dev;
/*
* Assume TYPE_DISK for non struct se_device objects.
* Use 32-bit sector value.
*/
if (!dev)
goto type_disk;
/* /*
* Use 24-bit allocation length for TYPE_TAPE. * Check for the existence of HEAD_OF_QUEUE, and if true return 1
*/ * to allow the passed struct se_cmd list of tasks to the front of the list.
if (dev->transport->get_device_type(dev) == TYPE_TAPE)
return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14];
type_disk:
return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
(cdb[12] << 8) + cdb[13];
}
/*
* Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
*/
static inline u32 transport_get_sectors_32(
unsigned char *cdb,
struct se_cmd *cmd,
int *ret)
{
/*
* Assume TYPE_DISK for non struct se_device objects.
* Use 32-bit sector value.
*/ */
return (u32)(cdb[28] << 24) + (cdb[29] << 16) + switch (cmd->sam_task_attr) {
(cdb[30] << 8) + cdb[31]; case MSG_HEAD_TAG:
pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x, "
"se_ordered_id: %u\n",
cmd->t_task_cdb[0], cmd->se_ordered_id);
goto execute;
case MSG_ORDERED_TAG:
atomic_inc(&dev->dev_ordered_sync);
smp_mb__after_atomic_inc();
} pr_debug("Added ORDERED for CDB: 0x%02x to ordered list, "
" se_ordered_id: %u\n",
cmd->t_task_cdb[0], cmd->se_ordered_id);
static inline u32 transport_get_size( /*
u32 sectors, * Execute an ORDERED command if no other older commands
unsigned char *cdb, * exist that need to be completed first.
struct se_cmd *cmd) */
{ if (!atomic_read(&dev->simple_cmds))
struct se_device *dev = cmd->se_dev; goto execute;
break;
if (dev->transport->get_device_type(dev) == TYPE_TAPE) { default:
if (cdb[1] & 1) { /* sectors */ /*
return dev->se_sub_dev->se_dev_attrib.block_size * sectors; * For SIMPLE and UNTAGGED Task Attribute commands
} else /* bytes */ */
return sectors; atomic_inc(&dev->simple_cmds);
smp_mb__after_atomic_inc();
break;
} }
pr_debug("Returning block_size: %u, sectors: %u == %u for" if (atomic_read(&dev->dev_ordered_sync) != 0) {
" %s object\n", dev->se_sub_dev->se_dev_attrib.block_size, spin_lock(&dev->delayed_cmd_lock);
sectors, dev->se_sub_dev->se_dev_attrib.block_size * sectors, list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list);
dev->transport->name); spin_unlock(&dev->delayed_cmd_lock);
return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
}
static void transport_xor_callback(struct se_cmd *cmd) pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to"
{ " delayed CMD list, se_ordered_id: %u\n",
unsigned char *buf, *addr; cmd->t_task_cdb[0], cmd->sam_task_attr,
struct scatterlist *sg; cmd->se_ordered_id);
unsigned int offset;
int i;
int count;
/*
* From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
*
* 1) read the specified logical block(s);
* 2) transfer logical blocks from the data-out buffer;
* 3) XOR the logical blocks transferred from the data-out buffer with
* the logical blocks read, storing the resulting XOR data in a buffer;
* 4) if the DISABLE WRITE bit is set to zero, then write the logical
* blocks transferred from the data-out buffer; and
* 5) transfer the resulting XOR data to the data-in buffer.
*/
buf = kmalloc(cmd->data_length, GFP_KERNEL);
if (!buf) {
pr_err("Unable to allocate xor_callback buf\n");
return; return;
} }
/*
* Copy the scatterlist WRITE buffer located at cmd->t_data_sg
* into the locally allocated *buf
*/
sg_copy_to_buffer(cmd->t_data_sg,
cmd->t_data_nents,
buf,
cmd->data_length);
execute:
/* /*
* Now perform the XOR against the BIDI read memory located at * Otherwise, no ORDERED task attributes exist..
* cmd->t_mem_bidi_list
*/ */
__target_execute_cmd(cmd);
offset = 0;
for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
addr = kmap_atomic(sg_page(sg));
if (!addr)
goto out;
for (i = 0; i < sg->length; i++)
*(addr + sg->offset + i) ^= *(buf + offset + i);
offset += sg->length;
kunmap_atomic(addr);
}
out:
kfree(buf);
} }
EXPORT_SYMBOL(target_execute_cmd);
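For context on how fabrics consume target_execute_cmd(): with transport_generic_handle_data() removed, a fabric is expected to call it directly once all DATA-OUT for a WRITE has arrived. A minimal sketch; the fabric-side callback name is an assumption.

static void fabric_handle_data_out_complete(struct se_cmd *se_cmd)
{
	/* All WRITE payload has been received from the wire; kick the backend. */
	target_execute_cmd(se_cmd);
}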
/* /*
* Used to obtain Sense Data from underlying Linux/SCSI struct scsi_cmnd * Used to obtain Sense Data from underlying Linux/SCSI struct scsi_cmnd
*/ */
static int transport_get_sense_data(struct se_cmd *cmd) static int transport_get_sense_data(struct se_cmd *cmd)
{ {
unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL; unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL;
struct se_device *dev = cmd->se_dev; struct se_device *dev = cmd->se_dev;
unsigned long flags; unsigned long flags;
u32 offset = 0; u32 offset = 0;
WARN_ON(!cmd->se_lun);
if (!dev)
return 0;
spin_lock_irqsave(&cmd->t_state_lock, flags);
if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return 0;
}
if (!(cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE))
goto out;
if (!dev->transport->get_sense_buffer) {
pr_err("dev->transport->get_sense_buffer is NULL\n");
goto out;
}
sense_buffer = dev->transport->get_sense_buffer(cmd);
if (!sense_buffer) {
pr_err("ITT 0x%08x cmd %p: Unable to locate"
" sense buffer for task with sense\n",
cmd->se_tfo->get_task_tag(cmd), cmd);
goto out;
}
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
offset = cmd->se_tfo->set_fabric_sense_len(cmd, TRANSPORT_SENSE_BUFFER);
memcpy(&buffer[offset], sense_buffer, TRANSPORT_SENSE_BUFFER);
/* Automatically padded */
cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset;
pr_debug("HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x and sense\n",
dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status);
return 0;
out:
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return -1;
}
static inline long long transport_dev_end_lba(struct se_device *dev)
{
return dev->transport->get_blocks(dev) + 1;
}
static int transport_cmd_get_valid_sectors(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
u32 sectors;
if (dev->transport->get_device_type(dev) != TYPE_DISK)
return 0;
sectors = (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size);
if ((cmd->t_task_lba + sectors) > transport_dev_end_lba(dev)) {
pr_err("LBA: %llu Sectors: %u exceeds"
" transport_dev_end_lba(): %llu\n",
cmd->t_task_lba, sectors,
transport_dev_end_lba(dev));
return -EINVAL;
}
return 0;
}
static int target_check_write_same_discard(unsigned char *flags, struct se_device *dev)
{
/*
* Determine if the received WRITE_SAME is used for direct
* passthrough into Linux/SCSI with struct request via TCM/pSCSI
* or we are signaling the use of internal WRITE_SAME + UNMAP=1
* emulation for Linux/BLOCK discard with TCM/IBLOCK code.
*/
int passthrough = (dev->transport->transport_type ==
TRANSPORT_PLUGIN_PHBA_PDEV);
if (!passthrough) {
if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
pr_err("WRITE_SAME PBDATA and LBDATA"
" bits not supported for Block Discard"
" Emulation\n");
return -ENOSYS;
}
/*
* Currently for the emulated case we only accept
* tpws with the UNMAP=1 bit set.
*/
if (!(flags[0] & 0x08)) {
pr_err("WRITE_SAME w/o UNMAP bit not"
" supported for Block Discard Emulation\n");
return -ENOSYS;
}
}
return 0;
}
/* transport_generic_cmd_sequencer():
*
* Generic Command Sequencer that should work for most DAS transport
* drivers.
*
* Called from target_setup_cmd_from_cdb() in the $FABRIC_MOD
* RX Thread.
*
* FIXME: Need to support other SCSI OPCODES here as well.
*/
static int transport_generic_cmd_sequencer(
struct se_cmd *cmd,
unsigned char *cdb)
{
struct se_device *dev = cmd->se_dev;
struct se_subsystem_dev *su_dev = dev->se_sub_dev;
int ret = 0, sector_ret = 0, passthrough;
u32 sectors = 0, size = 0, pr_reg_type = 0;
u16 service_action;
u8 alua_ascq = 0;
/*
* Check for an existing UNIT ATTENTION condition
*/
if (core_scsi3_ua_check(cmd, cdb) < 0) {
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION;
return -EINVAL;
}
/*
* Check status of Asymmetric Logical Unit Assignment port
*/
ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq);
if (ret != 0) {
/*
* Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
* The ALUA additional sense code qualifier (ASCQ) is determined
* by the ALUA primary or secondary access state..
*/
if (ret > 0) {
pr_debug("[%s]: ALUA TG Port not available,"
" SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n",
cmd->se_tfo->get_fabric_name(), alua_ascq);
transport_set_sense_codes(cmd, 0x04, alua_ascq);
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY;
return -EINVAL;
}
goto out_invalid_cdb_field;
}
/*
* Check status for SPC-3 Persistent Reservations
*/
if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type) != 0) {
if (su_dev->t10_pr.pr_ops.t10_seq_non_holder(
cmd, cdb, pr_reg_type) != 0) {
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
return -EBUSY;
}
/*
* This means the CDB is allowed for the SCSI Initiator port
* when said port is *NOT* holding the legacy SPC-2 or
* SPC-3 Persistent Reservation.
*/
}
/*
* If we operate in passthrough mode we skip most CDB emulation and
* instead hand the commands down to the physical SCSI device.
*/
passthrough =
(dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV);
switch (cdb[0]) {
case READ_6:
sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
if (sector_ret)
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->t_task_lba = transport_lba_21(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
case READ_10:
sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
if (sector_ret)
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->t_task_lba = transport_lba_32(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
case READ_12:
sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
if (sector_ret)
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->t_task_lba = transport_lba_32(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
case READ_16:
sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
if (sector_ret)
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->t_task_lba = transport_lba_64(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
case WRITE_6:
sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
if (sector_ret)
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->t_task_lba = transport_lba_21(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
case WRITE_10:
case WRITE_VERIFY:
sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
if (sector_ret)
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->t_task_lba = transport_lba_32(cdb);
if (cdb[1] & 0x8)
cmd->se_cmd_flags |= SCF_FUA;
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
case WRITE_12:
sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
if (sector_ret)
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->t_task_lba = transport_lba_32(cdb);
if (cdb[1] & 0x8)
cmd->se_cmd_flags |= SCF_FUA;
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
case WRITE_16:
sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
if (sector_ret)
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->t_task_lba = transport_lba_64(cdb);
if (cdb[1] & 0x8)
cmd->se_cmd_flags |= SCF_FUA;
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
case XDWRITEREAD_10:
if ((cmd->data_direction != DMA_TO_DEVICE) ||
!(cmd->se_cmd_flags & SCF_BIDI))
goto out_invalid_cdb_field;
sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
if (sector_ret)
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->t_task_lba = transport_lba_32(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
/*
* Do not allow BIDI commands for passthrough mode.
*/
if (passthrough)
goto out_unsupported_cdb;
/*
* Setup BIDI XOR callback to be run after I/O completion.
*/
cmd->transport_complete_callback = &transport_xor_callback;
if (cdb[1] & 0x8)
cmd->se_cmd_flags |= SCF_FUA;
break;
case VARIABLE_LENGTH_CMD:
service_action = get_unaligned_be16(&cdb[8]);
switch (service_action) {
case XDWRITEREAD_32:
sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
if (sector_ret)
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
/*
* Use WRITE_32 and READ_32 opcodes for the emulated
* XDWRITE_READ_32 logic.
*/
cmd->t_task_lba = transport_lba_64_ext(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
/*
* Do not allow BIDI commands for passthrough mode.
*/
if (passthrough)
goto out_unsupported_cdb;
/*
* Setup BIDI XOR callback to be run after I/O
* completion.
*/
cmd->transport_complete_callback = &transport_xor_callback;
if (cdb[1] & 0x8)
cmd->se_cmd_flags |= SCF_FUA;
break;
case WRITE_SAME_32:
sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
if (sector_ret)
goto out_unsupported_cdb;
if (sectors)
size = transport_get_size(1, cdb, cmd);
else {
pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
" supported\n");
goto out_invalid_cdb_field;
}
cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
if (target_check_write_same_discard(&cdb[10], dev) < 0)
goto out_unsupported_cdb;
if (!passthrough)
cmd->execute_cmd = target_emulate_write_same;
break;
default:
pr_err("VARIABLE_LENGTH_CMD service action"
" 0x%04x not supported\n", service_action);
goto out_unsupported_cdb;
}
break;
case MAINTENANCE_IN:
if (dev->transport->get_device_type(dev) != TYPE_ROM) {
/* MAINTENANCE_IN from SCC-2 */
/*
* Check for emulated MI_REPORT_TARGET_PGS.
*/
if ((cdb[1] & 0x1f) == MI_REPORT_TARGET_PGS &&
su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
cmd->execute_cmd =
target_emulate_report_target_port_groups;
}
size = (cdb[6] << 24) | (cdb[7] << 16) |
(cdb[8] << 8) | cdb[9];
} else {
/* GPCMD_SEND_KEY from multi media commands */
size = (cdb[8] << 8) + cdb[9];
}
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case MODE_SELECT:
size = cdb[4];
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case MODE_SELECT_10:
size = (cdb[7] << 8) + cdb[8];
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case MODE_SENSE:
size = cdb[4];
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
if (!passthrough)
cmd->execute_cmd = target_emulate_modesense;
break;
case MODE_SENSE_10:
size = (cdb[7] << 8) + cdb[8];
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
if (!passthrough)
cmd->execute_cmd = target_emulate_modesense;
break;
case GPCMD_READ_BUFFER_CAPACITY:
case GPCMD_SEND_OPC:
case LOG_SELECT:
case LOG_SENSE:
size = (cdb[7] << 8) + cdb[8];
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case READ_BLOCK_LIMITS:
size = READ_BLOCK_LEN;
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case GPCMD_GET_CONFIGURATION:
case GPCMD_READ_FORMAT_CAPACITIES:
case GPCMD_READ_DISC_INFO:
case GPCMD_READ_TRACK_RZONE_INFO:
size = (cdb[7] << 8) + cdb[8];
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case PERSISTENT_RESERVE_IN:
if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
cmd->execute_cmd = target_scsi3_emulate_pr_in;
size = (cdb[7] << 8) + cdb[8];
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case PERSISTENT_RESERVE_OUT:
if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
cmd->execute_cmd = target_scsi3_emulate_pr_out;
size = (cdb[7] << 8) + cdb[8];
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case GPCMD_MECHANISM_STATUS:
case GPCMD_READ_DVD_STRUCTURE:
size = (cdb[8] << 8) + cdb[9];
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case READ_POSITION:
size = READ_POSITION_LEN;
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case MAINTENANCE_OUT:
if (dev->transport->get_device_type(dev) != TYPE_ROM) {
/* MAINTENANCE_OUT from SCC-2
*
* Check for emulated MO_SET_TARGET_PGS.
*/
if (cdb[1] == MO_SET_TARGET_PGS &&
su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
cmd->execute_cmd =
target_emulate_set_target_port_groups;
}
size = (cdb[6] << 24) | (cdb[7] << 16) |
(cdb[8] << 8) | cdb[9];
} else {
/* GPCMD_REPORT_KEY from multi media commands */
size = (cdb[8] << 8) + cdb[9];
}
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case INQUIRY:
size = (cdb[3] << 8) + cdb[4];
/*
* Do implicit HEAD_OF_QUEUE processing for INQUIRY.
* See spc4r17 section 5.3
*/
if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
cmd->sam_task_attr = MSG_HEAD_TAG;
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
if (!passthrough)
cmd->execute_cmd = target_emulate_inquiry;
break;
case READ_BUFFER:
size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case READ_CAPACITY:
size = READ_CAP_LEN;
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
if (!passthrough)
cmd->execute_cmd = target_emulate_readcapacity;
break;
case READ_MEDIA_SERIAL_NUMBER:
case SECURITY_PROTOCOL_IN:
case SECURITY_PROTOCOL_OUT:
size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case SERVICE_ACTION_IN:
switch (cmd->t_task_cdb[1] & 0x1f) {
case SAI_READ_CAPACITY_16:
if (!passthrough)
cmd->execute_cmd =
target_emulate_readcapacity_16;
break;
default:
if (passthrough)
break;
pr_err("Unsupported SA: 0x%02x\n",
cmd->t_task_cdb[1] & 0x1f);
goto out_invalid_cdb_field;
}
/*FALLTHROUGH*/
case ACCESS_CONTROL_IN:
case ACCESS_CONTROL_OUT:
case EXTENDED_COPY:
case READ_ATTRIBUTE:
case RECEIVE_COPY_RESULTS:
case WRITE_ATTRIBUTE:
size = (cdb[10] << 24) | (cdb[11] << 16) |
(cdb[12] << 8) | cdb[13];
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case RECEIVE_DIAGNOSTIC:
case SEND_DIAGNOSTIC:
size = (cdb[3] << 8) | cdb[4];
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
/* #warning FIXME: Figure out correct GPCMD_READ_CD blocksize. */
#if 0
case GPCMD_READ_CD:
sectors = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
size = (2336 * sectors);
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
#endif
case READ_TOC:
size = cdb[8];
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case REQUEST_SENSE:
size = cdb[4];
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
if (!passthrough)
cmd->execute_cmd = target_emulate_request_sense;
break;
case READ_ELEMENT_STATUS:
size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9];
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case WRITE_BUFFER:
size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case RESERVE:
case RESERVE_10:
/*
* The SPC-2 RESERVE does not contain a size in the SCSI CDB.
* Assume the passthrough or $FABRIC_MOD will tell us about it.
*/
if (cdb[0] == RESERVE_10)
size = (cdb[7] << 8) | cdb[8];
else
size = cmd->data_length;
/*
* Setup the legacy emulated handler for SPC-2 and
* >= SPC-3 compatible reservation handling (CRH=1)
* Otherwise, we assume the underlying SCSI logic is
* running in SPC_PASSTHROUGH, and wants reservations
* emulation disabled.
*/
if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
cmd->execute_cmd = target_scsi2_reservation_reserve;
cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
break;
case RELEASE:
case RELEASE_10:
/*
* The SPC-2 RELEASE does not contain a size in the SCSI CDB.
* Assume the passthrough or $FABRIC_MOD will tell us about it.
*/
if (cdb[0] == RELEASE_10)
size = (cdb[7] << 8) | cdb[8];
else
size = cmd->data_length;
if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
cmd->execute_cmd = target_scsi2_reservation_release;
cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
break;
case SYNCHRONIZE_CACHE:
case SYNCHRONIZE_CACHE_16:
/*
* Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
*/
if (cdb[0] == SYNCHRONIZE_CACHE) {
sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
cmd->t_task_lba = transport_lba_32(cdb);
} else {
sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
cmd->t_task_lba = transport_lba_64(cdb);
}
if (sector_ret)
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
if (passthrough)
break;
/*
* Check to ensure that LBA + Range does not extend past the end of the
* device for IBLOCK and FILEIO ->do_sync_cache() backend calls
*/
if ((cmd->t_task_lba != 0) || (sectors != 0)) {
if (transport_cmd_get_valid_sectors(cmd) < 0)
goto out_invalid_cdb_field;
}
cmd->execute_cmd = target_emulate_synchronize_cache;
break;
case UNMAP:
size = get_unaligned_be16(&cdb[7]);
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
if (!passthrough)
cmd->execute_cmd = target_emulate_unmap;
break;
case WRITE_SAME_16:
sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
if (sector_ret)
goto out_unsupported_cdb;
		if (sectors)
			size = transport_get_size(1, cdb, cmd);
		else {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			goto out_invalid_cdb_field;
		}
		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;

		if (target_check_write_same_discard(&cdb[1], dev) < 0)
			goto out_unsupported_cdb;
		if (!passthrough)
			cmd->execute_cmd = target_emulate_write_same;
		break;
	case WRITE_SAME:
		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		if (sectors)
			size = transport_get_size(1, cdb, cmd);
		else {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			goto out_invalid_cdb_field;
		}
		cmd->t_task_lba = get_unaligned_be32(&cdb[2]);
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		/*
		 * Follow sbcr26 with WRITE_SAME (10) and check for the existence
		 * of byte 1 bit 3 UNMAP instead of original reserved field
		 */
		if (target_check_write_same_discard(&cdb[1], dev) < 0)
			goto out_unsupported_cdb;
		if (!passthrough)
			cmd->execute_cmd = target_emulate_write_same;
		break;
	case ALLOW_MEDIUM_REMOVAL:
	case ERASE:
	case REZERO_UNIT:
	case SEEK_10:
	case SPACE:
	case START_STOP:
	case TEST_UNIT_READY:
	case VERIFY:
	case WRITE_FILEMARKS:
		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
		if (!passthrough)
			cmd->execute_cmd = target_emulate_noop;
		break;
	case GPCMD_CLOSE_TRACK:
	case INITIALIZE_ELEMENT_STATUS:
	case GPCMD_LOAD_UNLOAD:
	case GPCMD_SET_SPEED:
	case MOVE_MEDIUM:
		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
		break;
	case REPORT_LUNS:
		cmd->execute_cmd = target_report_luns;
		size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
		/*
		 * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
		 * See spc4r17 section 5.3
		 */
		if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
			cmd->sam_task_attr = MSG_HEAD_TAG;
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case GET_EVENT_STATUS_NOTIFICATION:
		size = (cdb[7] << 8) | cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case ATA_16:
		/* Only support ATA passthrough to pSCSI backends. */
		if (!passthrough)
			goto out_unsupported_cdb;
		/* T_LENGTH */
		switch (cdb[2] & 0x3) {
		case 0x0:
			sectors = 0;
			break;
		case 0x1:
			sectors = (((cdb[1] & 0x1) ? cdb[3] : 0) << 8) | cdb[4];
			break;
		case 0x2:
			sectors = (((cdb[1] & 0x1) ? cdb[5] : 0) << 8) | cdb[6];
			break;
		case 0x3:
			pr_err("T_LENGTH=0x3 not supported for ATA_16\n");
			goto out_invalid_cdb_field;
		}
		/* BYTE_BLOCK */
		if (cdb[2] & 0x4) {
			/* BLOCK T_TYPE: 512 or sector */
			size = sectors * ((cdb[2] & 0x10) ?
				dev->se_sub_dev->se_dev_attrib.block_size : 512);
		} else {
			/* BYTE */
			size = sectors;
		}
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	default:
		pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode"
			" 0x%02x, sending CHECK_CONDITION.\n",
			cmd->se_tfo->get_fabric_name(), cdb[0]);
		goto out_unsupported_cdb;
	}

	if (cmd->unknown_data_length)
		cmd->data_length = size;

	if (size != cmd->data_length) {
		pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
			" %u does not match SCSI CDB Length: %u for SAM Opcode:"
			" 0x%02x\n", cmd->se_tfo->get_fabric_name(),
			cmd->data_length, size, cdb[0]);
		cmd->cmd_spdtl = size;

		if (cmd->data_direction == DMA_TO_DEVICE) {
			pr_err("Rejecting underflow/overflow"
				" WRITE data\n");
			goto out_invalid_cdb_field;
		}
		/*
		 * Reject READ_* or WRITE_* with overflow/underflow for
		 * type SCF_SCSI_DATA_SG_IO_CDB.
		 */
		if (!ret && (dev->se_sub_dev->se_dev_attrib.block_size != 512)) {
			pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
				" CDB on non 512-byte sector setup subsystem"
				" plugin: %s\n", dev->transport->name);
			/* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
			goto out_invalid_cdb_field;
		}

		if (size > cmd->data_length) {
			cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
			cmd->residual_count = (size - cmd->data_length);
		} else {
			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
			cmd->residual_count = (cmd->data_length - size);
		}
		cmd->data_length = size;
	}

	if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
		if (sectors > su_dev->se_dev_attrib.fabric_max_sectors) {
			printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
				" big sectors %u exceeds fabric_max_sectors:"
				" %u\n", cdb[0], sectors,
				su_dev->se_dev_attrib.fabric_max_sectors);
			goto out_invalid_cdb_field;
		}
		if (sectors > su_dev->se_dev_attrib.hw_max_sectors) {
			printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
				" big sectors %u exceeds backend hw_max_sectors:"
				" %u\n", cdb[0], sectors,
				su_dev->se_dev_attrib.hw_max_sectors);
			goto out_invalid_cdb_field;
		}
	}

	/* reject any command that we don't have a handler for */
	if (!(passthrough || cmd->execute_cmd ||
	     (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)))
		goto out_unsupported_cdb;

	transport_set_supported_SAM_opcode(cmd);
	return ret;

out_unsupported_cdb:
	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
	cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
	return -EINVAL;
out_invalid_cdb_field:
	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
	cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
	return -EINVAL;
}

/*
 * Replacement (new) code shown in the right-hand column of this hunk: the
 * tail of the reworked get-sense-data helper, followed by the new
 * target_restart_delayed_cmds().
 */
	WARN_ON(!cmd->se_lun);

	if (!dev)
		return 0;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return 0;
	}

	if (!(cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE))
		goto out;

	if (!dev->transport->get_sense_buffer) {
		pr_err("dev->transport->get_sense_buffer is NULL\n");
		goto out;
	}

	sense_buffer = dev->transport->get_sense_buffer(cmd);
	if (!sense_buffer) {
		pr_err("ITT 0x%08x cmd %p: Unable to locate"
			" sense buffer for task with sense\n",
			cmd->se_tfo->get_task_tag(cmd), cmd);
		goto out;
	}

	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	offset = cmd->se_tfo->set_fabric_sense_len(cmd, TRANSPORT_SENSE_BUFFER);

	memcpy(&buffer[offset], sense_buffer, TRANSPORT_SENSE_BUFFER);
	/* Automatically padded */
	cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset;

	pr_debug("HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x and sense\n",
		dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status);
	return 0;

out:
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
	return -1;
}

/*
 * Process all commands up to the last received ORDERED task attribute which
 * requires another blocking boundary
 */
static void target_restart_delayed_cmds(struct se_device *dev)
{
	for (;;) {
		struct se_cmd *cmd;

		spin_lock(&dev->delayed_cmd_lock);
		if (list_empty(&dev->delayed_cmd_list)) {
			spin_unlock(&dev->delayed_cmd_lock);
			break;
		}

		cmd = list_entry(dev->delayed_cmd_list.next,
				 struct se_cmd, se_delayed_node);
		list_del(&cmd->se_delayed_node);
		spin_unlock(&dev->delayed_cmd_lock);

		__target_execute_cmd(cmd);

		if (cmd->sam_task_attr == MSG_ORDERED_TAG)
			break;
	}
}
/* /*
...@@ -3052,8 +1906,6 @@ static int transport_generic_cmd_sequencer( ...@@ -3052,8 +1906,6 @@ static int transport_generic_cmd_sequencer(
static void transport_complete_task_attr(struct se_cmd *cmd) static void transport_complete_task_attr(struct se_cmd *cmd)
{ {
struct se_device *dev = cmd->se_dev; struct se_device *dev = cmd->se_dev;
struct se_cmd *cmd_p, *cmd_tmp;
int new_active_tasks = 0;
if (cmd->sam_task_attr == MSG_SIMPLE_TAG) { if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
atomic_dec(&dev->simple_cmds); atomic_dec(&dev->simple_cmds);
...@@ -3075,38 +1927,8 @@ static void transport_complete_task_attr(struct se_cmd *cmd) ...@@ -3075,38 +1927,8 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:" pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
" %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id); " %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id);
} }
/*
* Process all commands up to the last received
* ORDERED task attribute which requires another blocking
* boundary
*/
spin_lock(&dev->delayed_cmd_lock);
list_for_each_entry_safe(cmd_p, cmd_tmp,
&dev->delayed_cmd_list, se_delayed_node) {
list_del(&cmd_p->se_delayed_node); target_restart_delayed_cmds(dev);
spin_unlock(&dev->delayed_cmd_lock);
pr_debug("Calling add_tasks() for"
" cmd_p: 0x%02x Task Attr: 0x%02x"
" Dormant -> Active, se_ordered_id: %u\n",
cmd_p->t_task_cdb[0],
cmd_p->sam_task_attr, cmd_p->se_ordered_id);
target_add_to_execute_list(cmd_p);
new_active_tasks++;
spin_lock(&dev->delayed_cmd_lock);
if (cmd_p->sam_task_attr == MSG_ORDERED_TAG)
break;
}
spin_unlock(&dev->delayed_cmd_lock);
/*
* If new tasks have become active, wake up the transport thread
* to do the processing of the Active tasks.
*/
if (new_active_tasks != 0)
wake_up_interruptible(&dev->dev_queue_obj.thread_wq);
} }
static void transport_complete_qf(struct se_cmd *cmd) static void transport_complete_qf(struct se_cmd *cmd)
...@@ -3365,31 +2187,27 @@ int transport_generic_map_mem_to_cmd( ...@@ -3365,31 +2187,27 @@ int transport_generic_map_mem_to_cmd(
if (!sgl || !sgl_count) if (!sgl || !sgl_count)
return 0; return 0;
if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) || /*
(cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) { * Reject SCSI data overflow with map_mem_to_cmd() as incoming
/* * scatterlists already have been set to follow what the fabric
* Reject SCSI data overflow with map_mem_to_cmd() as incoming * passes for the original expected data transfer length.
* scatterlists already have been set to follow what the fabric */
* passes for the original expected data transfer length. if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
*/ pr_warn("Rejecting SCSI DATA overflow for fabric using"
if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
pr_warn("Rejecting SCSI DATA overflow for fabric using" cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
" SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n"); cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; return -EINVAL;
cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; }
return -EINVAL;
}
cmd->t_data_sg = sgl; cmd->t_data_sg = sgl;
cmd->t_data_nents = sgl_count; cmd->t_data_nents = sgl_count;
if (sgl_bidi && sgl_bidi_count) { if (sgl_bidi && sgl_bidi_count) {
cmd->t_bidi_data_sg = sgl_bidi; cmd->t_bidi_data_sg = sgl_bidi;
cmd->t_bidi_data_nents = sgl_bidi_count; cmd->t_bidi_data_nents = sgl_bidi_count;
}
cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
} }
cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
return 0; return 0;
} }
EXPORT_SYMBOL(transport_generic_map_mem_to_cmd); EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);
...@@ -3461,7 +2279,7 @@ transport_generic_get_mem(struct se_cmd *cmd) ...@@ -3461,7 +2279,7 @@ transport_generic_get_mem(struct se_cmd *cmd)
cmd->t_data_nents = nents; cmd->t_data_nents = nents;
sg_init_table(cmd->t_data_sg, nents); sg_init_table(cmd->t_data_sg, nents);
zero_flag = cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB ? 0 : __GFP_ZERO; zero_flag = cmd->se_cmd_flags & SCF_SCSI_DATA_CDB ? 0 : __GFP_ZERO;
while (length) { while (length) {
u32 page_len = min_t(u32, length, PAGE_SIZE); u32 page_len = min_t(u32, length, PAGE_SIZE);
...@@ -3492,7 +2310,6 @@ transport_generic_get_mem(struct se_cmd *cmd) ...@@ -3492,7 +2310,6 @@ transport_generic_get_mem(struct se_cmd *cmd)
*/ */
int transport_generic_new_cmd(struct se_cmd *cmd) int transport_generic_new_cmd(struct se_cmd *cmd)
{ {
struct se_device *dev = cmd->se_dev;
int ret = 0; int ret = 0;
/* /*
...@@ -3508,8 +2325,7 @@ int transport_generic_new_cmd(struct se_cmd *cmd) ...@@ -3508,8 +2325,7 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
} }
/* Workaround for handling zero-length control CDBs */ /* Workaround for handling zero-length control CDBs */
if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) && if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && !cmd->data_length) {
!cmd->data_length) {
spin_lock_irq(&cmd->t_state_lock); spin_lock_irq(&cmd->t_state_lock);
cmd->t_state = TRANSPORT_COMPLETE; cmd->t_state = TRANSPORT_COMPLETE;
cmd->transport_state |= CMD_T_ACTIVE; cmd->transport_state |= CMD_T_ACTIVE;
...@@ -3527,52 +2343,45 @@ int transport_generic_new_cmd(struct se_cmd *cmd) ...@@ -3527,52 +2343,45 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
return 0; return 0;
} }
if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
struct se_dev_attrib *attr = &dev->se_sub_dev->se_dev_attrib;
if (transport_cmd_get_valid_sectors(cmd) < 0)
return -EINVAL;
BUG_ON(cmd->data_length % attr->block_size);
BUG_ON(DIV_ROUND_UP(cmd->data_length, attr->block_size) >
attr->hw_max_sectors);
}
atomic_inc(&cmd->t_fe_count); atomic_inc(&cmd->t_fe_count);
/* /*
* For WRITEs, let the fabric know its buffer is ready. * If this command is not a write we can execute it right here,
* * for write buffers we need to notify the fabric driver first
* The command will be added to the execution queue after its write * and let it call back once the write buffers are ready.
* data has arrived.
*/ */
if (cmd->data_direction == DMA_TO_DEVICE) { target_add_to_state_list(cmd);
target_add_to_state_list(cmd); if (cmd->data_direction != DMA_TO_DEVICE) {
return transport_generic_write_pending(cmd); target_execute_cmd(cmd);
return 0;
} }
/*
* Everything else but a WRITE, add the command to the execution queue. spin_lock_irq(&cmd->t_state_lock);
*/ cmd->t_state = TRANSPORT_WRITE_PENDING;
transport_execute_tasks(cmd); spin_unlock_irq(&cmd->t_state_lock);
return 0;
transport_cmd_check_stop(cmd, false);
ret = cmd->se_tfo->write_pending(cmd);
if (ret == -EAGAIN || ret == -ENOMEM)
goto queue_full;
if (ret < 0)
return ret;
return 1;
out_fail: out_fail:
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
return -EINVAL; return -EINVAL;
queue_full:
pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
transport_handle_queue_full(cmd, cmd->se_dev);
return 0;
} }
EXPORT_SYMBOL(transport_generic_new_cmd); EXPORT_SYMBOL(transport_generic_new_cmd);
/* transport_generic_process_write():
*
*
*/
void transport_generic_process_write(struct se_cmd *cmd)
{
transport_execute_tasks(cmd);
}
EXPORT_SYMBOL(transport_generic_process_write);
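/*
 * The transport_generic_process_write() entry point above is removed by this
 * series; a fabric now calls target_execute_cmd() directly once all WRITE
 * payload has arrived (compare the tcm_fc and usb-gadget hunks further down).
 * A minimal sketch of that flow, using a purely hypothetical "example" fabric
 * command structure containing a work_struct and the embedded se_cmd:
 */
static void example_fabric_write_data_done(struct work_struct *work)
{
	struct example_fabric_cmd *xcmd =
		container_of(work, struct example_fabric_cmd, work);

	/* All WRITE data is now mapped in xcmd->se_cmd.t_data_sg. */
	target_execute_cmd(&xcmd->se_cmd);
}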
static void transport_write_pending_qf(struct se_cmd *cmd) static void transport_write_pending_qf(struct se_cmd *cmd)
{ {
int ret; int ret;
...@@ -3585,43 +2394,6 @@ static void transport_write_pending_qf(struct se_cmd *cmd) ...@@ -3585,43 +2394,6 @@ static void transport_write_pending_qf(struct se_cmd *cmd)
} }
} }
static int transport_generic_write_pending(struct se_cmd *cmd)
{
unsigned long flags;
int ret;
spin_lock_irqsave(&cmd->t_state_lock, flags);
cmd->t_state = TRANSPORT_WRITE_PENDING;
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
/*
* Clear the se_cmd for WRITE_PENDING status in order to set
* CMD_T_ACTIVE so that transport_generic_handle_data can be called
* from HW target mode interrupt code. This is safe to be called
* with transport_off=1 before the cmd->se_tfo->write_pending
* because the se_cmd->se_lun pointer is not being cleared.
*/
transport_cmd_check_stop(cmd, 1, 0);
/*
* Call the fabric write_pending function here to let the
* frontend know that WRITE buffers are ready.
*/
ret = cmd->se_tfo->write_pending(cmd);
if (ret == -EAGAIN || ret == -ENOMEM)
goto queue_full;
else if (ret < 0)
return ret;
return 1;
queue_full:
pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
transport_handle_queue_full(cmd, cmd->se_dev);
return 0;
}
void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
{ {
if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) { if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
...@@ -3648,10 +2420,11 @@ EXPORT_SYMBOL(transport_generic_free_cmd); ...@@ -3648,10 +2420,11 @@ EXPORT_SYMBOL(transport_generic_free_cmd);
* @se_cmd: command descriptor to add * @se_cmd: command descriptor to add
* @ack_kref: Signal that fabric will perform an ack target_put_sess_cmd() * @ack_kref: Signal that fabric will perform an ack target_put_sess_cmd()
*/ */
void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd, static int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
bool ack_kref) bool ack_kref)
{ {
unsigned long flags; unsigned long flags;
int ret = 0;
kref_init(&se_cmd->cmd_kref); kref_init(&se_cmd->cmd_kref);
/* /*
...@@ -3665,11 +2438,17 @@ void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd, ...@@ -3665,11 +2438,17 @@ void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
} }
spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
if (se_sess->sess_tearing_down) {
ret = -ESHUTDOWN;
goto out;
}
list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list); list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
se_cmd->check_release = 1; se_cmd->check_release = 1;
out:
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
return ret;
} }
EXPORT_SYMBOL(target_get_sess_cmd);
static void target_release_cmd_kref(struct kref *kref) static void target_release_cmd_kref(struct kref *kref)
{ {
...@@ -3704,28 +2483,27 @@ int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd) ...@@ -3704,28 +2483,27 @@ int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
} }
EXPORT_SYMBOL(target_put_sess_cmd); EXPORT_SYMBOL(target_put_sess_cmd);
/* target_splice_sess_cmd_list - Split active cmds into sess_wait_list /* target_sess_cmd_list_set_waiting - Flag all commands in
* @se_sess: session to split * sess_cmd_list to complete cmd_wait_comp. Set
* sess_tearing_down so no more commands are queued.
* @se_sess: session to flag
*/ */
void target_splice_sess_cmd_list(struct se_session *se_sess) void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
{ {
struct se_cmd *se_cmd; struct se_cmd *se_cmd;
unsigned long flags; unsigned long flags;
WARN_ON(!list_empty(&se_sess->sess_wait_list));
INIT_LIST_HEAD(&se_sess->sess_wait_list);
spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
se_sess->sess_tearing_down = 1;
list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list); WARN_ON(se_sess->sess_tearing_down);
se_sess->sess_tearing_down = 1;
list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list)
se_cmd->cmd_wait_set = 1; se_cmd->cmd_wait_set = 1;
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
} }
EXPORT_SYMBOL(target_splice_sess_cmd_list); EXPORT_SYMBOL(target_sess_cmd_list_set_waiting);
/* target_wait_for_sess_cmds - Wait for outstanding descriptors /* target_wait_for_sess_cmds - Wait for outstanding descriptors
* @se_sess: session to wait for active I/O * @se_sess: session to wait for active I/O
...@@ -3739,7 +2517,7 @@ void target_wait_for_sess_cmds( ...@@ -3739,7 +2517,7 @@ void target_wait_for_sess_cmds(
bool rc = false; bool rc = false;
list_for_each_entry_safe(se_cmd, tmp_cmd, list_for_each_entry_safe(se_cmd, tmp_cmd,
&se_sess->sess_wait_list, se_cmd_list) { &se_sess->sess_cmd_list, se_cmd_list) {
list_del(&se_cmd->se_cmd_list); list_del(&se_cmd->se_cmd_list);
pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:" pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
...@@ -3791,26 +2569,20 @@ static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun) ...@@ -3791,26 +2569,20 @@ static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
pr_debug("ConfigFS ITT[0x%08x] - CMD_T_STOP, skipping\n", pr_debug("ConfigFS ITT[0x%08x] - CMD_T_STOP, skipping\n",
cmd->se_tfo->get_task_tag(cmd)); cmd->se_tfo->get_task_tag(cmd));
spin_unlock_irqrestore(&cmd->t_state_lock, flags); spin_unlock_irqrestore(&cmd->t_state_lock, flags);
transport_cmd_check_stop(cmd, 1, 0); transport_cmd_check_stop(cmd, false);
return -EPERM; return -EPERM;
} }
cmd->transport_state |= CMD_T_LUN_FE_STOP; cmd->transport_state |= CMD_T_LUN_FE_STOP;
spin_unlock_irqrestore(&cmd->t_state_lock, flags); spin_unlock_irqrestore(&cmd->t_state_lock, flags);
wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);
// XXX: audit task_flags checks. // XXX: audit task_flags checks.
spin_lock_irqsave(&cmd->t_state_lock, flags); spin_lock_irqsave(&cmd->t_state_lock, flags);
if ((cmd->transport_state & CMD_T_BUSY) && if ((cmd->transport_state & CMD_T_BUSY) &&
(cmd->transport_state & CMD_T_SENT)) { (cmd->transport_state & CMD_T_SENT)) {
if (!target_stop_cmd(cmd, &flags)) if (!target_stop_cmd(cmd, &flags))
ret++; ret++;
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
} else {
spin_unlock_irqrestore(&cmd->t_state_lock,
flags);
target_remove_from_execute_list(cmd);
} }
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
pr_debug("ConfigFS: cmd: %p stop tasks ret:" pr_debug("ConfigFS: cmd: %p stop tasks ret:"
" %d\n", cmd, ret); " %d\n", cmd, ret);
...@@ -3821,7 +2593,6 @@ static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun) ...@@ -3821,7 +2593,6 @@ static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n", pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
cmd->se_tfo->get_task_tag(cmd)); cmd->se_tfo->get_task_tag(cmd));
} }
transport_remove_cmd_from_queue(cmd);
return 0; return 0;
} }
...@@ -3840,11 +2611,6 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun) ...@@ -3840,11 +2611,6 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun)
struct se_cmd, se_lun_node); struct se_cmd, se_lun_node);
list_del_init(&cmd->se_lun_node); list_del_init(&cmd->se_lun_node);
/*
* This will notify iscsi_target_transport.c:
* transport_cmd_check_stop() that a LUN shutdown is in
* progress for the iscsi_cmd_t.
*/
spin_lock(&cmd->t_state_lock); spin_lock(&cmd->t_state_lock);
pr_debug("SE_LUN[%d] - Setting cmd->transport" pr_debug("SE_LUN[%d] - Setting cmd->transport"
"_lun_stop for ITT: 0x%08x\n", "_lun_stop for ITT: 0x%08x\n",
...@@ -3911,7 +2677,7 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun) ...@@ -3911,7 +2677,7 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun)
spin_unlock_irqrestore(&cmd->t_state_lock, spin_unlock_irqrestore(&cmd->t_state_lock,
cmd_flags); cmd_flags);
transport_cmd_check_stop(cmd, 1, 0); transport_cmd_check_stop(cmd, false);
complete(&cmd->transport_lun_fe_stop_comp); complete(&cmd->transport_lun_fe_stop_comp);
spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
continue; continue;
...@@ -3967,10 +2733,7 @@ bool transport_wait_for_tasks(struct se_cmd *cmd) ...@@ -3967,10 +2733,7 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
spin_unlock_irqrestore(&cmd->t_state_lock, flags); spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return false; return false;
} }
/*
* Only perform a possible wait_for_tasks if SCF_SUPPORTED_SAM_OPCODE
* has been set in transport_set_supported_SAM_opcode().
*/
if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
spin_unlock_irqrestore(&cmd->t_state_lock, flags); spin_unlock_irqrestore(&cmd->t_state_lock, flags);
...@@ -4028,8 +2791,6 @@ bool transport_wait_for_tasks(struct se_cmd *cmd) ...@@ -4028,8 +2791,6 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
spin_unlock_irqrestore(&cmd->t_state_lock, flags); spin_unlock_irqrestore(&cmd->t_state_lock, flags);
wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);
wait_for_completion(&cmd->t_transport_stop_comp); wait_for_completion(&cmd->t_transport_stop_comp);
spin_lock_irqsave(&cmd->t_state_lock, flags); spin_lock_irqsave(&cmd->t_state_lock, flags);
...@@ -4212,6 +2973,15 @@ int transport_send_check_condition_and_sense( ...@@ -4212,6 +2973,15 @@ int transport_send_check_condition_and_sense(
/* WRITE PROTECTED */ /* WRITE PROTECTED */
buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27; buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27;
break; break;
case TCM_ADDRESS_OUT_OF_RANGE:
/* CURRENT ERROR */
buffer[offset] = 0x70;
buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
/* ILLEGAL REQUEST */
buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
/* LOGICAL BLOCK ADDRESS OUT OF RANGE */
buffer[offset+SPC_ASC_KEY_OFFSET] = 0x21;
break;
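		/*
		 * The new TCM_ADDRESS_OUT_OF_RANGE reason above is what the
		 * added UNMAP/LBA range checks report.  As a hedged sketch
		 * (not the exact in-tree check), emulation code raises it
		 * roughly like:
		 *
		 *	if (lba + sectors > dev->transport->get_blocks(dev) + 1) {
		 *		cmd->scsi_sense_reason = TCM_ADDRESS_OUT_OF_RANGE;
		 *		return -EINVAL;
		 *	}
		 *
		 * which then reaches the initiator as ILLEGAL REQUEST with the
		 * LOGICAL BLOCK ADDRESS OUT OF RANGE additional sense code via
		 * this case.
		 */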
case TCM_CHECK_CONDITION_UNIT_ATTENTION: case TCM_CHECK_CONDITION_UNIT_ATTENTION:
/* CURRENT ERROR */ /* CURRENT ERROR */
buffer[offset] = 0x70; buffer[offset] = 0x70;
...@@ -4312,8 +3082,9 @@ void transport_send_task_abort(struct se_cmd *cmd) ...@@ -4312,8 +3082,9 @@ void transport_send_task_abort(struct se_cmd *cmd)
cmd->se_tfo->queue_status(cmd); cmd->se_tfo->queue_status(cmd);
} }
static int transport_generic_do_tmr(struct se_cmd *cmd) static void target_tmr_work(struct work_struct *work)
{ {
struct se_cmd *cmd = container_of(work, struct se_cmd, work);
struct se_device *dev = cmd->se_dev; struct se_device *dev = cmd->se_dev;
struct se_tmr_req *tmr = cmd->se_tmr_req; struct se_tmr_req *tmr = cmd->se_tmr_req;
int ret; int ret;
...@@ -4349,80 +3120,13 @@ static int transport_generic_do_tmr(struct se_cmd *cmd) ...@@ -4349,80 +3120,13 @@ static int transport_generic_do_tmr(struct se_cmd *cmd)
cmd->se_tfo->queue_tm_rsp(cmd); cmd->se_tfo->queue_tm_rsp(cmd);
transport_cmd_check_stop_to_fabric(cmd); transport_cmd_check_stop_to_fabric(cmd);
return 0;
} }
/* transport_processing_thread(): int transport_generic_handle_tmr(
* struct se_cmd *cmd)
*
*/
static int transport_processing_thread(void *param)
{ {
int ret; INIT_WORK(&cmd->work, target_tmr_work);
struct se_cmd *cmd; queue_work(cmd->se_dev->tmr_wq, &cmd->work);
struct se_device *dev = param;
while (!kthread_should_stop()) {
ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq,
atomic_read(&dev->dev_queue_obj.queue_cnt) ||
kthread_should_stop());
if (ret < 0)
goto out;
get_cmd:
cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj);
if (!cmd)
continue;
switch (cmd->t_state) {
case TRANSPORT_NEW_CMD:
BUG();
break;
case TRANSPORT_NEW_CMD_MAP:
if (!cmd->se_tfo->new_cmd_map) {
pr_err("cmd->se_tfo->new_cmd_map is"
" NULL for TRANSPORT_NEW_CMD_MAP\n");
BUG();
}
ret = cmd->se_tfo->new_cmd_map(cmd);
if (ret < 0) {
transport_generic_request_failure(cmd);
break;
}
ret = transport_generic_new_cmd(cmd);
if (ret < 0) {
transport_generic_request_failure(cmd);
break;
}
break;
case TRANSPORT_PROCESS_WRITE:
transport_generic_process_write(cmd);
break;
case TRANSPORT_PROCESS_TMR:
transport_generic_do_tmr(cmd);
break;
case TRANSPORT_COMPLETE_QF_WP:
transport_write_pending_qf(cmd);
break;
case TRANSPORT_COMPLETE_QF_OK:
transport_complete_qf(cmd);
break;
default:
pr_err("Unknown t_state: %d for ITT: 0x%08x "
"i_state: %d on SE LUN: %u\n",
cmd->t_state,
cmd->se_tfo->get_task_tag(cmd),
cmd->se_tfo->get_cmd_state(cmd),
cmd->se_lun->unpacked_lun);
BUG();
}
goto get_cmd;
}
out:
WARN_ON(!list_empty(&dev->state_list));
WARN_ON(!list_empty(&dev->dev_queue_obj.qobj_list));
dev->process_thread = NULL;
return 0; return 0;
} }
EXPORT_SYMBOL(transport_generic_handle_tmr);
...@@ -215,7 +215,7 @@ int ft_write_pending(struct se_cmd *se_cmd) ...@@ -215,7 +215,7 @@ int ft_write_pending(struct se_cmd *se_cmd)
*/ */
if ((ep->xid <= lport->lro_xid) && if ((ep->xid <= lport->lro_xid) &&
(fh->fh_r_ctl == FC_RCTL_DD_DATA_DESC)) { (fh->fh_r_ctl == FC_RCTL_DD_DATA_DESC)) {
if ((se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) && if ((se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) &&
lport->tt.ddp_target(lport, ep->xid, lport->tt.ddp_target(lport, ep->xid,
se_cmd->t_data_sg, se_cmd->t_data_sg,
se_cmd->t_data_nents)) se_cmd->t_data_nents))
...@@ -543,9 +543,11 @@ static void ft_send_work(struct work_struct *work) ...@@ -543,9 +543,11 @@ static void ft_send_work(struct work_struct *work)
* Use a single se_cmd->cmd_kref as we expect to release se_cmd * Use a single se_cmd->cmd_kref as we expect to release se_cmd
* directly from ft_check_stop_free callback in response path. * directly from ft_check_stop_free callback in response path.
*/ */
target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess, fcp->fc_cdb, if (target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess, fcp->fc_cdb,
&cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun), &cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun),
ntohl(fcp->fc_dl), task_attr, data_dir, 0); ntohl(fcp->fc_dl), task_attr, data_dir, 0))
goto err;
pr_debug("r_ctl %x alloc target_submit_cmd\n", fh->fh_r_ctl); pr_debug("r_ctl %x alloc target_submit_cmd\n", fh->fh_r_ctl);
return; return;
......
...@@ -183,6 +183,13 @@ int ft_queue_data_in(struct se_cmd *se_cmd) ...@@ -183,6 +183,13 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
return ft_queue_status(se_cmd); return ft_queue_status(se_cmd);
} }
static void ft_execute_work(struct work_struct *work)
{
struct ft_cmd *cmd = container_of(work, struct ft_cmd, work);
target_execute_cmd(&cmd->se_cmd);
}
/* /*
* Receive write data frame. * Receive write data frame.
*/ */
...@@ -307,8 +314,10 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp) ...@@ -307,8 +314,10 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
cmd->write_data_len += tlen; cmd->write_data_len += tlen;
} }
last_frame: last_frame:
if (cmd->write_data_len == se_cmd->data_length) if (cmd->write_data_len == se_cmd->data_length) {
transport_generic_handle_data(se_cmd); INIT_WORK(&cmd->work, ft_execute_work);
queue_work(cmd->sess->tport->tpg->workqueue, &cmd->work);
}
drop: drop:
fc_frame_free(fp); fc_frame_free(fp);
} }
......
...@@ -294,7 +294,7 @@ static int bot_send_write_request(struct usbg_cmd *cmd) ...@@ -294,7 +294,7 @@ static int bot_send_write_request(struct usbg_cmd *cmd)
pr_err("%s(%d)\n", __func__, __LINE__); pr_err("%s(%d)\n", __func__, __LINE__);
wait_for_completion(&cmd->write_complete); wait_for_completion(&cmd->write_complete);
transport_generic_process_write(se_cmd); target_execute_cmd(se_cmd);
cleanup: cleanup:
return ret; return ret;
} }
...@@ -725,7 +725,7 @@ static int uasp_send_write_request(struct usbg_cmd *cmd) ...@@ -725,7 +725,7 @@ static int uasp_send_write_request(struct usbg_cmd *cmd)
} }
wait_for_completion(&cmd->write_complete); wait_for_completion(&cmd->write_complete);
transport_generic_process_write(se_cmd); target_execute_cmd(se_cmd);
cleanup: cleanup:
return ret; return ret;
} }
...@@ -1065,16 +1065,20 @@ static void usbg_cmd_work(struct work_struct *work) ...@@ -1065,16 +1065,20 @@ static void usbg_cmd_work(struct work_struct *work)
tv_nexus->tvn_se_sess->se_tpg->se_tpg_tfo, tv_nexus->tvn_se_sess->se_tpg->se_tpg_tfo,
tv_nexus->tvn_se_sess, cmd->data_len, DMA_NONE, tv_nexus->tvn_se_sess, cmd->data_len, DMA_NONE,
cmd->prio_attr, cmd->sense_iu.sense); cmd->prio_attr, cmd->sense_iu.sense);
goto out;
transport_send_check_condition_and_sense(se_cmd,
TCM_UNSUPPORTED_SCSI_OPCODE, 1);
usbg_cleanup_cmd(cmd);
return;
} }
target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess, if (target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess,
cmd->cmd_buf, cmd->sense_iu.sense, cmd->unpacked_lun, cmd->cmd_buf, cmd->sense_iu.sense, cmd->unpacked_lun,
0, cmd->prio_attr, dir, TARGET_SCF_UNKNOWN_SIZE); 0, cmd->prio_attr, dir, TARGET_SCF_UNKNOWN_SIZE) < 0)
goto out;
return;
out:
transport_send_check_condition_and_sense(se_cmd,
TCM_UNSUPPORTED_SCSI_OPCODE, 1);
usbg_cleanup_cmd(cmd);
} }
static int usbg_submit_command(struct f_uas *fu, static int usbg_submit_command(struct f_uas *fu,
...@@ -1177,16 +1181,20 @@ static void bot_cmd_work(struct work_struct *work) ...@@ -1177,16 +1181,20 @@ static void bot_cmd_work(struct work_struct *work)
tv_nexus->tvn_se_sess->se_tpg->se_tpg_tfo, tv_nexus->tvn_se_sess->se_tpg->se_tpg_tfo,
tv_nexus->tvn_se_sess, cmd->data_len, DMA_NONE, tv_nexus->tvn_se_sess, cmd->data_len, DMA_NONE,
cmd->prio_attr, cmd->sense_iu.sense); cmd->prio_attr, cmd->sense_iu.sense);
goto out;
transport_send_check_condition_and_sense(se_cmd,
TCM_UNSUPPORTED_SCSI_OPCODE, 1);
usbg_cleanup_cmd(cmd);
return;
} }
target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess, if (target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess,
cmd->cmd_buf, cmd->sense_iu.sense, cmd->unpacked_lun, cmd->cmd_buf, cmd->sense_iu.sense, cmd->unpacked_lun,
cmd->data_len, cmd->prio_attr, dir, 0); cmd->data_len, cmd->prio_attr, dir, 0) < 0)
goto out;
return;
out:
transport_send_check_condition_and_sense(se_cmd,
TCM_UNSUPPORTED_SCSI_OPCODE, 1);
usbg_cleanup_cmd(cmd);
} }
static int bot_submit_command(struct f_uas *fu, static int bot_submit_command(struct f_uas *fu,
...@@ -1400,19 +1408,6 @@ static u32 usbg_tpg_get_inst_index(struct se_portal_group *se_tpg) ...@@ -1400,19 +1408,6 @@ static u32 usbg_tpg_get_inst_index(struct se_portal_group *se_tpg)
return 1; return 1;
} }
static int usbg_new_cmd(struct se_cmd *se_cmd)
{
struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
se_cmd);
int ret;
ret = target_setup_cmd_from_cdb(se_cmd, cmd->cmd_buf);
if (ret)
return ret;
return transport_generic_map_mem_to_cmd(se_cmd, NULL, 0, NULL, 0);
}
static void usbg_cmd_release(struct kref *ref) static void usbg_cmd_release(struct kref *ref)
{ {
struct usbg_cmd *cmd = container_of(ref, struct usbg_cmd, struct usbg_cmd *cmd = container_of(ref, struct usbg_cmd,
...@@ -1902,7 +1897,6 @@ static struct target_core_fabric_ops usbg_ops = { ...@@ -1902,7 +1897,6 @@ static struct target_core_fabric_ops usbg_ops = {
.tpg_alloc_fabric_acl = usbg_alloc_fabric_acl, .tpg_alloc_fabric_acl = usbg_alloc_fabric_acl,
.tpg_release_fabric_acl = usbg_release_fabric_acl, .tpg_release_fabric_acl = usbg_release_fabric_acl,
.tpg_get_inst_index = usbg_tpg_get_inst_index, .tpg_get_inst_index = usbg_tpg_get_inst_index,
.new_cmd_map = usbg_new_cmd,
.release_cmd = usbg_release_cmd, .release_cmd = usbg_release_cmd,
.shutdown_session = usbg_shutdown_session, .shutdown_session = usbg_shutdown_session,
.close_session = usbg_close_session, .close_session = usbg_close_session,
......
...@@ -24,10 +24,8 @@ struct se_subsystem_api { ...@@ -24,10 +24,8 @@ struct se_subsystem_api {
struct se_subsystem_dev *, void *); struct se_subsystem_dev *, void *);
void (*free_device)(void *); void (*free_device)(void *);
int (*transport_complete)(struct se_cmd *cmd, struct scatterlist *); int (*transport_complete)(struct se_cmd *cmd, struct scatterlist *);
int (*execute_cmd)(struct se_cmd *, struct scatterlist *, u32,
enum dma_data_direction); int (*parse_cdb)(struct se_cmd *cmd);
int (*do_discard)(struct se_device *, sector_t, u32);
void (*do_sync_cache)(struct se_cmd *);
ssize_t (*check_configfs_dev_params)(struct se_hba *, ssize_t (*check_configfs_dev_params)(struct se_hba *,
struct se_subsystem_dev *); struct se_subsystem_dev *);
ssize_t (*set_configfs_dev_params)(struct se_hba *, ssize_t (*set_configfs_dev_params)(struct se_hba *,
...@@ -40,6 +38,13 @@ struct se_subsystem_api { ...@@ -40,6 +38,13 @@ struct se_subsystem_api {
unsigned char *(*get_sense_buffer)(struct se_cmd *); unsigned char *(*get_sense_buffer)(struct se_cmd *);
}; };
struct spc_ops {
int (*execute_rw)(struct se_cmd *cmd);
int (*execute_sync_cache)(struct se_cmd *cmd);
int (*execute_write_same)(struct se_cmd *cmd);
int (*execute_unmap)(struct se_cmd *cmd);
};
int transport_subsystem_register(struct se_subsystem_api *); int transport_subsystem_register(struct se_subsystem_api *);
void transport_subsystem_release(struct se_subsystem_api *); void transport_subsystem_release(struct se_subsystem_api *);
...@@ -49,6 +54,10 @@ struct se_device *transport_add_device_to_core_hba(struct se_hba *, ...@@ -49,6 +54,10 @@ struct se_device *transport_add_device_to_core_hba(struct se_hba *,
void target_complete_cmd(struct se_cmd *, u8); void target_complete_cmd(struct se_cmd *, u8);
int sbc_parse_cdb(struct se_cmd *cmd, struct spc_ops *ops);
int spc_parse_cdb(struct se_cmd *cmd, unsigned int *size);
int spc_get_write_same_sectors(struct se_cmd *cmd);
void transport_set_vpd_proto_id(struct t10_vpd *, unsigned char *); void transport_set_vpd_proto_id(struct t10_vpd *, unsigned char *);
int transport_set_vpd_assoc(struct t10_vpd *, unsigned char *); int transport_set_vpd_assoc(struct t10_vpd *, unsigned char *);
int transport_set_vpd_ident_type(struct t10_vpd *, unsigned char *); int transport_set_vpd_ident_type(struct t10_vpd *, unsigned char *);
......
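/*
 * A minimal sketch (not code from this series) of how a backend driver could
 * use the parse_cdb/spc_ops split declared above: it fills in a struct
 * spc_ops with its I/O handlers and lets sbc_parse_cdb() do the CDB decode.
 * The example_* names and the unconditional GOOD completion are illustrative
 * assumptions only.
 */
static int example_execute_rw(struct se_cmd *cmd)
{
	/* Submit cmd->t_data_sg to the backing store here, then complete. */
	target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
}

static struct spc_ops example_spc_ops = {
	.execute_rw	= example_execute_rw,
};

static int example_parse_cdb(struct se_cmd *cmd)
{
	return sbc_parse_cdb(cmd, &example_spc_ops);
}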
...@@ -145,12 +145,9 @@ enum transport_state_table { ...@@ -145,12 +145,9 @@ enum transport_state_table {
TRANSPORT_NO_STATE = 0, TRANSPORT_NO_STATE = 0,
TRANSPORT_NEW_CMD = 1, TRANSPORT_NEW_CMD = 1,
TRANSPORT_WRITE_PENDING = 3, TRANSPORT_WRITE_PENDING = 3,
TRANSPORT_PROCESS_WRITE = 4,
TRANSPORT_PROCESSING = 5, TRANSPORT_PROCESSING = 5,
TRANSPORT_COMPLETE = 6, TRANSPORT_COMPLETE = 6,
TRANSPORT_PROCESS_TMR = 9,
TRANSPORT_ISTATE_PROCESSING = 11, TRANSPORT_ISTATE_PROCESSING = 11,
TRANSPORT_NEW_CMD_MAP = 16,
TRANSPORT_COMPLETE_QF_WP = 18, TRANSPORT_COMPLETE_QF_WP = 18,
TRANSPORT_COMPLETE_QF_OK = 19, TRANSPORT_COMPLETE_QF_OK = 19,
}; };
...@@ -160,25 +157,20 @@ enum se_cmd_flags_table { ...@@ -160,25 +157,20 @@ enum se_cmd_flags_table {
SCF_SUPPORTED_SAM_OPCODE = 0x00000001, SCF_SUPPORTED_SAM_OPCODE = 0x00000001,
SCF_TRANSPORT_TASK_SENSE = 0x00000002, SCF_TRANSPORT_TASK_SENSE = 0x00000002,
SCF_EMULATED_TASK_SENSE = 0x00000004, SCF_EMULATED_TASK_SENSE = 0x00000004,
SCF_SCSI_DATA_SG_IO_CDB = 0x00000008, SCF_SCSI_DATA_CDB = 0x00000008,
SCF_SCSI_CONTROL_SG_IO_CDB = 0x00000010, SCF_SCSI_TMR_CDB = 0x00000010,
SCF_SCSI_NON_DATA_CDB = 0x00000020, SCF_SCSI_CDB_EXCEPTION = 0x00000020,
SCF_SCSI_TMR_CDB = 0x00000040, SCF_SCSI_RESERVATION_CONFLICT = 0x00000040,
SCF_SCSI_CDB_EXCEPTION = 0x00000080, SCF_FUA = 0x00000080,
SCF_SCSI_RESERVATION_CONFLICT = 0x00000100, SCF_SE_LUN_CMD = 0x00000100,
SCF_FUA = 0x00000200, SCF_BIDI = 0x00000400,
SCF_SE_LUN_CMD = 0x00000800, SCF_SENT_CHECK_CONDITION = 0x00000800,
SCF_SE_ALLOW_EOO = 0x00001000, SCF_OVERFLOW_BIT = 0x00001000,
SCF_BIDI = 0x00002000, SCF_UNDERFLOW_BIT = 0x00002000,
SCF_SENT_CHECK_CONDITION = 0x00004000, SCF_SENT_DELAYED_TAS = 0x00004000,
SCF_OVERFLOW_BIT = 0x00008000, SCF_ALUA_NON_OPTIMIZED = 0x00008000,
SCF_UNDERFLOW_BIT = 0x00010000, SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00020000,
SCF_SENT_DELAYED_TAS = 0x00020000, SCF_ACK_KREF = 0x00040000,
SCF_ALUA_NON_OPTIMIZED = 0x00040000,
SCF_DELAYED_CMD_FROM_SAM_ATTR = 0x00080000,
SCF_UNUSED = 0x00100000,
SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00200000,
SCF_ACK_KREF = 0x00400000,
}; };
/* struct se_dev_entry->lun_flags and struct se_lun->lun_access */ /* struct se_dev_entry->lun_flags and struct se_lun->lun_access */
...@@ -220,6 +212,7 @@ enum tcm_sense_reason_table { ...@@ -220,6 +212,7 @@ enum tcm_sense_reason_table {
TCM_CHECK_CONDITION_UNIT_ATTENTION = 0x0e, TCM_CHECK_CONDITION_UNIT_ATTENTION = 0x0e,
TCM_CHECK_CONDITION_NOT_READY = 0x0f, TCM_CHECK_CONDITION_NOT_READY = 0x0f,
TCM_RESERVATION_CONFLICT = 0x10, TCM_RESERVATION_CONFLICT = 0x10,
TCM_ADDRESS_OUT_OF_RANGE = 0x11,
}; };
enum target_sc_flags_table { enum target_sc_flags_table {
...@@ -471,13 +464,6 @@ struct t10_reservation { ...@@ -471,13 +464,6 @@ struct t10_reservation {
struct t10_reservation_ops pr_ops; struct t10_reservation_ops pr_ops;
}; };
struct se_queue_obj {
atomic_t queue_cnt;
spinlock_t cmd_queue_lock;
struct list_head qobj_list;
wait_queue_head_t thread_wq;
};
struct se_tmr_req { struct se_tmr_req {
/* Task Management function to be performed */ /* Task Management function to be performed */
u8 function; u8 function;
...@@ -486,11 +472,8 @@ struct se_tmr_req { ...@@ -486,11 +472,8 @@ struct se_tmr_req {
int call_transport; int call_transport;
/* Reference to ITT that Task Mgmt should be performed */ /* Reference to ITT that Task Mgmt should be performed */
u32 ref_task_tag; u32 ref_task_tag;
/* 64-bit encoded SAM LUN from $FABRIC_MOD TMR header */
u64 ref_task_lun;
void *fabric_tmr_ptr; void *fabric_tmr_ptr;
struct se_cmd *task_cmd; struct se_cmd *task_cmd;
struct se_cmd *ref_cmd;
struct se_device *tmr_dev; struct se_device *tmr_dev;
struct se_lun *tmr_lun; struct se_lun *tmr_lun;
struct list_head tmr_list; struct list_head tmr_list;
...@@ -537,7 +520,6 @@ struct se_cmd { ...@@ -537,7 +520,6 @@ struct se_cmd {
/* Only used for internal passthrough and legacy TCM fabric modules */ /* Only used for internal passthrough and legacy TCM fabric modules */
struct se_session *se_sess; struct se_session *se_sess;
struct se_tmr_req *se_tmr_req; struct se_tmr_req *se_tmr_req;
struct list_head se_queue_node;
struct list_head se_cmd_list; struct list_head se_cmd_list;
struct completion cmd_wait_comp; struct completion cmd_wait_comp;
struct kref cmd_kref; struct kref cmd_kref;
...@@ -575,7 +557,6 @@ struct se_cmd { ...@@ -575,7 +557,6 @@ struct se_cmd {
struct scatterlist *t_bidi_data_sg; struct scatterlist *t_bidi_data_sg;
unsigned int t_bidi_data_nents; unsigned int t_bidi_data_nents;
struct list_head execute_list;
struct list_head state_list; struct list_head state_list;
bool state_active; bool state_active;
...@@ -633,7 +614,6 @@ struct se_session { ...@@ -633,7 +614,6 @@ struct se_session {
struct list_head sess_list; struct list_head sess_list;
struct list_head sess_acl_list; struct list_head sess_acl_list;
struct list_head sess_cmd_list; struct list_head sess_cmd_list;
struct list_head sess_wait_list;
spinlock_t sess_cmd_lock; spinlock_t sess_cmd_lock;
struct kref sess_kref; struct kref sess_kref;
}; };
...@@ -780,13 +760,11 @@ struct se_device { ...@@ -780,13 +760,11 @@ struct se_device {
/* Active commands on this virtual SE device */ /* Active commands on this virtual SE device */
atomic_t simple_cmds; atomic_t simple_cmds;
atomic_t dev_ordered_id; atomic_t dev_ordered_id;
atomic_t execute_tasks;
atomic_t dev_ordered_sync; atomic_t dev_ordered_sync;
atomic_t dev_qf_count; atomic_t dev_qf_count;
struct se_obj dev_obj; struct se_obj dev_obj;
struct se_obj dev_access_obj; struct se_obj dev_access_obj;
struct se_obj dev_export_obj; struct se_obj dev_export_obj;
struct se_queue_obj dev_queue_obj;
spinlock_t delayed_cmd_lock; spinlock_t delayed_cmd_lock;
spinlock_t execute_task_lock; spinlock_t execute_task_lock;
spinlock_t dev_reservation_lock; spinlock_t dev_reservation_lock;
...@@ -802,11 +780,9 @@ struct se_device { ...@@ -802,11 +780,9 @@ struct se_device {
struct t10_pr_registration *dev_pr_res_holder; struct t10_pr_registration *dev_pr_res_holder;
struct list_head dev_sep_list; struct list_head dev_sep_list;
struct list_head dev_tmr_list; struct list_head dev_tmr_list;
/* Pointer to descriptor for processing thread */ struct workqueue_struct *tmr_wq;
struct task_struct *process_thread;
struct work_struct qf_work_queue; struct work_struct qf_work_queue;
struct list_head delayed_cmd_list; struct list_head delayed_cmd_list;
struct list_head execute_list;
struct list_head state_list; struct list_head state_list;
struct list_head qf_cmd_list; struct list_head qf_cmd_list;
/* Pointer to associated SE HBA */ /* Pointer to associated SE HBA */
......
...@@ -32,12 +32,6 @@ struct target_core_fabric_ops { ...@@ -32,12 +32,6 @@ struct target_core_fabric_ops {
void (*tpg_release_fabric_acl)(struct se_portal_group *, void (*tpg_release_fabric_acl)(struct se_portal_group *,
struct se_node_acl *); struct se_node_acl *);
u32 (*tpg_get_inst_index)(struct se_portal_group *); u32 (*tpg_get_inst_index)(struct se_portal_group *);
/*
* Optional function pointer for TCM to perform command map
* from TCM processing thread context, for those struct se_cmd
* initially allocated in interrupt context.
*/
int (*new_cmd_map)(struct se_cmd *);
/* /*
* Optional to release struct se_cmd and fabric dependent allocated * Optional to release struct se_cmd and fabric dependent allocated
* I/O descriptor in transport_cmd_check_stop(). * I/O descriptor in transport_cmd_check_stop().
...@@ -108,20 +102,18 @@ void transport_init_se_cmd(struct se_cmd *, struct target_core_fabric_ops *, ...@@ -108,20 +102,18 @@ void transport_init_se_cmd(struct se_cmd *, struct target_core_fabric_ops *,
struct se_session *, u32, int, int, unsigned char *); struct se_session *, u32, int, int, unsigned char *);
int transport_lookup_cmd_lun(struct se_cmd *, u32); int transport_lookup_cmd_lun(struct se_cmd *, u32);
int target_setup_cmd_from_cdb(struct se_cmd *, unsigned char *); int target_setup_cmd_from_cdb(struct se_cmd *, unsigned char *);
void target_submit_cmd(struct se_cmd *, struct se_session *, unsigned char *, int target_submit_cmd(struct se_cmd *, struct se_session *, unsigned char *,
unsigned char *, u32, u32, int, int, int); unsigned char *, u32, u32, int, int, int);
int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess, int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
unsigned char *sense, u32 unpacked_lun, unsigned char *sense, u32 unpacked_lun,
void *fabric_tmr_ptr, unsigned char tm_type, void *fabric_tmr_ptr, unsigned char tm_type,
gfp_t, unsigned int, int); gfp_t, unsigned int, int);
int transport_handle_cdb_direct(struct se_cmd *); int transport_handle_cdb_direct(struct se_cmd *);
int transport_generic_handle_cdb_map(struct se_cmd *);
int transport_generic_handle_data(struct se_cmd *);
int transport_generic_map_mem_to_cmd(struct se_cmd *cmd, int transport_generic_map_mem_to_cmd(struct se_cmd *cmd,
struct scatterlist *, u32, struct scatterlist *, u32); struct scatterlist *, u32, struct scatterlist *, u32);
int transport_generic_new_cmd(struct se_cmd *); int transport_generic_new_cmd(struct se_cmd *);
void transport_generic_process_write(struct se_cmd *); void target_execute_cmd(struct se_cmd *cmd);
void transport_generic_free_cmd(struct se_cmd *, int); void transport_generic_free_cmd(struct se_cmd *, int);
...@@ -129,9 +121,8 @@ bool transport_wait_for_tasks(struct se_cmd *); ...@@ -129,9 +121,8 @@ bool transport_wait_for_tasks(struct se_cmd *);
int transport_check_aborted_status(struct se_cmd *, int); int transport_check_aborted_status(struct se_cmd *, int);
int transport_send_check_condition_and_sense(struct se_cmd *, u8, int); int transport_send_check_condition_and_sense(struct se_cmd *, u8, int);
void target_get_sess_cmd(struct se_session *, struct se_cmd *, bool);
int target_put_sess_cmd(struct se_session *, struct se_cmd *); int target_put_sess_cmd(struct se_session *, struct se_cmd *);
void target_splice_sess_cmd_list(struct se_session *); void target_sess_cmd_list_set_waiting(struct se_session *);
void target_wait_for_sess_cmds(struct se_session *, int); void target_wait_for_sess_cmds(struct se_session *, int);
int core_alua_check_nonop_delay(struct se_cmd *); int core_alua_check_nonop_delay(struct se_cmd *);
......