Commit 6aad3738 authored by Linus Torvalds

Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending

* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending:
  target: use ->execute_task for all CDB emulation
  target: remove SCF_EMULATE_CDB_ASYNC
  target: refactor transport_emulate_control_cdb
  target: pass the se_task to the CDB emulation callback
  target: split core_scsi3_emulate_pr
  target: split core_scsi2_emulate_crh
  target: Add generic active I/O shutdown logic
  target: add back error handling in transport_complete_task
  target/pscsi: blk_make_request() returns an ERR_PTR()
  target: Remove core TRANSPORT_FREE_CMD_INTR usage
  target: Make TFO->check_stop_free return free status
  iscsi-target: Fix non-immediate TMR handling
  iscsi-target: Add missing CMDSN_LOWER_THAN_EXP check in iscsit_handle_scsi_cmd
  target: Avoid double list_del for aborted se_tmr_req
  target: Minor cleanups to core_tmr_drain_tmr_list
  target: Fix wrong se_tmr being added to drain_tmr_list
  target: Fix incorrect se_cmd assignment in core_tmr_drain_tmr_list
  target: Check -ENOMEM to signal QUEUE_FULL from fabric callbacks
  tcm_loop: Add explicit read buffer memset for SCF_SCSI_CONTROL_SG_IO_CDB
  target: Fix compile warning w/ missing module.h include
......@@ -1079,7 +1079,9 @@ static int iscsit_handle_scsi_cmd(
*/
if (!cmd->immediate_data) {
cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
return 0;
else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
return iscsit_add_reject_from_cmd(
ISCSI_REASON_PROTOCOL_ERROR,
1, 0, buf, cmd);
......@@ -1819,17 +1821,16 @@ static int iscsit_handle_task_mgt_cmd(
int cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP)
out_of_order_cmdsn = 1;
else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
return 0;
} else { /* (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) */
else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
return iscsit_add_reject_from_cmd(
ISCSI_REASON_PROTOCOL_ERROR,
1, 0, buf, cmd);
}
}
iscsit_ack_from_expstatsn(conn, hdr->exp_statsn);
if (out_of_order_cmdsn)
if (out_of_order_cmdsn || !(hdr->opcode & ISCSI_OP_IMMEDIATE))
return 0;
/*
* Found the referenced task, send to transport for processing.
......
......@@ -174,6 +174,24 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
sgl_bidi = sdb->table.sgl;
sgl_bidi_count = sdb->table.nents;
}
/*
* Because some userspace code via scsi-generic do not memset their
* associated read buffers, go ahead and do that here for type
* SCF_SCSI_CONTROL_SG_IO_CDB. Also note that this is currently
* guaranteed to be a single SGL for SCF_SCSI_CONTROL_SG_IO_CDB
* by target core in transport_generic_allocate_tasks() ->
* transport_generic_cmd_sequencer().
*/
if (se_cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB &&
se_cmd->data_direction == DMA_FROM_DEVICE) {
struct scatterlist *sg = scsi_sglist(sc);
unsigned char *buf = kmap(sg_page(sg)) + sg->offset;
if (buf != NULL) {
memset(buf, 0, sg->length);
kunmap(sg_page(sg));
}
}
/* Tell the core about our preallocated memory */
ret = transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
......@@ -187,7 +205,7 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
/*
* Called from struct target_core_fabric_ops->check_stop_free()
*/
static void tcm_loop_check_stop_free(struct se_cmd *se_cmd)
static int tcm_loop_check_stop_free(struct se_cmd *se_cmd)
{
/*
* Do not release struct se_cmd's containing a valid TMR
......@@ -195,12 +213,13 @@ static void tcm_loop_check_stop_free(struct se_cmd *se_cmd)
* with transport_generic_free_cmd().
*/
if (se_cmd->se_tmr_req)
return;
return 0;
/*
* Release the struct se_cmd, which will make a callback to release
* struct tcm_loop_cmd * in tcm_loop_deallocate_core_cmd()
*/
transport_generic_free_cmd(se_cmd, 0);
return 1;
}
static void tcm_loop_release_cmd(struct se_cmd *se_cmd)
......
......@@ -58,8 +58,9 @@ struct t10_alua_lu_gp *default_lu_gp;
*
* See spc4r17 section 6.27
*/
int core_emulate_report_target_port_groups(struct se_cmd *cmd)
int target_emulate_report_target_port_groups(struct se_task *task)
{
struct se_cmd *cmd = task->task_se_cmd;
struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
struct se_port *port;
struct t10_alua_tg_pt_gp *tg_pt_gp;
......@@ -164,6 +165,8 @@ int core_emulate_report_target_port_groups(struct se_cmd *cmd)
transport_kunmap_first_data_page(cmd);
task->task_scsi_status = GOOD;
transport_complete_task(task, 1);
return 0;
}
......@@ -172,8 +175,9 @@ int core_emulate_report_target_port_groups(struct se_cmd *cmd)
*
* See spc4r17 section 6.35
*/
int core_emulate_set_target_port_groups(struct se_cmd *cmd)
int target_emulate_set_target_port_groups(struct se_task *task)
{
struct se_cmd *cmd = task->task_se_cmd;
struct se_device *dev = cmd->se_dev;
struct se_subsystem_dev *su_dev = dev->se_sub_dev;
struct se_port *port, *l_port = cmd->se_lun->lun_sep;
......@@ -341,7 +345,8 @@ int core_emulate_set_target_port_groups(struct se_cmd *cmd)
out:
transport_kunmap_first_data_page(cmd);
task->task_scsi_status = GOOD;
transport_complete_task(task, 1);
return 0;
}
......
......@@ -66,8 +66,8 @@ extern struct kmem_cache *t10_alua_lu_gp_mem_cache;
extern struct kmem_cache *t10_alua_tg_pt_gp_cache;
extern struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
extern int core_emulate_report_target_port_groups(struct se_cmd *);
extern int core_emulate_set_target_port_groups(struct se_cmd *);
extern int target_emulate_report_target_port_groups(struct se_task *);
extern int target_emulate_set_target_port_groups(struct se_task *);
extern int core_alua_check_nonop_delay(struct se_cmd *);
extern int core_alua_do_port_transition(struct t10_alua_tg_pt_gp *,
struct se_device *, struct se_port *,
......
......@@ -32,6 +32,7 @@
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>
#include "target_core_ua.h"
#include "target_core_cdb.h"
static void
target_fill_alua_data(struct se_port *port, unsigned char *buf)
......@@ -679,16 +680,18 @@ target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
return 0;
}
static int
target_emulate_inquiry(struct se_cmd *cmd)
int target_emulate_inquiry(struct se_task *task)
{
struct se_cmd *cmd = task->task_se_cmd;
struct se_device *dev = cmd->se_dev;
unsigned char *buf;
unsigned char *cdb = cmd->t_task_cdb;
int p, ret;
if (!(cdb[1] & 0x1))
return target_emulate_inquiry_std(cmd);
if (!(cdb[1] & 0x1)) {
ret = target_emulate_inquiry_std(cmd);
goto out;
}
/*
* Make sure we at least have 4 bytes of INQUIRY response
......@@ -707,22 +710,30 @@ target_emulate_inquiry(struct se_cmd *cmd)
buf[0] = dev->transport->get_device_type(dev);
for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p)
for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p) {
if (cdb[2] == evpd_handlers[p].page) {
buf[1] = cdb[2];
ret = evpd_handlers[p].emulate(cmd, buf);
transport_kunmap_first_data_page(cmd);
return ret;
goto out_unmap;
}
}
transport_kunmap_first_data_page(cmd);
pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]);
return -EINVAL;
ret = -EINVAL;
out_unmap:
transport_kunmap_first_data_page(cmd);
out:
if (!ret) {
task->task_scsi_status = GOOD;
transport_complete_task(task, 1);
}
return ret;
}
static int
target_emulate_readcapacity(struct se_cmd *cmd)
int target_emulate_readcapacity(struct se_task *task)
{
struct se_cmd *cmd = task->task_se_cmd;
struct se_device *dev = cmd->se_dev;
unsigned char *buf;
unsigned long long blocks_long = dev->transport->get_blocks(dev);
......@@ -751,12 +762,14 @@ target_emulate_readcapacity(struct se_cmd *cmd)
transport_kunmap_first_data_page(cmd);
task->task_scsi_status = GOOD;
transport_complete_task(task, 1);
return 0;
}
static int
target_emulate_readcapacity_16(struct se_cmd *cmd)
int target_emulate_readcapacity_16(struct se_task *task)
{
struct se_cmd *cmd = task->task_se_cmd;
struct se_device *dev = cmd->se_dev;
unsigned char *buf;
unsigned long long blocks = dev->transport->get_blocks(dev);
......@@ -784,6 +797,8 @@ target_emulate_readcapacity_16(struct se_cmd *cmd)
transport_kunmap_first_data_page(cmd);
task->task_scsi_status = GOOD;
transport_complete_task(task, 1);
return 0;
}
......@@ -922,14 +937,15 @@ target_modesense_dpofua(unsigned char *buf, int type)
}
}
static int
target_emulate_modesense(struct se_cmd *cmd, int ten)
int target_emulate_modesense(struct se_task *task)
{
struct se_cmd *cmd = task->task_se_cmd;
struct se_device *dev = cmd->se_dev;
char *cdb = cmd->t_task_cdb;
unsigned char *rbuf;
int type = dev->transport->get_device_type(dev);
int offset = (ten) ? 8 : 4;
int ten = (cmd->t_task_cdb[0] == MODE_SENSE_10);
int offset = ten ? 8 : 4;
int length = 0;
unsigned char buf[SE_MODE_PAGE_BUF];
......@@ -995,12 +1011,14 @@ target_emulate_modesense(struct se_cmd *cmd, int ten)
memcpy(rbuf, buf, offset);
transport_kunmap_first_data_page(cmd);
task->task_scsi_status = GOOD;
transport_complete_task(task, 1);
return 0;
}
static int
target_emulate_request_sense(struct se_cmd *cmd)
int target_emulate_request_sense(struct se_task *task)
{
struct se_cmd *cmd = task->task_se_cmd;
unsigned char *cdb = cmd->t_task_cdb;
unsigned char *buf;
u8 ua_asc = 0, ua_ascq = 0;
......@@ -1059,7 +1077,8 @@ target_emulate_request_sense(struct se_cmd *cmd)
end:
transport_kunmap_first_data_page(cmd);
task->task_scsi_status = GOOD;
transport_complete_task(task, 1);
return 0;
}
......@@ -1067,8 +1086,7 @@ target_emulate_request_sense(struct se_cmd *cmd)
* Used for TCM/IBLOCK and TCM/FILEIO for block/blk-lib.c level discard support.
* Note this is not used for TCM/pSCSI passthrough
*/
static int
target_emulate_unmap(struct se_task *task)
int target_emulate_unmap(struct se_task *task)
{
struct se_cmd *cmd = task->task_se_cmd;
struct se_device *dev = cmd->se_dev;
......@@ -1079,6 +1097,12 @@ target_emulate_unmap(struct se_task *task)
int ret = 0, offset;
unsigned short dl, bd_dl;
if (!dev->transport->do_discard) {
pr_err("UNMAP emulation not supported for: %s\n",
dev->transport->name);
return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
}
/* First UNMAP block descriptor starts at 8 byte offset */
offset = 8;
size -= 8;
......@@ -1110,7 +1134,10 @@ target_emulate_unmap(struct se_task *task)
err:
transport_kunmap_first_data_page(cmd);
if (!ret) {
task->task_scsi_status = GOOD;
transport_complete_task(task, 1);
}
return ret;
}
......@@ -1118,14 +1145,28 @@ target_emulate_unmap(struct se_task *task)
* Used for TCM/IBLOCK and TCM/FILEIO for block/blk-lib.c level discard support.
* Note this is not used for TCM/pSCSI passthrough
*/
static int
target_emulate_write_same(struct se_task *task, u32 num_blocks)
int target_emulate_write_same(struct se_task *task)
{
struct se_cmd *cmd = task->task_se_cmd;
struct se_device *dev = cmd->se_dev;
sector_t range;
sector_t lba = cmd->t_task_lba;
u32 num_blocks;
int ret;
if (!dev->transport->do_discard) {
pr_err("WRITE_SAME emulation not supported"
" for: %s\n", dev->transport->name);
return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
}
if (cmd->t_task_cdb[0] == WRITE_SAME)
num_blocks = get_unaligned_be16(&cmd->t_task_cdb[7]);
else if (cmd->t_task_cdb[0] == WRITE_SAME_16)
num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]);
else /* WRITE_SAME_32 via VARIABLE_LENGTH_CMD */
num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]);
/*
* Use the explicit range when non zero is supplied, otherwise calculate
* the remaining range based on ->get_blocks() - starting LBA.
......@@ -1144,127 +1185,30 @@ target_emulate_write_same(struct se_task *task, u32 num_blocks)
return ret;
}
task->task_scsi_status = GOOD;
transport_complete_task(task, 1);
return 0;
}
int
transport_emulate_control_cdb(struct se_task *task)
int target_emulate_synchronize_cache(struct se_task *task)
{
struct se_cmd *cmd = task->task_se_cmd;
struct se_device *dev = cmd->se_dev;
unsigned short service_action;
int ret = 0;
struct se_device *dev = task->task_se_cmd->se_dev;
switch (cmd->t_task_cdb[0]) {
case INQUIRY:
ret = target_emulate_inquiry(cmd);
break;
case READ_CAPACITY:
ret = target_emulate_readcapacity(cmd);
break;
case MODE_SENSE:
ret = target_emulate_modesense(cmd, 0);
break;
case MODE_SENSE_10:
ret = target_emulate_modesense(cmd, 1);
break;
case SERVICE_ACTION_IN:
switch (cmd->t_task_cdb[1] & 0x1f) {
case SAI_READ_CAPACITY_16:
ret = target_emulate_readcapacity_16(cmd);
break;
default:
pr_err("Unsupported SA: 0x%02x\n",
cmd->t_task_cdb[1] & 0x1f);
return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
}
break;
case REQUEST_SENSE:
ret = target_emulate_request_sense(cmd);
break;
case UNMAP:
if (!dev->transport->do_discard) {
pr_err("UNMAP emulation not supported for: %s\n",
dev->transport->name);
return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
}
ret = target_emulate_unmap(task);
break;
case WRITE_SAME:
if (!dev->transport->do_discard) {
pr_err("WRITE_SAME emulation not supported"
" for: %s\n", dev->transport->name);
return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
}
ret = target_emulate_write_same(task,
get_unaligned_be16(&cmd->t_task_cdb[7]));
break;
case WRITE_SAME_16:
if (!dev->transport->do_discard) {
pr_err("WRITE_SAME_16 emulation not supported"
" for: %s\n", dev->transport->name);
return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
}
ret = target_emulate_write_same(task,
get_unaligned_be32(&cmd->t_task_cdb[10]));
break;
case VARIABLE_LENGTH_CMD:
service_action =
get_unaligned_be16(&cmd->t_task_cdb[8]);
switch (service_action) {
case WRITE_SAME_32:
if (!dev->transport->do_discard) {
pr_err("WRITE_SAME_32 SA emulation not"
" supported for: %s\n",
dev->transport->name);
return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
}
ret = target_emulate_write_same(task,
get_unaligned_be32(&cmd->t_task_cdb[28]));
break;
default:
pr_err("Unsupported VARIABLE_LENGTH_CMD SA:"
" 0x%02x\n", service_action);
break;
}
break;
case SYNCHRONIZE_CACHE:
case 0x91: /* SYNCHRONIZE_CACHE_16: */
if (!dev->transport->do_sync_cache) {
pr_err("SYNCHRONIZE_CACHE emulation not supported"
" for: %s\n", dev->transport->name);
return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
}
dev->transport->do_sync_cache(task);
break;
case ALLOW_MEDIUM_REMOVAL:
case ERASE:
case REZERO_UNIT:
case SEEK_10:
case SPACE:
case START_STOP:
case TEST_UNIT_READY:
case VERIFY:
case WRITE_FILEMARKS:
break;
default:
pr_err("Unsupported SCSI Opcode: 0x%02x for %s\n",
cmd->t_task_cdb[0], dev->transport->name);
if (!dev->transport->do_sync_cache) {
pr_err("SYNCHRONIZE_CACHE emulation not supported"
" for: %s\n", dev->transport->name);
return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
}
if (ret < 0)
return ret;
/*
* Handle the successful completion here unless a caller
* has explictly requested an asychronous completion.
*/
if (!(cmd->se_cmd_flags & SCF_EMULATE_CDB_ASYNC)) {
task->task_scsi_status = GOOD;
transport_complete_task(task, 1);
}
dev->transport->do_sync_cache(task);
return 0;
}
return PYX_TRANSPORT_SENT_TO_TRANSPORT;
int target_emulate_noop(struct se_task *task)
{
task->task_scsi_status = GOOD;
transport_complete_task(task, 1);
return 0;
}
/*
......
#ifndef TARGET_CORE_CDB_H
#define TARGET_CORE_CDB_H
int target_emulate_inquiry(struct se_task *task);
int target_emulate_readcapacity(struct se_task *task);
int target_emulate_readcapacity_16(struct se_task *task);
int target_emulate_modesense(struct se_task *task);
int target_emulate_request_sense(struct se_task *task);
int target_emulate_unmap(struct se_task *task);
int target_emulate_write_same(struct se_task *task);
int target_emulate_synchronize_cache(struct se_task *task);
int target_emulate_noop(struct se_task *task);
#endif /* TARGET_CORE_CDB_H */
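
As a rough illustration of the refactoring in this series, the sketch below shows how a control CDB handler declared in target_core_cdb.h could be attached to the new se_cmd->execute_task pointer and later run against a se_task. The example_* function names and the dispatch itself are illustrative only; the actual wiring lives in transport_generic_cmd_sequencer(), which is not part of this diff.

/*
 * Illustrative sketch only -- not the in-tree sequencer code.
 * Each handler completes the task itself on success (setting
 * task_scsi_status = GOOD and calling transport_complete_task())
 * and returns a PYX_TRANSPORT_* error code on failure.
 */
static void example_assign_execute_task(struct se_cmd *cmd)
{
	switch (cmd->t_task_cdb[0]) {
	case INQUIRY:
		cmd->execute_task = target_emulate_inquiry;
		break;
	case READ_CAPACITY:
		cmd->execute_task = target_emulate_readcapacity;
		break;
	case TEST_UNIT_READY:
		cmd->execute_task = target_emulate_noop;
		break;
	default:
		/* Unknown opcodes are rejected elsewhere. */
		break;
	}
}

static int example_run_task(struct se_task *task)
{
	return task->task_se_cmd->execute_task(task);
}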
......@@ -651,23 +651,15 @@ void core_dev_unexport(
lun->lun_se_dev = NULL;
}
int transport_core_report_lun_response(struct se_cmd *se_cmd)
int target_report_luns(struct se_task *se_task)
{
struct se_cmd *se_cmd = se_task->task_se_cmd;
struct se_dev_entry *deve;
struct se_lun *se_lun;
struct se_session *se_sess = se_cmd->se_sess;
struct se_task *se_task;
unsigned char *buf;
u32 cdb_offset = 0, lun_count = 0, offset = 8, i;
list_for_each_entry(se_task, &se_cmd->t_task_list, t_list)
break;
if (!se_task) {
pr_err("Unable to locate struct se_task for struct se_cmd\n");
return PYX_TRANSPORT_LU_COMM_FAILURE;
}
buf = transport_kmap_first_data_page(se_cmd);
/*
......@@ -713,6 +705,8 @@ int transport_core_report_lun_response(struct se_cmd *se_cmd)
buf[2] = ((lun_count >> 8) & 0xff);
buf[3] = (lun_count & 0xff);
se_task->task_scsi_status = GOOD;
transport_complete_task(se_task, 1);
return PYX_TRANSPORT_SENT_TO_TRANSPORT;
}
......
......@@ -116,114 +116,21 @@ static int core_scsi2_reservation_check(struct se_cmd *cmd, u32 *pr_reg_type)
return ret;
}
static int core_scsi2_reservation_release(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
struct se_session *sess = cmd->se_sess;
struct se_portal_group *tpg = sess->se_tpg;
if (!sess || !tpg)
return 0;
spin_lock(&dev->dev_reservation_lock);
if (!dev->dev_reserved_node_acl || !sess) {
spin_unlock(&dev->dev_reservation_lock);
return 0;
}
if (dev->dev_reserved_node_acl != sess->se_node_acl) {
spin_unlock(&dev->dev_reservation_lock);
return 0;
}
dev->dev_reserved_node_acl = NULL;
dev->dev_flags &= ~DF_SPC2_RESERVATIONS;
if (dev->dev_flags & DF_SPC2_RESERVATIONS_WITH_ISID) {
dev->dev_res_bin_isid = 0;
dev->dev_flags &= ~DF_SPC2_RESERVATIONS_WITH_ISID;
}
pr_debug("SCSI-2 Released reservation for %s LUN: %u ->"
" MAPPED LUN: %u for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
cmd->se_lun->unpacked_lun, cmd->se_deve->mapped_lun,
sess->se_node_acl->initiatorname);
spin_unlock(&dev->dev_reservation_lock);
return 0;
}
static int core_scsi2_reservation_reserve(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
struct se_session *sess = cmd->se_sess;
struct se_portal_group *tpg = sess->se_tpg;
if ((cmd->t_task_cdb[1] & 0x01) &&
(cmd->t_task_cdb[1] & 0x02)) {
pr_err("LongIO and Obselete Bits set, returning"
" ILLEGAL_REQUEST\n");
return PYX_TRANSPORT_ILLEGAL_REQUEST;
}
/*
* This is currently the case for target_core_mod passthrough struct se_cmd
* ops
*/
if (!sess || !tpg)
return 0;
spin_lock(&dev->dev_reservation_lock);
if (dev->dev_reserved_node_acl &&
(dev->dev_reserved_node_acl != sess->se_node_acl)) {
pr_err("SCSI-2 RESERVATION CONFLIFT for %s fabric\n",
tpg->se_tpg_tfo->get_fabric_name());
pr_err("Original reserver LUN: %u %s\n",
cmd->se_lun->unpacked_lun,
dev->dev_reserved_node_acl->initiatorname);
pr_err("Current attempt - LUN: %u -> MAPPED LUN: %u"
" from %s \n", cmd->se_lun->unpacked_lun,
cmd->se_deve->mapped_lun,
sess->se_node_acl->initiatorname);
spin_unlock(&dev->dev_reservation_lock);
return PYX_TRANSPORT_RESERVATION_CONFLICT;
}
dev->dev_reserved_node_acl = sess->se_node_acl;
dev->dev_flags |= DF_SPC2_RESERVATIONS;
if (sess->sess_bin_isid != 0) {
dev->dev_res_bin_isid = sess->sess_bin_isid;
dev->dev_flags |= DF_SPC2_RESERVATIONS_WITH_ISID;
}
pr_debug("SCSI-2 Reserved %s LUN: %u -> MAPPED LUN: %u"
" for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
cmd->se_lun->unpacked_lun, cmd->se_deve->mapped_lun,
sess->se_node_acl->initiatorname);
spin_unlock(&dev->dev_reservation_lock);
return 0;
}
static struct t10_pr_registration *core_scsi3_locate_pr_reg(struct se_device *,
struct se_node_acl *, struct se_session *);
static void core_scsi3_put_pr_reg(struct t10_pr_registration *);
/*
* Setup in target_core_transport.c:transport_generic_cmd_sequencer()
* and called via struct se_cmd->transport_emulate_cdb() in TCM processing
* thread context.
*/
int core_scsi2_emulate_crh(struct se_cmd *cmd)
static int target_check_scsi2_reservation_conflict(struct se_cmd *cmd, int *ret)
{
struct se_session *se_sess = cmd->se_sess;
struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
struct t10_pr_registration *pr_reg;
struct t10_reservation *pr_tmpl = &su_dev->t10_pr;
unsigned char *cdb = &cmd->t_task_cdb[0];
int crh = (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS);
int conflict = 0;
if (!se_sess)
return 0;
if (!crh)
goto after_crh;
return false;
pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
se_sess);
......@@ -251,14 +158,16 @@ int core_scsi2_emulate_crh(struct se_cmd *cmd)
*/
if (pr_reg->pr_res_holder) {
core_scsi3_put_pr_reg(pr_reg);
return 0;
*ret = 0;
return false;
}
if ((pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY) ||
(pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY) ||
(pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
(pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) {
core_scsi3_put_pr_reg(pr_reg);
return 0;
*ret = 0;
return true;
}
core_scsi3_put_pr_reg(pr_reg);
conflict = 1;
......@@ -282,18 +191,118 @@ int core_scsi2_emulate_crh(struct se_cmd *cmd)
pr_err("Received legacy SPC-2 RESERVE/RELEASE"
" while active SPC-3 registrations exist,"
" returning RESERVATION_CONFLICT\n");
return PYX_TRANSPORT_RESERVATION_CONFLICT;
*ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
return true;
}
after_crh:
if ((cdb[0] == RESERVE) || (cdb[0] == RESERVE_10))
return core_scsi2_reservation_reserve(cmd);
else if ((cdb[0] == RELEASE) || (cdb[0] == RELEASE_10))
return core_scsi2_reservation_release(cmd);
else
return PYX_TRANSPORT_INVALID_CDB_FIELD;
return false;
}
int target_scsi2_reservation_release(struct se_task *task)
{
struct se_cmd *cmd = task->task_se_cmd;
struct se_device *dev = cmd->se_dev;
struct se_session *sess = cmd->se_sess;
struct se_portal_group *tpg = sess->se_tpg;
int ret = 0;
if (!sess || !tpg)
goto out;
if (target_check_scsi2_reservation_conflict(cmd, &ret))
goto out;
ret = 0;
spin_lock(&dev->dev_reservation_lock);
if (!dev->dev_reserved_node_acl || !sess)
goto out_unlock;
if (dev->dev_reserved_node_acl != sess->se_node_acl)
goto out_unlock;
dev->dev_reserved_node_acl = NULL;
dev->dev_flags &= ~DF_SPC2_RESERVATIONS;
if (dev->dev_flags & DF_SPC2_RESERVATIONS_WITH_ISID) {
dev->dev_res_bin_isid = 0;
dev->dev_flags &= ~DF_SPC2_RESERVATIONS_WITH_ISID;
}
pr_debug("SCSI-2 Released reservation for %s LUN: %u ->"
" MAPPED LUN: %u for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
cmd->se_lun->unpacked_lun, cmd->se_deve->mapped_lun,
sess->se_node_acl->initiatorname);
out_unlock:
spin_unlock(&dev->dev_reservation_lock);
out:
if (!ret) {
task->task_scsi_status = GOOD;
transport_complete_task(task, 1);
}
return ret;
}
int target_scsi2_reservation_reserve(struct se_task *task)
{
struct se_cmd *cmd = task->task_se_cmd;
struct se_device *dev = cmd->se_dev;
struct se_session *sess = cmd->se_sess;
struct se_portal_group *tpg = sess->se_tpg;
int ret = 0;
if ((cmd->t_task_cdb[1] & 0x01) &&
(cmd->t_task_cdb[1] & 0x02)) {
pr_err("LongIO and Obselete Bits set, returning"
" ILLEGAL_REQUEST\n");
ret = PYX_TRANSPORT_ILLEGAL_REQUEST;
goto out;
}
/*
* This is currently the case for target_core_mod passthrough struct se_cmd
* ops
*/
if (!sess || !tpg)
goto out;
if (target_check_scsi2_reservation_conflict(cmd, &ret))
goto out;
ret = 0;
spin_lock(&dev->dev_reservation_lock);
if (dev->dev_reserved_node_acl &&
(dev->dev_reserved_node_acl != sess->se_node_acl)) {
pr_err("SCSI-2 RESERVATION CONFLIFT for %s fabric\n",
tpg->se_tpg_tfo->get_fabric_name());
pr_err("Original reserver LUN: %u %s\n",
cmd->se_lun->unpacked_lun,
dev->dev_reserved_node_acl->initiatorname);
pr_err("Current attempt - LUN: %u -> MAPPED LUN: %u"
" from %s \n", cmd->se_lun->unpacked_lun,
cmd->se_deve->mapped_lun,
sess->se_node_acl->initiatorname);
ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
goto out_unlock;
}
dev->dev_reserved_node_acl = sess->se_node_acl;
dev->dev_flags |= DF_SPC2_RESERVATIONS;
if (sess->sess_bin_isid != 0) {
dev->dev_res_bin_isid = sess->sess_bin_isid;
dev->dev_flags |= DF_SPC2_RESERVATIONS_WITH_ISID;
}
pr_debug("SCSI-2 Reserved %s LUN: %u -> MAPPED LUN: %u"
" for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
cmd->se_lun->unpacked_lun, cmd->se_deve->mapped_lun,
sess->se_node_acl->initiatorname);
out_unlock:
spin_unlock(&dev->dev_reservation_lock);
out:
if (!ret) {
task->task_scsi_status = GOOD;
transport_complete_task(task, 1);
}
return ret;
}
/*
* Begin SPC-3/SPC-4 Persistent Reservations emulation support
*
......@@ -418,12 +427,12 @@ static int core_scsi3_pr_seq_non_holder(
break;
case RELEASE:
case RELEASE_10:
/* Handled by CRH=1 in core_scsi2_emulate_crh() */
/* Handled by CRH=1 in target_scsi2_reservation_release() */
ret = 0;
break;
case RESERVE:
case RESERVE_10:
/* Handled by CRH=1 in core_scsi2_emulate_crh() */
/* Handled by CRH=1 in target_scsi2_reservation_reserve() */
ret = 0;
break;
case TEST_UNIT_READY:
......@@ -3739,12 +3748,33 @@ static unsigned long long core_scsi3_extract_reservation_key(unsigned char *cdb)
/*
* See spc4r17 section 6.14 Table 170
*/
static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb)
int target_scsi3_emulate_pr_out(struct se_task *task)
{
struct se_cmd *cmd = task->task_se_cmd;
unsigned char *cdb = &cmd->t_task_cdb[0];
unsigned char *buf;
u64 res_key, sa_res_key;
int sa, scope, type, aptpl;
int spec_i_pt = 0, all_tg_pt = 0, unreg = 0;
int ret;
/*
* Following spc2r20 5.5.1 Reservations overview:
*
* If a logical unit has been reserved by any RESERVE command and is
* still reserved by any initiator, all PERSISTENT RESERVE IN and all
* PERSISTENT RESERVE OUT commands shall conflict regardless of
* initiator or service action and shall terminate with a RESERVATION
* CONFLICT status.
*/
if (cmd->se_dev->dev_flags & DF_SPC2_RESERVATIONS) {
pr_err("Received PERSISTENT_RESERVE CDB while legacy"
" SPC-2 reservation is held, returning"
" RESERVATION_CONFLICT\n");
ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
goto out;
}
/*
* FIXME: A NULL struct se_session pointer means an this is not coming from
* a $FABRIC_MOD's nexus, but from internal passthrough ops.
......@@ -3755,7 +3785,8 @@ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb)
if (cmd->data_length < 24) {
pr_warn("SPC-PR: Received PR OUT parameter list"
" length too small: %u\n", cmd->data_length);
return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
goto out;
}
/*
* From the PERSISTENT_RESERVE_OUT command descriptor block (CDB)
......@@ -3788,8 +3819,11 @@ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb)
/*
* SPEC_I_PT=1 is only valid for Service action: REGISTER
*/
if (spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER))
return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
if (spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER)) {
ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
goto out;
}
/*
* From spc4r17 section 6.14:
*
......@@ -3803,7 +3837,8 @@ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb)
(cmd->data_length != 24)) {
pr_warn("SPC-PR: Received PR OUT illegal parameter"
" list length: %u\n", cmd->data_length);
return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
goto out;
}
/*
* (core_scsi3_emulate_pro_* function parameters
......@@ -3812,35 +3847,47 @@ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb)
*/
switch (sa) {
case PRO_REGISTER:
return core_scsi3_emulate_pro_register(cmd,
ret = core_scsi3_emulate_pro_register(cmd,
res_key, sa_res_key, aptpl, all_tg_pt, spec_i_pt, 0);
break;
case PRO_RESERVE:
return core_scsi3_emulate_pro_reserve(cmd,
type, scope, res_key);
ret = core_scsi3_emulate_pro_reserve(cmd, type, scope, res_key);
break;
case PRO_RELEASE:
return core_scsi3_emulate_pro_release(cmd,
type, scope, res_key);
ret = core_scsi3_emulate_pro_release(cmd, type, scope, res_key);
break;
case PRO_CLEAR:
return core_scsi3_emulate_pro_clear(cmd, res_key);
ret = core_scsi3_emulate_pro_clear(cmd, res_key);
break;
case PRO_PREEMPT:
return core_scsi3_emulate_pro_preempt(cmd, type, scope,
ret = core_scsi3_emulate_pro_preempt(cmd, type, scope,
res_key, sa_res_key, 0);
break;
case PRO_PREEMPT_AND_ABORT:
return core_scsi3_emulate_pro_preempt(cmd, type, scope,
ret = core_scsi3_emulate_pro_preempt(cmd, type, scope,
res_key, sa_res_key, 1);
break;
case PRO_REGISTER_AND_IGNORE_EXISTING_KEY:
return core_scsi3_emulate_pro_register(cmd,
ret = core_scsi3_emulate_pro_register(cmd,
0, sa_res_key, aptpl, all_tg_pt, spec_i_pt, 1);
break;
case PRO_REGISTER_AND_MOVE:
return core_scsi3_emulate_pro_register_and_move(cmd, res_key,
ret = core_scsi3_emulate_pro_register_and_move(cmd, res_key,
sa_res_key, aptpl, unreg);
break;
default:
pr_err("Unknown PERSISTENT_RESERVE_OUT service"
" action: 0x%02x\n", cdb[1] & 0x1f);
return PYX_TRANSPORT_INVALID_CDB_FIELD;
ret = PYX_TRANSPORT_INVALID_CDB_FIELD;
break;
}
return PYX_TRANSPORT_INVALID_CDB_FIELD;
out:
if (!ret) {
task->task_scsi_status = GOOD;
transport_complete_task(task, 1);
}
return ret;
}
/*
......@@ -4190,29 +4237,11 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
return 0;
}
static int core_scsi3_emulate_pr_in(struct se_cmd *cmd, unsigned char *cdb)
int target_scsi3_emulate_pr_in(struct se_task *task)
{
switch (cdb[1] & 0x1f) {
case PRI_READ_KEYS:
return core_scsi3_pri_read_keys(cmd);
case PRI_READ_RESERVATION:
return core_scsi3_pri_read_reservation(cmd);
case PRI_REPORT_CAPABILITIES:
return core_scsi3_pri_report_capabilities(cmd);
case PRI_READ_FULL_STATUS:
return core_scsi3_pri_read_full_status(cmd);
default:
pr_err("Unknown PERSISTENT_RESERVE_IN service"
" action: 0x%02x\n", cdb[1] & 0x1f);
return PYX_TRANSPORT_INVALID_CDB_FIELD;
}
}
struct se_cmd *cmd = task->task_se_cmd;
int ret;
int core_scsi3_emulate_pr(struct se_cmd *cmd)
{
unsigned char *cdb = &cmd->t_task_cdb[0];
struct se_device *dev = cmd->se_dev;
/*
* Following spc2r20 5.5.1 Reservations overview:
*
......@@ -4222,16 +4251,38 @@ int core_scsi3_emulate_pr(struct se_cmd *cmd)
* initiator or service action and shall terminate with a RESERVATION
* CONFLICT status.
*/
if (dev->dev_flags & DF_SPC2_RESERVATIONS) {
if (cmd->se_dev->dev_flags & DF_SPC2_RESERVATIONS) {
pr_err("Received PERSISTENT_RESERVE CDB while legacy"
" SPC-2 reservation is held, returning"
" RESERVATION_CONFLICT\n");
return PYX_TRANSPORT_RESERVATION_CONFLICT;
}
return (cdb[0] == PERSISTENT_RESERVE_OUT) ?
core_scsi3_emulate_pr_out(cmd, cdb) :
core_scsi3_emulate_pr_in(cmd, cdb);
switch (cmd->t_task_cdb[1] & 0x1f) {
case PRI_READ_KEYS:
ret = core_scsi3_pri_read_keys(cmd);
break;
case PRI_READ_RESERVATION:
ret = core_scsi3_pri_read_reservation(cmd);
break;
case PRI_REPORT_CAPABILITIES:
ret = core_scsi3_pri_report_capabilities(cmd);
break;
case PRI_READ_FULL_STATUS:
ret = core_scsi3_pri_read_full_status(cmd);
break;
default:
pr_err("Unknown PERSISTENT_RESERVE_IN service"
" action: 0x%02x\n", cmd->t_task_cdb[1] & 0x1f);
ret = PYX_TRANSPORT_INVALID_CDB_FIELD;
break;
}
if (!ret) {
task->task_scsi_status = GOOD;
transport_complete_task(task, 1);
}
return ret;
}
static int core_pt_reservation_check(struct se_cmd *cmd, u32 *pr_res_type)
......
......@@ -47,7 +47,8 @@ extern struct kmem_cache *t10_pr_reg_cache;
extern int core_pr_dump_initiator_port(struct t10_pr_registration *,
char *, u32);
extern int core_scsi2_emulate_crh(struct se_cmd *);
extern int target_scsi2_reservation_release(struct se_task *task);
extern int target_scsi2_reservation_reserve(struct se_task *task);
extern int core_scsi3_alloc_aptpl_registration(
struct t10_reservation *, u64,
unsigned char *, unsigned char *, u32,
......@@ -61,7 +62,9 @@ extern void core_scsi3_free_all_registrations(struct se_device *);
extern unsigned char *core_scsi3_pr_dump_type(int);
extern int core_scsi3_check_cdb_abort_and_preempt(struct list_head *,
struct se_cmd *);
extern int core_scsi3_emulate_pr(struct se_cmd *);
extern int target_scsi3_emulate_pr_in(struct se_task *task);
extern int target_scsi3_emulate_pr_out(struct se_task *task);
extern int core_setup_reservations(struct se_device *, int);
#endif /* TARGET_CORE_PR_H */
......@@ -1091,7 +1091,7 @@ static int pscsi_do_task(struct se_task *task)
req = blk_make_request(pdv->pdv_sd->request_queue, hbio,
GFP_KERNEL);
if (!req) {
if (IS_ERR(req)) {
pr_err("pSCSI: blk_make_request() failed\n");
goto fail;
}
......
......@@ -118,7 +118,7 @@ static void core_tmr_drain_tmr_list(
/*
* Allow the received TMR to return with FUNCTION_COMPLETE.
*/
if (tmr && (tmr_p == tmr))
if (tmr_p == tmr)
continue;
cmd = tmr_p->task_cmd;
......@@ -147,19 +147,18 @@ static void core_tmr_drain_tmr_list(
}
spin_unlock(&cmd->t_state_lock);
list_move_tail(&tmr->tmr_list, &drain_tmr_list);
list_move_tail(&tmr_p->tmr_list, &drain_tmr_list);
}
spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
while (!list_empty(&drain_tmr_list)) {
tmr = list_entry(drain_tmr_list.next, struct se_tmr_req, tmr_list);
list_del(&tmr->tmr_list);
list_for_each_entry_safe(tmr_p, tmr_pp, &drain_tmr_list, tmr_list) {
list_del_init(&tmr_p->tmr_list);
cmd = tmr_p->task_cmd;
pr_debug("LUN_RESET: %s releasing TMR %p Function: 0x%02x,"
" Response: 0x%02x, t_state: %d\n",
(preempt_and_abort_list) ? "Preempt" : "", tmr,
tmr->function, tmr->response, cmd->t_state);
(preempt_and_abort_list) ? "Preempt" : "", tmr_p,
tmr_p->function, tmr_p->response, cmd->t_state);
transport_cmd_finish_abort(cmd, 1);
}
......@@ -330,16 +329,6 @@ static void core_tmr_drain_cmd_list(
*/
if (prout_cmd == cmd)
continue;
/*
* Skip direct processing of TRANSPORT_FREE_CMD_INTR for
* HW target mode fabrics.
*/
spin_lock(&cmd->t_state_lock);
if (cmd->t_state == TRANSPORT_FREE_CMD_INTR) {
spin_unlock(&cmd->t_state_lock);
continue;
}
spin_unlock(&cmd->t_state_lock);
atomic_set(&cmd->t_transport_queue_active, 0);
atomic_dec(&qobj->queue_cnt);
......
......@@ -156,7 +156,7 @@ int ft_lport_notify(struct notifier_block *, unsigned long, void *);
/*
* IO methods.
*/
void ft_check_stop_free(struct se_cmd *);
int ft_check_stop_free(struct se_cmd *);
void ft_release_cmd(struct se_cmd *);
int ft_queue_status(struct se_cmd *);
int ft_queue_data_in(struct se_cmd *);
......
......@@ -112,9 +112,10 @@ void ft_release_cmd(struct se_cmd *se_cmd)
ft_free_cmd(cmd);
}
void ft_check_stop_free(struct se_cmd *se_cmd)
int ft_check_stop_free(struct se_cmd *se_cmd)
{
transport_generic_free_cmd(se_cmd, 0);
return 1;
}
/*
......
......@@ -89,7 +89,6 @@ enum transport_state_table {
TRANSPORT_PROCESS_TMR = 9,
TRANSPORT_ISTATE_PROCESSING = 11,
TRANSPORT_NEW_CMD_MAP = 16,
TRANSPORT_FREE_CMD_INTR = 17,
TRANSPORT_COMPLETE_QF_WP = 18,
TRANSPORT_COMPLETE_QF_OK = 19,
};
......@@ -115,7 +114,6 @@ enum se_cmd_flags_table {
SCF_DELAYED_CMD_FROM_SAM_ATTR = 0x00080000,
SCF_UNUSED = 0x00100000,
SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00400000,
SCF_EMULATE_CDB_ASYNC = 0x01000000,
};
/* struct se_dev_entry->lun_flags and struct se_lun->lun_access */
......@@ -426,6 +424,9 @@ struct se_cmd {
enum transport_state_table t_state;
/* Transport specific error status */
int transport_error_status;
/* Used to signal cmd->se_tfo->check_release_cmd() usage per cmd */
int check_release:1;
int cmd_wait_set:1;
/* See se_cmd_flags_table */
u32 se_cmd_flags;
u32 se_ordered_id;
......@@ -452,8 +453,10 @@ struct se_cmd {
struct se_session *se_sess;
struct se_tmr_req *se_tmr_req;
struct list_head se_queue_node;
struct list_head se_cmd_list;
struct completion cmd_wait_comp;
struct target_core_fabric_ops *se_tfo;
int (*transport_emulate_cdb)(struct se_cmd *);
int (*execute_task)(struct se_task *);
void (*transport_complete_callback)(struct se_cmd *);
unsigned char *t_task_cdb;
......@@ -559,12 +562,16 @@ struct se_node_acl {
} ____cacheline_aligned;
struct se_session {
int sess_tearing_down:1;
u64 sess_bin_isid;
struct se_node_acl *se_node_acl;
struct se_portal_group *se_tpg;
void *fabric_sess_ptr;
struct list_head sess_list;
struct list_head sess_acl_list;
struct list_head sess_cmd_list;
struct list_head sess_wait_list;
spinlock_t sess_cmd_lock;
} ____cacheline_aligned;
struct se_device;
......
......@@ -17,7 +17,7 @@ extern int core_dev_export(struct se_device *, struct se_portal_group *,
struct se_lun *);
extern void core_dev_unexport(struct se_device *, struct se_portal_group *,
struct se_lun *);
extern int transport_core_report_lun_response(struct se_cmd *);
extern int target_report_luns(struct se_task *);
extern void se_release_device_for_hba(struct se_device *);
extern void se_release_vpd_for_dev(struct se_device *);
extern void se_clear_dev_ports(struct se_device *);
......
......@@ -46,9 +46,16 @@ struct target_core_fabric_ops {
int (*new_cmd_map)(struct se_cmd *);
/*
* Optional to release struct se_cmd and fabric dependent allocated
* I/O descriptor in transport_cmd_check_stop()
* I/O descriptor in transport_cmd_check_stop().
*
* Returning 1 will signal a descriptor has been released.
* Returning 0 will signal a descriptor has not been released.
*/
void (*check_stop_free)(struct se_cmd *);
int (*check_stop_free)(struct se_cmd *);
/*
* Optional check for active I/O shutdown
*/
int (*check_release_cmd)(struct se_cmd *);
void (*release_cmd)(struct se_cmd *);
/*
* Called with spin_lock_bh(struct se_portal_group->session_lock held.
......
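
To make the new check_stop_free return value concrete, here is a minimal sketch of how the core side might consume it when deciding whether the descriptor has already been released. example_cmd_check_stop_free() is hypothetical; the real consumer is transport_cmd_check_stop(), which is not shown in this diff.

/*
 * Hypothetical helper, sketching the intended contract: a fabric
 * that releases the se_cmd in ->check_stop_free() returns 1, so the
 * caller knows the descriptor must not be touched afterwards.
 */
static int example_cmd_check_stop_free(struct se_cmd *cmd)
{
	if (cmd->se_tfo->check_stop_free)
		return cmd->se_tfo->check_stop_free(cmd);

	/* No fabric hook: the descriptor has not been released. */
	return 0;
}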
......@@ -160,17 +160,20 @@ extern int transport_generic_handle_cdb_map(struct se_cmd *);
extern int transport_generic_handle_data(struct se_cmd *);
extern void transport_new_cmd_failure(struct se_cmd *);
extern int transport_generic_handle_tmr(struct se_cmd *);
extern void transport_generic_free_cmd_intr(struct se_cmd *);
extern bool target_stop_task(struct se_task *task, unsigned long *flags);
extern int transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *, u32,
struct scatterlist *, u32);
extern int transport_clear_lun_from_sessions(struct se_lun *);
extern void transport_wait_for_tasks(struct se_cmd *);
extern bool transport_wait_for_tasks(struct se_cmd *);
extern int transport_check_aborted_status(struct se_cmd *, int);
extern int transport_send_check_condition_and_sense(struct se_cmd *, u8, int);
extern void transport_send_task_abort(struct se_cmd *);
extern void transport_release_cmd(struct se_cmd *);
extern void transport_generic_free_cmd(struct se_cmd *, int);
extern void target_get_sess_cmd(struct se_session *, struct se_cmd *);
extern int target_put_sess_cmd(struct se_session *, struct se_cmd *);
extern void target_splice_sess_cmd_list(struct se_session *);
extern void target_wait_for_sess_cmds(struct se_session *, int);
extern void transport_generic_wait_for_cmds(struct se_cmd *, int);
extern void transport_do_task_sg_chain(struct se_cmd *);
extern void transport_generic_process_write(struct se_cmd *);
......
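
The new session command list helpers above back the "generic active I/O shutdown logic" item from the shortlog. A sketch of how a fabric driver might use them while tearing down a session follows; the meaning of the second argument to target_wait_for_sess_cmds() (assumed here to control whether in-flight tasks are also waited on) is an assumption, not something this diff states.

/*
 * Sketch of fabric-side session teardown using the new helpers.
 * Commands are assumed to be tracked on se_sess->sess_cmd_list via
 * target_get_sess_cmd()/target_put_sess_cmd() during normal I/O.
 */
static void example_fabric_close_session(struct se_session *se_sess)
{
	/* Mark the session as tearing down and splice outstanding
	 * commands onto sess_wait_list. */
	target_splice_sess_cmd_list(se_sess);

	/* Block until every spliced command has dropped its final
	 * reference; the second argument is assumed to request
	 * waiting for in-flight tasks as well (0 = do not). */
	target_wait_for_sess_cmds(se_sess, 0);
}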