Commit 5bd665f2 authored by Linus Torvalds

Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending

Pull target updates from Nicholas Bellinger:
 "It has been a very busy development cycle this time around in target
  land, with the highlights including:

   - Kill struct se_subsystem_dev, in favor of direct se_device usage
     (hch)
   - Simplify reservations code by combining SPC-3 + SCSI-2 support for
     virtual backends only (hch)
   - Simplify ALUA code for virtual only backends, and remove left over
     abstractions (hch)
   - Pass sense_reason_t as return value for I/O submission path (hch)
   - Refactor MODE_SENSE emulation to allow for easier addition of new
     mode pages.  (roland)
   - Add emulation of MODE_SELECT (roland)
   - Fix bug in handling of ExpStatSN wrap-around (steve)
   - Fix bug in TMR ABORT_TASK lookup in qla2xxx target (steve)
   - Add WRITE_SAME w/ UNMAP=0 support for IBLOCK backends (nab)
   - Convert ib_srpt to use modern target_submit_cmd caller + drop
     legacy ioctx->kref usage (nab)
   - Convert ib_srpt to use modern target_submit_tmr caller (nab)
   - Add link_magic for fabric allow_link destination target_items for
     symlinks within target_core_fabric_configfs.c code (nab)
   - Allocate pointers instead of full structs for
     config_group->default_groups (sebastian)
   - Fix 32-bit highmem breakage for FILEIO (sebastian)

  All told, hch was able to shave off another ~1K LOC by killing the
  se_subsystem_dev abstraction, along with a number of PR + ALUA
  simplifications.  Also, a nice patch by Roland is the refactoring of
  MODE_SENSE handling, along with the addition of initial MODE_SELECT
  emulation support for virtual backends.

  Sebastian found a long-standing issue wrt allocation of full
  config_group structs instead of pointers for the
  config_group->default_groups[] setup in a number of areas; fixing it
  ends up saving memory with big configurations.  He also managed to fix
  another long-standing BUG wrt broken 32-bit highmem support within the
  FILEIO backend driver.

  Thank you again to everyone who contributed this round!"
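
As a concrete illustration of the sense_reason_t conversion highlighted above, here is a minimal before/after sketch of the error-reporting pattern (cdb_supported() is a hypothetical predicate; the flags, fields and sense codes appear in the diffs below):

/* Old style: plain int return, with the SCSI sense key stashed in
 * cmd->scsi_sense_reason as a side effect. */
static int old_style_parse(struct se_cmd *cmd)
{
	if (!cdb_supported(cmd)) {		/* hypothetical check */
		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
		return -EINVAL;
	}
	return 0;
}

/* New style: the sense code is the return value, so callers can pass
 * it straight to transport_send_check_condition_and_sense(). */
static sense_reason_t new_style_parse(struct se_cmd *cmd)
{
	if (!cdb_supported(cmd))		/* hypothetical check */
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	return 0;				/* success */
}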

* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending: (50 commits)
  target/iscsi_target: Add NodeACL tags for initiator group support
  target/tcm_fc: fix the lockdep warning due to inconsistent lock state
  sbp-target: fix error path in sbp_make_tpg()
  sbp-target: use simple assignment in tgt_agent_rw_agent_state()
  iscsi-target: use kstrdup() for iscsi_param
  target/file: merge fd_do_readv() and fd_do_writev()
  target/file: Fix 32-bit highmem breakage for SGL -> iovec mapping
  target: Add link_magic for fabric allow_link destination target_items
  ib_srpt: Convert TMR path to target_submit_tmr
  ib_srpt: Convert I/O path to target_submit_cmd + drop legacy ioctx->kref
  target: Make spc_get_write_same_sectors return sector_t
  target/configfs: use kmalloc() instead of kzalloc() for default groups
  target/configfs: allocate only 6 slots for dev_cg->default_groups
  target/configfs: allocate pointers instead of full struct for default_groups
  target: update error handling for sbc_setup_write_same()
  iscsit: use GFP_ATOMIC under spin lock
  iscsi_target: Remove redundant null check before kfree
  target/iblock: Forward declare bio helpers
  target: Clean up flow in transport_check_aborted_status()
  target: Clean up logic in transport_put_cmd()
  ...
......@@ -1269,7 +1269,6 @@ static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
return ioctx;
BUG_ON(ioctx->ch != ch);
kref_init(&ioctx->kref);
spin_lock_init(&ioctx->spinlock);
ioctx->state = SRPT_STATE_NEW;
ioctx->n_rbuf = 0;
......@@ -1290,39 +1289,6 @@ static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
return ioctx;
}
/**
* srpt_put_send_ioctx() - Free up resources.
*/
static void srpt_put_send_ioctx(struct srpt_send_ioctx *ioctx)
{
struct srpt_rdma_ch *ch;
unsigned long flags;
BUG_ON(!ioctx);
ch = ioctx->ch;
BUG_ON(!ch);
WARN_ON(srpt_get_cmd_state(ioctx) != SRPT_STATE_DONE);
srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
transport_generic_free_cmd(&ioctx->cmd, 0);
if (ioctx->n_rbuf > 1) {
kfree(ioctx->rbufs);
ioctx->rbufs = NULL;
ioctx->n_rbuf = 0;
}
spin_lock_irqsave(&ch->spinlock, flags);
list_add(&ioctx->free_list, &ch->free_list);
spin_unlock_irqrestore(&ch->spinlock, flags);
}
static void srpt_put_send_ioctx_kref(struct kref *kref)
{
srpt_put_send_ioctx(container_of(kref, struct srpt_send_ioctx, kref));
}
/**
* srpt_abort_cmd() - Abort a SCSI command.
* @ioctx: I/O context associated with the SCSI command.
......@@ -1359,8 +1325,14 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
}
spin_unlock_irqrestore(&ioctx->spinlock, flags);
if (state == SRPT_STATE_DONE)
if (state == SRPT_STATE_DONE) {
struct srpt_rdma_ch *ch = ioctx->ch;
BUG_ON(ch->sess == NULL);
target_put_sess_cmd(ch->sess, &ioctx->cmd);
goto out;
}
pr_debug("Aborting cmd with state %d and tag %lld\n", state,
ioctx->tag);
......@@ -1395,11 +1367,11 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
spin_lock_irqsave(&ioctx->cmd.t_state_lock, flags);
ioctx->cmd.transport_state |= CMD_T_LUN_STOP;
spin_unlock_irqrestore(&ioctx->cmd.t_state_lock, flags);
kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd);
break;
case SRPT_STATE_MGMT_RSP_SENT:
srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd);
break;
default:
WARN_ON("ERROR: unexpected command state");
......@@ -1457,11 +1429,13 @@ static void srpt_handle_send_comp(struct srpt_rdma_ch *ch,
&& state != SRPT_STATE_DONE))
pr_debug("state = %d\n", state);
if (state != SRPT_STATE_DONE)
kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
else
if (state != SRPT_STATE_DONE) {
srpt_unmap_sg_to_ib_sge(ch, ioctx);
transport_generic_free_cmd(&ioctx->cmd, 0);
} else {
printk(KERN_ERR "IB completion has been received too late for"
" wr_id = %u.\n", ioctx->ioctx.index);
}
}
/**
......@@ -1712,10 +1686,10 @@ static uint64_t srpt_unpack_lun(const uint8_t *lun, int len)
static int srpt_check_stop_free(struct se_cmd *cmd)
{
struct srpt_send_ioctx *ioctx;
struct srpt_send_ioctx *ioctx = container_of(cmd,
struct srpt_send_ioctx, cmd);
ioctx = container_of(cmd, struct srpt_send_ioctx, cmd);
return kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
return target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd);
}
/**
......@@ -1730,12 +1704,12 @@ static int srpt_handle_cmd(struct srpt_rdma_ch *ch,
uint64_t unpacked_lun;
u64 data_len;
enum dma_data_direction dir;
int ret;
sense_reason_t ret;
int rc;
BUG_ON(!send_ioctx);
srp_cmd = recv_ioctx->ioctx.buf;
kref_get(&send_ioctx->kref);
cmd = &send_ioctx->cmd;
send_ioctx->tag = srp_cmd->tag;
......@@ -1755,40 +1729,26 @@ static int srpt_handle_cmd(struct srpt_rdma_ch *ch,
break;
}
ret = srpt_get_desc_tbl(send_ioctx, srp_cmd, &dir, &data_len);
if (ret) {
if (srpt_get_desc_tbl(send_ioctx, srp_cmd, &dir, &data_len)) {
printk(KERN_ERR "0x%llx: parsing SRP descriptor table failed.\n",
srp_cmd->tag);
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
kref_put(&send_ioctx->kref, srpt_put_send_ioctx_kref);
ret = TCM_INVALID_CDB_FIELD;
goto send_sense;
}
cmd->data_length = data_len;
cmd->data_direction = dir;
unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_cmd->lun,
sizeof(srp_cmd->lun));
if (transport_lookup_cmd_lun(cmd, unpacked_lun) < 0) {
kref_put(&send_ioctx->kref, srpt_put_send_ioctx_kref);
rc = target_submit_cmd(cmd, ch->sess, srp_cmd->cdb,
&send_ioctx->sense_data[0], unpacked_lun, data_len,
MSG_SIMPLE_TAG, dir, TARGET_SCF_ACK_KREF);
if (rc != 0) {
ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
goto send_sense;
}
ret = target_setup_cmd_from_cdb(cmd, srp_cmd->cdb);
if (ret < 0) {
kref_put(&send_ioctx->kref, srpt_put_send_ioctx_kref);
if (cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT) {
srpt_queue_status(cmd);
return 0;
} else
goto send_sense;
}
transport_handle_cdb_direct(cmd);
return 0;
send_sense:
transport_send_check_condition_and_sense(cmd, cmd->scsi_sense_reason,
0);
transport_send_check_condition_and_sense(cmd, ret, 0);
return -1;
}
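
The hunk above is the ib_srpt I/O-path conversion in a nutshell; a minimal sketch of the modern submission pattern it adopts (variable names as in the driver):

rc = target_submit_cmd(&send_ioctx->cmd, ch->sess, srp_cmd->cdb,
		       &send_ioctx->sense_data[0], unpacked_lun, data_len,
		       MSG_SIMPLE_TAG, dir, TARGET_SCF_ACK_KREF);
if (rc != 0)
	/* LUN lookup or CDB setup failed before dispatch */
	goto send_sense;
/*
 * TARGET_SCF_ACK_KREF asks target core to hold an extra se_cmd
 * reference that the fabric drops later via target_put_sess_cmd(),
 * which is what lets this series delete the driver-private
 * ioctx->kref entirely.
 */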
......@@ -1865,9 +1825,11 @@ static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
{
struct srp_tsk_mgmt *srp_tsk;
struct se_cmd *cmd;
struct se_session *sess = ch->sess;
uint64_t unpacked_lun;
uint32_t tag = 0;
int tcm_tmr;
int res;
int rc;
BUG_ON(!send_ioctx);
......@@ -1882,39 +1844,32 @@ static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
send_ioctx->tag = srp_tsk->tag;
tcm_tmr = srp_tmr_to_tcm(srp_tsk->tsk_mgmt_func);
if (tcm_tmr < 0) {
send_ioctx->cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
send_ioctx->cmd.se_tmr_req->response =
TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
goto process_tmr;
}
res = core_tmr_alloc_req(cmd, NULL, tcm_tmr, GFP_KERNEL);
if (res < 0) {
send_ioctx->cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
send_ioctx->cmd.se_tmr_req->response = TMR_FUNCTION_REJECTED;
goto process_tmr;
goto fail;
}
unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_tsk->lun,
sizeof(srp_tsk->lun));
res = transport_lookup_tmr_lun(&send_ioctx->cmd, unpacked_lun);
if (res) {
pr_debug("rejecting TMR for LUN %lld\n", unpacked_lun);
send_ioctx->cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
send_ioctx->cmd.se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
goto process_tmr;
}
if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK)
srpt_rx_mgmt_fn_tag(send_ioctx, srp_tsk->task_tag);
process_tmr:
kref_get(&send_ioctx->kref);
if (!(send_ioctx->cmd.se_cmd_flags & SCF_SCSI_CDB_EXCEPTION))
transport_generic_handle_tmr(&send_ioctx->cmd);
else
transport_send_check_condition_and_sense(cmd,
cmd->scsi_sense_reason, 0);
if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK) {
rc = srpt_rx_mgmt_fn_tag(send_ioctx, srp_tsk->task_tag);
if (rc < 0) {
send_ioctx->cmd.se_tmr_req->response =
TMR_TASK_DOES_NOT_EXIST;
goto fail;
}
tag = srp_tsk->task_tag;
}
rc = target_submit_tmr(&send_ioctx->cmd, sess, NULL, unpacked_lun,
srp_tsk, tcm_tmr, GFP_KERNEL, tag,
TARGET_SCF_ACK_KREF);
if (rc != 0) {
send_ioctx->cmd.se_tmr_req->response = TMR_FUNCTION_REJECTED;
goto fail;
}
return;
fail:
transport_send_check_condition_and_sense(cmd, 0, 0); // XXX:
}
/**
......@@ -1956,10 +1911,6 @@ static void srpt_handle_new_iu(struct srpt_rdma_ch *ch,
}
}
transport_init_se_cmd(&send_ioctx->cmd, &srpt_target->tf_ops, ch->sess,
0, DMA_NONE, MSG_SIMPLE_TAG,
send_ioctx->sense_data);
switch (srp_cmd->opcode) {
case SRP_CMD:
srpt_handle_cmd(ch, recv_ioctx, send_ioctx);
......@@ -2365,6 +2316,7 @@ static void srpt_release_channel_work(struct work_struct *w)
{
struct srpt_rdma_ch *ch;
struct srpt_device *sdev;
struct se_session *se_sess;
ch = container_of(w, struct srpt_rdma_ch, release_work);
pr_debug("ch = %p; ch->sess = %p; release_done = %p\n", ch, ch->sess,
......@@ -2373,8 +2325,13 @@ static void srpt_release_channel_work(struct work_struct *w)
sdev = ch->sport->sdev;
BUG_ON(!sdev);
transport_deregister_session_configfs(ch->sess);
transport_deregister_session(ch->sess);
se_sess = ch->sess;
BUG_ON(!se_sess);
target_wait_for_sess_cmds(se_sess, 0);
transport_deregister_session_configfs(se_sess);
transport_deregister_session(se_sess);
ch->sess = NULL;
srpt_destroy_ch_ib(ch);
......@@ -3099,7 +3056,7 @@ static int srpt_queue_response(struct se_cmd *cmd)
ioctx->tag);
srpt_unmap_sg_to_ib_sge(ch, ioctx);
srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd);
}
out:
......@@ -3490,6 +3447,23 @@ static u32 srpt_tpg_get_inst_index(struct se_portal_group *se_tpg)
static void srpt_release_cmd(struct se_cmd *se_cmd)
{
struct srpt_send_ioctx *ioctx = container_of(se_cmd,
struct srpt_send_ioctx, cmd);
struct srpt_rdma_ch *ch = ioctx->ch;
unsigned long flags;
WARN_ON(ioctx->state != SRPT_STATE_DONE);
WARN_ON(ioctx->mapped_sg_count != 0);
if (ioctx->n_rbuf > 1) {
kfree(ioctx->rbufs);
ioctx->rbufs = NULL;
ioctx->n_rbuf = 0;
}
spin_lock_irqsave(&ch->spinlock, flags);
list_add(&ioctx->free_list, &ch->free_list);
spin_unlock_irqrestore(&ch->spinlock, flags);
}
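
A note on the lifecycle this relies on (standard target-core behaviour): .release_cmd is the fabric callback invoked once the last se_cmd reference goes away, so the free-list recycling moves there:

/* The fabric drops its reference ... */
target_put_sess_cmd(ch->sess, &ioctx->cmd);
/* ... and when the kref inside se_cmd reaches zero, target core calls
 * srpt_release_cmd(&ioctx->cmd), which returns the ioctx to
 * ch->free_list as shown above. */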
/**
......
......@@ -228,7 +228,6 @@ struct srpt_recv_ioctx {
struct srpt_send_ioctx {
struct srpt_ioctx ioctx;
struct srpt_rdma_ch *ch;
struct kref kref;
struct rdma_iu *rdma_ius;
struct srp_direct_buf *rbufs;
struct srp_direct_buf single_rbuf;
......
......@@ -1264,8 +1264,27 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess)
{
struct qla_hw_data *ha = vha->hw;
struct se_session *se_sess = sess->se_sess;
struct qla_tgt_mgmt_cmd *mcmd;
struct se_cmd *se_cmd;
u32 lun = 0;
int rc;
bool found_lun = false;
spin_lock(&se_sess->sess_cmd_lock);
list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
struct qla_tgt_cmd *cmd =
container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
if (cmd->tag == abts->exchange_addr_to_abort) {
lun = cmd->unpacked_lun;
found_lun = true;
break;
}
}
spin_unlock(&se_sess->sess_cmd_lock);
if (!found_lun)
return -ENOENT;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
"qla_target(%d): task abort (tag=%d)\n",
......@@ -1283,7 +1302,7 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
mcmd->sess = sess;
memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
rc = ha->tgt.tgt_ops->handle_tmr(mcmd, 0, TMR_ABORT_TASK,
rc = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, TMR_ABORT_TASK,
abts->exchange_addr_to_abort);
if (rc != 0) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052,
......
......@@ -620,8 +620,8 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
return;
}
cmd->se_cmd.scsi_sense_reason = TCM_CHECK_CONDITION_ABORT_CMD;
transport_generic_request_failure(&cmd->se_cmd);
transport_generic_request_failure(&cmd->se_cmd,
TCM_CHECK_CONDITION_ABORT_CMD);
return;
}
......
......@@ -735,7 +735,7 @@ static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn)
list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
spin_lock(&cmd->istate_lock);
if ((cmd->i_state == ISTATE_SENT_STATUS) &&
(cmd->stat_sn < exp_statsn)) {
iscsi_sna_lt(cmd->stat_sn, exp_statsn)) {
cmd->i_state = ISTATE_REMOVE;
spin_unlock(&cmd->istate_lock);
iscsit_add_cmd_to_immediate_queue(cmd, conn,
......@@ -767,9 +767,8 @@ static int iscsit_handle_scsi_cmd(
struct iscsi_conn *conn,
unsigned char *buf)
{
int data_direction, cmdsn_ret = 0, immed_ret, ret, transport_ret;
int dump_immediate_data = 0, send_check_condition = 0, payload_length;
struct iscsi_cmd *cmd = NULL;
int data_direction, payload_length, cmdsn_ret = 0, immed_ret;
struct iscsi_cmd *cmd = NULL;
struct iscsi_scsi_req *hdr;
int iscsi_task_attr;
int sam_task_attr;
......@@ -956,38 +955,26 @@ static int iscsit_handle_scsi_cmd(
" ExpXferLen: %u, Length: %u, CID: %hu\n", hdr->itt,
hdr->cmdsn, hdr->data_length, payload_length, conn->cid);
/*
* The CDB is going to an se_device_t.
*/
ret = transport_lookup_cmd_lun(&cmd->se_cmd,
scsilun_to_int(&hdr->lun));
if (ret < 0) {
if (cmd->se_cmd.scsi_sense_reason == TCM_NON_EXISTENT_LUN) {
pr_debug("Responding to non-acl'ed,"
" non-existent or non-exported iSCSI LUN:"
" 0x%016Lx\n", get_unaligned_le64(&hdr->lun));
cmd->sense_reason = transport_lookup_cmd_lun(&cmd->se_cmd,
scsilun_to_int(&hdr->lun));
if (cmd->sense_reason)
goto attach_cmd;
cmd->sense_reason = target_setup_cmd_from_cdb(&cmd->se_cmd, hdr->cdb);
if (cmd->sense_reason) {
if (cmd->sense_reason == TCM_OUT_OF_RESOURCES) {
return iscsit_add_reject_from_cmd(
ISCSI_REASON_BOOKMARK_NO_RESOURCES,
1, 1, buf, cmd);
}
send_check_condition = 1;
goto attach_cmd;
}
transport_ret = target_setup_cmd_from_cdb(&cmd->se_cmd, hdr->cdb);
if (transport_ret == -ENOMEM) {
if (iscsit_build_pdu_and_seq_lists(cmd, payload_length) < 0) {
return iscsit_add_reject_from_cmd(
ISCSI_REASON_BOOKMARK_NO_RESOURCES,
1, 1, buf, cmd);
} else if (transport_ret < 0) {
/*
* Unsupported SAM Opcode. CHECK_CONDITION will be sent
* in iscsit_execute_cmd() during the CmdSN OOO Execution
* Mechanism.
*/
send_check_condition = 1;
} else {
if (iscsit_build_pdu_and_seq_lists(cmd, payload_length) < 0)
return iscsit_add_reject_from_cmd(
ISCSI_REASON_BOOKMARK_NO_RESOURCES,
1, 1, buf, cmd);
ISCSI_REASON_BOOKMARK_NO_RESOURCES,
1, 1, buf, cmd);
}
attach_cmd:
......@@ -1000,11 +987,12 @@ static int iscsit_handle_scsi_cmd(
*/
core_alua_check_nonop_delay(&cmd->se_cmd);
ret = iscsit_allocate_iovecs(cmd);
if (ret < 0)
if (iscsit_allocate_iovecs(cmd) < 0) {
return iscsit_add_reject_from_cmd(
ISCSI_REASON_BOOKMARK_NO_RESOURCES,
1, 0, buf, cmd);
}
/*
* Check the CmdSN against ExpCmdSN/MaxCmdSN here if
* the Immediate Bit is not set, and no Immediate
......@@ -1031,10 +1019,7 @@ static int iscsit_handle_scsi_cmd(
* If no Immediate Data is attached, it's OK to return now.
*/
if (!cmd->immediate_data) {
if (send_check_condition)
return 0;
if (cmd->unsolicited_data) {
if (!cmd->sense_reason && cmd->unsolicited_data) {
iscsit_set_dataout_sequence_values(cmd);
spin_lock_bh(&cmd->dataout_timeout_lock);
......@@ -1050,19 +1035,17 @@ static int iscsit_handle_scsi_cmd(
* thread. They are processed in CmdSN order by
* iscsit_check_received_cmdsn() below.
*/
if (send_check_condition) {
if (cmd->sense_reason) {
immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
dump_immediate_data = 1;
goto after_immediate_data;
}
/*
* Call directly into transport_generic_new_cmd() to perform
* the backend memory allocation.
*/
ret = transport_generic_new_cmd(&cmd->se_cmd);
if (ret < 0) {
cmd->sense_reason = transport_generic_new_cmd(&cmd->se_cmd);
if (cmd->sense_reason) {
immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
dump_immediate_data = 1;
goto after_immediate_data;
}
......@@ -1079,7 +1062,7 @@ static int iscsit_handle_scsi_cmd(
* Special case for Unsupported SAM WRITE Opcodes
* and ImmediateData=Yes.
*/
if (dump_immediate_data) {
if (cmd->sense_reason) {
if (iscsit_dump_data_payload(conn, payload_length, 1) < 0)
return -1;
} else if (cmd->unsolicited_data) {
......@@ -1272,8 +1255,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
spin_lock_irqsave(&se_cmd->t_state_lock, flags);
if (!(se_cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) ||
(se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION))
if (!(se_cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE))
dump_unsolicited_data = 1;
spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
......@@ -1742,7 +1724,6 @@ static int iscsit_handle_task_mgt_cmd(
ret = transport_lookup_tmr_lun(&cmd->se_cmd,
scsilun_to_int(&hdr->lun));
if (ret < 0) {
cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
se_tmr->response = ISCSI_TMF_RSP_NO_LUN;
goto attach;
}
......@@ -1751,10 +1732,8 @@ static int iscsit_handle_task_mgt_cmd(
switch (function) {
case ISCSI_TM_FUNC_ABORT_TASK:
se_tmr->response = iscsit_tmr_abort_task(cmd, buf);
if (se_tmr->response != ISCSI_TMF_RSP_COMPLETE) {
cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
if (se_tmr->response)
goto attach;
}
break;
case ISCSI_TM_FUNC_ABORT_TASK_SET:
case ISCSI_TM_FUNC_CLEAR_ACA:
......@@ -1763,14 +1742,12 @@ static int iscsit_handle_task_mgt_cmd(
break;
case ISCSI_TM_FUNC_TARGET_WARM_RESET:
if (iscsit_tmr_task_warm_reset(conn, tmr_req, buf) < 0) {
cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
se_tmr->response = ISCSI_TMF_RSP_AUTH_FAILED;
goto attach;
}
break;
case ISCSI_TM_FUNC_TARGET_COLD_RESET:
if (iscsit_tmr_task_cold_reset(conn, tmr_req, buf) < 0) {
cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
se_tmr->response = ISCSI_TMF_RSP_AUTH_FAILED;
goto attach;
}
......@@ -1781,7 +1758,7 @@ static int iscsit_handle_task_mgt_cmd(
* Perform sanity checks on the ExpDataSN only if the
* TASK_REASSIGN was successful.
*/
if (se_tmr->response != ISCSI_TMF_RSP_COMPLETE)
if (se_tmr->response)
break;
if (iscsit_check_task_reassign_expdatasn(tmr_req, conn) < 0)
......@@ -1792,7 +1769,6 @@ static int iscsit_handle_task_mgt_cmd(
default:
pr_err("Unknown TMR function: 0x%02x, protocol"
" error.\n", function);
cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
se_tmr->response = ISCSI_TMF_RSP_NOT_SUPPORTED;
goto attach;
}
......@@ -2360,7 +2336,7 @@ static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn)
if (!conn_p)
return;
cmd = iscsit_allocate_cmd(conn_p, GFP_KERNEL);
cmd = iscsit_allocate_cmd(conn_p, GFP_ATOMIC);
if (!cmd) {
iscsit_dec_conn_usage_count(conn_p);
return;
......
......@@ -754,9 +754,33 @@ static ssize_t lio_target_nacl_store_cmdsn_depth(
TF_NACL_BASE_ATTR(lio_target, cmdsn_depth, S_IRUGO | S_IWUSR);
static ssize_t lio_target_nacl_show_tag(
struct se_node_acl *se_nacl,
char *page)
{
return snprintf(page, PAGE_SIZE, "%s", se_nacl->acl_tag);
}
static ssize_t lio_target_nacl_store_tag(
struct se_node_acl *se_nacl,
const char *page,
size_t count)
{
int ret;
ret = core_tpg_set_initiator_node_tag(se_nacl->se_tpg, se_nacl, page);
if (ret < 0)
return ret;
return count;
}
TF_NACL_BASE_ATTR(lio_target, tag, S_IRUGO | S_IWUSR);
static struct configfs_attribute *lio_target_initiator_attrs[] = {
&lio_target_nacl_info.attr,
&lio_target_nacl_cmdsn_depth.attr,
&lio_target_nacl_tag.attr,
NULL,
};
......@@ -803,7 +827,7 @@ static struct se_node_acl *lio_target_make_nodeacl(
acl = container_of(se_nacl, struct iscsi_node_acl, se_node_acl);
stats_cg = &se_nacl->acl_fabric_stat_group;
stats_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
stats_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
GFP_KERNEL);
if (!stats_cg->default_groups) {
pr_err("Unable to allocate memory for"
......@@ -1268,7 +1292,7 @@ static struct se_wwn *lio_target_call_coreaddtiqn(
*/
stats_cg = &tiqn->tiqn_wwn.fabric_stat_group;
stats_cg->default_groups = kzalloc(sizeof(struct config_group) * 6,
stats_cg->default_groups = kmalloc(sizeof(struct config_group *) * 6,
GFP_KERNEL);
if (!stats_cg->default_groups) {
pr_err("Unable to allocate memory for"
......
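
For clarity, the allocation bug fixed in the two hunks above, in isolation: configfs only ever dereferences default_groups as an array of pointers, so allocating full structs wasted (sizeof(struct config_group) - sizeof(void *)) bytes per slot (the assigned member below is illustrative):

/* Buggy: N full structs allocated (and zeroed) for an array of N pointers. */
stats_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
		GFP_KERNEL);

/* Fixed: size by the pointer type.  kmalloc() suffices because every
 * slot is assigned immediately, with a NULL terminator last. */
stats_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
		GFP_KERNEL);
stats_cg->default_groups[0] = &some_stat_group;	/* illustrative member */
stats_cg->default_groups[1] = NULL;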
......@@ -474,7 +474,7 @@ struct iscsi_cmd {
struct scatterlist *first_data_sg;
u32 first_data_sg_off;
u32 kmapped_nents;
sense_reason_t sense_reason;
} ____cacheline_aligned;
struct iscsi_tmr_req {
......
......@@ -929,11 +929,10 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
case ISCSI_OP_SCSI_CMD:
/*
* Go ahead and send the CHECK_CONDITION status for
* any SCSI CDB exceptions that may have occurred, also
* handle the SCF_SCSI_RESERVATION_CONFLICT case here as well.
* any SCSI CDB exceptions that may have occurred.
*/
if (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION) {
if (se_cmd->scsi_sense_reason == TCM_RESERVATION_CONFLICT) {
if (cmd->sense_reason) {
if (cmd->sense_reason == TCM_RESERVATION_CONFLICT) {
cmd->i_state = ISTATE_SEND_STATUS;
spin_unlock_bh(&cmd->istate_lock);
iscsit_add_cmd_to_response_queue(cmd, cmd->conn,
......@@ -956,7 +955,7 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
* exception
*/
return transport_send_check_condition_and_sense(se_cmd,
se_cmd->scsi_sense_reason, 0);
cmd->sense_reason, 0);
}
/*
* Special case for delayed CmdSN with Immediate
......@@ -1013,7 +1012,7 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
break;
case ISCSI_OP_SCSI_TMFUNC:
if (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION) {
if (cmd->se_cmd.se_tmr_req->response) {
spin_unlock_bh(&cmd->istate_lock);
iscsit_add_cmd_to_response_queue(cmd, cmd->conn,
cmd->i_state);
......
......@@ -372,7 +372,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
* made generic here.
*/
if (!(cmd->cmd_flags & ICF_OOO_CMDSN) && !cmd->immediate_cmd &&
(cmd->cmd_sn >= conn->sess->exp_cmd_sn)) {
iscsi_sna_gte(cmd->stat_sn, conn->sess->exp_cmd_sn)) {
list_del(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
iscsit_free_cmd(cmd);
......
......@@ -127,13 +127,13 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
initiatorname_param = iscsi_find_param_from_key(
INITIATORNAME, conn->param_list);
if (!initiatorname_param)
return -1;
sessiontype_param = iscsi_find_param_from_key(
SESSIONTYPE, conn->param_list);
if (!sessiontype_param)
if (!initiatorname_param || !sessiontype_param) {
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
ISCSI_LOGIN_STATUS_MISSING_FIELDS);
return -1;
}
sessiontype = (strncmp(sessiontype_param->value, NORMAL, 6)) ? 1 : 0;
......@@ -254,9 +254,9 @@ static int iscsi_login_zero_tsih_s1(
kfree(sess);
return -ENOMEM;
}
spin_lock(&sess_idr_lock);
spin_lock_bh(&sess_idr_lock);
ret = idr_get_new(&sess_idr, NULL, &sess->session_index);
spin_unlock(&sess_idr_lock);
spin_unlock_bh(&sess_idr_lock);
if (ret < 0) {
pr_err("idr_get_new() for sess_idr failed\n");
......@@ -1118,10 +1118,8 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
idr_remove(&sess_idr, conn->sess->session_index);
spin_unlock_bh(&sess_idr_lock);
}
if (conn->sess->sess_ops)
kfree(conn->sess->sess_ops);
if (conn->sess)
kfree(conn->sess);
kfree(conn->sess->sess_ops);
kfree(conn->sess);
old_sess_out:
iscsi_stop_login_thread_timer(np);
/*
......
......@@ -620,8 +620,11 @@ static int iscsi_target_handle_csg_one(struct iscsi_conn *conn, struct iscsi_log
login->req_buf,
payload_length,
conn);
if (ret < 0)
if (ret < 0) {
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
ISCSI_LOGIN_STATUS_INIT_ERR);
return -1;
}
if (login->first_request)
if (iscsi_target_check_first_request(conn, login) < 0)
......@@ -636,8 +639,11 @@ static int iscsi_target_handle_csg_one(struct iscsi_conn *conn, struct iscsi_log
login->rsp_buf,
&login->rsp_length,
conn->param_list);
if (ret < 0)
if (ret < 0) {
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
ISCSI_LOGIN_STATUS_INIT_ERR);
return -1;
}
if (!login->auth_complete &&
ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication) {
......
......@@ -154,22 +154,18 @@ static struct iscsi_param *iscsi_set_default_param(struct iscsi_param_list *para
}
INIT_LIST_HEAD(&param->p_list);
param->name = kzalloc(strlen(name) + 1, GFP_KERNEL);
param->name = kstrdup(name, GFP_KERNEL);
if (!param->name) {
pr_err("Unable to allocate memory for parameter name.\n");
goto out;
}
param->value = kzalloc(strlen(value) + 1, GFP_KERNEL);
param->value = kstrdup(value, GFP_KERNEL);
if (!param->value) {
pr_err("Unable to allocate memory for parameter value.\n");
goto out;
}
memcpy(param->name, name, strlen(name));
param->name[strlen(name)] = '\0';
memcpy(param->value, value, strlen(value));
param->value[strlen(value)] = '\0';
param->phase = phase;
param->scope = scope;
param->sender = sender;
......@@ -635,11 +631,8 @@ void iscsi_release_param_list(struct iscsi_param_list *param_list)
list_del(&param->p_list);
kfree(param->name);
param->name = NULL;
kfree(param->value);
param->value = NULL;
kfree(param);
param = NULL;
}
iscsi_release_extra_responses(param_list);
......@@ -687,15 +680,12 @@ int iscsi_update_param_value(struct iscsi_param *param, char *value)
{
kfree(param->value);
param->value = kzalloc(strlen(value) + 1, GFP_KERNEL);
param->value = kstrdup(value, GFP_KERNEL);
if (!param->value) {
pr_err("Unable to allocate memory for value.\n");
return -ENOMEM;
}
memcpy(param->value, value, strlen(value));
param->value[strlen(value)] = '\0';
pr_debug("iSCSI Parameter updated to %s=%s\n",
param->name, param->value);
return 0;
......
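
The conversions above are the classic kstrdup() simplification; open-coded, the same operation takes four statements and still has to get the NUL termination right:

/* Open-coded duplication (removed above): */
param->name = kzalloc(strlen(name) + 1, GFP_KERNEL);
if (!param->name)
	goto out;
memcpy(param->name, name, strlen(name));
param->name[strlen(name)] = '\0';

/* Equivalent one-liner: */
param->name = kstrdup(name, GFP_KERNEL);
if (!param->name)
	goto out;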
......@@ -50,8 +50,8 @@ u8 iscsit_tmr_abort_task(
if (!ref_cmd) {
pr_err("Unable to locate RefTaskTag: 0x%08x on CID:"
" %hu.\n", hdr->rtt, conn->cid);
return (be32_to_cpu(hdr->refcmdsn) >= conn->sess->exp_cmd_sn &&
be32_to_cpu(hdr->refcmdsn) <= conn->sess->max_cmd_sn) ?
return (iscsi_sna_gte(be32_to_cpu(hdr->refcmdsn), conn->sess->exp_cmd_sn) &&
iscsi_sna_lte(be32_to_cpu(hdr->refcmdsn), conn->sess->max_cmd_sn)) ?
ISCSI_TMF_RSP_COMPLETE : ISCSI_TMF_RSP_NO_TASK;
}
if (ref_cmd->cmd_sn != be32_to_cpu(hdr->refcmdsn)) {
......
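
The ExpStatSN wrap-around fixes in this file and in iscsit_ack_from_expstatsn() earlier replace plain u32 comparisons with serial number arithmetic (RFC 1982). An illustrative re-implementation of the less-than helper (the kernel's iscsi_sna_lt() is equivalent):

static inline int sna_lt(u32 n1, u32 n2)
{
	return n1 != n2 &&
	       ((n1 < n2 && (n2 - n1 < (1U << 31))) ||
		(n1 > n2 && (n1 - n2 > (1U << 31))));
}

/* Example: sna_lt(0xfffffff0, 0x10) is true, because 0x10 lies just
 * past the 32-bit wrap, while the plain test 0xfffffff0 < 0x10 is
 * false -- exactly the bug the hunks above fix. */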
......@@ -66,8 +66,7 @@ static struct iscsi_thread_set *iscsi_get_ts_from_inactive_list(void)
return NULL;
}
list_for_each_entry(ts, &inactive_ts_list, ts_list)
break;
ts = list_first_entry(&inactive_ts_list, struct iscsi_thread_set, ts_list);
list_del(&ts->ts_list);
iscsit_global->inactive_ts--;
......
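
The pattern replaced above recurs in this series: a list_for_each_entry() loop that breaks on the first iteration, used only to fetch the head element. A sketch of why list_first_entry() is the right tool (note it is only safe because the caller has already checked that the list is non-empty):

/* Before: a loop that never iterates. */
list_for_each_entry(ts, &inactive_ts_list, ts_list)
	break;

/* After: the idiomatic head accessor. */
ts = list_first_entry(&inactive_ts_list, struct iscsi_thread_set, ts_list);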
......@@ -500,8 +500,8 @@ struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *c
spin_unlock_bh(&conn->immed_queue_lock);
return NULL;
}
list_for_each_entry(qr, &conn->immed_queue_list, qr_list)
break;
qr = list_first_entry(&conn->immed_queue_list,
struct iscsi_queue_req, qr_list);
list_del(&qr->qr_list);
if (qr->cmd)
......@@ -575,8 +575,8 @@ struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *co
return NULL;
}
list_for_each_entry(qr, &conn->response_queue_list, qr_list)
break;
qr = list_first_entry(&conn->response_queue_list,
struct iscsi_queue_req, qr_list);
list_del(&qr->qr_list);
if (qr->cmd)
......
......@@ -53,7 +53,6 @@ struct tcm_loop_hba {
struct se_hba_s *se_hba;
struct se_lun *tl_hba_lun;
struct se_port *tl_hba_lun_sep;
struct se_device_s *se_dev_hba_ptr;
struct tcm_loop_nexus *tl_nexus;
struct device dev;
struct Scsi_Host *sh;
......
config SBP_TARGET
tristate "FireWire SBP-2 fabric module"
depends on FIREWIRE && EXPERIMENTAL
depends on FIREWIRE
help
Say Y or M here to enable SCSI target functionality over FireWire.
This enables you to expose SCSI devices to other nodes on the FireWire
......
......@@ -704,16 +704,17 @@ static void session_maintenance_work(struct work_struct *work)
static int tgt_agent_rw_agent_state(struct fw_card *card, int tcode, void *data,
struct sbp_target_agent *agent)
{
__be32 state;
int state;
switch (tcode) {
case TCODE_READ_QUADLET_REQUEST:
pr_debug("tgt_agent AGENT_STATE READ\n");
spin_lock_bh(&agent->lock);
state = cpu_to_be32(agent->state);
state = agent->state;
spin_unlock_bh(&agent->lock);
memcpy(data, &state, sizeof(state));
*(__be32 *)data = cpu_to_be32(state);
return RCODE_COMPLETE;
......@@ -2207,20 +2208,23 @@ static struct se_portal_group *sbp_make_tpg(
tport->mgt_agt = sbp_management_agent_register(tport);
if (IS_ERR(tport->mgt_agt)) {
ret = PTR_ERR(tport->mgt_agt);
kfree(tpg);
return ERR_PTR(ret);
goto out_free_tpg;
}
ret = core_tpg_register(&sbp_fabric_configfs->tf_ops, wwn,
&tpg->se_tpg, (void *)tpg,
TRANSPORT_TPG_TYPE_NORMAL);
if (ret < 0) {
sbp_management_agent_unregister(tport->mgt_agt);
kfree(tpg);
return ERR_PTR(ret);
}
if (ret < 0)
goto out_unreg_mgt_agt;
return &tpg->se_tpg;
out_unreg_mgt_agt:
sbp_management_agent_unregister(tport->mgt_agt);
out_free_tpg:
tport->tpg = NULL;
kfree(tpg);
return ERR_PTR(ret);
}
static void sbp_drop_tpg(struct se_portal_group *se_tpg)
......
(This diff has been collapsed.)
......@@ -72,8 +72,8 @@ extern struct kmem_cache *t10_alua_lu_gp_mem_cache;
extern struct kmem_cache *t10_alua_tg_pt_gp_cache;
extern struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
extern int target_emulate_report_target_port_groups(struct se_cmd *);
extern int target_emulate_set_target_port_groups(struct se_cmd *);
extern sense_reason_t target_emulate_report_target_port_groups(struct se_cmd *);
extern sense_reason_t target_emulate_set_target_port_groups(struct se_cmd *);
extern int core_alua_check_nonop_delay(struct se_cmd *);
extern int core_alua_do_port_transition(struct t10_alua_tg_pt_gp *,
struct se_device *, struct se_port *,
......@@ -91,7 +91,7 @@ extern void __core_alua_drop_lu_gp_mem(struct t10_alua_lu_gp_member *,
struct t10_alua_lu_gp *);
extern void core_alua_drop_lu_gp_dev(struct se_device *);
extern struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
struct se_subsystem_dev *, const char *, int);
struct se_device *, const char *, int);
extern int core_alua_set_tg_pt_gp_id(struct t10_alua_tg_pt_gp *, u16);
extern struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
struct se_port *);
......@@ -131,6 +131,7 @@ extern ssize_t core_alua_show_secondary_write_metadata(struct se_lun *,
char *);
extern ssize_t core_alua_store_secondary_write_metadata(struct se_lun *,
const char *, size_t);
extern int core_setup_alua(struct se_device *, int);
extern int core_setup_alua(struct se_device *);
extern sense_reason_t target_alua_state_check(struct se_cmd *cmd);
#endif /* TARGET_CORE_ALUA_H */
(This diff has been collapsed.)
......@@ -4,10 +4,9 @@
* This file contains generic fabric module configfs infrastructure for
* TCM v4.x code
*
* Copyright (c) 2010,2011 Rising Tide Systems
* Copyright (c) 2010,2011 Linux-iSCSI.org
* (c) Copyright 2010-2012 RisingTide Systems LLC.
*
* Copyright (c) Nicholas A. Bellinger <nab@linux-iscsi.org>
* Nicholas A. Bellinger <nab@linux-iscsi.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
......@@ -71,6 +70,12 @@ static int target_fabric_mappedlun_link(
struct se_portal_group *se_tpg;
struct config_item *nacl_ci, *tpg_ci, *tpg_ci_s, *wwn_ci, *wwn_ci_s;
int ret = 0, lun_access;
if (lun->lun_link_magic != SE_LUN_LINK_MAGIC) {
pr_err("Bad lun->lun_link_magic, not a valid lun_ci pointer:"
" %p to struct lun: %p\n", lun_ci, lun);
return -EFAULT;
}
/*
* Ensure that the source port exists
*/
......@@ -358,7 +363,7 @@ static struct config_group *target_fabric_make_mappedlun(
}
lacl_cg = &lacl->se_lun_group;
lacl_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
lacl_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
GFP_KERNEL);
if (!lacl_cg->default_groups) {
pr_err("Unable to allocate lacl_cg->default_groups\n");
......@@ -374,7 +379,7 @@ static struct config_group *target_fabric_make_mappedlun(
lacl_cg->default_groups[1] = NULL;
ml_stat_grp = &lacl->ml_stat_grps.stat_group;
ml_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3,
ml_stat_grp->default_groups = kmalloc(sizeof(struct config_group *) * 3,
GFP_KERNEL);
if (!ml_stat_grp->default_groups) {
pr_err("Unable to allocate ml_stat_grp->default_groups\n");
......@@ -734,17 +739,21 @@ static int target_fabric_port_link(
struct config_item *se_dev_ci)
{
struct config_item *tpg_ci;
struct se_device *dev;
struct se_lun *lun = container_of(to_config_group(lun_ci),
struct se_lun, lun_group);
struct se_lun *lun_p;
struct se_portal_group *se_tpg;
struct se_subsystem_dev *se_dev = container_of(
to_config_group(se_dev_ci), struct se_subsystem_dev,
se_dev_group);
struct se_device *dev =
container_of(to_config_group(se_dev_ci), struct se_device, dev_group);
struct target_fabric_configfs *tf;
int ret;
if (dev->dev_link_magic != SE_DEV_LINK_MAGIC) {
pr_err("Bad dev->dev_link_magic, not a valid se_dev_ci pointer:"
" %p to struct se_device: %p\n", se_dev_ci, dev);
return -EFAULT;
}
tpg_ci = &lun_ci->ci_parent->ci_group->cg_item;
se_tpg = container_of(to_config_group(tpg_ci),
struct se_portal_group, tpg_group);
......@@ -755,14 +764,6 @@ static int target_fabric_port_link(
return -EEXIST;
}
dev = se_dev->se_dev_ptr;
if (!dev) {
pr_err("Unable to locate struct se_device pointer from"
" %s\n", config_item_name(se_dev_ci));
ret = -ENODEV;
goto out;
}
lun_p = core_dev_add_lun(se_tpg, dev, lun->unpacked_lun);
if (IS_ERR(lun_p)) {
pr_err("core_dev_add_lun() failed\n");
......@@ -869,7 +870,7 @@ static struct config_group *target_fabric_make_lun(
return ERR_PTR(-EINVAL);
lun_cg = &lun->lun_group;
lun_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
lun_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
GFP_KERNEL);
if (!lun_cg->default_groups) {
pr_err("Unable to allocate lun_cg->default_groups\n");
......
......@@ -4,8 +4,7 @@
* This file contains generic high level protocol identifier and PR
* handlers for TCM fabric modules
*
* Copyright (c) 2010 Rising Tide Systems, Inc.
* Copyright (c) 2010 Linux-iSCSI.org
* (c) Copyright 2010-2012 RisingTide Systems LLC.
*
* Nicholas A. Bellinger <nab@linux-iscsi.org>
*
......
......@@ -3,10 +3,7 @@
*
* This file contains the Storage Engine <-> FILEIO transport specific functions
*
* Copyright (c) 2005 PyX Technologies, Inc.
* Copyright (c) 2005-2006 SBE, Inc. All Rights Reserved.
* Copyright (c) 2007-2010 Rising Tide Systems
* Copyright (c) 2008-2010 Linux-iSCSI.org
* (c) Copyright 2005-2012 RisingTide Systems LLC.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
......@@ -41,7 +38,10 @@
#include "target_core_file.h"
static struct se_subsystem_api fileio_template;
static inline struct fd_dev *FD_DEV(struct se_device *dev)
{
return container_of(dev, struct fd_dev, dev);
}
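
FD_DEV() above is the new backend idiom introduced by the se_subsystem_dev removal: struct se_device is now embedded in each backend's private struct, so lookups become container_of() instead of chasing dev->dev_ptr. A minimal sketch of the embed-and-recover pattern:

static struct se_device *sketch_alloc_device(struct se_hba *hba,
					     const char *name)
{
	struct fd_dev *fd_dev = kzalloc(sizeof(*fd_dev), GFP_KERNEL);

	if (!fd_dev)
		return NULL;
	/* target core only ever sees the embedded member ... */
	return &fd_dev->dev;
}

/* ... and every backend callback recovers the wrapper: */
/*	struct fd_dev *fd_dev = FD_DEV(dev);	*/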
/* fd_attach_hba(): (Part of se_subsystem_api_t template)
*
......@@ -82,7 +82,7 @@ static void fd_detach_hba(struct se_hba *hba)
hba->hba_ptr = NULL;
}
static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name)
static struct se_device *fd_alloc_device(struct se_hba *hba, const char *name)
{
struct fd_dev *fd_dev;
struct fd_host *fd_host = hba->hba_ptr;
......@@ -97,34 +97,28 @@ static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name)
pr_debug("FILEIO: Allocated fd_dev for %p\n", name);
return fd_dev;
return &fd_dev->dev;
}
/* fd_create_virtdevice(): (Part of se_subsystem_api_t template)
*
*
*/
static struct se_device *fd_create_virtdevice(
struct se_hba *hba,
struct se_subsystem_dev *se_dev,
void *p)
static int fd_configure_device(struct se_device *dev)
{
struct se_device *dev;
struct se_dev_limits dev_limits;
struct queue_limits *limits;
struct fd_dev *fd_dev = p;
struct fd_host *fd_host = hba->hba_ptr;
struct fd_dev *fd_dev = FD_DEV(dev);
struct fd_host *fd_host = dev->se_hba->hba_ptr;
struct file *file;
struct inode *inode = NULL;
int dev_flags = 0, flags, ret = -EINVAL;
int flags, ret = -EINVAL;
memset(&dev_limits, 0, sizeof(struct se_dev_limits));
if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
pr_err("Missing fd_dev_name=\n");
return -EINVAL;
}
/*
* Use O_DSYNC by default instead of O_SYNC to forgo syncing
* of pure timestamp updates.
*/
flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;
/*
* Optionally allow fd_buffered_io=1 to be enabled for people
* who want use the fs buffer cache as an WriteCache mechanism.
......@@ -154,22 +148,17 @@ static struct se_device *fd_create_virtdevice(
*/
inode = file->f_mapping->host;
if (S_ISBLK(inode->i_mode)) {
struct request_queue *q;
struct request_queue *q = bdev_get_queue(inode->i_bdev);
unsigned long long dev_size;
/*
* Setup the local scope queue_limits from struct request_queue->limits
* to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
*/
q = bdev_get_queue(inode->i_bdev);
limits = &dev_limits.limits;
limits->logical_block_size = bdev_logical_block_size(inode->i_bdev);
limits->max_hw_sectors = queue_max_hw_sectors(q);
limits->max_sectors = queue_max_sectors(q);
dev->dev_attrib.hw_block_size =
bdev_logical_block_size(inode->i_bdev);
dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
/*
* Determine the number of bytes from i_size_read() minus
* one (1) logical sector from underlying struct block_device
*/
fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev);
dev_size = (i_size_read(file->f_mapping->host) -
fd_dev->fd_block_size);
......@@ -185,26 +174,18 @@ static struct se_device *fd_create_virtdevice(
goto fail;
}
limits = &dev_limits.limits;
limits->logical_block_size = FD_BLOCKSIZE;
limits->max_hw_sectors = FD_MAX_SECTORS;
limits->max_sectors = FD_MAX_SECTORS;
fd_dev->fd_block_size = FD_BLOCKSIZE;
dev->dev_attrib.hw_block_size = FD_BLOCKSIZE;
dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS;
}
dev_limits.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
dev_limits.queue_depth = FD_DEVICE_QUEUE_DEPTH;
fd_dev->fd_block_size = dev->dev_attrib.hw_block_size;
dev = transport_add_device_to_core_hba(hba, &fileio_template,
se_dev, dev_flags, fd_dev,
&dev_limits, "FILEIO", FD_VERSION);
if (!dev)
goto fail;
dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
pr_debug("FILEIO: Forcing setting of emulate_write_cache=1"
" with FDBD_HAS_BUFFERED_IO_WCE\n");
dev->se_sub_dev->se_dev_attrib.emulate_write_cache = 1;
dev->dev_attrib.emulate_write_cache = 1;
}
fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
......@@ -214,22 +195,18 @@ static struct se_device *fd_create_virtdevice(
" %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id,
fd_dev->fd_dev_name, fd_dev->fd_dev_size);
return dev;
return 0;
fail:
if (fd_dev->fd_file) {
filp_close(fd_dev->fd_file, NULL);
fd_dev->fd_file = NULL;
}
return ERR_PTR(ret);
return ret;
}
/* fd_free_device(): (Part of se_subsystem_api_t template)
*
*
*/
static void fd_free_device(void *p)
static void fd_free_device(struct se_device *dev)
{
struct fd_dev *fd_dev = p;
struct fd_dev *fd_dev = FD_DEV(dev);
if (fd_dev->fd_file) {
filp_close(fd_dev->fd_file, NULL);
......@@ -239,17 +216,16 @@ static void fd_free_device(void *p)
kfree(fd_dev);
}
static int fd_do_readv(struct se_cmd *cmd, struct scatterlist *sgl,
u32 sgl_nents)
static int fd_do_rw(struct se_cmd *cmd, struct scatterlist *sgl,
u32 sgl_nents, int is_write)
{
struct se_device *se_dev = cmd->se_dev;
struct fd_dev *dev = se_dev->dev_ptr;
struct fd_dev *dev = FD_DEV(se_dev);
struct file *fd = dev->fd_file;
struct scatterlist *sg;
struct iovec *iov;
mm_segment_t old_fs;
loff_t pos = (cmd->t_task_lba *
se_dev->se_sub_dev->se_dev_attrib.block_size);
loff_t pos = (cmd->t_task_lba * se_dev->dev_attrib.block_size);
int ret = 0, i;
iov = kzalloc(sizeof(struct iovec) * sgl_nents, GFP_KERNEL);
......@@ -260,81 +236,58 @@ static int fd_do_readv(struct se_cmd *cmd, struct scatterlist *sgl,
for_each_sg(sgl, sg, sgl_nents, i) {
iov[i].iov_len = sg->length;
iov[i].iov_base = sg_virt(sg);
iov[i].iov_base = kmap(sg_page(sg)) + sg->offset;
}
old_fs = get_fs();
set_fs(get_ds());
ret = vfs_readv(fd, &iov[0], sgl_nents, &pos);
if (is_write)
ret = vfs_writev(fd, &iov[0], sgl_nents, &pos);
else
ret = vfs_readv(fd, &iov[0], sgl_nents, &pos);
set_fs(old_fs);
for_each_sg(sgl, sg, sgl_nents, i)
kunmap(sg_page(sg));
kfree(iov);
/*
* Return zeros and GOOD status even if the READ did not return
* the expected virt_size for struct file w/o a backing struct
* block_device.
*/
if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) {
if (is_write) {
if (ret < 0 || ret != cmd->data_length) {
pr_err("vfs_readv() returned %d,"
" expecting %d for S_ISBLK\n", ret,
(int)cmd->data_length);
pr_err("%s() write returned %d\n", __func__, ret);
return (ret < 0 ? ret : -EINVAL);
}
} else {
if (ret < 0) {
pr_err("vfs_readv() returned %d for non"
" S_ISBLK\n", ret);
return ret;
/*
* Return zeros and GOOD status even if the READ did not return
* the expected virt_size for struct file w/o a backing struct
* block_device.
*/
if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) {
if (ret < 0 || ret != cmd->data_length) {
pr_err("%s() returned %d, expecting %u for "
"S_ISBLK\n", __func__, ret,
cmd->data_length);
return (ret < 0 ? ret : -EINVAL);
}
} else {
if (ret < 0) {
pr_err("%s() returned %d for non S_ISBLK\n",
__func__, ret);
return ret;
}
}
}
return 1;
}
static int fd_do_writev(struct se_cmd *cmd, struct scatterlist *sgl,
u32 sgl_nents)
{
struct se_device *se_dev = cmd->se_dev;
struct fd_dev *dev = se_dev->dev_ptr;
struct file *fd = dev->fd_file;
struct scatterlist *sg;
struct iovec *iov;
mm_segment_t old_fs;
loff_t pos = (cmd->t_task_lba *
se_dev->se_sub_dev->se_dev_attrib.block_size);
int ret, i = 0;
iov = kzalloc(sizeof(struct iovec) * sgl_nents, GFP_KERNEL);
if (!iov) {
pr_err("Unable to allocate fd_do_writev iov[]\n");
return -ENOMEM;
}
for_each_sg(sgl, sg, sgl_nents, i) {
iov[i].iov_len = sg->length;
iov[i].iov_base = sg_virt(sg);
}
old_fs = get_fs();
set_fs(get_ds());
ret = vfs_writev(fd, &iov[0], sgl_nents, &pos);
set_fs(old_fs);
kfree(iov);
if (ret < 0 || ret != cmd->data_length) {
pr_err("vfs_writev() returned %d\n", ret);
return (ret < 0 ? ret : -EINVAL);
}
return 1;
}
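
The kmap() conversion visible in fd_do_rw() above is the 32-bit highmem fix called out in the pull message: on 32-bit kernels a scatterlist page may live in highmem with no permanent kernel mapping, so sg_virt() yields a useless address. Each page must be mapped for the duration of the vectored I/O:

for_each_sg(sgl, sg, sgl_nents, i) {
	iov[i].iov_len  = sg->length;
	iov[i].iov_base = kmap(sg_page(sg)) + sg->offset;  /* was sg_virt(sg) */
}

/* ... vfs_readv()/vfs_writev() on iov[] ... */

for_each_sg(sgl, sg, sgl_nents, i)
	kunmap(sg_page(sg));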
static int fd_execute_sync_cache(struct se_cmd *cmd)
static sense_reason_t
fd_execute_sync_cache(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
struct fd_dev *fd_dev = dev->dev_ptr;
struct fd_dev *fd_dev = FD_DEV(dev);
int immed = (cmd->t_task_cdb[1] & 0x2);
loff_t start, end;
int ret;
......@@ -353,7 +306,7 @@ static int fd_execute_sync_cache(struct se_cmd *cmd)
start = 0;
end = LLONG_MAX;
} else {
start = cmd->t_task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
start = cmd->t_task_lba * dev->dev_attrib.block_size;
if (cmd->data_length)
end = start + cmd->data_length;
else
......@@ -367,17 +320,16 @@ static int fd_execute_sync_cache(struct se_cmd *cmd)
if (immed)
return 0;
if (ret) {
cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
if (ret)
target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
} else {
else
target_complete_cmd(cmd, SAM_STAT_GOOD);
}
return 0;
}
static int fd_execute_rw(struct se_cmd *cmd)
static sense_reason_t
fd_execute_rw(struct se_cmd *cmd)
{
struct scatterlist *sgl = cmd->t_data_sg;
u32 sgl_nents = cmd->t_data_nents;
......@@ -390,30 +342,29 @@ static int fd_execute_rw(struct se_cmd *cmd)
* physical memory addresses to struct iovec virtual memory.
*/
if (data_direction == DMA_FROM_DEVICE) {
ret = fd_do_readv(cmd, sgl, sgl_nents);
ret = fd_do_rw(cmd, sgl, sgl_nents, 0);
} else {
ret = fd_do_writev(cmd, sgl, sgl_nents);
ret = fd_do_rw(cmd, sgl, sgl_nents, 1);
/*
* Perform implicit vfs_fsync_range() for fd_do_rw() write ops
* for SCSI WRITEs with Forced Unit Access (FUA) set.
* Allow this to happen independent of WCE=0 setting.
*/
if (ret > 0 &&
dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
dev->dev_attrib.emulate_fua_write > 0 &&
(cmd->se_cmd_flags & SCF_FUA)) {
struct fd_dev *fd_dev = dev->dev_ptr;
struct fd_dev *fd_dev = FD_DEV(dev);
loff_t start = cmd->t_task_lba *
dev->se_sub_dev->se_dev_attrib.block_size;
dev->dev_attrib.block_size;
loff_t end = start + cmd->data_length;
vfs_fsync_range(fd_dev->fd_file, start, end, 1);
}
}
if (ret < 0) {
cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
return ret;
}
if (ret < 0)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
if (ret)
target_complete_cmd(cmd, SAM_STAT_GOOD);
return 0;
......@@ -430,12 +381,10 @@ static match_table_t tokens = {
{Opt_err, NULL}
};
static ssize_t fd_set_configfs_dev_params(
struct se_hba *hba,
struct se_subsystem_dev *se_dev,
const char *page, ssize_t count)
static ssize_t fd_set_configfs_dev_params(struct se_device *dev,
const char *page, ssize_t count)
{
struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
struct fd_dev *fd_dev = FD_DEV(dev);
char *orig, *ptr, *arg_p, *opts;
substring_t args[MAX_OPT_ARGS];
int ret = 0, arg, token;
......@@ -502,24 +451,9 @@ static ssize_t fd_set_configfs_dev_params(
return (!ret) ? count : ret;
}
static ssize_t fd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev)
static ssize_t fd_show_configfs_dev_params(struct se_device *dev, char *b)
{
struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
pr_err("Missing fd_dev_name=\n");
return -EINVAL;
}
return 0;
}
static ssize_t fd_show_configfs_dev_params(
struct se_hba *hba,
struct se_subsystem_dev *se_dev,
char *b)
{
struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
struct fd_dev *fd_dev = FD_DEV(dev);
ssize_t bl = 0;
bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
......@@ -530,27 +464,9 @@ static ssize_t fd_show_configfs_dev_params(
return bl;
}
/* fd_get_device_rev(): (Part of se_subsystem_api_t template)
*
*
*/
static u32 fd_get_device_rev(struct se_device *dev)
{
return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
}
/* fd_get_device_type(): (Part of se_subsystem_api_t template)
*
*
*/
static u32 fd_get_device_type(struct se_device *dev)
{
return TYPE_DISK;
}
static sector_t fd_get_blocks(struct se_device *dev)
{
struct fd_dev *fd_dev = dev->dev_ptr;
struct fd_dev *fd_dev = FD_DEV(dev);
struct file *f = fd_dev->fd_file;
struct inode *i = f->f_mapping->host;
unsigned long long dev_size;
......@@ -564,34 +480,35 @@ static sector_t fd_get_blocks(struct se_device *dev)
else
dev_size = fd_dev->fd_dev_size;
return div_u64(dev_size, dev->se_sub_dev->se_dev_attrib.block_size);
return div_u64(dev_size, dev->dev_attrib.block_size);
}
static struct spc_ops fd_spc_ops = {
static struct sbc_ops fd_sbc_ops = {
.execute_rw = fd_execute_rw,
.execute_sync_cache = fd_execute_sync_cache,
};
static int fd_parse_cdb(struct se_cmd *cmd)
static sense_reason_t
fd_parse_cdb(struct se_cmd *cmd)
{
return sbc_parse_cdb(cmd, &fd_spc_ops);
return sbc_parse_cdb(cmd, &fd_sbc_ops);
}
static struct se_subsystem_api fileio_template = {
.name = "fileio",
.inquiry_prod = "FILEIO",
.inquiry_rev = FD_VERSION,
.owner = THIS_MODULE,
.transport_type = TRANSPORT_PLUGIN_VHBA_PDEV,
.attach_hba = fd_attach_hba,
.detach_hba = fd_detach_hba,
.allocate_virtdevice = fd_allocate_virtdevice,
.create_virtdevice = fd_create_virtdevice,
.alloc_device = fd_alloc_device,
.configure_device = fd_configure_device,
.free_device = fd_free_device,
.parse_cdb = fd_parse_cdb,
.check_configfs_dev_params = fd_check_configfs_dev_params,
.set_configfs_dev_params = fd_set_configfs_dev_params,
.show_configfs_dev_params = fd_show_configfs_dev_params,
.get_device_rev = fd_get_device_rev,
.get_device_type = fd_get_device_type,
.get_device_type = sbc_get_device_type,
.get_blocks = fd_get_blocks,
};
......
......@@ -17,6 +17,8 @@
#define FDBD_HAS_BUFFERED_IO_WCE 0x04
struct fd_dev {
struct se_device dev;
u32 fbd_flags;
unsigned char fd_dev_name[FD_MAX_DEV_NAME];
/* Unique Ramdisk Device ID in Ramdisk HBA */
......
......@@ -3,10 +3,7 @@
*
* This file contains the TCM HBA Transport related functions.
*
* Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
* Copyright (c) 2005, 2006, 2007 SBE, Inc.
* Copyright (c) 2007-2010 Rising Tide Systems
* Copyright (c) 2008-2010 Linux-iSCSI.org
* (c) Copyright 2003-2012 RisingTide Systems LLC.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
......@@ -113,7 +110,6 @@ core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags)
return ERR_PTR(-ENOMEM);
}
INIT_LIST_HEAD(&hba->hba_dev_list);
spin_lock_init(&hba->device_lock);
mutex_init(&hba->hba_access_mutex);
......@@ -152,8 +148,7 @@ core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags)
int
core_delete_hba(struct se_hba *hba)
{
if (!list_empty(&hba->hba_dev_list))
dump_stack();
WARN_ON(hba->dev_count);
hba->transport->detach_hba(hba);
......
(This diff has been collapsed.)
......@@ -14,6 +14,7 @@ struct iblock_req {
#define IBDF_HAS_UDEV_PATH 0x01
struct iblock_dev {
struct se_device dev;
unsigned char ibd_udev_path[SE_UDEV_PATH_LEN];
u32 ibd_flags;
struct bio_set *ibd_bio_set;
......
......@@ -19,18 +19,12 @@ int core_dev_export(struct se_device *, struct se_portal_group *,
struct se_lun *);
void core_dev_unexport(struct se_device *, struct se_portal_group *,
struct se_lun *);
int target_report_luns(struct se_cmd *);
void se_release_device_for_hba(struct se_device *);
void se_release_vpd_for_dev(struct se_device *);
int se_free_virtual_device(struct se_device *, struct se_hba *);
int se_dev_check_online(struct se_device *);
int se_dev_check_shutdown(struct se_device *);
void se_dev_set_default_attribs(struct se_device *, struct se_dev_limits *);
int se_dev_set_task_timeout(struct se_device *, u32);
int se_dev_set_max_unmap_lba_count(struct se_device *, u32);
int se_dev_set_max_unmap_block_desc_count(struct se_device *, u32);
int se_dev_set_unmap_granularity(struct se_device *, u32);
int se_dev_set_unmap_granularity_alignment(struct se_device *, u32);
int se_dev_set_max_write_same_len(struct se_device *, u32);
int se_dev_set_emulate_dpo(struct se_device *, int);
int se_dev_set_emulate_fua_write(struct se_device *, int);
int se_dev_set_emulate_fua_read(struct se_device *, int);
......@@ -60,6 +54,9 @@ void core_dev_free_initiator_node_lun_acl(struct se_portal_group *,
struct se_lun_acl *lacl);
int core_dev_setup_virtual_lun0(void);
void core_dev_release_virtual_lun0(void);
struct se_device *target_alloc_device(struct se_hba *hba, const char *name);
int target_configure_device(struct se_device *dev);
void target_free_device(struct se_device *);
/* target_core_hba.c */
struct se_hba *core_alloc_hba(const char *, u32, u32);
......@@ -105,10 +102,11 @@ int transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int);
bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags);
int transport_clear_lun_from_sessions(struct se_lun *);
void transport_send_task_abort(struct se_cmd *);
int target_cmd_size_check(struct se_cmd *cmd, unsigned int size);
sense_reason_t target_cmd_size_check(struct se_cmd *cmd, unsigned int size);
void target_qf_do_work(struct work_struct *work);
/* target_core_stat.c */
void target_stat_setup_dev_default_groups(struct se_subsystem_dev *);
void target_stat_setup_dev_default_groups(struct se_device *);
void target_stat_setup_port_default_groups(struct se_lun *);
void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *);
......
(This diff has been collapsed.)
......@@ -47,8 +47,8 @@ extern struct kmem_cache *t10_pr_reg_cache;
extern int core_pr_dump_initiator_port(struct t10_pr_registration *,
char *, u32);
extern int target_scsi2_reservation_release(struct se_cmd *);
extern int target_scsi2_reservation_reserve(struct se_cmd *);
extern sense_reason_t target_scsi2_reservation_release(struct se_cmd *);
extern sense_reason_t target_scsi2_reservation_reserve(struct se_cmd *);
extern int core_scsi3_alloc_aptpl_registration(
struct t10_reservation *, u64,
unsigned char *, unsigned char *, u32,
......@@ -61,8 +61,8 @@ extern void core_scsi3_free_pr_reg_from_nacl(struct se_device *,
extern void core_scsi3_free_all_registrations(struct se_device *);
extern unsigned char *core_scsi3_pr_dump_type(int);
extern int target_scsi3_emulate_pr_in(struct se_cmd *);
extern int target_scsi3_emulate_pr_out(struct se_cmd *);
extern int core_setup_reservations(struct se_device *, int);
extern sense_reason_t target_scsi3_emulate_pr_in(struct se_cmd *);
extern sense_reason_t target_scsi3_emulate_pr_out(struct se_cmd *);
extern sense_reason_t target_check_reservation(struct se_cmd *);
#endif /* TARGET_CORE_PR_H */
(This diff has been collapsed.)
......@@ -37,6 +37,7 @@ struct pscsi_plugin_task {
#define PDF_HAS_VIRT_HOST_ID 0x20
struct pscsi_dev_virt {
struct se_device dev;
int pdv_flags;
int pdv_host_id;
int pdv_channel_id;
......@@ -44,7 +45,6 @@ struct pscsi_dev_virt {
int pdv_lun_id;
struct block_device *pdv_bd;
struct scsi_device *pdv_sd;
struct se_hba *pdv_se_hba;
} ____cacheline_aligned;
typedef enum phv_modes {
......
......@@ -4,10 +4,7 @@
* This file contains the Storage Engine <-> Ramdisk transport
* specific functions.
*
* Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
* Copyright (c) 2005, 2006, 2007 SBE, Inc.
* Copyright (c) 2007-2010 Rising Tide Systems
* Copyright (c) 2008-2010 Linux-iSCSI.org
* (c) Copyright 2003-2012 RisingTide Systems LLC.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
......@@ -41,7 +38,10 @@
#include "target_core_rd.h"
static struct se_subsystem_api rd_mcp_template;
static inline struct rd_dev *RD_DEV(struct se_device *dev)
{
return container_of(dev, struct rd_dev, dev);
}
/* rd_attach_hba(): (Part of se_subsystem_api_t template)
*
......@@ -196,7 +196,7 @@ static int rd_build_device_space(struct rd_dev *rd_dev)
return 0;
}
static void *rd_allocate_virtdevice(struct se_hba *hba, const char *name)
static struct se_device *rd_alloc_device(struct se_hba *hba, const char *name)
{
struct rd_dev *rd_dev;
struct rd_host *rd_host = hba->hba_ptr;
......@@ -209,39 +209,27 @@ static void *rd_allocate_virtdevice(struct se_hba *hba, const char *name)
rd_dev->rd_host = rd_host;
return rd_dev;
return &rd_dev->dev;
}
static struct se_device *rd_create_virtdevice(struct se_hba *hba,
struct se_subsystem_dev *se_dev, void *p)
static int rd_configure_device(struct se_device *dev)
{
struct se_device *dev;
struct se_dev_limits dev_limits;
struct rd_dev *rd_dev = p;
struct rd_host *rd_host = hba->hba_ptr;
int dev_flags = 0, ret;
char prod[16], rev[4];
struct rd_dev *rd_dev = RD_DEV(dev);
struct rd_host *rd_host = dev->se_hba->hba_ptr;
int ret;
memset(&dev_limits, 0, sizeof(struct se_dev_limits));
if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
pr_debug("Missing rd_pages= parameter\n");
return -EINVAL;
}
ret = rd_build_device_space(rd_dev);
if (ret < 0)
goto fail;
snprintf(prod, 16, "RAMDISK-MCP");
snprintf(rev, 4, "%s", RD_MCP_VERSION);
dev_limits.limits.logical_block_size = RD_BLOCKSIZE;
dev_limits.limits.max_hw_sectors = UINT_MAX;
dev_limits.limits.max_sectors = UINT_MAX;
dev_limits.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
dev_limits.queue_depth = RD_DEVICE_QUEUE_DEPTH;
dev = transport_add_device_to_core_hba(hba,
&rd_mcp_template, se_dev, dev_flags, rd_dev,
&dev_limits, prod, rev);
if (!dev)
goto fail;
dev->dev_attrib.hw_block_size = RD_BLOCKSIZE;
dev->dev_attrib.hw_max_sectors = UINT_MAX;
dev->dev_attrib.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;
......@@ -251,16 +239,16 @@ static struct se_device *rd_create_virtdevice(struct se_hba *hba,
rd_dev->sg_table_count,
(unsigned long)(rd_dev->rd_page_count * PAGE_SIZE));
return dev;
return 0;
fail:
rd_release_device_space(rd_dev);
return ERR_PTR(ret);
return ret;
}
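The hunk above splits the old single-shot rd_create_virtdevice() into the new two-phase contract: alloc_device() only allocates and returns the embedded se_device to the core, while configure_device() validates user parameters and fills dev_attrib, returning a plain errno. A hedged sketch of the same shape for a hypothetical backend (my_* names, including the MY_HAS_REQUIRED_PARAM flag, are illustrative):

static struct se_device *my_alloc_device(struct se_hba *hba, const char *name)
{
	struct my_dev *mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);

	if (!mdev)
		return NULL;
	return &mdev->dev;	/* the core never sees struct my_dev itself */
}

static int my_configure_device(struct se_device *dev)
{
	struct my_dev *mdev = MY_DEV(dev);

	if (!(mdev->my_flags & MY_HAS_REQUIRED_PARAM))
		return -EINVAL;	/* reject a device that was never configured */

	dev->dev_attrib.hw_block_size = 512;
	dev->dev_attrib.hw_max_sectors = UINT_MAX;
	dev->dev_attrib.hw_queue_depth = 128;
	return 0;
}

Compared with the removed code, the prod/rev strings and se_dev_limits plumbing disappear because inquiry_prod/inquiry_rev now live on the se_subsystem_api template itself.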
static void rd_free_device(void *p)
static void rd_free_device(struct se_device *dev)
{
struct rd_dev *rd_dev = p;
struct rd_dev *rd_dev = RD_DEV(dev);
rd_release_device_space(rd_dev);
kfree(rd_dev);
......@@ -284,13 +272,14 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
return NULL;
}
static int rd_execute_rw(struct se_cmd *cmd)
static sense_reason_t
rd_execute_rw(struct se_cmd *cmd)
{
struct scatterlist *sgl = cmd->t_data_sg;
u32 sgl_nents = cmd->t_data_nents;
enum dma_data_direction data_direction = cmd->data_direction;
struct se_device *se_dev = cmd->se_dev;
struct rd_dev *dev = se_dev->dev_ptr;
struct rd_dev *dev = RD_DEV(se_dev);
struct rd_dev_sg_table *table;
struct scatterlist *rd_sg;
struct sg_mapping_iter m;
......@@ -300,14 +289,14 @@ static int rd_execute_rw(struct se_cmd *cmd)
u32 src_len;
u64 tmp;
tmp = cmd->t_task_lba * se_dev->se_sub_dev->se_dev_attrib.block_size;
tmp = cmd->t_task_lba * se_dev->dev_attrib.block_size;
rd_offset = do_div(tmp, PAGE_SIZE);
rd_page = tmp;
rd_size = cmd->data_length;
table = rd_get_sg_table(dev, rd_page);
if (!table)
return -EINVAL;
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
rd_sg = &table->sg_table[rd_page - table->page_start_offset];
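For the LBA arithmetic above, do_div() divides the 64-bit byte offset in place and returns the remainder, so rd_offset ends up as the intra-page offset while tmp is left holding the page index. A worked example under assumed sizes (block_size = 512, PAGE_SIZE = 4096; values for illustration only):

	u64 tmp = 9 * 512;			/* t_task_lba = 9 -> 4608 bytes */
	u32 rd_offset = do_div(tmp, 4096);	/* remainder: 512 */
	u32 rd_page = tmp;			/* quotient left in tmp: 1 */

i.e. LBA 9 starts 512 bytes into the second backing page (page index 1).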
......@@ -357,7 +346,7 @@ static int rd_execute_rw(struct se_cmd *cmd)
table = rd_get_sg_table(dev, rd_page);
if (!table) {
sg_miter_stop(&m);
return -EINVAL;
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
/* since we increment, the first sg entry is correct */
......@@ -378,13 +367,10 @@ static match_table_t tokens = {
{Opt_err, NULL}
};
static ssize_t rd_set_configfs_dev_params(
struct se_hba *hba,
struct se_subsystem_dev *se_dev,
const char *page,
ssize_t count)
static ssize_t rd_set_configfs_dev_params(struct se_device *dev,
const char *page, ssize_t count)
{
struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
struct rd_dev *rd_dev = RD_DEV(dev);
char *orig, *ptr, *opts;
substring_t args[MAX_OPT_ARGS];
int ret = 0, arg, token;
......@@ -417,24 +403,10 @@ static ssize_t rd_set_configfs_dev_params(
return (!ret) ? count : ret;
}
static ssize_t rd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev)
static ssize_t rd_show_configfs_dev_params(struct se_device *dev, char *b)
{
struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
struct rd_dev *rd_dev = RD_DEV(dev);
if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
pr_debug("Missing rd_pages= parameter\n");
return -EINVAL;
}
return 0;
}
static ssize_t rd_show_configfs_dev_params(
struct se_hba *hba,
struct se_subsystem_dev *se_dev,
char *b)
{
struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
ssize_t bl = sprintf(b, "TCM RamDisk ID: %u RamDisk Makeup: rd_mcp\n",
rd_dev->rd_dev_id);
bl += sprintf(b + bl, " PAGES/PAGE_SIZE: %u*%lu"
......@@ -443,48 +415,40 @@ static ssize_t rd_show_configfs_dev_params(
return bl;
}
static u32 rd_get_device_rev(struct se_device *dev)
{
return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
}
static u32 rd_get_device_type(struct se_device *dev)
{
return TYPE_DISK;
}
static sector_t rd_get_blocks(struct se_device *dev)
{
struct rd_dev *rd_dev = dev->dev_ptr;
struct rd_dev *rd_dev = RD_DEV(dev);
unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
dev->se_sub_dev->se_dev_attrib.block_size) - 1;
dev->dev_attrib.block_size) - 1;
return blocks_long;
}
static struct spc_ops rd_spc_ops = {
static struct sbc_ops rd_sbc_ops = {
.execute_rw = rd_execute_rw,
};
static int rd_parse_cdb(struct se_cmd *cmd)
static sense_reason_t
rd_parse_cdb(struct se_cmd *cmd)
{
return sbc_parse_cdb(cmd, &rd_spc_ops);
return sbc_parse_cdb(cmd, &rd_sbc_ops);
}
static struct se_subsystem_api rd_mcp_template = {
.name = "rd_mcp",
.inquiry_prod = "RAMDISK-MCP",
.inquiry_rev = RD_MCP_VERSION,
.transport_type = TRANSPORT_PLUGIN_VHBA_VDEV,
.attach_hba = rd_attach_hba,
.detach_hba = rd_detach_hba,
.allocate_virtdevice = rd_allocate_virtdevice,
.create_virtdevice = rd_create_virtdevice,
.alloc_device = rd_alloc_device,
.configure_device = rd_configure_device,
.free_device = rd_free_device,
.parse_cdb = rd_parse_cdb,
.check_configfs_dev_params = rd_check_configfs_dev_params,
.set_configfs_dev_params = rd_set_configfs_dev_params,
.show_configfs_dev_params = rd_show_configfs_dev_params,
.get_device_rev = rd_get_device_rev,
.get_device_type = rd_get_device_type,
.get_device_type = sbc_get_device_type,
.get_blocks = rd_get_blocks,
};
......
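With parse_cdb delegated to sbc_parse_cdb(), a virtual backend's template shrinks to allocation, configuration, and a single I/O callback, and the spc_ops -> sbc_ops rename makes clear this is the SBC (block command) parser rather than generic SPC handling. A minimal wiring sketch with illustrative my_* names, taking only the exported core calls as real:

static sense_reason_t my_execute_rw(struct se_cmd *cmd)
{
	/* move data between cmd->t_data_sg and the backing store here */
	target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
}

static struct sbc_ops my_sbc_ops = {
	.execute_rw	= my_execute_rw,
};

static sense_reason_t my_parse_cdb(struct se_cmd *cmd)
{
	/* CDB decode, LBA/length checks and sense plumbing live in the core */
	return sbc_parse_cdb(cmd, &my_sbc_ops);
}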
......@@ -24,6 +24,7 @@ struct rd_dev_sg_table {
#define RDF_HAS_PAGE_COUNT 0x01
struct rd_dev {
struct se_device dev;
u32 rd_flags;
/* Unique Ramdisk Device ID in Ramdisk HBA */
u32 rd_dev_id;
......
......@@ -3,8 +3,7 @@
*
* This file contains SPC-3 task management infrastructure
*
* Copyright (c) 2009,2010 Rising Tide Systems
* Copyright (c) 2009,2010 Linux-iSCSI.org
* (c) Copyright 2009-2012 RisingTide Systems LLC.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
......@@ -371,7 +370,7 @@ int core_tmr_lun_reset(
* which the command was received shall be completed with TASK ABORTED
* status (see SAM-4).
*/
tas = dev->se_sub_dev->se_dev_attrib.emulate_tas;
tas = dev->dev_attrib.emulate_tas;
/*
* Determine if this se_tmr is coming from a $FABRIC_MOD
* or struct se_device passthrough..
......@@ -399,10 +398,10 @@ int core_tmr_lun_reset(
* LOGICAL UNIT RESET
*/
if (!preempt_and_abort_list &&
(dev->dev_flags & DF_SPC2_RESERVATIONS)) {
(dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)) {
spin_lock(&dev->dev_reservation_lock);
dev->dev_reserved_node_acl = NULL;
dev->dev_flags &= ~DF_SPC2_RESERVATIONS;
dev->dev_reservation_flags &= ~DRF_SPC2_RESERVATIONS;
spin_unlock(&dev->dev_reservation_lock);
pr_debug("LUN_RESET: SCSI-2 Released reservation\n");
}
......
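The LUN_RESET hunk also shows the reservation-flag split: SPC-2 reservation state moves out of the general dev_flags word into a dedicated dev_reservation_flags (DF_SPC2_RESERVATIONS becomes DRF_SPC2_RESERVATIONS), so reset paths can drop a legacy reservation without reasoning about unrelated device bits. The release pattern, restated as a standalone sketch (the helper name is illustrative; the body follows the hunk above):

static void example_release_spc2_reservation(struct se_device *dev)
{
	spin_lock(&dev->dev_reservation_lock);
	dev->dev_reserved_node_acl = NULL;
	dev->dev_reservation_flags &= ~DRF_SPC2_RESERVATIONS;
	spin_unlock(&dev->dev_reservation_lock);
}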
......@@ -3,10 +3,7 @@
*
* This file contains generic Target Portal Group related functions.
*
* Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
* Copyright (c) 2005, 2006, 2007 SBE, Inc.
* Copyright (c) 2007-2010 Rising Tide Systems
* Copyright (c) 2008-2010 Linux-iSCSI.org
* (c) Copyright 2002-2012 RisingTide Systems LLC.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
......@@ -619,6 +616,29 @@ int core_tpg_set_initiator_node_queue_depth(
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
/* core_tpg_set_initiator_node_tag():
*
* Initiator nodeacl tags are not used internally, but may be used by
* userspace to emulate aliases or groups.
* Returns length of newly-set tag or -EINVAL.
*/
int core_tpg_set_initiator_node_tag(
struct se_portal_group *tpg,
struct se_node_acl *acl,
const char *new_tag)
{
if (strlen(new_tag) >= MAX_ACL_TAG_SIZE)
return -EINVAL;
if (!strncmp("NULL", new_tag, 4)) {
acl->acl_tag[0] = '\0';
return 0;
}
return snprintf(acl->acl_tag, MAX_ACL_TAG_SIZE, "%s", new_tag);
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_tag);
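Per the comment above, NodeACL tags exist for userspace alias/group emulation, so a fabric module would typically expose core_tpg_set_initiator_node_tag() through a configfs attribute. A hypothetical store handler (everything except the exported call is illustrative):

static ssize_t my_nacl_tag_store(struct se_node_acl *acl,
		const char *page, size_t count)
{
	int ret = core_tpg_set_initiator_node_tag(acl->se_tpg, acl, page);

	if (ret < 0)
		return ret;	/* tag was >= MAX_ACL_TAG_SIZE */
	return count;
}

Writing the literal string "NULL" clears the tag, which gives userspace a way to unset an alias without needing a separate attribute.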
static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
{
/* Set in core_dev_setup_virtual_lun0() */
......@@ -672,6 +692,7 @@ int core_tpg_register(
for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
lun = se_tpg->tpg_lun_list[i];
lun->unpacked_lun = i;
lun->lun_link_magic = SE_LUN_LINK_MAGIC;
lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
atomic_set(&lun->lun_acl_count, 0);
init_completion(&lun->lun_shutdown_comp);
......
......@@ -26,7 +26,7 @@
extern struct kmem_cache *se_ua_cache;
extern int core_scsi3_ua_check(struct se_cmd *, unsigned char *);
extern sense_reason_t target_scsi3_ua_check(struct se_cmd *);
extern int core_scsi3_ua_allocate(struct se_node_acl *, u32, u8, u8);
extern void core_scsi3_ua_release_all(struct se_dev_entry *);
extern void core_scsi3_ua_for_check_condition(struct se_cmd *, u8 *, u8 *);
......