提交 e3d6f909 编写于 作者: A Andy Grover 提交者: Nicholas Bellinger

target: Core cleanups from AGrover (round 1)

This patch contains the squashed version of a number of cleanups and
minor fixes from Andy's initial series (round 1) for target core this
past spring.  The condensed log looks like:

target: use errno values instead of returning -1 for everything
target: Rename transport_calc_sg_num to transport_init_task_sg
target: Fix leak in error path in transport_init_task_sg
target/pscsi: Remove pscsi_get_sh() usage
target: Make two runtime checks into WARN_ONs
target: Remove hba queue depth and convert to spin_lock_irq usage
target: dev->dev_status_queue_obj is unused
target: Make struct se_queue_req.cmd type struct se_cmd *
target: Remove __transport_get_qr_from_queue()
target: Rename se_dev->g_se_dev_list to se_dev_node
target: Remove struct se_global
target: Simplify scsi mib index table code
target: Make dev_queue_obj a member of se_device instead of a pointer
target: remove extraneous returns at end of void functions
target: Ensure transport_dump_vpd_ident_type returns null-terminated str
target: Function pointers don't need to use '&' to be assigned
target: Fix comment in __transport_execute_tasks()
target: Misc style cleanups
target: rename struct pr_reservation_template to pr_reservation
target: Remove #defines that just perform indirection
target: Inline transport_get_task_from_execute_queue()
target: Minor header comment fixes
Signed-off-by: Andy Grover <agrover@redhat.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
上级 a8c6da90
......@@ -118,7 +118,7 @@ static struct se_cmd *tcm_loop_allocate_core_cmd(
* Signal BIDI usage with T_TASK(cmd)->t_tasks_bidi
*/
if (scsi_bidi_cmnd(sc))
T_TASK(se_cmd)->t_tasks_bidi = 1;
se_cmd->t_task->t_tasks_bidi = 1;
/*
* Locate the struct se_lun pointer and attach it to struct se_cmd
*/
......@@ -176,7 +176,7 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
* For BIDI commands, pass in the extra READ buffer
* to transport_generic_map_mem_to_cmd() below..
*/
if (T_TASK(se_cmd)->t_tasks_bidi) {
if (se_cmd->t_task->t_tasks_bidi) {
struct scsi_data_buffer *sdb = scsi_in(sc);
mem_bidi_ptr = (void *)sdb->table.sgl;
......@@ -1402,9 +1402,9 @@ static int tcm_loop_register_configfs(void)
* Register the top level struct config_item_type with TCM core
*/
fabric = target_fabric_configfs_init(THIS_MODULE, "loopback");
if (!fabric) {
if (IS_ERR(fabric)) {
printk(KERN_ERR "tcm_loop_register_configfs() failed!\n");
return -1;
return PTR_ERR(fabric);
}
/*
* Setup the fabric API of function pointers used by target_core_mod
......
此差异已折叠。
......@@ -64,8 +64,8 @@ target_fill_alua_data(struct se_port *port, unsigned char *buf)
static int
target_emulate_inquiry_std(struct se_cmd *cmd)
{
struct se_lun *lun = SE_LUN(cmd);
struct se_device *dev = SE_DEV(cmd);
struct se_lun *lun = cmd->se_lun;
struct se_device *dev = cmd->se_lun->lun_se_dev;
unsigned char *buf = cmd->t_task->t_task_buf;
/*
......@@ -75,7 +75,7 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
if (cmd->data_length < 6) {
printk(KERN_ERR "SCSI Inquiry payload length: %u"
" too small for EVPD=0\n", cmd->data_length);
return -1;
return -EINVAL;
}
buf[0] = dev->transport->get_device_type(dev);
......@@ -86,7 +86,7 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
/*
* Enable SCCS and TPGS fields for Emulated ALUA
*/
if (T10_ALUA(dev->se_sub_dev)->alua_type == SPC3_ALUA_EMULATED)
if (dev->se_sub_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED)
target_fill_alua_data(lun->lun_sep, buf);
if (cmd->data_length < 8) {
......@@ -107,9 +107,9 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
snprintf((unsigned char *)&buf[8], 8, "LIO-ORG");
snprintf((unsigned char *)&buf[16], 16, "%s",
&DEV_T10_WWN(dev)->model[0]);
&dev->se_sub_dev->t10_wwn.model[0]);
snprintf((unsigned char *)&buf[32], 4, "%s",
&DEV_T10_WWN(dev)->revision[0]);
&dev->se_sub_dev->t10_wwn.revision[0]);
buf[4] = 31; /* Set additional length to 31 */
return 0;
}
......@@ -128,7 +128,7 @@ target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
* Registered Extended LUN WWN has been set via ConfigFS
* during device creation/restart.
*/
if (SE_DEV(cmd)->se_sub_dev->su_dev_flags &
if (cmd->se_lun->lun_se_dev->se_sub_dev->su_dev_flags &
SDF_EMULATED_VPD_UNIT_SERIAL) {
buf[3] = 3;
buf[5] = 0x80;
......@@ -143,7 +143,7 @@ target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
static int
target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
{
struct se_device *dev = SE_DEV(cmd);
struct se_device *dev = cmd->se_lun->lun_se_dev;
u16 len = 0;
buf[1] = 0x80;
......@@ -152,7 +152,7 @@ target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
u32 unit_serial_len;
unit_serial_len =
strlen(&DEV_T10_WWN(dev)->unit_serial[0]);
strlen(&dev->se_sub_dev->t10_wwn.unit_serial[0]);
unit_serial_len++; /* For NULL Terminator */
if (((len + 4) + unit_serial_len) > cmd->data_length) {
......@@ -162,7 +162,7 @@ target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
return 0;
}
len += sprintf((unsigned char *)&buf[4], "%s",
&DEV_T10_WWN(dev)->unit_serial[0]);
&dev->se_sub_dev->t10_wwn.unit_serial[0]);
len++; /* Extra Byte for NULL Terminator */
buf[3] = len;
}
......@@ -176,15 +176,15 @@ target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
static int
target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
{
struct se_device *dev = SE_DEV(cmd);
struct se_lun *lun = SE_LUN(cmd);
struct se_device *dev = cmd->se_lun->lun_se_dev;
struct se_lun *lun = cmd->se_lun;
struct se_port *port = NULL;
struct se_portal_group *tpg = NULL;
struct t10_alua_lu_gp_member *lu_gp_mem;
struct t10_alua_tg_pt_gp *tg_pt_gp;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
unsigned char binary, binary_new;
unsigned char *prod = &DEV_T10_WWN(dev)->model[0];
unsigned char *prod = &dev->se_sub_dev->t10_wwn.model[0];
u32 prod_len;
u32 unit_serial_len, off = 0;
int i;
......@@ -238,11 +238,11 @@ target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
* VENDOR_SPECIFIC_IDENTIFIER_EXTENTION
*/
binary = transport_asciihex_to_binaryhex(
&DEV_T10_WWN(dev)->unit_serial[0]);
&dev->se_sub_dev->t10_wwn.unit_serial[0]);
buf[off++] |= (binary & 0xf0) >> 4;
for (i = 0; i < 24; i += 2) {
binary_new = transport_asciihex_to_binaryhex(
&DEV_T10_WWN(dev)->unit_serial[i+2]);
&dev->se_sub_dev->t10_wwn.unit_serial[i+2]);
buf[off] = (binary & 0x0f) << 4;
buf[off++] |= (binary_new & 0xf0) >> 4;
binary = binary_new;
......@@ -263,7 +263,7 @@ target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
if (dev->se_sub_dev->su_dev_flags &
SDF_EMULATED_VPD_UNIT_SERIAL) {
unit_serial_len =
strlen(&DEV_T10_WWN(dev)->unit_serial[0]);
strlen(&dev->se_sub_dev->t10_wwn.unit_serial[0]);
unit_serial_len++; /* For NULL Terminator */
if ((len + (id_len + 4) +
......@@ -274,7 +274,7 @@ target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
}
id_len += sprintf((unsigned char *)&buf[off+12],
"%s:%s", prod,
&DEV_T10_WWN(dev)->unit_serial[0]);
&dev->se_sub_dev->t10_wwn.unit_serial[0]);
}
buf[off] = 0x2; /* ASCII */
buf[off+1] = 0x1; /* T10 Vendor ID */
......@@ -312,7 +312,7 @@ target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
goto check_tpgi;
}
buf[off] =
(TPG_TFO(tpg)->get_fabric_proto_ident(tpg) << 4);
(tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
buf[off++] |= 0x1; /* CODE SET == Binary */
buf[off] = 0x80; /* Set PIV=1 */
/* Set ASSOICATION == target port: 01b */
......@@ -335,7 +335,7 @@ target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
* section 7.5.1 Table 362
*/
check_tpgi:
if (T10_ALUA(dev->se_sub_dev)->alua_type !=
if (dev->se_sub_dev->t10_alua.alua_type !=
SPC3_ALUA_EMULATED)
goto check_scsi_name;
......@@ -357,7 +357,7 @@ target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
buf[off] =
(TPG_TFO(tpg)->get_fabric_proto_ident(tpg) << 4);
(tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
buf[off++] |= 0x1; /* CODE SET == Binary */
buf[off] = 0x80; /* Set PIV=1 */
/* Set ASSOICATION == target port: 01b */
......@@ -409,7 +409,7 @@ target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
* section 7.5.1 Table 362
*/
check_scsi_name:
scsi_name_len = strlen(TPG_TFO(tpg)->tpg_get_wwn(tpg));
scsi_name_len = strlen(tpg->se_tpg_tfo->tpg_get_wwn(tpg));
/* UTF-8 ",t,0x<16-bit TPGT>" + NULL Terminator */
scsi_name_len += 10;
/* Check for 4-byte padding */
......@@ -424,7 +424,7 @@ target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
goto set_len;
}
buf[off] =
(TPG_TFO(tpg)->get_fabric_proto_ident(tpg) << 4);
(tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
buf[off++] |= 0x3; /* CODE SET == UTF-8 */
buf[off] = 0x80; /* Set PIV=1 */
/* Set ASSOICATION == target port: 01b */
......@@ -438,9 +438,9 @@ target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
* Target Port, this means "<iSCSI name>,t,0x<TPGT> in
* UTF-8 encoding.
*/
tpgt = TPG_TFO(tpg)->tpg_get_tag(tpg);
tpgt = tpg->se_tpg_tfo->tpg_get_tag(tpg);
scsi_name_len = sprintf(&buf[off], "%s,t,0x%04x",
TPG_TFO(tpg)->tpg_get_wwn(tpg), tpgt);
tpg->se_tpg_tfo->tpg_get_wwn(tpg), tpgt);
scsi_name_len += 1 /* Include NULL terminator */;
/*
* The null-terminated, null-padded (see 4.4.2) SCSI
......@@ -477,7 +477,7 @@ target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
buf[5] = 0x07;
/* If WriteCache emulation is enabled, set V_SUP */
if (DEV_ATTRIB(SE_DEV(cmd))->emulate_write_cache > 0)
if (cmd->se_lun->lun_se_dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0)
buf[6] = 0x01;
return 0;
}
......@@ -486,7 +486,7 @@ target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
static int
target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
{
struct se_device *dev = SE_DEV(cmd);
struct se_device *dev = cmd->se_lun->lun_se_dev;
int have_tp = 0;
/*
......@@ -494,14 +494,14 @@ target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
* emulate_tpu=1 or emulate_tpws=1 we will be expect a
* different page length for Thin Provisioning.
*/
if (DEV_ATTRIB(dev)->emulate_tpu || DEV_ATTRIB(dev)->emulate_tpws)
if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws)
have_tp = 1;
if (cmd->data_length < (0x10 + 4)) {
printk(KERN_INFO "Received data_length: %u"
" too small for EVPD 0xb0\n",
cmd->data_length);
return -1;
return -EINVAL;
}
if (have_tp && cmd->data_length < (0x3c + 4)) {
......@@ -523,12 +523,12 @@ target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
/*
* Set MAXIMUM TRANSFER LENGTH
*/
put_unaligned_be32(DEV_ATTRIB(dev)->max_sectors, &buf[8]);
put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.max_sectors, &buf[8]);
/*
* Set OPTIMAL TRANSFER LENGTH
*/
put_unaligned_be32(DEV_ATTRIB(dev)->optimal_sectors, &buf[12]);
put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.optimal_sectors, &buf[12]);
/*
* Exit now if we don't support TP or the initiator sent a too
......@@ -540,25 +540,25 @@ target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
/*
* Set MAXIMUM UNMAP LBA COUNT
*/
put_unaligned_be32(DEV_ATTRIB(dev)->max_unmap_lba_count, &buf[20]);
put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count, &buf[20]);
/*
* Set MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT
*/
put_unaligned_be32(DEV_ATTRIB(dev)->max_unmap_block_desc_count,
put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count,
&buf[24]);
/*
* Set OPTIMAL UNMAP GRANULARITY
*/
put_unaligned_be32(DEV_ATTRIB(dev)->unmap_granularity, &buf[28]);
put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.unmap_granularity, &buf[28]);
/*
* UNMAP GRANULARITY ALIGNMENT
*/
put_unaligned_be32(DEV_ATTRIB(dev)->unmap_granularity_alignment,
put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment,
&buf[32]);
if (DEV_ATTRIB(dev)->unmap_granularity_alignment != 0)
if (dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment != 0)
buf[32] |= 0x80; /* Set the UGAVALID bit */
return 0;
......@@ -568,7 +568,7 @@ target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
static int
target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
{
struct se_device *dev = SE_DEV(cmd);
struct se_device *dev = cmd->se_lun->lun_se_dev;
/*
* From sbc3r22 section 6.5.4 Thin Provisioning VPD page:
......@@ -602,7 +602,7 @@ target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
* the UNMAP command (see 5.25). A TPU bit set to zero indicates
* that the device server does not support the UNMAP command.
*/
if (DEV_ATTRIB(dev)->emulate_tpu != 0)
if (dev->se_sub_dev->se_dev_attrib.emulate_tpu != 0)
buf[5] = 0x80;
/*
......@@ -611,7 +611,7 @@ target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
* A TPWS bit set to zero indicates that the device server does not
* support the use of the WRITE SAME (16) command to unmap LBAs.
*/
if (DEV_ATTRIB(dev)->emulate_tpws != 0)
if (dev->se_sub_dev->se_dev_attrib.emulate_tpws != 0)
buf[5] |= 0x40;
return 0;
......@@ -620,7 +620,7 @@ target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
static int
target_emulate_inquiry(struct se_cmd *cmd)
{
struct se_device *dev = SE_DEV(cmd);
struct se_device *dev = cmd->se_lun->lun_se_dev;
unsigned char *buf = cmd->t_task->t_task_buf;
unsigned char *cdb = cmd->t_task->t_task_cdb;
......@@ -637,7 +637,7 @@ target_emulate_inquiry(struct se_cmd *cmd)
if (cmd->data_length < 4) {
printk(KERN_ERR "SCSI Inquiry payload length: %u"
" too small for EVPD=1\n", cmd->data_length);
return -1;
return -EINVAL;
}
buf[0] = dev->transport->get_device_type(dev);
......@@ -656,7 +656,7 @@ target_emulate_inquiry(struct se_cmd *cmd)
return target_emulate_evpd_b2(cmd, buf);
default:
printk(KERN_ERR "Unknown VPD Code: 0x%02x\n", cdb[2]);
return -1;
return -EINVAL;
}
return 0;
......@@ -665,7 +665,7 @@ target_emulate_inquiry(struct se_cmd *cmd)
static int
target_emulate_readcapacity(struct se_cmd *cmd)
{
struct se_device *dev = SE_DEV(cmd);
struct se_device *dev = cmd->se_lun->lun_se_dev;
unsigned char *buf = cmd->t_task->t_task_buf;
unsigned long long blocks_long = dev->transport->get_blocks(dev);
u32 blocks;
......@@ -679,14 +679,14 @@ target_emulate_readcapacity(struct se_cmd *cmd)
buf[1] = (blocks >> 16) & 0xff;
buf[2] = (blocks >> 8) & 0xff;
buf[3] = blocks & 0xff;
buf[4] = (DEV_ATTRIB(dev)->block_size >> 24) & 0xff;
buf[5] = (DEV_ATTRIB(dev)->block_size >> 16) & 0xff;
buf[6] = (DEV_ATTRIB(dev)->block_size >> 8) & 0xff;
buf[7] = DEV_ATTRIB(dev)->block_size & 0xff;
buf[4] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff;
buf[5] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff;
buf[6] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff;
buf[7] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff;
/*
* Set max 32-bit blocks to signal SERVICE ACTION READ_CAPACITY_16
*/
if (DEV_ATTRIB(dev)->emulate_tpu || DEV_ATTRIB(dev)->emulate_tpws)
if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws)
put_unaligned_be32(0xFFFFFFFF, &buf[0]);
return 0;
......@@ -695,7 +695,7 @@ target_emulate_readcapacity(struct se_cmd *cmd)
static int
target_emulate_readcapacity_16(struct se_cmd *cmd)
{
struct se_device *dev = SE_DEV(cmd);
struct se_device *dev = cmd->se_lun->lun_se_dev;
unsigned char *buf = cmd->t_task->t_task_buf;
unsigned long long blocks = dev->transport->get_blocks(dev);
......@@ -707,15 +707,15 @@ target_emulate_readcapacity_16(struct se_cmd *cmd)
buf[5] = (blocks >> 16) & 0xff;
buf[6] = (blocks >> 8) & 0xff;
buf[7] = blocks & 0xff;
buf[8] = (DEV_ATTRIB(dev)->block_size >> 24) & 0xff;
buf[9] = (DEV_ATTRIB(dev)->block_size >> 16) & 0xff;
buf[10] = (DEV_ATTRIB(dev)->block_size >> 8) & 0xff;
buf[11] = DEV_ATTRIB(dev)->block_size & 0xff;
buf[8] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff;
buf[9] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff;
buf[10] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff;
buf[11] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff;
/*
* Set Thin Provisioning Enable bit following sbc3r22 in section
* READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.
*/
if (DEV_ATTRIB(dev)->emulate_tpu || DEV_ATTRIB(dev)->emulate_tpws)
if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws)
buf[14] = 0x80;
return 0;
......@@ -765,8 +765,8 @@ target_modesense_control(struct se_device *dev, unsigned char *p)
* for a BUSY, TASK SET FULL, or RESERVATION CONFLICT status regardless
* to the number of commands completed with one of those status codes.
*/
p[4] = (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl == 2) ? 0x30 :
(DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl == 1) ? 0x20 : 0x00;
p[4] = (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2) ? 0x30 :
(dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 1) ? 0x20 : 0x00;
/*
* From spc4r17, section 7.4.6 Control mode Page
*
......@@ -779,7 +779,7 @@ target_modesense_control(struct se_device *dev, unsigned char *p)
* which the command was received shall be completed with TASK ABORTED
* status (see SAM-4).
*/
p[5] = (DEV_ATTRIB(dev)->emulate_tas) ? 0x40 : 0x00;
p[5] = (dev->se_sub_dev->se_dev_attrib.emulate_tas) ? 0x40 : 0x00;
p[8] = 0xff;
p[9] = 0xff;
p[11] = 30;
......@@ -792,7 +792,7 @@ target_modesense_caching(struct se_device *dev, unsigned char *p)
{
p[0] = 0x08;
p[1] = 0x12;
if (DEV_ATTRIB(dev)->emulate_write_cache > 0)
if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0)
p[2] = 0x04; /* Write Cache Enable */
p[12] = 0x20; /* Disabled Read Ahead */
......@@ -830,7 +830,7 @@ target_modesense_dpofua(unsigned char *buf, int type)
static int
target_emulate_modesense(struct se_cmd *cmd, int ten)
{
struct se_device *dev = SE_DEV(cmd);
struct se_device *dev = cmd->se_lun->lun_se_dev;
char *cdb = cmd->t_task->t_task_cdb;
unsigned char *rbuf = cmd->t_task->t_task_buf;
int type = dev->transport->get_device_type(dev);
......@@ -867,13 +867,13 @@ target_emulate_modesense(struct se_cmd *cmd, int ten)
buf[0] = (offset >> 8) & 0xff;
buf[1] = offset & 0xff;
if ((SE_LUN(cmd)->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
(cmd->se_deve &&
(cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
target_modesense_write_protect(&buf[3], type);
if ((DEV_ATTRIB(dev)->emulate_write_cache > 0) &&
(DEV_ATTRIB(dev)->emulate_fua_write > 0))
if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) &&
(dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0))
target_modesense_dpofua(&buf[3], type);
if ((offset + 2) > cmd->data_length)
......@@ -883,13 +883,13 @@ target_emulate_modesense(struct se_cmd *cmd, int ten)
offset -= 1;
buf[0] = offset & 0xff;
if ((SE_LUN(cmd)->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
(cmd->se_deve &&
(cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
target_modesense_write_protect(&buf[2], type);
if ((DEV_ATTRIB(dev)->emulate_write_cache > 0) &&
(DEV_ATTRIB(dev)->emulate_fua_write > 0))
if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) &&
(dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0))
target_modesense_dpofua(&buf[2], type);
if ((offset + 1) > cmd->data_length)
......@@ -963,8 +963,8 @@ target_emulate_request_sense(struct se_cmd *cmd)
static int
target_emulate_unmap(struct se_task *task)
{
struct se_cmd *cmd = TASK_CMD(task);
struct se_device *dev = SE_DEV(cmd);
struct se_cmd *cmd = task->task_se_cmd;
struct se_device *dev = cmd->se_lun->lun_se_dev;
unsigned char *buf = cmd->t_task->t_task_buf, *ptr = NULL;
unsigned char *cdb = &cmd->t_task->t_task_cdb[0];
sector_t lba;
......@@ -991,7 +991,7 @@ target_emulate_unmap(struct se_task *task)
if (ret < 0) {
printk(KERN_ERR "blkdev_issue_discard() failed: %d\n",
ret);
return -1;
return ret;
}
ptr += 16;
......@@ -1010,13 +1010,13 @@ target_emulate_unmap(struct se_task *task)
static int
target_emulate_write_same(struct se_task *task)
{
struct se_cmd *cmd = TASK_CMD(task);
struct se_device *dev = SE_DEV(cmd);
struct se_cmd *cmd = task->task_se_cmd;
struct se_device *dev = cmd->se_lun->lun_se_dev;
sector_t lba = cmd->t_task->t_task_lba;
unsigned int range;
int ret;
range = (cmd->data_length / DEV_ATTRIB(dev)->block_size);
range = (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size);
printk(KERN_INFO "WRITE_SAME UNMAP: LBA: %llu Range: %u\n",
(unsigned long long)lba, range);
......@@ -1024,7 +1024,7 @@ target_emulate_write_same(struct se_task *task)
ret = dev->transport->do_discard(dev, lba, range);
if (ret < 0) {
printk(KERN_INFO "blkdev_issue_discard() failed for WRITE_SAME\n");
return -1;
return ret;
}
task->task_scsi_status = GOOD;
......@@ -1035,8 +1035,8 @@ target_emulate_write_same(struct se_task *task)
int
transport_emulate_control_cdb(struct se_task *task)
{
struct se_cmd *cmd = TASK_CMD(task);
struct se_device *dev = SE_DEV(cmd);
struct se_cmd *cmd = task->task_se_cmd;
struct se_device *dev = cmd->se_lun->lun_se_dev;
unsigned short service_action;
int ret = 0;
......
......@@ -37,6 +37,7 @@
#include <linux/parser.h>
#include <linux/syscalls.h>
#include <linux/configfs.h>
#include <linux/spinlock.h>
#include <target/target_core_base.h>
#include <target/target_core_device.h>
......@@ -52,6 +53,8 @@
#include "target_core_rd.h"
#include "target_core_stat.h"
extern struct t10_alua_lu_gp *default_lu_gp;
static struct list_head g_tf_list;
static struct mutex g_tf_lock;
......@@ -61,6 +64,13 @@ struct target_core_configfs_attribute {
ssize_t (*store)(void *, const char *, size_t);
};
static struct config_group target_core_hbagroup;
static struct config_group alua_group;
static struct config_group alua_lu_gps_group;
static DEFINE_SPINLOCK(se_device_lock);
static LIST_HEAD(se_dev_list);
static inline struct se_hba *
item_to_hba(struct config_item *item)
{
......@@ -298,21 +308,21 @@ struct target_fabric_configfs *target_fabric_configfs_init(
if (!(fabric_mod)) {
printk(KERN_ERR "Missing struct module *fabric_mod pointer\n");
return NULL;
return ERR_PTR(-EINVAL);
}
if (!(name)) {
printk(KERN_ERR "Unable to locate passed fabric name\n");
return NULL;
return ERR_PTR(-EINVAL);
}
if (strlen(name) >= TARGET_FABRIC_NAME_SIZE) {
printk(KERN_ERR "Passed name: %s exceeds TARGET_FABRIC"
"_NAME_SIZE\n", name);
return NULL;
return ERR_PTR(-EINVAL);
}
tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL);
if (!(tf))
return NULL;
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&tf->tf_list);
atomic_set(&tf->tf_access_cnt, 0);
......@@ -591,7 +601,6 @@ void target_fabric_configfs_deregister(
printk("<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>>>>>>"
">>>>>\n");
return;
}
EXPORT_SYMBOL(target_fabric_configfs_deregister);
......@@ -616,7 +625,8 @@ static ssize_t target_core_dev_show_attr_##_name( \
spin_unlock(&se_dev->se_dev_lock); \
return -ENODEV; \
} \
rb = snprintf(page, PAGE_SIZE, "%u\n", (u32)DEV_ATTRIB(dev)->_name); \
rb = snprintf(page, PAGE_SIZE, "%u\n", \
(u32)dev->se_sub_dev->se_dev_attrib._name); \
spin_unlock(&se_dev->se_dev_lock); \
\
return rb; \
......@@ -1078,7 +1088,7 @@ static ssize_t target_core_dev_pr_show_spc3_res(
PR_REG_ISID_ID_LEN);
*len += sprintf(page + *len, "SPC-3 Reservation: %s Initiator: %s%s\n",
TPG_TFO(se_nacl->se_tpg)->get_fabric_name(),
se_nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
se_nacl->initiatorname, (prf_isid) ? &i_buf[0] : "");
spin_unlock(&dev->dev_reservation_lock);
......@@ -1100,7 +1110,7 @@ static ssize_t target_core_dev_pr_show_spc2_res(
return *len;
}
*len += sprintf(page + *len, "SPC-2 Reservation: %s Initiator: %s\n",
TPG_TFO(se_nacl->se_tpg)->get_fabric_name(),
se_nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
se_nacl->initiatorname);
spin_unlock(&dev->dev_reservation_lock);
......@@ -1116,7 +1126,7 @@ static ssize_t target_core_dev_pr_show_attr_res_holder(
if (!(su_dev->se_dev_ptr))
return -ENODEV;
switch (T10_RES(su_dev)->res_type) {
switch (su_dev->t10_pr.res_type) {
case SPC3_PERSISTENT_RESERVATIONS:
target_core_dev_pr_show_spc3_res(su_dev->se_dev_ptr,
page, &len);
......@@ -1153,7 +1163,7 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_all_tgt_pts(
if (!(dev))
return -ENODEV;
if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
return len;
spin_lock(&dev->dev_reservation_lock);
......@@ -1190,10 +1200,10 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_generation(
if (!(su_dev->se_dev_ptr))
return -ENODEV;
if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
return 0;
return sprintf(page, "0x%08x\n", T10_RES(su_dev)->pr_generation);
return sprintf(page, "0x%08x\n", su_dev->t10_pr.pr_generation);
}
SE_DEV_PR_ATTR_RO(res_pr_generation);
......@@ -1217,7 +1227,7 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port(
if (!(dev))
return -ENODEV;
if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
return len;
spin_lock(&dev->dev_reservation_lock);
......@@ -1230,7 +1240,7 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port(
se_nacl = pr_reg->pr_reg_nacl;
se_tpg = se_nacl->se_tpg;
lun = pr_reg->pr_reg_tg_pt_lun;
tfo = TPG_TFO(se_tpg);
tfo = se_tpg->se_tpg_tfo;
len += sprintf(page+len, "SPC-3 Reservation: %s"
" Target Node Endpoint: %s\n", tfo->get_fabric_name(),
......@@ -1264,13 +1274,13 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(
if (!(su_dev->se_dev_ptr))
return -ENODEV;
if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
return len;
len += sprintf(page+len, "SPC-3 PR Registrations:\n");
spin_lock(&T10_RES(su_dev)->registration_lock);
list_for_each_entry(pr_reg, &T10_RES(su_dev)->registration_list,
spin_lock(&su_dev->t10_pr.registration_lock);
list_for_each_entry(pr_reg, &su_dev->t10_pr.registration_list,
pr_reg_list) {
memset(buf, 0, 384);
......@@ -1290,7 +1300,7 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(
len += sprintf(page+len, "%s", buf);
reg_count++;
}
spin_unlock(&T10_RES(su_dev)->registration_lock);
spin_unlock(&su_dev->t10_pr.registration_lock);
if (!(reg_count))
len += sprintf(page+len, "None\n");
......@@ -1315,7 +1325,7 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_type(
if (!(dev))
return -ENODEV;
if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
return len;
spin_lock(&dev->dev_reservation_lock);
......@@ -1346,7 +1356,7 @@ static ssize_t target_core_dev_pr_show_attr_res_type(
if (!(su_dev->se_dev_ptr))
return -ENODEV;
switch (T10_RES(su_dev)->res_type) {
switch (su_dev->t10_pr.res_type) {
case SPC3_PERSISTENT_RESERVATIONS:
len = sprintf(page, "SPC3_PERSISTENT_RESERVATIONS\n");
break;
......@@ -1377,11 +1387,11 @@ static ssize_t target_core_dev_pr_show_attr_res_aptpl_active(
if (!(su_dev->se_dev_ptr))
return -ENODEV;
if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
return 0;
return sprintf(page, "APTPL Bit Status: %s\n",
(T10_RES(su_dev)->pr_aptpl_active) ? "Activated" : "Disabled");
(su_dev->t10_pr.pr_aptpl_active) ? "Activated" : "Disabled");
}
SE_DEV_PR_ATTR_RO(res_aptpl_active);
......@@ -1396,7 +1406,7 @@ static ssize_t target_core_dev_pr_show_attr_res_aptpl_metadata(
if (!(su_dev->se_dev_ptr))
return -ENODEV;
if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
return 0;
return sprintf(page, "Ready to process PR APTPL metadata..\n");
......@@ -1448,7 +1458,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
if (!(dev))
return -ENODEV;
if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
return 0;
if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
......@@ -1594,7 +1604,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
goto out;
}
ret = core_scsi3_alloc_aptpl_registration(T10_RES(su_dev), sa_res_key,
ret = core_scsi3_alloc_aptpl_registration(&su_dev->t10_pr, sa_res_key,
i_port, isid, mapped_lun, t_port, tpgt, target_lun,
res_holder, all_tg_pt, type);
out:
......@@ -1842,7 +1852,7 @@ static ssize_t target_core_show_alua_lu_gp(void *p, char *page)
if (!(dev))
return -ENODEV;
if (T10_ALUA(su_dev)->alua_type != SPC3_ALUA_EMULATED)
if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED)
return len;
lu_gp_mem = dev->dev_alua_lu_gp_mem;
......@@ -1881,7 +1891,7 @@ static ssize_t target_core_store_alua_lu_gp(
if (!(dev))
return -ENODEV;
if (T10_ALUA(su_dev)->alua_type != SPC3_ALUA_EMULATED) {
if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) {
printk(KERN_WARNING "SPC3_ALUA_EMULATED not enabled for %s/%s\n",
config_item_name(&hba->hba_group.cg_item),
config_item_name(&su_dev->se_dev_group.cg_item));
......@@ -2557,9 +2567,9 @@ static ssize_t target_core_alua_tg_pt_gp_show_attr_members(
lun = port->sep_lun;
cur_len = snprintf(buf, TG_PT_GROUP_NAME_BUF, "%s/%s/tpgt_%hu"
"/%s\n", TPG_TFO(tpg)->get_fabric_name(),
TPG_TFO(tpg)->tpg_get_wwn(tpg),
TPG_TFO(tpg)->tpg_get_tag(tpg),
"/%s\n", tpg->se_tpg_tfo->get_fabric_name(),
tpg->se_tpg_tfo->tpg_get_wwn(tpg),
tpg->se_tpg_tfo->tpg_get_tag(tpg),
config_item_name(&lun->lun_group.cg_item));
cur_len++; /* Extra byte for NULL terminator */
......@@ -2748,17 +2758,17 @@ static struct config_group *target_core_make_subdev(
" struct se_subsystem_dev\n");
goto unlock;
}
INIT_LIST_HEAD(&se_dev->g_se_dev_list);
INIT_LIST_HEAD(&se_dev->se_dev_node);
INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
INIT_LIST_HEAD(&se_dev->t10_reservation.registration_list);
INIT_LIST_HEAD(&se_dev->t10_reservation.aptpl_reg_list);
spin_lock_init(&se_dev->t10_reservation.registration_lock);
spin_lock_init(&se_dev->t10_reservation.aptpl_reg_lock);
INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
INIT_LIST_HEAD(&se_dev->t10_pr.aptpl_reg_list);
spin_lock_init(&se_dev->t10_pr.registration_lock);
spin_lock_init(&se_dev->t10_pr.aptpl_reg_lock);
INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list);
spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock);
spin_lock_init(&se_dev->se_dev_lock);
se_dev->t10_reservation.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
se_dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
se_dev->t10_wwn.t10_sub_dev = se_dev;
se_dev->t10_alua.t10_sub_dev = se_dev;
se_dev->se_dev_attrib.da_sub_dev = se_dev;
......@@ -2784,9 +2794,9 @@ static struct config_group *target_core_make_subdev(
" from allocate_virtdevice()\n");
goto out;
}
spin_lock(&se_global->g_device_lock);
list_add_tail(&se_dev->g_se_dev_list, &se_global->g_se_dev_list);
spin_unlock(&se_global->g_device_lock);
spin_lock(&se_device_lock);
list_add_tail(&se_dev->se_dev_node, &se_dev_list);
spin_unlock(&se_device_lock);
config_group_init_type_name(&se_dev->se_dev_group, name,
&target_core_dev_cit);
......@@ -2814,7 +2824,7 @@ static struct config_group *target_core_make_subdev(
if (!(tg_pt_gp))
goto out;
tg_pt_gp_cg = &T10_ALUA(se_dev)->alua_tg_pt_gps_group;
tg_pt_gp_cg = &se_dev->t10_alua.alua_tg_pt_gps_group;
tg_pt_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
GFP_KERNEL);
if (!(tg_pt_gp_cg->default_groups)) {
......@@ -2827,11 +2837,11 @@ static struct config_group *target_core_make_subdev(
"default_tg_pt_gp", &target_core_alua_tg_pt_gp_cit);
tg_pt_gp_cg->default_groups[0] = &tg_pt_gp->tg_pt_gp_group;
tg_pt_gp_cg->default_groups[1] = NULL;
T10_ALUA(se_dev)->default_tg_pt_gp = tg_pt_gp;
se_dev->t10_alua.default_tg_pt_gp = tg_pt_gp;
/*
* Add core/$HBA/$DEV/statistics/ default groups
*/
dev_stat_grp = &DEV_STAT_GRP(se_dev)->stat_group;
dev_stat_grp = &se_dev->dev_stat_grps.stat_group;
dev_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 4,
GFP_KERNEL);
if (!dev_stat_grp->default_groups) {
......@@ -2846,9 +2856,9 @@ static struct config_group *target_core_make_subdev(
mutex_unlock(&hba->hba_access_mutex);
return &se_dev->se_dev_group;
out:
if (T10_ALUA(se_dev)->default_tg_pt_gp) {
core_alua_free_tg_pt_gp(T10_ALUA(se_dev)->default_tg_pt_gp);
T10_ALUA(se_dev)->default_tg_pt_gp = NULL;
if (se_dev->t10_alua.default_tg_pt_gp) {
core_alua_free_tg_pt_gp(se_dev->t10_alua.default_tg_pt_gp);
se_dev->t10_alua.default_tg_pt_gp = NULL;
}
if (dev_stat_grp)
kfree(dev_stat_grp->default_groups);
......@@ -2881,11 +2891,11 @@ static void target_core_drop_subdev(
mutex_lock(&hba->hba_access_mutex);
t = hba->transport;
spin_lock(&se_global->g_device_lock);
list_del(&se_dev->g_se_dev_list);
spin_unlock(&se_global->g_device_lock);
spin_lock(&se_device_lock);
list_del(&se_dev->se_dev_node);
spin_unlock(&se_device_lock);
dev_stat_grp = &DEV_STAT_GRP(se_dev)->stat_group;
dev_stat_grp = &se_dev->dev_stat_grps.stat_group;
for (i = 0; dev_stat_grp->default_groups[i]; i++) {
df_item = &dev_stat_grp->default_groups[i]->cg_item;
dev_stat_grp->default_groups[i] = NULL;
......@@ -2893,7 +2903,7 @@ static void target_core_drop_subdev(
}
kfree(dev_stat_grp->default_groups);
tg_pt_gp_cg = &T10_ALUA(se_dev)->alua_tg_pt_gps_group;
tg_pt_gp_cg = &se_dev->t10_alua.alua_tg_pt_gps_group;
for (i = 0; tg_pt_gp_cg->default_groups[i]; i++) {
df_item = &tg_pt_gp_cg->default_groups[i]->cg_item;
tg_pt_gp_cg->default_groups[i] = NULL;
......@@ -2904,7 +2914,7 @@ static void target_core_drop_subdev(
* core_alua_free_tg_pt_gp() is called from ->default_tg_pt_gp
* directly from target_core_alua_tg_pt_gp_release().
*/
T10_ALUA(se_dev)->default_tg_pt_gp = NULL;
se_dev->t10_alua.default_tg_pt_gp = NULL;
dev_cg = &se_dev->se_dev_group;
for (i = 0; dev_cg->default_groups[i]; i++) {
......@@ -3130,10 +3140,9 @@ static int __init target_core_init_configfs(void)
INIT_LIST_HEAD(&g_tf_list);
mutex_init(&g_tf_lock);
init_scsi_index_table();
ret = init_se_global();
ret = init_se_kmem_caches();
if (ret < 0)
return -1;
return ret;
/*
* Create $CONFIGFS/target/core default group for HBA <-> Storage Object
* and ALUA Logical Unit Group and Target Port Group infrastructure.
......@@ -3146,29 +3155,29 @@ static int __init target_core_init_configfs(void)
goto out_global;
}
config_group_init_type_name(&se_global->target_core_hbagroup,
config_group_init_type_name(&target_core_hbagroup,
"core", &target_core_cit);
target_cg->default_groups[0] = &se_global->target_core_hbagroup;
target_cg->default_groups[0] = &target_core_hbagroup;
target_cg->default_groups[1] = NULL;
/*
* Create ALUA infrastructure under /sys/kernel/config/target/core/alua/
*/
hba_cg = &se_global->target_core_hbagroup;
hba_cg = &target_core_hbagroup;
hba_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
GFP_KERNEL);
if (!(hba_cg->default_groups)) {
printk(KERN_ERR "Unable to allocate hba_cg->default_groups\n");
goto out_global;
}
config_group_init_type_name(&se_global->alua_group,
config_group_init_type_name(&alua_group,
"alua", &target_core_alua_cit);
hba_cg->default_groups[0] = &se_global->alua_group;
hba_cg->default_groups[0] = &alua_group;
hba_cg->default_groups[1] = NULL;
/*
* Add ALUA Logical Unit Group and Target Port Group ConfigFS
* groups under /sys/kernel/config/target/core/alua/
*/
alua_cg = &se_global->alua_group;
alua_cg = &alua_group;
alua_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
GFP_KERNEL);
if (!(alua_cg->default_groups)) {
......@@ -3176,9 +3185,9 @@ static int __init target_core_init_configfs(void)
goto out_global;
}
config_group_init_type_name(&se_global->alua_lu_gps_group,
config_group_init_type_name(&alua_lu_gps_group,
"lu_gps", &target_core_alua_lu_gps_cit);
alua_cg->default_groups[0] = &se_global->alua_lu_gps_group;
alua_cg->default_groups[0] = &alua_lu_gps_group;
alua_cg->default_groups[1] = NULL;
/*
* Add core/alua/lu_gps/default_lu_gp
......@@ -3187,7 +3196,7 @@ static int __init target_core_init_configfs(void)
if (IS_ERR(lu_gp))
goto out_global;
lu_gp_cg = &se_global->alua_lu_gps_group;
lu_gp_cg = &alua_lu_gps_group;
lu_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
GFP_KERNEL);
if (!(lu_gp_cg->default_groups)) {
......@@ -3199,7 +3208,7 @@ static int __init target_core_init_configfs(void)
&target_core_alua_lu_gp_cit);
lu_gp_cg->default_groups[0] = &lu_gp->lu_gp_group;
lu_gp_cg->default_groups[1] = NULL;
se_global->default_lu_gp = lu_gp;
default_lu_gp = lu_gp;
/*
* Register the target_core_mod subsystem with configfs.
*/
......@@ -3229,9 +3238,9 @@ static int __init target_core_init_configfs(void)
core_dev_release_virtual_lun0();
rd_module_exit();
out_global:
if (se_global->default_lu_gp) {
core_alua_free_lu_gp(se_global->default_lu_gp);
se_global->default_lu_gp = NULL;
if (default_lu_gp) {
core_alua_free_lu_gp(default_lu_gp);
default_lu_gp = NULL;
}
if (lu_gp_cg)
kfree(lu_gp_cg->default_groups);
......@@ -3240,8 +3249,8 @@ static int __init target_core_init_configfs(void)
if (hba_cg)
kfree(hba_cg->default_groups);
kfree(target_cg->default_groups);
release_se_global();
return -1;
release_se_kmem_caches();
return ret;
}
static void __exit target_core_exit_configfs(void)
......@@ -3251,10 +3260,9 @@ static void __exit target_core_exit_configfs(void)
struct config_item *item;
int i;
se_global->in_shutdown = 1;
subsys = target_core_subsystem[0];
lu_gp_cg = &se_global->alua_lu_gps_group;
lu_gp_cg = &alua_lu_gps_group;
for (i = 0; lu_gp_cg->default_groups[i]; i++) {
item = &lu_gp_cg->default_groups[i]->cg_item;
lu_gp_cg->default_groups[i] = NULL;
......@@ -3263,7 +3271,7 @@ static void __exit target_core_exit_configfs(void)
kfree(lu_gp_cg->default_groups);
lu_gp_cg->default_groups = NULL;
alua_cg = &se_global->alua_group;
alua_cg = &alua_group;
for (i = 0; alua_cg->default_groups[i]; i++) {
item = &alua_cg->default_groups[i]->cg_item;
alua_cg->default_groups[i] = NULL;
......@@ -3272,7 +3280,7 @@ static void __exit target_core_exit_configfs(void)
kfree(alua_cg->default_groups);
alua_cg->default_groups = NULL;
hba_cg = &se_global->target_core_hbagroup;
hba_cg = &target_core_hbagroup;
for (i = 0; hba_cg->default_groups[i]; i++) {
item = &hba_cg->default_groups[i]->cg_item;
hba_cg->default_groups[i] = NULL;
......@@ -3287,17 +3295,15 @@ static void __exit target_core_exit_configfs(void)
configfs_unregister_subsystem(subsys);
kfree(subsys->su_group.default_groups);
core_alua_free_lu_gp(se_global->default_lu_gp);
se_global->default_lu_gp = NULL;
core_alua_free_lu_gp(default_lu_gp);
default_lu_gp = NULL;
printk(KERN_INFO "TARGET_CORE[0]: Released ConfigFS Fabric"
" Infrastructure\n");
core_dev_release_virtual_lun0();
rd_module_exit();
release_se_global();
return;
release_se_kmem_caches();
}
MODULE_DESCRIPTION("Target_Core_Mod/ConfigFS");
......
此差异已折叠。
......@@ -118,7 +118,7 @@ static int target_fabric_mappedlun_link(
lun_access = deve->lun_flags;
else
lun_access =
(TPG_TFO(se_tpg)->tpg_check_prod_mode_write_protect(
(se_tpg->se_tpg_tfo->tpg_check_prod_mode_write_protect(
se_tpg)) ? TRANSPORT_LUNFLAGS_READ_ONLY :
TRANSPORT_LUNFLAGS_READ_WRITE;
spin_unlock_irq(&lacl->se_lun_nacl->device_list_lock);
......@@ -204,7 +204,7 @@ static ssize_t target_fabric_mappedlun_store_write_protect(
printk(KERN_INFO "%s_ConfigFS: Changed Initiator ACL: %s"
" Mapped LUN: %u Write Protect bit to %s\n",
TPG_TFO(se_tpg)->get_fabric_name(),
se_tpg->se_tpg_tfo->get_fabric_name(),
lacl->initiatorname, lacl->mapped_lun, (op) ? "ON" : "OFF");
return count;
......@@ -379,7 +379,7 @@ static struct config_group *target_fabric_make_mappedlun(
lacl_cg->default_groups[0] = &lacl->ml_stat_grps.stat_group;
lacl_cg->default_groups[1] = NULL;
ml_stat_grp = &ML_STAT_GRPS(lacl)->stat_group;
ml_stat_grp = &lacl->ml_stat_grps.stat_group;
ml_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3,
GFP_KERNEL);
if (!ml_stat_grp->default_groups) {
......@@ -408,7 +408,7 @@ static void target_fabric_drop_mappedlun(
struct config_group *lacl_cg = NULL, *ml_stat_grp = NULL;
int i;
ml_stat_grp = &ML_STAT_GRPS(lacl)->stat_group;
ml_stat_grp = &lacl->ml_stat_grps.stat_group;
for (i = 0; ml_stat_grp->default_groups[i]; i++) {
df_item = &ml_stat_grp->default_groups[i]->cg_item;
ml_stat_grp->default_groups[i] = NULL;
......@@ -914,7 +914,7 @@ static struct config_group *target_fabric_make_lun(
lun_cg->default_groups[0] = &lun->port_stat_grps.stat_group;
lun_cg->default_groups[1] = NULL;
port_stat_grp = &PORT_STAT_GRP(lun)->stat_group;
port_stat_grp = &lun->port_stat_grps.stat_group;
port_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3,
GFP_KERNEL);
if (!port_stat_grp->default_groups) {
......@@ -941,7 +941,7 @@ static void target_fabric_drop_lun(
struct config_group *lun_cg, *port_stat_grp;
int i;
port_stat_grp = &PORT_STAT_GRP(lun)->stat_group;
port_stat_grp = &lun->port_stat_grps.stat_group;
for (i = 0; port_stat_grp->default_groups[i]; i++) {
df_item = &port_stat_grp->default_groups[i]->cg_item;
port_stat_grp->default_groups[i] = NULL;
......
......@@ -67,22 +67,19 @@ static int fd_attach_hba(struct se_hba *hba, u32 host_id)
fd_host = kzalloc(sizeof(struct fd_host), GFP_KERNEL);
if (!(fd_host)) {
printk(KERN_ERR "Unable to allocate memory for struct fd_host\n");
return -1;
return -ENOMEM;
}
fd_host->fd_host_id = host_id;
atomic_set(&hba->left_queue_depth, FD_HBA_QUEUE_DEPTH);
atomic_set(&hba->max_queue_depth, FD_HBA_QUEUE_DEPTH);
hba->hba_ptr = (void *) fd_host;
hba->hba_ptr = fd_host;
printk(KERN_INFO "CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
" Target Core Stack %s\n", hba->hba_id, FD_VERSION,
TARGET_CORE_MOD_VERSION);
printk(KERN_INFO "CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic"
" Target Core with TCQ Depth: %d MaxSectors: %u\n",
hba->hba_id, fd_host->fd_host_id,
atomic_read(&hba->max_queue_depth), FD_MAX_SECTORS);
" MaxSectors: %u\n",
hba->hba_id, fd_host->fd_host_id, FD_MAX_SECTORS);
return 0;
}
......@@ -282,7 +279,7 @@ fd_alloc_task(struct se_cmd *cmd)
return NULL;
}
fd_req->fd_dev = SE_DEV(cmd)->dev_ptr;
fd_req->fd_dev = cmd->se_lun->lun_se_dev->dev_ptr;
return &fd_req->fd_task;
}
......@@ -294,13 +291,14 @@ static int fd_do_readv(struct se_task *task)
struct scatterlist *sg = task->task_sg;
struct iovec *iov;
mm_segment_t old_fs;
loff_t pos = (task->task_lba * DEV_ATTRIB(task->se_dev)->block_size);
loff_t pos = (task->task_lba *
task->se_dev->se_sub_dev->se_dev_attrib.block_size);
int ret = 0, i;
iov = kzalloc(sizeof(struct iovec) * task->task_sg_num, GFP_KERNEL);
if (!(iov)) {
printk(KERN_ERR "Unable to allocate fd_do_readv iov[]\n");
return -1;
return -ENOMEM;
}
for (i = 0; i < task->task_sg_num; i++) {
......@@ -324,13 +322,13 @@ static int fd_do_readv(struct se_task *task)
printk(KERN_ERR "vfs_readv() returned %d,"
" expecting %d for S_ISBLK\n", ret,
(int)task->task_size);
return -1;
return (ret < 0 ? ret : -EINVAL);
}
} else {
if (ret < 0) {
printk(KERN_ERR "vfs_readv() returned %d for non"
" S_ISBLK\n", ret);
return -1;
return ret;
}
}
......@@ -344,13 +342,14 @@ static int fd_do_writev(struct se_task *task)
struct scatterlist *sg = task->task_sg;
struct iovec *iov;
mm_segment_t old_fs;
loff_t pos = (task->task_lba * DEV_ATTRIB(task->se_dev)->block_size);
loff_t pos = (task->task_lba *
task->se_dev->se_sub_dev->se_dev_attrib.block_size);
int ret, i = 0;
iov = kzalloc(sizeof(struct iovec) * task->task_sg_num, GFP_KERNEL);
if (!(iov)) {
printk(KERN_ERR "Unable to allocate fd_do_writev iov[]\n");
return -1;
return -ENOMEM;
}
for (i = 0; i < task->task_sg_num; i++) {
......@@ -367,7 +366,7 @@ static int fd_do_writev(struct se_task *task)
if (ret < 0 || ret != task->task_size) {
printk(KERN_ERR "vfs_writev() returned %d\n", ret);
return -1;
return (ret < 0 ? ret : -EINVAL);
}
return 1;
......@@ -375,7 +374,7 @@ static int fd_do_writev(struct se_task *task)
static void fd_emulate_sync_cache(struct se_task *task)
{
struct se_cmd *cmd = TASK_CMD(task);
struct se_cmd *cmd = task->task_se_cmd;
struct se_device *dev = cmd->se_dev;
struct fd_dev *fd_dev = dev->dev_ptr;
int immed = (cmd->t_task->t_task_cdb[1] & 0x2);
......@@ -396,7 +395,7 @@ static void fd_emulate_sync_cache(struct se_task *task)
start = 0;
end = LLONG_MAX;
} else {
start = cmd->t_task->t_task_lba * DEV_ATTRIB(dev)->block_size;
start = cmd->t_task->t_task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
if (cmd->data_length)
end = start + cmd->data_length;
else
......@@ -446,7 +445,7 @@ static void fd_emulate_write_fua(struct se_cmd *cmd, struct se_task *task)
{
struct se_device *dev = cmd->se_dev;
struct fd_dev *fd_dev = dev->dev_ptr;
loff_t start = task->task_lba * DEV_ATTRIB(dev)->block_size;
loff_t start = task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
loff_t end = start + task->task_size;
int ret;
......@@ -474,9 +473,9 @@ static int fd_do_task(struct se_task *task)
ret = fd_do_writev(task);
if (ret > 0 &&
DEV_ATTRIB(dev)->emulate_write_cache > 0 &&
DEV_ATTRIB(dev)->emulate_fua_write > 0 &&
T_TASK(cmd)->t_tasks_fua) {
dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 &&
dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
cmd->t_task->t_tasks_fua) {
/*
* We might need to be a bit smarter here
* and return some sense data to let the initiator
......@@ -599,7 +598,7 @@ static ssize_t fd_check_configfs_dev_params(struct se_hba *hba, struct se_subsys
if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
printk(KERN_ERR "Missing fd_dev_name=\n");
return -1;
return -EINVAL;
}
return 0;
......@@ -654,7 +653,7 @@ static sector_t fd_get_blocks(struct se_device *dev)
{
struct fd_dev *fd_dev = dev->dev_ptr;
unsigned long long blocks_long = div_u64(fd_dev->fd_dev_size,
DEV_ATTRIB(dev)->block_size);
dev->se_sub_dev->se_dev_attrib.block_size);
return blocks_long;
}
......
......@@ -4,8 +4,6 @@
#define FD_VERSION "4.0"
#define FD_MAX_DEV_NAME 256
/* Maximum queuedepth for the FILEIO HBA */
#define FD_HBA_QUEUE_DEPTH 256
#define FD_DEVICE_QUEUE_DEPTH 32
#define FD_MAX_DEVICE_QUEUE_DEPTH 128
#define FD_BLOCKSIZE 512
......
/*******************************************************************************
* Filename: target_core_hba.c
*
* This file copntains the iSCSI HBA Transport related functions.
* This file contains the TCM HBA Transport related functions.
*
* Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
* Copyright (c) 2005, 2006, 2007 SBE, Inc.
......@@ -45,6 +45,11 @@
static LIST_HEAD(subsystem_list);
static DEFINE_MUTEX(subsystem_mutex);
static u32 hba_id_counter;
static DEFINE_SPINLOCK(hba_lock);
static LIST_HEAD(hba_list);
int transport_subsystem_register(struct se_subsystem_api *sub_api)
{
struct se_subsystem_api *s;
......@@ -110,15 +115,11 @@ core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags)
INIT_LIST_HEAD(&hba->hba_dev_list);
spin_lock_init(&hba->device_lock);
spin_lock_init(&hba->hba_queue_lock);
mutex_init(&hba->hba_access_mutex);
hba->hba_index = scsi_get_new_index(SCSI_INST_INDEX);
hba->hba_flags |= hba_flags;
atomic_set(&hba->max_queue_depth, 0);
atomic_set(&hba->left_queue_depth, 0);
hba->transport = core_get_backend(plugin_name);
if (!hba->transport) {
ret = -EINVAL;
......@@ -129,10 +130,10 @@ core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags)
if (ret < 0)
goto out_module_put;
spin_lock(&se_global->hba_lock);
hba->hba_id = se_global->g_hba_id_counter++;
list_add_tail(&hba->hba_list, &se_global->g_hba_list);
spin_unlock(&se_global->hba_lock);
spin_lock(&hba_lock);
hba->hba_id = hba_id_counter++;
list_add_tail(&hba->hba_node, &hba_list);
spin_unlock(&hba_lock);
printk(KERN_INFO "CORE_HBA[%d] - Attached HBA to Generic Target"
" Core\n", hba->hba_id);
......@@ -156,9 +157,9 @@ core_delete_hba(struct se_hba *hba)
hba->transport->detach_hba(hba);
spin_lock(&se_global->hba_lock);
list_del(&hba->hba_list);
spin_unlock(&se_global->hba_lock);
spin_lock(&hba_lock);
list_del(&hba->hba_node);
spin_unlock(&hba_lock);
printk(KERN_INFO "CORE_HBA[%d] - Detached HBA from Generic Target"
" Core\n", hba->hba_id);
......
......@@ -74,17 +74,14 @@ static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
ib_host->iblock_host_id = host_id;
atomic_set(&hba->left_queue_depth, IBLOCK_HBA_QUEUE_DEPTH);
atomic_set(&hba->max_queue_depth, IBLOCK_HBA_QUEUE_DEPTH);
hba->hba_ptr = (void *) ib_host;
printk(KERN_INFO "CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
" Generic Target Core Stack %s\n", hba->hba_id,
IBLOCK_VERSION, TARGET_CORE_MOD_VERSION);
printk(KERN_INFO "CORE_HBA[%d] - Attached iBlock HBA: %u to Generic"
" Target Core TCQ Depth: %d\n", hba->hba_id,
ib_host->iblock_host_id, atomic_read(&hba->max_queue_depth));
printk(KERN_INFO "CORE_HBA[%d] - Attached iBlock HBA: %u to Generic\n",
hba->hba_id, ib_host->iblock_host_id);
return 0;
}
......@@ -188,15 +185,15 @@ static struct se_device *iblock_create_virtdevice(
* in ATA and we need to set TPE=1
*/
if (blk_queue_discard(q)) {
DEV_ATTRIB(dev)->max_unmap_lba_count =
dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count =
q->limits.max_discard_sectors;
/*
* Currently hardcoded to 1 in Linux/SCSI code..
*/
DEV_ATTRIB(dev)->max_unmap_block_desc_count = 1;
DEV_ATTRIB(dev)->unmap_granularity =
dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = 1;
dev->se_sub_dev->se_dev_attrib.unmap_granularity =
q->limits.discard_granularity;
DEV_ATTRIB(dev)->unmap_granularity_alignment =
dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment =
q->limits.discard_alignment;
printk(KERN_INFO "IBLOCK: BLOCK Discard support available,"
......@@ -243,7 +240,7 @@ iblock_alloc_task(struct se_cmd *cmd)
return NULL;
}
ib_req->ib_dev = SE_DEV(cmd)->dev_ptr;
ib_req->ib_dev = cmd->se_lun->lun_se_dev->dev_ptr;
atomic_set(&ib_req->ib_bio_cnt, 0);
return &ib_req->ib_task;
}
......@@ -257,12 +254,12 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
bdev_logical_block_size(bd)) - 1);
u32 block_size = bdev_logical_block_size(bd);
if (block_size == DEV_ATTRIB(dev)->block_size)
if (block_size == dev->se_sub_dev->se_dev_attrib.block_size)
return blocks_long;
switch (block_size) {
case 4096:
switch (DEV_ATTRIB(dev)->block_size) {
switch (dev->se_sub_dev->se_dev_attrib.block_size) {
case 2048:
blocks_long <<= 1;
break;
......@@ -276,7 +273,7 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
}
break;
case 2048:
switch (DEV_ATTRIB(dev)->block_size) {
switch (dev->se_sub_dev->se_dev_attrib.block_size) {
case 4096:
blocks_long >>= 1;
break;
......@@ -291,7 +288,7 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
}
break;
case 1024:
switch (DEV_ATTRIB(dev)->block_size) {
switch (dev->se_sub_dev->se_dev_attrib.block_size) {
case 4096:
blocks_long >>= 2;
break;
......@@ -306,7 +303,7 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
}
break;
case 512:
switch (DEV_ATTRIB(dev)->block_size) {
switch (dev->se_sub_dev->se_dev_attrib.block_size) {
case 4096:
blocks_long >>= 3;
break;
......@@ -332,9 +329,9 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
*/
static void iblock_emulate_sync_cache(struct se_task *task)
{
struct se_cmd *cmd = TASK_CMD(task);
struct se_cmd *cmd = task->task_se_cmd;
struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
int immed = (T_TASK(cmd)->t_task_cdb[1] & 0x2);
int immed = (cmd->t_task->t_task_cdb[1] & 0x2);
sector_t error_sector;
int ret;
......@@ -401,9 +398,9 @@ static int iblock_do_task(struct se_task *task)
* Force data to disk if we pretend to not have a volatile
* write cache, or the initiator set the Force Unit Access bit.
*/
if (DEV_ATTRIB(dev)->emulate_write_cache == 0 ||
(DEV_ATTRIB(dev)->emulate_fua_write > 0 &&
T_TASK(task->task_se_cmd)->t_tasks_fua))
if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 ||
(dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
task->task_se_cmd->t_task->t_tasks_fua))
rw = WRITE_FUA;
else
rw = WRITE;
......@@ -527,7 +524,7 @@ static ssize_t iblock_check_configfs_dev_params(
if (!(ibd->ibd_flags & IBDF_HAS_UDEV_PATH)) {
printk(KERN_ERR "Missing udev_path= parameters for IBLOCK\n");
return -1;
return -EINVAL;
}
return 0;
......@@ -611,7 +608,7 @@ static struct bio *iblock_get_bio(
static int iblock_map_task_SG(struct se_task *task)
{
struct se_cmd *cmd = task->task_se_cmd;
struct se_device *dev = SE_DEV(cmd);
struct se_device *dev = cmd->se_lun->lun_se_dev;
struct iblock_dev *ib_dev = task->se_dev->dev_ptr;
struct iblock_req *ib_req = IBLOCK_REQ(task);
struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
......@@ -623,17 +620,17 @@ static int iblock_map_task_SG(struct se_task *task)
* Do starting conversion up from non 512-byte blocksize with
* struct se_task SCSI blocksize into Linux/Block 512 units for BIO.
*/
if (DEV_ATTRIB(dev)->block_size == 4096)
if (dev->se_sub_dev->se_dev_attrib.block_size == 4096)
block_lba = (task->task_lba << 3);
else if (DEV_ATTRIB(dev)->block_size == 2048)
else if (dev->se_sub_dev->se_dev_attrib.block_size == 2048)
block_lba = (task->task_lba << 2);
else if (DEV_ATTRIB(dev)->block_size == 1024)
else if (dev->se_sub_dev->se_dev_attrib.block_size == 1024)
block_lba = (task->task_lba << 1);
else if (DEV_ATTRIB(dev)->block_size == 512)
else if (dev->se_sub_dev->se_dev_attrib.block_size == 512)
block_lba = task->task_lba;
else {
printk(KERN_ERR "Unsupported SCSI -> BLOCK LBA conversion:"
" %u\n", DEV_ATTRIB(dev)->block_size);
" %u\n", dev->se_sub_dev->se_dev_attrib.block_size);
return PYX_TRANSPORT_LU_COMM_FAILURE;
}
......
......@@ -3,7 +3,6 @@
#define IBLOCK_VERSION "4.0"
#define IBLOCK_HBA_QUEUE_DEPTH 512
#define IBLOCK_DEVICE_QUEUE_DEPTH 32
#define IBLOCK_MAX_DEVICE_QUEUE_DEPTH 128
#define IBLOCK_MAX_CDBS 16
......
此差异已折叠。
......@@ -49,7 +49,7 @@ extern int core_pr_dump_initiator_port(struct t10_pr_registration *,
char *, u32);
extern int core_scsi2_emulate_crh(struct se_cmd *);
extern int core_scsi3_alloc_aptpl_registration(
struct t10_reservation_template *, u64,
struct t10_reservation *, u64,
unsigned char *, unsigned char *, u32,
unsigned char *, u16, u32, int, int, u8);
extern int core_scsi3_check_aptpl_registration(struct se_device *,
......
......@@ -55,24 +55,6 @@ static struct se_subsystem_api pscsi_template;
static void pscsi_req_done(struct request *, int);
/* pscsi_get_sh():
*
*
*/
static struct Scsi_Host *pscsi_get_sh(u32 host_no)
{
struct Scsi_Host *sh = NULL;
sh = scsi_host_lookup(host_no);
if (IS_ERR(sh)) {
printk(KERN_ERR "Unable to locate SCSI HBA with Host ID:"
" %u\n", host_no);
return NULL;
}
return sh;
}
/* pscsi_attach_hba():
*
* pscsi_get_sh() used scsi_host_lookup() to locate struct Scsi_Host.
......@@ -80,28 +62,23 @@ static struct Scsi_Host *pscsi_get_sh(u32 host_no)
*/
static int pscsi_attach_hba(struct se_hba *hba, u32 host_id)
{
int hba_depth;
struct pscsi_hba_virt *phv;
phv = kzalloc(sizeof(struct pscsi_hba_virt), GFP_KERNEL);
if (!(phv)) {
printk(KERN_ERR "Unable to allocate struct pscsi_hba_virt\n");
return -1;
return -ENOMEM;
}
phv->phv_host_id = host_id;
phv->phv_mode = PHV_VIRUTAL_HOST_ID;
hba_depth = PSCSI_VIRTUAL_HBA_DEPTH;
atomic_set(&hba->left_queue_depth, hba_depth);
atomic_set(&hba->max_queue_depth, hba_depth);
hba->hba_ptr = (void *)phv;
printk(KERN_INFO "CORE_HBA[%d] - TCM SCSI HBA Driver %s on"
" Generic Target Core Stack %s\n", hba->hba_id,
PSCSI_VERSION, TARGET_CORE_MOD_VERSION);
printk(KERN_INFO "CORE_HBA[%d] - Attached SCSI HBA to Generic"
" Target Core with TCQ Depth: %d\n", hba->hba_id,
atomic_read(&hba->max_queue_depth));
printk(KERN_INFO "CORE_HBA[%d] - Attached SCSI HBA to Generic\n",
hba->hba_id);
return 0;
}
......@@ -130,7 +107,6 @@ static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag)
{
struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)hba->hba_ptr;
struct Scsi_Host *sh = phv->phv_lld_host;
int hba_depth = PSCSI_VIRTUAL_HBA_DEPTH;
/*
* Release the struct Scsi_Host
*/
......@@ -140,8 +116,6 @@ static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag)
phv->phv_lld_host = NULL;
phv->phv_mode = PHV_VIRUTAL_HOST_ID;
atomic_set(&hba->left_queue_depth, hba_depth);
atomic_set(&hba->max_queue_depth, hba_depth);
printk(KERN_INFO "CORE_HBA[%d] - Disabled pSCSI HBA Passthrough"
" %s\n", hba->hba_id, (sh->hostt->name) ?
......@@ -154,22 +128,12 @@ static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag)
* Otherwise, locate struct Scsi_Host from the original passed
* pSCSI Host ID and enable for phba mode
*/
sh = pscsi_get_sh(phv->phv_host_id);
if (!(sh)) {
sh = scsi_host_lookup(phv->phv_host_id);
if (IS_ERR(sh)) {
printk(KERN_ERR "pSCSI: Unable to locate SCSI Host for"
" phv_host_id: %d\n", phv->phv_host_id);
return -1;
return PTR_ERR(sh);
}
/*
* Usually the SCSI LLD will use the hostt->can_queue value to define
* its HBA TCQ depth. Some other drivers (like 2.6 megaraid) don't set
* this at all and set sh->can_queue at runtime.
*/
hba_depth = (sh->hostt->can_queue > sh->can_queue) ?
sh->hostt->can_queue : sh->can_queue;
atomic_set(&hba->left_queue_depth, hba_depth);
atomic_set(&hba->max_queue_depth, hba_depth);
phv->phv_lld_host = sh;
phv->phv_mode = PHV_LLD_SCSI_HOST_NO;
......@@ -236,7 +200,7 @@ pscsi_get_inquiry_vpd_serial(struct scsi_device *sdev, struct t10_wwn *wwn)
buf = kzalloc(INQUIRY_VPD_SERIAL_LEN, GFP_KERNEL);
if (!buf)
return -1;
return -ENOMEM;
memset(cdb, 0, MAX_COMMAND_SIZE);
cdb[0] = INQUIRY;
......@@ -259,7 +223,7 @@ pscsi_get_inquiry_vpd_serial(struct scsi_device *sdev, struct t10_wwn *wwn)
out_free:
kfree(buf);
return -1;
return -EPERM;
}
static void
......@@ -601,11 +565,11 @@ static struct se_device *pscsi_create_virtdevice(
hba->hba_flags |= HBA_FLAGS_PSCSI_MODE;
sh = phv->phv_lld_host;
} else {
sh = pscsi_get_sh(pdv->pdv_host_id);
if (!(sh)) {
sh = scsi_host_lookup(pdv->pdv_host_id);
if (IS_ERR(sh)) {
printk(KERN_ERR "pSCSI: Unable to locate"
" pdv_host_id: %d\n", pdv->pdv_host_id);
return ERR_PTR(-ENODEV);
return (struct se_device *) sh;
}
}
} else {
......@@ -728,13 +692,12 @@ static int pscsi_transport_complete(struct se_task *task)
*/
if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) &&
(status_byte(result) << 1) == SAM_STAT_GOOD) {
if (!TASK_CMD(task)->se_deve)
if (!task->task_se_cmd->se_deve)
goto after_mode_sense;
if (TASK_CMD(task)->se_deve->lun_flags &
if (task->task_se_cmd->se_deve->lun_flags &
TRANSPORT_LUNFLAGS_READ_ONLY) {
unsigned char *buf = (unsigned char *)
T_TASK(task->task_se_cmd)->t_task_buf;
unsigned char *buf = task->task_se_cmd->t_task->t_task_buf;
if (cdb[0] == MODE_SENSE_10) {
if (!(buf[3] & 0x80))
......@@ -800,7 +763,7 @@ static struct se_task *
pscsi_alloc_task(struct se_cmd *cmd)
{
struct pscsi_plugin_task *pt;
unsigned char *cdb = T_TASK(cmd)->t_task_cdb;
unsigned char *cdb = cmd->t_task->t_task_cdb;
pt = kzalloc(sizeof(struct pscsi_plugin_task), GFP_KERNEL);
if (!pt) {
......@@ -813,7 +776,7 @@ pscsi_alloc_task(struct se_cmd *cmd)
* allocate the extended CDB buffer for per struct se_task context
* pt->pscsi_cdb now.
*/
if (T_TASK(cmd)->t_task_cdb != T_TASK(cmd)->__t_task_cdb) {
if (cmd->t_task->t_task_cdb != cmd->t_task->__t_task_cdb) {
pt->pscsi_cdb = kzalloc(scsi_command_size(cdb), GFP_KERNEL);
if (!(pt->pscsi_cdb)) {
......@@ -926,7 +889,7 @@ static void pscsi_free_task(struct se_task *task)
* Release the extended CDB allocation from pscsi_alloc_task()
* if one exists.
*/
if (T_TASK(cmd)->t_task_cdb != T_TASK(cmd)->__t_task_cdb)
if (cmd->t_task->t_task_cdb != cmd->t_task->__t_task_cdb)
kfree(pt->pscsi_cdb);
/*
* We do not release the bio(s) here associated with this task, as
......@@ -1030,7 +993,7 @@ static ssize_t pscsi_check_configfs_dev_params(
!(pdv->pdv_flags & PDF_HAS_LUN_ID)) {
printk(KERN_ERR "Missing scsi_channel_id=, scsi_target_id= and"
" scsi_lun_id= parameters\n");
return -1;
return -EINVAL;
}
return 0;
......@@ -1291,7 +1254,7 @@ static int pscsi_map_task_SG(struct se_task *task)
*/
static int pscsi_map_task_non_SG(struct se_task *task)
{
struct se_cmd *cmd = TASK_CMD(task);
struct se_cmd *cmd = task->task_se_cmd;
struct pscsi_plugin_task *pt = PSCSI_TASK(task);
struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr;
int ret = 0;
......@@ -1303,7 +1266,7 @@ static int pscsi_map_task_non_SG(struct se_task *task)
return 0;
ret = blk_rq_map_kern(pdv->pdv_sd->request_queue,
pt->pscsi_req, T_TASK(cmd)->t_task_buf,
pt->pscsi_req, cmd->t_task->t_task_buf,
task->task_size, GFP_KERNEL);
if (ret < 0) {
printk(KERN_ERR "PSCSI: blk_rq_map_kern() failed: %d\n", ret);
......@@ -1400,13 +1363,11 @@ static inline void pscsi_process_SAM_status(
pt->pscsi_result);
task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
task->task_error_status = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
TASK_CMD(task)->transport_error_status =
task->task_se_cmd->transport_error_status =
PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
transport_complete_task(task, 0);
break;
}
return;
}
static void pscsi_req_done(struct request *req, int uptodate)
......
......@@ -2,7 +2,6 @@
#define TARGET_CORE_PSCSI_H
#define PSCSI_VERSION "v4.0"
#define PSCSI_VIRTUAL_HBA_DEPTH 2048
/* used in pscsi_find_alloc_len() */
#ifndef INQUIRY_DATA_SIZE
......
......@@ -66,17 +66,14 @@ static int rd_attach_hba(struct se_hba *hba, u32 host_id)
rd_host->rd_host_id = host_id;
atomic_set(&hba->left_queue_depth, RD_HBA_QUEUE_DEPTH);
atomic_set(&hba->max_queue_depth, RD_HBA_QUEUE_DEPTH);
hba->hba_ptr = (void *) rd_host;
printk(KERN_INFO "CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
" Generic Target Core Stack %s\n", hba->hba_id,
RD_HBA_VERSION, TARGET_CORE_MOD_VERSION);
printk(KERN_INFO "CORE_HBA[%d] - Attached Ramdisk HBA: %u to Generic"
" Target Core TCQ Depth: %d MaxSectors: %u\n", hba->hba_id,
rd_host->rd_host_id, atomic_read(&hba->max_queue_depth),
RD_MAX_SECTORS);
" MaxSectors: %u\n", hba->hba_id,
rd_host->rd_host_id, RD_MAX_SECTORS);
return 0;
}
......@@ -339,7 +336,7 @@ rd_alloc_task(struct se_cmd *cmd)
printk(KERN_ERR "Unable to allocate struct rd_request\n");
return NULL;
}
rd_req->rd_dev = SE_DEV(cmd)->dev_ptr;
rd_req->rd_dev = cmd->se_lun->lun_se_dev->dev_ptr;
return &rd_req->rd_task;
}
......@@ -383,7 +380,7 @@ static int rd_MEMCPY_read(struct rd_request *req)
table = rd_get_sg_table(dev, req->rd_page);
if (!(table))
return -1;
return -EINVAL;
table_sg_end = (table->page_end_offset - req->rd_page);
sg_d = task->task_sg;
......@@ -481,7 +478,7 @@ static int rd_MEMCPY_read(struct rd_request *req)
#endif
table = rd_get_sg_table(dev, req->rd_page);
if (!(table))
return -1;
return -EINVAL;
sg_s = &table->sg_table[j = 0];
}
......@@ -506,7 +503,7 @@ static int rd_MEMCPY_write(struct rd_request *req)
table = rd_get_sg_table(dev, req->rd_page);
if (!(table))
return -1;
return -EINVAL;
table_sg_end = (table->page_end_offset - req->rd_page);
sg_d = &table->sg_table[req->rd_page - table->page_start_offset];
......@@ -604,7 +601,7 @@ static int rd_MEMCPY_write(struct rd_request *req)
#endif
table = rd_get_sg_table(dev, req->rd_page);
if (!(table))
return -1;
return -EINVAL;
sg_d = &table->sg_table[j = 0];
}
......@@ -623,11 +620,11 @@ static int rd_MEMCPY_do_task(struct se_task *task)
unsigned long long lba;
int ret;
req->rd_page = (task->task_lba * DEV_ATTRIB(dev)->block_size) / PAGE_SIZE;
req->rd_page = (task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size) / PAGE_SIZE;
lba = task->task_lba;
req->rd_offset = (do_div(lba,
(PAGE_SIZE / DEV_ATTRIB(dev)->block_size))) *
DEV_ATTRIB(dev)->block_size;
(PAGE_SIZE / dev->se_sub_dev->se_dev_attrib.block_size))) *
dev->se_sub_dev->se_dev_attrib.block_size;
req->rd_size = task->task_size;
if (task->task_data_direction == DMA_FROM_DEVICE)
......@@ -664,7 +661,7 @@ static int rd_DIRECT_with_offset(
table = rd_get_sg_table(dev, req->rd_page);
if (!(table))
return -1;
return -EINVAL;
table_sg_end = (table->page_end_offset - req->rd_page);
sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
......@@ -678,7 +675,7 @@ static int rd_DIRECT_with_offset(
se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
if (!(se_mem)) {
printk(KERN_ERR "Unable to allocate struct se_mem\n");
return -1;
return -ENOMEM;
}
INIT_LIST_HEAD(&se_mem->se_list);
......@@ -734,13 +731,13 @@ static int rd_DIRECT_with_offset(
#endif
table = rd_get_sg_table(dev, req->rd_page);
if (!(table))
return -1;
return -EINVAL;
sg_s = &table->sg_table[j = 0];
}
out:
T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt;
task->task_se_cmd->t_task->t_tasks_se_num += *se_mem_cnt;
#ifdef DEBUG_RAMDISK_DR
printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n",
*se_mem_cnt);
......@@ -767,7 +764,7 @@ static int rd_DIRECT_without_offset(
table = rd_get_sg_table(dev, req->rd_page);
if (!(table))
return -1;
return -EINVAL;
sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
#ifdef DEBUG_RAMDISK_DR
......@@ -780,7 +777,7 @@ static int rd_DIRECT_without_offset(
se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
if (!(se_mem)) {
printk(KERN_ERR "Unable to allocate struct se_mem\n");
return -1;
return -ENOMEM;
}
INIT_LIST_HEAD(&se_mem->se_list);
......@@ -816,13 +813,13 @@ static int rd_DIRECT_without_offset(
#endif
table = rd_get_sg_table(dev, req->rd_page);
if (!(table))
return -1;
return -EINVAL;
sg_s = &table->sg_table[j = 0];
}
out:
T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt;
task->task_se_cmd->t_task->t_tasks_se_num += *se_mem_cnt;
#ifdef DEBUG_RAMDISK_DR
printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n",
*se_mem_cnt);
......@@ -848,13 +845,11 @@ static int rd_DIRECT_do_se_mem_map(
u32 task_offset = *task_offset_in;
unsigned long long lba;
int ret;
int block_size = task->se_dev->se_sub_dev->se_dev_attrib.block_size;
req->rd_page = ((task->task_lba * DEV_ATTRIB(task->se_dev)->block_size) /
PAGE_SIZE);
lba = task->task_lba;
req->rd_offset = (do_div(lba,
(PAGE_SIZE / DEV_ATTRIB(task->se_dev)->block_size))) *
DEV_ATTRIB(task->se_dev)->block_size;
req->rd_page = ((task->task_lba * block_size) / PAGE_SIZE);
req->rd_offset = (do_div(lba, (PAGE_SIZE / block_size))) * block_size;
req->rd_size = task->task_size;
if (req->rd_offset)
......@@ -867,7 +862,7 @@ static int rd_DIRECT_do_se_mem_map(
if (ret < 0)
return ret;
if (CMD_TFO(cmd)->task_sg_chaining == 0)
if (cmd->se_tfo->task_sg_chaining == 0)
return 0;
/*
* Currently prevent writers from multiple HW fabrics doing
......@@ -876,7 +871,7 @@ static int rd_DIRECT_do_se_mem_map(
if (cmd->data_direction == DMA_TO_DEVICE) {
printk(KERN_ERR "DMA_TO_DEVICE not supported for"
" RAMDISK_DR with task_sg_chaining=1\n");
return -1;
return -ENOSYS;
}
/*
* Special case for if task_sg_chaining is enabled, then
......@@ -884,14 +879,15 @@ static int rd_DIRECT_do_se_mem_map(
* transport_do_task_sg_chain() for creating chainged SGLs
* across multiple struct se_task->task_sg[].
*/
if (!(transport_calc_sg_num(task,
list_entry(T_TASK(cmd)->t_mem_list->next,
ret = transport_init_task_sg(task,
list_entry(cmd->t_task->t_mem_list->next,
struct se_mem, se_list),
task_offset)))
return -1;
task_offset);
if (ret <= 0)
return ret;
return transport_map_mem_to_sg(task, se_mem_list, task->task_sg,
list_entry(T_TASK(cmd)->t_mem_list->next,
list_entry(cmd->t_task->t_mem_list->next,
struct se_mem, se_list),
out_se_mem, se_mem_cnt, task_offset_in);
}
......@@ -975,7 +971,7 @@ static ssize_t rd_check_configfs_dev_params(struct se_hba *hba, struct se_subsys
if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
printk(KERN_INFO "Missing rd_pages= parameter\n");
return -1;
return -EINVAL;
}
return 0;
......@@ -1021,7 +1017,7 @@ static sector_t rd_get_blocks(struct se_device *dev)
{
struct rd_dev *rd_dev = dev->dev_ptr;
unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
DEV_ATTRIB(dev)->block_size) - 1;
dev->se_sub_dev->se_dev_attrib.block_size) - 1;
return blocks_long;
}
......
......@@ -7,8 +7,6 @@
/* Largest piece of memory kmalloc can allocate */
#define RD_MAX_ALLOCATION_SIZE 65536
/* Maximum queuedepth for the Ramdisk HBA */
#define RD_HBA_QUEUE_DEPTH 256
#define RD_DEVICE_QUEUE_DEPTH 32
#define RD_MAX_DEVICE_QUEUE_DEPTH 128
#define RD_BLOCKSIZE 512
......
......@@ -402,8 +402,8 @@ static ssize_t target_stat_scsi_lu_show_attr_lu_name(
return -ENODEV;
/* scsiLuWwnName */
return snprintf(page, PAGE_SIZE, "%s\n",
(strlen(DEV_T10_WWN(dev)->unit_serial)) ?
(char *)&DEV_T10_WWN(dev)->unit_serial[0] : "None");
(strlen(dev->se_sub_dev->t10_wwn.unit_serial)) ?
dev->se_sub_dev->t10_wwn.unit_serial : "None");
}
DEV_STAT_SCSI_LU_ATTR_RO(lu_name);
......@@ -413,17 +413,17 @@ static ssize_t target_stat_scsi_lu_show_attr_vend(
struct se_subsystem_dev *se_subdev = container_of(sgrps,
struct se_subsystem_dev, dev_stat_grps);
struct se_device *dev = se_subdev->se_dev_ptr;
int j;
char str[28];
int i;
char str[sizeof(dev->se_sub_dev->t10_wwn.vendor)+1];
if (!dev)
return -ENODEV;
/* scsiLuVendorId */
memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28);
for (j = 0; j < 8; j++)
str[j] = ISPRINT(DEV_T10_WWN(dev)->vendor[j]) ?
DEV_T10_WWN(dev)->vendor[j] : 0x20;
str[8] = 0;
for (i = 0; i < sizeof(dev->se_sub_dev->t10_wwn.vendor); i++)
str[i] = ISPRINT(dev->se_sub_dev->t10_wwn.vendor[i]) ?
dev->se_sub_dev->t10_wwn.vendor[i] : ' ';
str[i] = '\0';
return snprintf(page, PAGE_SIZE, "%s\n", str);
}
DEV_STAT_SCSI_LU_ATTR_RO(vend);
......@@ -434,18 +434,17 @@ static ssize_t target_stat_scsi_lu_show_attr_prod(
struct se_subsystem_dev *se_subdev = container_of(sgrps,
struct se_subsystem_dev, dev_stat_grps);
struct se_device *dev = se_subdev->se_dev_ptr;
int j;
char str[28];
int i;
char str[sizeof(dev->se_sub_dev->t10_wwn.model)+1];
if (!dev)
return -ENODEV;
/* scsiLuProductId */
memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28);
for (j = 0; j < 16; j++)
str[j] = ISPRINT(DEV_T10_WWN(dev)->model[j]) ?
DEV_T10_WWN(dev)->model[j] : 0x20;
str[16] = 0;
for (i = 0; i < sizeof(dev->se_sub_dev->t10_wwn.vendor); i++)
str[i] = ISPRINT(dev->se_sub_dev->t10_wwn.model[i]) ?
dev->se_sub_dev->t10_wwn.model[i] : ' ';
str[i] = '\0';
return snprintf(page, PAGE_SIZE, "%s\n", str);
}
DEV_STAT_SCSI_LU_ATTR_RO(prod);
......@@ -456,18 +455,17 @@ static ssize_t target_stat_scsi_lu_show_attr_rev(
struct se_subsystem_dev *se_subdev = container_of(sgrps,
struct se_subsystem_dev, dev_stat_grps);
struct se_device *dev = se_subdev->se_dev_ptr;
int j;
char str[28];
int i;
char str[sizeof(dev->se_sub_dev->t10_wwn.revision)+1];
if (!dev)
return -ENODEV;
/* scsiLuRevisionId */
memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28);
for (j = 0; j < 4; j++)
str[j] = ISPRINT(DEV_T10_WWN(dev)->revision[j]) ?
DEV_T10_WWN(dev)->revision[j] : 0x20;
str[4] = 0;
for (i = 0; i < sizeof(dev->se_sub_dev->t10_wwn.revision); i++)
str[i] = ISPRINT(dev->se_sub_dev->t10_wwn.revision[i]) ?
dev->se_sub_dev->t10_wwn.revision[i] : ' ';
str[i] = '\0';
return snprintf(page, PAGE_SIZE, "%s\n", str);
}
DEV_STAT_SCSI_LU_ATTR_RO(rev);
......@@ -484,7 +482,7 @@ static ssize_t target_stat_scsi_lu_show_attr_dev_type(
/* scsiLuPeripheralType */
return snprintf(page, PAGE_SIZE, "%u\n",
TRANSPORT(dev)->get_device_type(dev));
dev->transport->get_device_type(dev));
}
DEV_STAT_SCSI_LU_ATTR_RO(dev_type);
......@@ -668,18 +666,18 @@ static struct config_item_type target_stat_scsi_lu_cit = {
*/
void target_stat_setup_dev_default_groups(struct se_subsystem_dev *se_subdev)
{
struct config_group *dev_stat_grp = &DEV_STAT_GRP(se_subdev)->stat_group;
struct config_group *dev_stat_grp = &se_subdev->dev_stat_grps.stat_group;
config_group_init_type_name(&DEV_STAT_GRP(se_subdev)->scsi_dev_group,
config_group_init_type_name(&se_subdev->dev_stat_grps.scsi_dev_group,
"scsi_dev", &target_stat_scsi_dev_cit);
config_group_init_type_name(&DEV_STAT_GRP(se_subdev)->scsi_tgt_dev_group,
config_group_init_type_name(&se_subdev->dev_stat_grps.scsi_tgt_dev_group,
"scsi_tgt_dev", &target_stat_scsi_tgt_dev_cit);
config_group_init_type_name(&DEV_STAT_GRP(se_subdev)->scsi_lu_group,
config_group_init_type_name(&se_subdev->dev_stat_grps.scsi_lu_group,
"scsi_lu", &target_stat_scsi_lu_cit);
dev_stat_grp->default_groups[0] = &DEV_STAT_GRP(se_subdev)->scsi_dev_group;
dev_stat_grp->default_groups[1] = &DEV_STAT_GRP(se_subdev)->scsi_tgt_dev_group;
dev_stat_grp->default_groups[2] = &DEV_STAT_GRP(se_subdev)->scsi_lu_group;
dev_stat_grp->default_groups[0] = &se_subdev->dev_stat_grps.scsi_dev_group;
dev_stat_grp->default_groups[1] = &se_subdev->dev_stat_grps.scsi_tgt_dev_group;
dev_stat_grp->default_groups[2] = &se_subdev->dev_stat_grps.scsi_lu_group;
dev_stat_grp->default_groups[3] = NULL;
}
......@@ -922,7 +920,7 @@ static ssize_t target_stat_scsi_tgt_port_show_attr_name(
tpg = sep->sep_tpg;
ret = snprintf(page, PAGE_SIZE, "%sPort#%u\n",
TPG_TFO(tpg)->get_fabric_name(), sep->sep_index);
tpg->se_tpg_tfo->get_fabric_name(), sep->sep_index);
spin_unlock(&lun->lun_sep_lock);
return ret;
}
......@@ -945,8 +943,8 @@ static ssize_t target_stat_scsi_tgt_port_show_attr_port_index(
tpg = sep->sep_tpg;
ret = snprintf(page, PAGE_SIZE, "%s%s%d\n",
TPG_TFO(tpg)->tpg_get_wwn(tpg), "+t+",
TPG_TFO(tpg)->tpg_get_tag(tpg));
tpg->se_tpg_tfo->tpg_get_wwn(tpg), "+t+",
tpg->se_tpg_tfo->tpg_get_tag(tpg));
spin_unlock(&lun->lun_sep_lock);
return ret;
}
......@@ -1128,7 +1126,7 @@ static ssize_t target_stat_scsi_transport_show_attr_device(
tpg = sep->sep_tpg;
/* scsiTransportType */
ret = snprintf(page, PAGE_SIZE, "scsiTransport%s\n",
TPG_TFO(tpg)->get_fabric_name());
tpg->se_tpg_tfo->get_fabric_name());
spin_unlock(&lun->lun_sep_lock);
return ret;
}
......@@ -1150,7 +1148,7 @@ static ssize_t target_stat_scsi_transport_show_attr_indx(
}
tpg = sep->sep_tpg;
ret = snprintf(page, PAGE_SIZE, "%u\n",
TPG_TFO(tpg)->tpg_get_inst_index(tpg));
tpg->se_tpg_tfo->tpg_get_inst_index(tpg));
spin_unlock(&lun->lun_sep_lock);
return ret;
}
......@@ -1173,10 +1171,10 @@ static ssize_t target_stat_scsi_transport_show_attr_dev_name(
return -ENODEV;
}
tpg = sep->sep_tpg;
wwn = DEV_T10_WWN(dev);
wwn = &dev->se_sub_dev->t10_wwn;
/* scsiTransportDevName */
ret = snprintf(page, PAGE_SIZE, "%s+%s\n",
TPG_TFO(tpg)->tpg_get_wwn(tpg),
tpg->se_tpg_tfo->tpg_get_wwn(tpg),
(strlen(wwn->unit_serial)) ? wwn->unit_serial :
wwn->vendor);
spin_unlock(&lun->lun_sep_lock);
......@@ -1212,18 +1210,18 @@ static struct config_item_type target_stat_scsi_transport_cit = {
*/
void target_stat_setup_port_default_groups(struct se_lun *lun)
{
struct config_group *port_stat_grp = &PORT_STAT_GRP(lun)->stat_group;
struct config_group *port_stat_grp = &lun->port_stat_grps.stat_group;
config_group_init_type_name(&PORT_STAT_GRP(lun)->scsi_port_group,
config_group_init_type_name(&lun->port_stat_grps.scsi_port_group,
"scsi_port", &target_stat_scsi_port_cit);
config_group_init_type_name(&PORT_STAT_GRP(lun)->scsi_tgt_port_group,
config_group_init_type_name(&lun->port_stat_grps.scsi_tgt_port_group,
"scsi_tgt_port", &target_stat_scsi_tgt_port_cit);
config_group_init_type_name(&PORT_STAT_GRP(lun)->scsi_transport_group,
config_group_init_type_name(&lun->port_stat_grps.scsi_transport_group,
"scsi_transport", &target_stat_scsi_transport_cit);
port_stat_grp->default_groups[0] = &PORT_STAT_GRP(lun)->scsi_port_group;
port_stat_grp->default_groups[1] = &PORT_STAT_GRP(lun)->scsi_tgt_port_group;
port_stat_grp->default_groups[2] = &PORT_STAT_GRP(lun)->scsi_transport_group;
port_stat_grp->default_groups[0] = &lun->port_stat_grps.scsi_port_group;
port_stat_grp->default_groups[1] = &lun->port_stat_grps.scsi_tgt_port_group;
port_stat_grp->default_groups[2] = &lun->port_stat_grps.scsi_transport_group;
port_stat_grp->default_groups[3] = NULL;
}
......@@ -1264,7 +1262,7 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_inst(
tpg = nacl->se_tpg;
/* scsiInstIndex */
ret = snprintf(page, PAGE_SIZE, "%u\n",
TPG_TFO(tpg)->tpg_get_inst_index(tpg));
tpg->se_tpg_tfo->tpg_get_inst_index(tpg));
spin_unlock_irq(&nacl->device_list_lock);
return ret;
}
......@@ -1314,7 +1312,7 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_port(
}
tpg = nacl->se_tpg;
/* scsiAuthIntrTgtPortIndex */
ret = snprintf(page, PAGE_SIZE, "%u\n", TPG_TFO(tpg)->tpg_get_tag(tpg));
ret = snprintf(page, PAGE_SIZE, "%u\n", tpg->se_tpg_tfo->tpg_get_tag(tpg));
spin_unlock_irq(&nacl->device_list_lock);
return ret;
}
......@@ -1632,7 +1630,7 @@ static ssize_t target_stat_scsi_att_intr_port_show_attr_inst(
tpg = nacl->se_tpg;
/* scsiInstIndex */
ret = snprintf(page, PAGE_SIZE, "%u\n",
TPG_TFO(tpg)->tpg_get_inst_index(tpg));
tpg->se_tpg_tfo->tpg_get_inst_index(tpg));
spin_unlock_irq(&nacl->device_list_lock);
return ret;
}
......@@ -1682,7 +1680,7 @@ static ssize_t target_stat_scsi_att_intr_port_show_attr_port(
}
tpg = nacl->se_tpg;
/* scsiPortIndex */
ret = snprintf(page, PAGE_SIZE, "%u\n", TPG_TFO(tpg)->tpg_get_tag(tpg));
ret = snprintf(page, PAGE_SIZE, "%u\n", tpg->se_tpg_tfo->tpg_get_tag(tpg));
spin_unlock_irq(&nacl->device_list_lock);
return ret;
}
......@@ -1708,7 +1706,7 @@ static ssize_t target_stat_scsi_att_intr_port_show_attr_indx(
tpg = nacl->se_tpg;
/* scsiAttIntrPortIndex */
ret = snprintf(page, PAGE_SIZE, "%u\n",
TPG_TFO(tpg)->sess_get_index(se_sess));
tpg->se_tpg_tfo->sess_get_index(se_sess));
spin_unlock_irq(&nacl->nacl_sess_lock);
return ret;
}
......@@ -1757,8 +1755,8 @@ static ssize_t target_stat_scsi_att_intr_port_show_attr_port_ident(
tpg = nacl->se_tpg;
/* scsiAttIntrPortName+scsiAttIntrPortIdentifier */
memset(buf, 0, 64);
if (TPG_TFO(tpg)->sess_get_initiator_sid != NULL)
TPG_TFO(tpg)->sess_get_initiator_sid(se_sess,
if (tpg->se_tpg_tfo->sess_get_initiator_sid != NULL)
tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
(unsigned char *)&buf[0], 64);
ret = snprintf(page, PAGE_SIZE, "%s+i+%s\n", nacl->initiatorname, buf);
......@@ -1797,14 +1795,14 @@ static struct config_item_type target_stat_scsi_att_intr_port_cit = {
*/
void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *lacl)
{
struct config_group *ml_stat_grp = &ML_STAT_GRPS(lacl)->stat_group;
struct config_group *ml_stat_grp = &lacl->ml_stat_grps.stat_group;
config_group_init_type_name(&ML_STAT_GRPS(lacl)->scsi_auth_intr_group,
config_group_init_type_name(&lacl->ml_stat_grps.scsi_auth_intr_group,
"scsi_auth_intr", &target_stat_scsi_auth_intr_cit);
config_group_init_type_name(&ML_STAT_GRPS(lacl)->scsi_att_intr_port_group,
config_group_init_type_name(&lacl->ml_stat_grps.scsi_att_intr_port_group,
"scsi_att_intr_port", &target_stat_scsi_att_intr_port_cit);
ml_stat_grp->default_groups[0] = &ML_STAT_GRPS(lacl)->scsi_auth_intr_group;
ml_stat_grp->default_groups[1] = &ML_STAT_GRPS(lacl)->scsi_att_intr_port_group;
ml_stat_grp->default_groups[0] = &lacl->ml_stat_grps.scsi_auth_intr_group;
ml_stat_grp->default_groups[1] = &lacl->ml_stat_grps.scsi_att_intr_port_group;
ml_stat_grp->default_groups[2] = NULL;
}
此差异已折叠。
此差异已折叠。
......@@ -80,10 +80,10 @@ int core_scsi3_ua_check(
case REQUEST_SENSE:
return 0;
default:
return -1;
return -EINVAL;
}
return -1;
return -EINVAL;
}
int core_scsi3_ua_allocate(
......@@ -98,12 +98,12 @@ int core_scsi3_ua_allocate(
* PASSTHROUGH OPS
*/
if (!(nacl))
return -1;
return -EINVAL;
ua = kmem_cache_zalloc(se_ua_cache, GFP_ATOMIC);
if (!(ua)) {
printk(KERN_ERR "Unable to allocate struct se_ua\n");
return -1;
return -ENOMEM;
}
INIT_LIST_HEAD(&ua->ua_dev_list);
INIT_LIST_HEAD(&ua->ua_nacl_list);
......@@ -179,7 +179,7 @@ int core_scsi3_ua_allocate(
printk(KERN_INFO "[%s]: Allocated UNIT ATTENTION, mapped LUN: %u, ASC:"
" 0x%02x, ASCQ: 0x%02x\n",
TPG_TFO(nacl->se_tpg)->get_fabric_name(), unpacked_lun,
nacl->se_tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
asc, ascq);
atomic_inc(&deve->ua_count);
......@@ -208,7 +208,7 @@ void core_scsi3_ua_for_check_condition(
u8 *asc,
u8 *ascq)
{
struct se_device *dev = SE_DEV(cmd);
struct se_device *dev = cmd->se_lun->lun_se_dev;
struct se_dev_entry *deve;
struct se_session *sess = cmd->se_sess;
struct se_node_acl *nacl;
......@@ -240,7 +240,7 @@ void core_scsi3_ua_for_check_condition(
* highest priority UNIT_ATTENTION and ASC/ASCQ without
* clearing it.
*/
if (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl != 0) {
if (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl != 0) {
*asc = ua->ua_asc;
*ascq = ua->ua_ascq;
break;
......@@ -267,10 +267,10 @@ void core_scsi3_ua_for_check_condition(
printk(KERN_INFO "[%s]: %s UNIT ATTENTION condition with"
" INTLCK_CTRL: %d, mapped LUN: %u, got CDB: 0x%02x"
" reported ASC: 0x%02x, ASCQ: 0x%02x\n",
TPG_TFO(nacl->se_tpg)->get_fabric_name(),
(DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl != 0) ? "Reporting" :
"Releasing", DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl,
cmd->orig_fe_lun, T_TASK(cmd)->t_task_cdb[0], *asc, *ascq);
nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
(dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl != 0) ? "Reporting" :
"Releasing", dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl,
cmd->orig_fe_lun, cmd->t_task->t_task_cdb[0], *asc, *ascq);
}
int core_scsi3_ua_clear_for_request_sense(
......@@ -285,17 +285,17 @@ int core_scsi3_ua_clear_for_request_sense(
int head = 1;
if (!(sess))
return -1;
return -EINVAL;
nacl = sess->se_node_acl;
if (!(nacl))
return -1;
return -EINVAL;
spin_lock_irq(&nacl->device_list_lock);
deve = &nacl->device_list[cmd->orig_fe_lun];
if (!(atomic_read(&deve->ua_count))) {
spin_unlock_irq(&nacl->device_list_lock);
return -1;
return -EPERM;
}
/*
* The highest priority Unit Attentions are placed at the head of the
......@@ -325,8 +325,8 @@ int core_scsi3_ua_clear_for_request_sense(
printk(KERN_INFO "[%s]: Released UNIT ATTENTION condition, mapped"
" LUN: %u, got REQUEST_SENSE reported ASC: 0x%02x,"
" ASCQ: 0x%02x\n", TPG_TFO(nacl->se_tpg)->get_fabric_name(),
" ASCQ: 0x%02x\n", nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
cmd->orig_fe_lun, *asc, *ascq);
return (head) ? -1 : 0;
return (head) ? -EPERM : 0;
}
......@@ -72,7 +72,7 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
caller, cmd, cmd->cdb);
printk(KERN_INFO "%s: cmd %p lun %d\n", caller, cmd, cmd->lun);
task = T_TASK(se_cmd);
task = se_cmd->t_task;
printk(KERN_INFO "%s: cmd %p task %p se_num %u buf %p len %u se_cmd_flags <0x%x>\n",
caller, cmd, task, task->t_tasks_se_num,
task->t_task_buf, se_cmd->data_length, se_cmd->se_cmd_flags);
......@@ -262,9 +262,9 @@ int ft_write_pending(struct se_cmd *se_cmd)
* TCM/LIO target
*/
transport_do_task_sg_chain(se_cmd);
cmd->sg = T_TASK(se_cmd)->t_tasks_sg_chained;
cmd->sg = se_cmd->t_task->t_tasks_sg_chained;
cmd->sg_cnt =
T_TASK(se_cmd)->t_tasks_sg_chained_no;
se_cmd->t_task->t_tasks_sg_chained_no;
}
if (cmd->sg && lport->tt.ddp_setup(lport, ep->xid,
cmd->sg, cmd->sg_cnt))
......@@ -670,7 +670,6 @@ static void ft_send_cmd(struct ft_cmd *cmd)
err:
ft_send_resp_code(cmd, FCP_CMND_FIELDS_INVALID);
return;
}
/*
......
......@@ -582,10 +582,10 @@ int ft_register_configfs(void)
* Register the top level struct config_item_type with TCM core
*/
fabric = target_fabric_configfs_init(THIS_MODULE, "fc");
if (!fabric) {
if (IS_ERR(fabric)) {
printk(KERN_INFO "%s: target_fabric_configfs_init() failed!\n",
__func__);
return -1;
return PTR_ERR(fabric);
}
fabric->tf_ops = ft_fabric_ops;
......
......@@ -90,7 +90,7 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
lport = ep->lp;
cmd->seq = lport->tt.seq_start_next(cmd->seq);
task = T_TASK(se_cmd);
task = se_cmd->t_task;
BUG_ON(!task);
remaining = se_cmd->data_length;
......@@ -236,7 +236,7 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
u32 f_ctl;
void *buf;
task = T_TASK(se_cmd);
task = se_cmd->t_task;
BUG_ON(!task);
fh = fc_frame_header_get(fp);
......
此差异已折叠。
......@@ -111,9 +111,8 @@ struct se_subsystem_api;
extern struct kmem_cache *se_mem_cache;
extern int init_se_global(void);
extern void release_se_global(void);
extern void init_scsi_index_table(void);
extern int init_se_kmem_caches(void);
extern void release_se_kmem_caches(void);
extern u32 scsi_get_new_index(scsi_index_t);
extern void transport_init_queue_obj(struct se_queue_obj *);
extern int transport_subsystem_check_init(void);
......@@ -184,7 +183,7 @@ extern void transport_send_task_abort(struct se_cmd *);
extern void transport_release_cmd_to_pool(struct se_cmd *);
extern void transport_generic_free_cmd(struct se_cmd *, int, int, int);
extern void transport_generic_wait_for_cmds(struct se_cmd *, int);
extern u32 transport_calc_sg_num(struct se_task *, struct se_mem *, u32);
extern int transport_init_task_sg(struct se_task *, struct se_mem *, u32);
extern int transport_map_mem_to_sg(struct se_task *, struct list_head *,
void *, struct se_mem *,
struct se_mem **, u32 *, u32 *);
......@@ -352,9 +351,4 @@ struct se_subsystem_api {
unsigned char *(*get_sense_buffer)(struct se_task *);
} ____cacheline_aligned;
#define TRANSPORT(dev) ((dev)->transport)
#define HBA_TRANSPORT(hba) ((hba)->transport)
extern struct se_global *se_global;
#endif /* TARGET_CORE_TRANSPORT_H */
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册