Commit 5c64e3a4 authored by Linus Torvalds

Merge branch 'queue' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending

Pull target fixes from Nicholas Bellinger:
 "A handful of fixes + minor changes this time around, along with one
  important >= v3.9 regression fix for IBLOCK backends.  The highlights
  include:

   - Use FD_MAX_SECTORS in FILEIO for block_device as
     well as files (agrover)

   - Fix processing of out-of-order CmdSNs with
     the iSER driver (shlomo)

   - Close long-standing target_put_sess_cmd() vs.
     core_tmr_abort_task() race with the addition of
     kref_put_spinlock_irqsave() (joern + greg-kh)

   - Fix IBLOCK WCE=1 + DPOFUA=1 backend WRITE
     regression in >= v3.9 (nab + bootc)

  Note these four patches are CC'ed to stable.

  Also, there is still some work left to be done on the active I/O
  shutdown path in target_wait_for_sess_cmds() used by tcm_qla2xxx +
  ib_isert fabrics that is still being discussed on the list, and will
  hopefully be resolved soon."

* 'queue' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending:
  target: close target_put_sess_cmd() vs. core_tmr_abort_task() race
  target: removed unused transport_state flag
  target/iblock: Fix WCE=1 + DPOFUA=1 backend WRITE regression
  MAINTAINERS: Update target git tree URL
  iscsi-target: Fix typos in RDMAEXTENSIONS macro usage
  target/rd: Add ramdisk bit for NULLIO operation
  iscsi-target: Fix processing of OOO commands
  iscsi-target: Make buf param of iscsit_do_crypto_hash_buf() const void *
  iscsi-target: Fix NULL pointer dereference in iscsit_send_reject
  target: Have dev/enable show if TCM device is configured
  target: Use FD_MAX_SECTORS/FD_BLOCKSIZE for blockdevs using fileio
  target: Remove unused struct members in se_dev_entry
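
As background for the "Fix processing of OOO commands" change below: CmdSNs are
32-bit serial numbers, so ordering them with a plain '<' goes wrong once the
counter wraps, and the patch switches the comparisons to iscsi_sna_lt(). A
minimal wrap-safe comparison in the RFC 1982 style looks roughly like this
(an illustrative sketch only, not the kernel's exact helper):

    #include <stdint.h>

    /* Return non-zero when n1 precedes n2 in 32-bit serial-number order. */
    static inline int sna32_lt(uint32_t n1, uint32_t n2)
    {
            return n1 != n2 &&
                   ((n1 < n2 && (n2 - n1) < (1UL << 31)) ||
                    (n1 > n2 && (n1 - n2) > (1UL << 31)));
    }

    /* Example: 0xfffffff0 precedes 0x10 once the counter has wrapped. */
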
@@ -7854,7 +7854,7 @@ L: linux-scsi@vger.kernel.org
 L: target-devel@vger.kernel.org
 L: http://groups.google.com/group/linux-iscsi-target-dev
 W: http://www.linux-iscsi.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/nab/lio-core.git master
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending.git master
 S: Supported
 F: drivers/target/
 F: include/target/
@@ -1250,7 +1250,7 @@ static u32 iscsit_do_crypto_hash_sg(
 
 static void iscsit_do_crypto_hash_buf(
         struct hash_desc *hash,
-        unsigned char *buf,
+        const void *buf,
         u32 payload_length,
         u32 padding,
         u8 *pad_bytes,
@@ -2524,9 +2524,8 @@ static int iscsit_send_conn_drop_async_message(
         if (conn->conn_ops->HeaderDigest) {
                 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
 
-                iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
-                                (unsigned char *)hdr, ISCSI_HDR_LEN,
-                                0, NULL, (u8 *)header_digest);
+                iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+                                ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
 
                 cmd->tx_size += ISCSI_CRC_LEN;
                 pr_debug("Attaching CRC32C HeaderDigest to"
@@ -2662,9 +2661,8 @@ static int iscsit_send_datain(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
         if (conn->conn_ops->HeaderDigest) {
                 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
 
-                iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
-                                (unsigned char *)cmd->pdu, ISCSI_HDR_LEN,
-                                0, NULL, (u8 *)header_digest);
+                iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, cmd->pdu,
+                                ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
 
                 iov[0].iov_len += ISCSI_CRC_LEN;
                 tx_size += ISCSI_CRC_LEN;
@@ -2841,9 +2839,8 @@ iscsit_send_logout(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
         if (conn->conn_ops->HeaderDigest) {
                 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
 
-                iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
-                                (unsigned char *)&cmd->pdu[0], ISCSI_HDR_LEN,
-                                0, NULL, (u8 *)header_digest);
+                iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, &cmd->pdu[0],
+                                ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
 
                 iov[0].iov_len += ISCSI_CRC_LEN;
                 tx_size += ISCSI_CRC_LEN;
@@ -2900,9 +2897,8 @@ static int iscsit_send_unsolicited_nopin(
         if (conn->conn_ops->HeaderDigest) {
                 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
 
-                iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
-                                (unsigned char *)hdr, ISCSI_HDR_LEN,
-                                0, NULL, (u8 *)header_digest);
+                iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+                                ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
 
                 tx_size += ISCSI_CRC_LEN;
                 pr_debug("Attaching CRC32C HeaderDigest to"
@@ -2949,9 +2945,8 @@ iscsit_send_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
         if (conn->conn_ops->HeaderDigest) {
                 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
 
-                iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
-                                (unsigned char *)hdr, ISCSI_HDR_LEN,
-                                0, NULL, (u8 *)header_digest);
+                iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+                                ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
 
                 iov[0].iov_len += ISCSI_CRC_LEN;
                 tx_size += ISCSI_CRC_LEN;
@@ -3040,9 +3035,8 @@ static int iscsit_send_r2t(
         if (conn->conn_ops->HeaderDigest) {
                 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
 
-                iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
-                                (unsigned char *)hdr, ISCSI_HDR_LEN,
-                                0, NULL, (u8 *)header_digest);
+                iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+                                ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
 
                 cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN;
                 tx_size += ISCSI_CRC_LEN;
@@ -3256,9 +3250,8 @@ static int iscsit_send_response(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
         if (conn->conn_ops->HeaderDigest) {
                 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
 
-                iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
-                                (unsigned char *)cmd->pdu, ISCSI_HDR_LEN,
-                                0, NULL, (u8 *)header_digest);
+                iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, cmd->pdu,
+                                ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
 
                 iov[0].iov_len += ISCSI_CRC_LEN;
                 tx_size += ISCSI_CRC_LEN;
@@ -3329,9 +3322,8 @@ iscsit_send_task_mgt_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
         if (conn->conn_ops->HeaderDigest) {
                 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
 
-                iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
-                                (unsigned char *)hdr, ISCSI_HDR_LEN,
-                                0, NULL, (u8 *)header_digest);
+                iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+                                ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
 
                 cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN;
                 tx_size += ISCSI_CRC_LEN;
@@ -3504,9 +3496,8 @@ static int iscsit_send_text_rsp(
         if (conn->conn_ops->HeaderDigest) {
                 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
 
-                iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
-                                (unsigned char *)hdr, ISCSI_HDR_LEN,
-                                0, NULL, (u8 *)header_digest);
+                iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+                                ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
 
                 iov[0].iov_len += ISCSI_CRC_LEN;
                 tx_size += ISCSI_CRC_LEN;
@@ -3557,11 +3548,11 @@ static int iscsit_send_reject(
         struct iscsi_cmd *cmd,
         struct iscsi_conn *conn)
 {
-        u32 iov_count = 0, tx_size = 0;
-        struct iscsi_reject *hdr;
+        struct iscsi_reject *hdr = (struct iscsi_reject *)&cmd->pdu[0];
         struct kvec *iov;
+        u32 iov_count = 0, tx_size;
 
-        iscsit_build_reject(cmd, conn, (struct iscsi_reject *)&cmd->pdu[0]);
+        iscsit_build_reject(cmd, conn, hdr);
 
         iov = &cmd->iov_misc[0];
         iov[iov_count].iov_base = cmd->pdu;
@@ -3574,9 +3565,8 @@ static int iscsit_send_reject(
         if (conn->conn_ops->HeaderDigest) {
                 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
 
-                iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
-                                (unsigned char *)hdr, ISCSI_HDR_LEN,
-                                0, NULL, (u8 *)header_digest);
+                iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+                                ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
 
                 iov[0].iov_len += ISCSI_CRC_LEN;
                 tx_size += ISCSI_CRC_LEN;
@@ -3585,9 +3575,8 @@ static int iscsit_send_reject(
         }
 
         if (conn->conn_ops->DataDigest) {
-                iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
-                                (unsigned char *)cmd->buf_ptr, ISCSI_HDR_LEN,
-                                0, NULL, (u8 *)&cmd->data_crc);
+                iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, cmd->buf_ptr,
+                                ISCSI_HDR_LEN, 0, NULL, (u8 *)&cmd->data_crc);
 
                 iov[iov_count].iov_base = &cmd->data_crc;
                 iov[iov_count++].iov_len = ISCSI_CRC_LEN;
@@ -823,7 +823,7 @@ static int iscsit_attach_ooo_cmdsn(
                 /*
                  * CmdSN is greater than the tail of the list.
                  */
-                if (ooo_tail->cmdsn < ooo_cmdsn->cmdsn)
+                if (iscsi_sna_lt(ooo_tail->cmdsn, ooo_cmdsn->cmdsn))
                         list_add_tail(&ooo_cmdsn->ooo_list,
                                         &sess->sess_ooo_cmdsn_list);
                 else {
@@ -833,11 +833,12 @@ static int iscsit_attach_ooo_cmdsn(
                          */
                         list_for_each_entry(ooo_tmp, &sess->sess_ooo_cmdsn_list,
                                         ooo_list) {
-                                if (ooo_tmp->cmdsn < ooo_cmdsn->cmdsn)
+                                if (iscsi_sna_lt(ooo_tmp->cmdsn, ooo_cmdsn->cmdsn))
                                         continue;
 
+                                /* Insert before this entry */
                                 list_add(&ooo_cmdsn->ooo_list,
-                                        &ooo_tmp->ooo_list);
+                                        ooo_tmp->ooo_list.prev);
                                 break;
                         }
                 }
@@ -436,7 +436,7 @@ int iscsi_create_default_params(struct iscsi_param_list **param_list_ptr)
         /*
          * Extra parameters for ISER from RFC-5046
          */
-        param = iscsi_set_default_param(pl, RDMAEXTENTIONS, INITIAL_RDMAEXTENTIONS,
+        param = iscsi_set_default_param(pl, RDMAEXTENSIONS, INITIAL_RDMAEXTENSIONS,
                         PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
                         TYPERANGE_BOOL_AND, USE_LEADING_ONLY);
         if (!param)
@@ -529,7 +529,7 @@ int iscsi_set_keys_to_negotiate(
                         SET_PSTATE_NEGOTIATE(param);
                 } else if (!strcmp(param->name, OFMARKINT)) {
                         SET_PSTATE_NEGOTIATE(param);
-                } else if (!strcmp(param->name, RDMAEXTENTIONS)) {
+                } else if (!strcmp(param->name, RDMAEXTENSIONS)) {
                         if (iser == true)
                                 SET_PSTATE_NEGOTIATE(param);
                 } else if (!strcmp(param->name, INITIATORRECVDATASEGMENTLENGTH)) {
@@ -580,7 +580,7 @@ int iscsi_set_keys_irrelevant_for_discovery(
                         param->state &= ~PSTATE_NEGOTIATE;
                 else if (!strcmp(param->name, OFMARKINT))
                         param->state &= ~PSTATE_NEGOTIATE;
-                else if (!strcmp(param->name, RDMAEXTENTIONS))
+                else if (!strcmp(param->name, RDMAEXTENSIONS))
                         param->state &= ~PSTATE_NEGOTIATE;
                 else if (!strcmp(param->name, INITIATORRECVDATASEGMENTLENGTH))
                         param->state &= ~PSTATE_NEGOTIATE;
@@ -1977,7 +1977,7 @@ void iscsi_set_session_parameters(
                         ops->SessionType = !strcmp(param->value, DISCOVERY);
                         pr_debug("SessionType: %s\n",
                                 param->value);
-                } else if (!strcmp(param->name, RDMAEXTENTIONS)) {
+                } else if (!strcmp(param->name, RDMAEXTENSIONS)) {
                         ops->RDMAExtensions = !strcmp(param->value, YES);
                         pr_debug("RDMAExtensions: %s\n",
                                 param->value);
@@ -91,7 +91,7 @@ extern void iscsi_set_session_parameters(struct iscsi_sess_ops *,
 /*
  * Parameter names of iSCSI Extentions for RDMA (iSER). See RFC-5046
  */
-#define RDMAEXTENTIONS                  "RDMAExtensions"
+#define RDMAEXTENSIONS                  "RDMAExtensions"
 #define INITIATORRECVDATASEGMENTLENGTH  "InitiatorRecvDataSegmentLength"
 #define TARGETRECVDATASEGMENTLENGTH     "TargetRecvDataSegmentLength"
 
@@ -142,7 +142,7 @@ extern void iscsi_set_session_parameters(struct iscsi_sess_ops *,
 /*
  * Initial values for iSER parameters following RFC-5046 Section 6
  */
-#define INITIAL_RDMAEXTENTIONS                          NO
+#define INITIAL_RDMAEXTENSIONS                          NO
 #define INITIAL_INITIATORRECVDATASEGMENTLENGTH          "262144"
 #define INITIAL_TARGETRECVDATASEGMENTLENGTH             "8192"
 
@@ -1584,6 +1584,13 @@ static struct target_core_configfs_attribute target_core_attr_dev_udev_path = {
         .store = target_core_store_dev_udev_path,
 };
 
+static ssize_t target_core_show_dev_enable(void *p, char *page)
+{
+        struct se_device *dev = p;
+
+        return snprintf(page, PAGE_SIZE, "%d\n", !!(dev->dev_flags & DF_CONFIGURED));
+}
+
 static ssize_t target_core_store_dev_enable(
         void *p,
         const char *page,
@@ -1609,8 +1616,8 @@ static ssize_t target_core_store_dev_enable(
 static struct target_core_configfs_attribute target_core_attr_dev_enable = {
         .attr = { .ca_owner = THIS_MODULE,
                   .ca_name = "enable",
-                  .ca_mode = S_IWUSR },
-        .show  = NULL,
+                  .ca_mode = S_IRUGO | S_IWUSR },
+        .show  = target_core_show_dev_enable,
         .store = target_core_store_dev_enable,
 };
 
@@ -68,7 +68,6 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
                 struct se_dev_entry *deve = se_cmd->se_deve;
 
                 deve->total_cmds++;
-                deve->total_bytes += se_cmd->data_length;
 
                 if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
                     (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
@@ -85,8 +84,6 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
                 else if (se_cmd->data_direction == DMA_FROM_DEVICE)
                         deve->read_bytes += se_cmd->data_length;
 
-                deve->deve_cmds++;
-
                 se_lun = deve->se_lun;
                 se_cmd->se_lun = deve->se_lun;
                 se_cmd->pr_res_key = deve->pr_res_key;
@@ -275,17 +272,6 @@ int core_free_device_list_for_node(
         return 0;
 }
 
-void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd)
-{
-        struct se_dev_entry *deve;
-        unsigned long flags;
-
-        spin_lock_irqsave(&se_nacl->device_list_lock, flags);
-        deve = se_nacl->device_list[se_cmd->orig_fe_lun];
-        deve->deve_cmds--;
-        spin_unlock_irqrestore(&se_nacl->device_list_lock, flags);
-}
-
 void core_update_device_list_access(
         u32 mapped_lun,
         u32 lun_access,
@@ -153,10 +153,6 @@ static int fd_configure_device(struct se_device *dev)
                 struct request_queue *q = bdev_get_queue(inode->i_bdev);
                 unsigned long long dev_size;
 
-                dev->dev_attrib.hw_block_size =
-                        bdev_logical_block_size(inode->i_bdev);
-                dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
-
                 /*
                  * Determine the number of bytes from i_size_read() minus
                  * one (1) logical sector from underlying struct block_device
@@ -203,9 +199,6 @@ static int fd_configure_device(struct se_device *dev)
                         goto fail;
                 }
 
-                dev->dev_attrib.hw_block_size = FD_BLOCKSIZE;
-                dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS;
-
                 /*
                  * Limit UNMAP emulation to 8k Number of LBAs (NoLB)
                  */
@@ -226,6 +219,8 @@ static int fd_configure_device(struct se_device *dev)
 
         fd_dev->fd_block_size = dev->dev_attrib.hw_block_size;
 
+        dev->dev_attrib.hw_block_size = FD_BLOCKSIZE;
+        dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS;
         dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
 
         if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
@@ -615,6 +615,8 @@ iblock_execute_rw(struct se_cmd *cmd)
                                 rw = WRITE_FUA;
                         else if (!(q->flush_flags & REQ_FLUSH))
                                 rw = WRITE_FUA;
+                        else
+                                rw = WRITE;
                 } else {
                         rw = WRITE;
                 }
@@ -8,7 +8,6 @@ extern struct t10_alua_lu_gp *default_lu_gp;
 struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16);
 int     core_free_device_list_for_node(struct se_node_acl *,
                 struct se_portal_group *);
-void    core_dec_lacl_count(struct se_node_acl *, struct se_cmd *);
 void    core_update_device_list_access(u32, u32, struct se_node_acl *);
 int     core_enable_device_list_for_node(struct se_lun *, struct se_lun_acl *,
                 u32, u32, struct se_node_acl *, struct se_portal_group *);
@@ -291,6 +291,11 @@ rd_execute_rw(struct se_cmd *cmd)
         u32 src_len;
         u64 tmp;
 
+        if (dev->rd_flags & RDF_NULLIO) {
+                target_complete_cmd(cmd, SAM_STAT_GOOD);
+                return 0;
+        }
+
         tmp = cmd->t_task_lba * se_dev->dev_attrib.block_size;
         rd_offset = do_div(tmp, PAGE_SIZE);
         rd_page = tmp;
@@ -373,11 +378,12 @@ rd_execute_rw(struct se_cmd *cmd)
 }
 
 enum {
-        Opt_rd_pages, Opt_err
+        Opt_rd_pages, Opt_rd_nullio, Opt_err
 };
 
 static match_table_t tokens = {
         {Opt_rd_pages, "rd_pages=%d"},
+        {Opt_rd_nullio, "rd_nullio=%d"},
         {Opt_err, NULL}
 };
 
@@ -408,6 +414,14 @@ static ssize_t rd_set_configfs_dev_params(struct se_device *dev,
                                 " Count: %u\n", rd_dev->rd_page_count);
                         rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
                         break;
+                case Opt_rd_nullio:
+                        match_int(args, &arg);
+                        if (arg != 1)
+                                break;
+
+                        pr_debug("RAMDISK: Setting NULLIO flag: %d\n", arg);
+                        rd_dev->rd_flags |= RDF_NULLIO;
+                        break;
                 default:
                         break;
                 }
@@ -424,8 +438,9 @@ static ssize_t rd_show_configfs_dev_params(struct se_device *dev, char *b)
         ssize_t bl = sprintf(b, "TCM RamDisk ID: %u RamDisk Makeup: rd_mcp\n",
                         rd_dev->rd_dev_id);
         bl += sprintf(b + bl, " PAGES/PAGE_SIZE: %u*%lu"
-                        " SG_table_count: %u\n", rd_dev->rd_page_count,
-                        PAGE_SIZE, rd_dev->sg_table_count);
+                        " SG_table_count: %u nullio: %d\n", rd_dev->rd_page_count,
+                        PAGE_SIZE, rd_dev->sg_table_count,
+                        !!(rd_dev->rd_flags & RDF_NULLIO));
 
         return bl;
 }
@@ -22,6 +22,7 @@ struct rd_dev_sg_table {
 } ____cacheline_aligned;
 
 #define RDF_HAS_PAGE_COUNT      0x01
+#define RDF_NULLIO              0x02
 
 struct rd_dev {
         struct se_device dev;
@@ -2163,8 +2163,6 @@ void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
         if (wait_for_tasks)
                 transport_wait_for_tasks(cmd);
 
-        core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd);
-
         if (cmd->se_lun)
                 transport_lun_remove_cmd(cmd);
 
@@ -2213,21 +2211,19 @@ static void target_release_cmd_kref(struct kref *kref)
 {
         struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
         struct se_session *se_sess = se_cmd->se_sess;
-        unsigned long flags;
 
-        spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
         if (list_empty(&se_cmd->se_cmd_list)) {
-                spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+                spin_unlock(&se_sess->sess_cmd_lock);
                 se_cmd->se_tfo->release_cmd(se_cmd);
                 return;
         }
         if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
-                spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+                spin_unlock(&se_sess->sess_cmd_lock);
                 complete(&se_cmd->cmd_wait_comp);
                 return;
         }
         list_del(&se_cmd->se_cmd_list);
-        spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+        spin_unlock(&se_sess->sess_cmd_lock);
 
         se_cmd->se_tfo->release_cmd(se_cmd);
 }
@@ -2238,7 +2234,8 @@ static void target_release_cmd_kref(struct kref *kref)
  */
 int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
 {
-        return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref);
+        return kref_put_spinlock_irqsave(&se_cmd->cmd_kref, target_release_cmd_kref,
+                        &se_sess->sess_cmd_lock);
 }
 EXPORT_SYMBOL(target_put_sess_cmd);
 
@@ -19,6 +19,7 @@
 #include <linux/atomic.h>
 #include <linux/kernel.h>
 #include <linux/mutex.h>
+#include <linux/spinlock.h>
 
 struct kref {
         atomic_t refcount;
@@ -98,6 +99,38 @@ static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref)
         return kref_sub(kref, 1, release);
 }
 
+/**
+ * kref_put_spinlock_irqsave - decrement refcount for object.
+ * @kref: object.
+ * @release: pointer to the function that will clean up the object when the
+ *           last reference to the object is released.
+ *           This pointer is required, and it is not acceptable to pass kfree
+ *           in as this function.
+ * @lock: lock to take in release case
+ *
+ * Behaves identical to kref_put with one exception. If the reference count
+ * drops to zero, the lock will be taken atomically wrt dropping the reference
+ * count. The release function has to call spin_unlock() without _irqrestore.
+ */
+static inline int kref_put_spinlock_irqsave(struct kref *kref,
+                void (*release)(struct kref *kref),
+                spinlock_t *lock)
+{
+        unsigned long flags;
+
+        WARN_ON(release == NULL);
+        if (atomic_add_unless(&kref->refcount, -1, 1))
+                return 0;
+        spin_lock_irqsave(lock, flags);
+        if (atomic_dec_and_test(&kref->refcount)) {
+                release(kref);
+                local_irq_restore(flags);
+                return 1;
+        }
+        spin_unlock_irqrestore(lock, flags);
+        return 0;
+}
+
 static inline int kref_put_mutex(struct kref *kref,
                                  void (*release)(struct kref *kref),
                                  struct mutex *lock)
@@ -463,7 +463,6 @@ struct se_cmd {
 #define CMD_T_ABORTED           (1 << 0)
 #define CMD_T_ACTIVE            (1 << 1)
 #define CMD_T_COMPLETE          (1 << 2)
-#define CMD_T_QUEUED            (1 << 3)
 #define CMD_T_SENT              (1 << 4)
 #define CMD_T_STOP              (1 << 5)
 #define CMD_T_FAILED            (1 << 6)
@@ -572,12 +571,8 @@ struct se_dev_entry {
         bool                    def_pr_registered;
         /* See transport_lunflags_table */
         u32                     lun_flags;
-        u32                     deve_cmds;
         u32                     mapped_lun;
-        u32                     average_bytes;
-        u32                     last_byte_count;
         u32                     total_cmds;
-        u32                     total_bytes;
         u64                     pr_res_key;
         u64                     creation_time;
         u32                     attach_count;