提交 5716af6e 编写于 作者: S Sagi Grimberg 提交者: Roland Dreier

IB/iser: Rename ib_conn -> iser_conn

Two reasons why we choose to do this:

1. No point today calling struct iser_conn by another name ib_conn
2. In the next patches we will restructure iser control plane representation
   - struct iser_conn: connection logical representation
   - struct ib_conn: connection RDMA layout representation

This patch does not change any functionality.
Signed-off-by: Ariel Nahum <arieln@mellanox.com>
Signed-off-by: Sagi Grimberg <sagig@mellanox.com>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
上级 fe82dcec
...@@ -147,8 +147,8 @@ static int iscsi_iser_pdu_alloc(struct iscsi_task *task, uint8_t opcode) ...@@ -147,8 +147,8 @@ static int iscsi_iser_pdu_alloc(struct iscsi_task *task, uint8_t opcode)
int iser_initialize_task_headers(struct iscsi_task *task, int iser_initialize_task_headers(struct iscsi_task *task,
struct iser_tx_desc *tx_desc) struct iser_tx_desc *tx_desc)
{ {
struct iser_conn *ib_conn = task->conn->dd_data; struct iser_conn *iser_conn = task->conn->dd_data;
struct iser_device *device = ib_conn->device; struct iser_device *device = iser_conn->device;
struct iscsi_iser_task *iser_task = task->dd_data; struct iscsi_iser_task *iser_task = task->dd_data;
u64 dma_addr; u64 dma_addr;
...@@ -162,7 +162,7 @@ int iser_initialize_task_headers(struct iscsi_task *task, ...@@ -162,7 +162,7 @@ int iser_initialize_task_headers(struct iscsi_task *task,
tx_desc->tx_sg[0].length = ISER_HEADERS_LEN; tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
tx_desc->tx_sg[0].lkey = device->mr->lkey; tx_desc->tx_sg[0].lkey = device->mr->lkey;
iser_task->ib_conn = ib_conn; iser_task->iser_conn = iser_conn;
return 0; return 0;
} }
/** /**
...@@ -290,8 +290,8 @@ static void iscsi_iser_cleanup_task(struct iscsi_task *task) ...@@ -290,8 +290,8 @@ static void iscsi_iser_cleanup_task(struct iscsi_task *task)
{ {
struct iscsi_iser_task *iser_task = task->dd_data; struct iscsi_iser_task *iser_task = task->dd_data;
struct iser_tx_desc *tx_desc = &iser_task->desc; struct iser_tx_desc *tx_desc = &iser_task->desc;
struct iser_conn *ib_conn = task->conn->dd_data; struct iser_conn *iser_conn = task->conn->dd_data;
struct iser_device *device = ib_conn->device; struct iser_device *device = iser_conn->device;
ib_dma_unmap_single(device->ib_device, ib_dma_unmap_single(device->ib_device,
tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE); tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE);
...@@ -344,7 +344,7 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session, ...@@ -344,7 +344,7 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
int is_leading) int is_leading)
{ {
struct iscsi_conn *conn = cls_conn->dd_data; struct iscsi_conn *conn = cls_conn->dd_data;
struct iser_conn *ib_conn; struct iser_conn *iser_conn;
struct iscsi_endpoint *ep; struct iscsi_endpoint *ep;
int error; int error;
...@@ -360,30 +360,30 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session, ...@@ -360,30 +360,30 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
(unsigned long long)transport_eph); (unsigned long long)transport_eph);
return -EINVAL; return -EINVAL;
} }
ib_conn = ep->dd_data; iser_conn = ep->dd_data;
mutex_lock(&ib_conn->state_mutex); mutex_lock(&iser_conn->state_mutex);
if (ib_conn->state != ISER_CONN_UP) { if (iser_conn->state != ISER_CONN_UP) {
error = -EINVAL; error = -EINVAL;
iser_err("iser_conn %p state is %d, teardown started\n", iser_err("iser_conn %p state is %d, teardown started\n",
ib_conn, ib_conn->state); iser_conn, iser_conn->state);
goto out; goto out;
} }
error = iser_alloc_rx_descriptors(ib_conn, conn->session); error = iser_alloc_rx_descriptors(iser_conn, conn->session);
if (error) if (error)
goto out; goto out;
/* binds the iSER connection retrieved from the previously /* binds the iSER connection retrieved from the previously
* connected ep_handle to the iSCSI layer connection. exchanges * connected ep_handle to the iSCSI layer connection. exchanges
* connection pointers */ * connection pointers */
iser_info("binding iscsi conn %p to ib_conn %p\n", conn, ib_conn); iser_info("binding iscsi conn %p to iser_conn %p\n", conn, iser_conn);
conn->dd_data = ib_conn; conn->dd_data = iser_conn;
ib_conn->iscsi_conn = conn; iser_conn->iscsi_conn = conn;
out: out:
mutex_unlock(&ib_conn->state_mutex); mutex_unlock(&iser_conn->state_mutex);
return error; return error;
} }
...@@ -391,11 +391,11 @@ static int ...@@ -391,11 +391,11 @@ static int
iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn) iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn)
{ {
struct iscsi_conn *iscsi_conn; struct iscsi_conn *iscsi_conn;
struct iser_conn *ib_conn; struct iser_conn *iser_conn;
iscsi_conn = cls_conn->dd_data; iscsi_conn = cls_conn->dd_data;
ib_conn = iscsi_conn->dd_data; iser_conn = iscsi_conn->dd_data;
reinit_completion(&ib_conn->stop_completion); reinit_completion(&iser_conn->stop_completion);
return iscsi_conn_start(cls_conn); return iscsi_conn_start(cls_conn);
} }
...@@ -404,18 +404,18 @@ static void ...@@ -404,18 +404,18 @@ static void
iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
{ {
struct iscsi_conn *conn = cls_conn->dd_data; struct iscsi_conn *conn = cls_conn->dd_data;
struct iser_conn *ib_conn = conn->dd_data; struct iser_conn *iser_conn = conn->dd_data;
iser_dbg("stopping iscsi_conn: %p, ib_conn: %p\n", conn, ib_conn); iser_dbg("stopping iscsi_conn: %p, iser_conn: %p\n", conn, iser_conn);
iscsi_conn_stop(cls_conn, flag); iscsi_conn_stop(cls_conn, flag);
/* /*
* Userspace may have goofed up and not bound the connection or * Userspace may have goofed up and not bound the connection or
* might have only partially setup the connection. * might have only partially setup the connection.
*/ */
if (ib_conn) { if (iser_conn) {
conn->dd_data = NULL; conn->dd_data = NULL;
complete(&ib_conn->stop_completion); complete(&iser_conn->stop_completion);
} }
} }
...@@ -447,7 +447,7 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep, ...@@ -447,7 +447,7 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
struct iscsi_cls_session *cls_session; struct iscsi_cls_session *cls_session;
struct iscsi_session *session; struct iscsi_session *session;
struct Scsi_Host *shost; struct Scsi_Host *shost;
struct iser_conn *ib_conn = NULL; struct iser_conn *iser_conn = NULL;
shost = iscsi_host_alloc(&iscsi_iser_sht, 0, 0); shost = iscsi_host_alloc(&iscsi_iser_sht, 0, 0);
if (!shost) if (!shost)
...@@ -464,9 +464,9 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep, ...@@ -464,9 +464,9 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
* the leading conn's ep so this will be NULL; * the leading conn's ep so this will be NULL;
*/ */
if (ep) { if (ep) {
ib_conn = ep->dd_data; iser_conn = ep->dd_data;
if (ib_conn->pi_support) { if (iser_conn->pi_support) {
u32 sig_caps = ib_conn->device->dev_attr.sig_prot_cap; u32 sig_caps = iser_conn->device->dev_attr.sig_prot_cap;
scsi_host_set_prot(shost, iser_dif_prot_caps(sig_caps)); scsi_host_set_prot(shost, iser_dif_prot_caps(sig_caps));
if (iser_pi_guard) if (iser_pi_guard)
...@@ -476,8 +476,8 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep, ...@@ -476,8 +476,8 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
} }
} }
if (iscsi_host_add(shost, if (iscsi_host_add(shost, ep ?
ep ? ib_conn->device->ib_device->dma_device : NULL)) iser_conn->device->ib_device->dma_device : NULL))
goto free_host; goto free_host;
if (cmds_max > ISER_DEF_XMIT_CMDS_MAX) { if (cmds_max > ISER_DEF_XMIT_CMDS_MAX) {
...@@ -577,17 +577,17 @@ iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *s ...@@ -577,17 +577,17 @@ iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *s
static int iscsi_iser_get_ep_param(struct iscsi_endpoint *ep, static int iscsi_iser_get_ep_param(struct iscsi_endpoint *ep,
enum iscsi_param param, char *buf) enum iscsi_param param, char *buf)
{ {
struct iser_conn *ib_conn = ep->dd_data; struct iser_conn *iser_conn = ep->dd_data;
int len; int len;
switch (param) { switch (param) {
case ISCSI_PARAM_CONN_PORT: case ISCSI_PARAM_CONN_PORT:
case ISCSI_PARAM_CONN_ADDRESS: case ISCSI_PARAM_CONN_ADDRESS:
if (!ib_conn || !ib_conn->cma_id) if (!iser_conn || !iser_conn->cma_id)
return -ENOTCONN; return -ENOTCONN;
return iscsi_conn_get_addr_param((struct sockaddr_storage *) return iscsi_conn_get_addr_param((struct sockaddr_storage *)
&ib_conn->cma_id->route.addr.dst_addr, &iser_conn->cma_id->route.addr.dst_addr,
param, buf); param, buf);
break; break;
default: default:
...@@ -602,24 +602,24 @@ iscsi_iser_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr, ...@@ -602,24 +602,24 @@ iscsi_iser_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
int non_blocking) int non_blocking)
{ {
int err; int err;
struct iser_conn *ib_conn; struct iser_conn *iser_conn;
struct iscsi_endpoint *ep; struct iscsi_endpoint *ep;
ep = iscsi_create_endpoint(0); ep = iscsi_create_endpoint(0);
if (!ep) if (!ep)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
ib_conn = kzalloc(sizeof(*ib_conn), GFP_KERNEL); iser_conn = kzalloc(sizeof(*iser_conn), GFP_KERNEL);
if (!ib_conn) { if (!iser_conn) {
err = -ENOMEM; err = -ENOMEM;
goto failure; goto failure;
} }
ep->dd_data = ib_conn; ep->dd_data = iser_conn;
ib_conn->ep = ep; iser_conn->ep = ep;
iser_conn_init(ib_conn); iser_conn_init(iser_conn);
err = iser_connect(ib_conn, NULL, dst_addr, non_blocking); err = iser_connect(iser_conn, NULL, dst_addr, non_blocking);
if (err) if (err)
goto failure; goto failure;
...@@ -632,22 +632,22 @@ iscsi_iser_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr, ...@@ -632,22 +632,22 @@ iscsi_iser_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
static int static int
iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
{ {
struct iser_conn *ib_conn; struct iser_conn *iser_conn;
int rc; int rc;
ib_conn = ep->dd_data; iser_conn = ep->dd_data;
rc = wait_for_completion_interruptible_timeout(&ib_conn->up_completion, rc = wait_for_completion_interruptible_timeout(&iser_conn->up_completion,
msecs_to_jiffies(timeout_ms)); msecs_to_jiffies(timeout_ms));
/* if conn establishment failed, return error code to iscsi */ /* if conn establishment failed, return error code to iscsi */
if (rc == 0) { if (rc == 0) {
mutex_lock(&ib_conn->state_mutex); mutex_lock(&iser_conn->state_mutex);
if (ib_conn->state == ISER_CONN_TERMINATING || if (iser_conn->state == ISER_CONN_TERMINATING ||
ib_conn->state == ISER_CONN_DOWN) iser_conn->state == ISER_CONN_DOWN)
rc = -1; rc = -1;
mutex_unlock(&ib_conn->state_mutex); mutex_unlock(&iser_conn->state_mutex);
} }
iser_info("ib conn %p rc = %d\n", ib_conn, rc); iser_info("ib conn %p rc = %d\n", iser_conn, rc);
if (rc > 0) if (rc > 0)
return 1; /* success, this is the equivalent of POLLOUT */ return 1; /* success, this is the equivalent of POLLOUT */
...@@ -660,12 +660,14 @@ iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) ...@@ -660,12 +660,14 @@ iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
static void static void
iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep) iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep)
{ {
struct iser_conn *ib_conn; struct iser_conn *iser_conn;
ib_conn = ep->dd_data; iser_conn = ep->dd_data;
iser_info("ep %p ib conn %p state %d\n", ep, ib_conn, ib_conn->state); iser_info("ep %p iser conn %p state %d\n",
mutex_lock(&ib_conn->state_mutex); ep, iser_conn, iser_conn->state);
iser_conn_terminate(ib_conn);
mutex_lock(&iser_conn->state_mutex);
iser_conn_terminate(iser_conn);
/* /*
* if iser_conn and iscsi_conn are bound, we must wait for * if iser_conn and iscsi_conn are bound, we must wait for
...@@ -673,14 +675,14 @@ iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep) ...@@ -673,14 +675,14 @@ iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep)
* the iser resources. Otherwise we are safe to free resources * the iser resources. Otherwise we are safe to free resources
* immediately. * immediately.
*/ */
if (ib_conn->iscsi_conn) { if (iser_conn->iscsi_conn) {
INIT_WORK(&ib_conn->release_work, iser_release_work); INIT_WORK(&iser_conn->release_work, iser_release_work);
queue_work(release_wq, &ib_conn->release_work); queue_work(release_wq, &iser_conn->release_work);
mutex_unlock(&ib_conn->state_mutex); mutex_unlock(&iser_conn->state_mutex);
} else { } else {
ib_conn->state = ISER_CONN_DOWN; iser_conn->state = ISER_CONN_DOWN;
mutex_unlock(&ib_conn->state_mutex); mutex_unlock(&iser_conn->state_mutex);
iser_conn_release(ib_conn); iser_conn_release(iser_conn);
} }
iscsi_destroy_endpoint(ep); iscsi_destroy_endpoint(ep);
} }
...@@ -843,7 +845,7 @@ static int __init iser_init(void) ...@@ -843,7 +845,7 @@ static int __init iser_init(void)
static void __exit iser_exit(void) static void __exit iser_exit(void)
{ {
struct iser_conn *ib_conn, *n; struct iser_conn *iser_conn, *n;
int connlist_empty; int connlist_empty;
iser_dbg("Removing iSER datamover...\n"); iser_dbg("Removing iSER datamover...\n");
...@@ -856,8 +858,9 @@ static void __exit iser_exit(void) ...@@ -856,8 +858,9 @@ static void __exit iser_exit(void)
if (!connlist_empty) { if (!connlist_empty) {
iser_err("Error cleanup stage completed but we still have iser " iser_err("Error cleanup stage completed but we still have iser "
"connections, destroying them anyway.\n"); "connections, destroying them anyway.\n");
list_for_each_entry_safe(ib_conn, n, &ig.connlist, conn_list) { list_for_each_entry_safe(iser_conn, n, &ig.connlist,
iser_conn_release(ib_conn); conn_list) {
iser_conn_release(iser_conn);
} }
} }
......
...@@ -179,7 +179,7 @@ struct iser_cm_hdr { ...@@ -179,7 +179,7 @@ struct iser_cm_hdr {
/* Length of an object name string */ /* Length of an object name string */
#define ISER_OBJECT_NAME_SIZE 64 #define ISER_OBJECT_NAME_SIZE 64
enum iser_ib_conn_state { enum iser_conn_state {
ISER_CONN_INIT, /* descriptor allocd, no conn */ ISER_CONN_INIT, /* descriptor allocd, no conn */
ISER_CONN_PENDING, /* in the process of being established */ ISER_CONN_PENDING, /* in the process of being established */
ISER_CONN_UP, /* up and running */ ISER_CONN_UP, /* up and running */
...@@ -281,9 +281,9 @@ struct iser_device { ...@@ -281,9 +281,9 @@ struct iser_device {
int cq_active_qps[ISER_MAX_CQ]; int cq_active_qps[ISER_MAX_CQ];
int cqs_used; int cqs_used;
struct iser_cq_desc *cq_desc; struct iser_cq_desc *cq_desc;
int (*iser_alloc_rdma_reg_res)(struct iser_conn *ib_conn, int (*iser_alloc_rdma_reg_res)(struct iser_conn *iser_conn,
unsigned cmds_max); unsigned cmds_max);
void (*iser_free_rdma_reg_res)(struct iser_conn *ib_conn); void (*iser_free_rdma_reg_res)(struct iser_conn *iser_conn);
int (*iser_reg_rdma_mem)(struct iscsi_iser_task *iser_task, int (*iser_reg_rdma_mem)(struct iscsi_iser_task *iser_task,
enum iser_data_dir cmd_dir); enum iser_data_dir cmd_dir);
void (*iser_unreg_rdma_mem)(struct iscsi_iser_task *iser_task, void (*iser_unreg_rdma_mem)(struct iscsi_iser_task *iser_task,
...@@ -320,7 +320,7 @@ struct fast_reg_descriptor { ...@@ -320,7 +320,7 @@ struct fast_reg_descriptor {
struct iser_conn { struct iser_conn {
struct iscsi_conn *iscsi_conn; struct iscsi_conn *iscsi_conn;
struct iscsi_endpoint *ep; struct iscsi_endpoint *ep;
enum iser_ib_conn_state state; /* rdma connection state */ enum iser_conn_state state; /* rdma connection state */
atomic_t refcount; atomic_t refcount;
spinlock_t lock; /* used for state changes */ spinlock_t lock; /* used for state changes */
struct iser_device *device; /* device context */ struct iser_device *device; /* device context */
...@@ -363,7 +363,7 @@ struct iser_conn { ...@@ -363,7 +363,7 @@ struct iser_conn {
struct iscsi_iser_task { struct iscsi_iser_task {
struct iser_tx_desc desc; struct iser_tx_desc desc;
struct iser_conn *ib_conn; struct iser_conn *iser_conn;
enum iser_task_status status; enum iser_task_status status;
struct scsi_cmnd *sc; struct scsi_cmnd *sc;
int command_sent; /* set if command sent */ int command_sent; /* set if command sent */
...@@ -419,25 +419,26 @@ void iscsi_iser_recv(struct iscsi_conn *conn, ...@@ -419,25 +419,26 @@ void iscsi_iser_recv(struct iscsi_conn *conn,
char *rx_data, char *rx_data,
int rx_data_len); int rx_data_len);
void iser_conn_init(struct iser_conn *ib_conn); void iser_conn_init(struct iser_conn *iser_conn);
void iser_conn_release(struct iser_conn *ib_conn); void iser_conn_release(struct iser_conn *iser_conn);
void iser_conn_terminate(struct iser_conn *ib_conn); void iser_conn_terminate(struct iser_conn *iser_conn);
void iser_release_work(struct work_struct *work); void iser_release_work(struct work_struct *work);
void iser_rcv_completion(struct iser_rx_desc *desc, void iser_rcv_completion(struct iser_rx_desc *desc,
unsigned long dto_xfer_len, unsigned long dto_xfer_len,
struct iser_conn *ib_conn); struct iser_conn *iser_conn);
void iser_snd_completion(struct iser_tx_desc *desc, struct iser_conn *ib_conn); void iser_snd_completion(struct iser_tx_desc *desc,
struct iser_conn *iser_conn);
void iser_task_rdma_init(struct iscsi_iser_task *task); void iser_task_rdma_init(struct iscsi_iser_task *task);
void iser_task_rdma_finalize(struct iscsi_iser_task *task); void iser_task_rdma_finalize(struct iscsi_iser_task *task);
void iser_free_rx_descriptors(struct iser_conn *ib_conn); void iser_free_rx_descriptors(struct iser_conn *iser_conn);
void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task, void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
struct iser_data_buf *mem, struct iser_data_buf *mem,
...@@ -449,12 +450,12 @@ int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *task, ...@@ -449,12 +450,12 @@ int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *task,
int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *task, int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *task,
enum iser_data_dir cmd_dir); enum iser_data_dir cmd_dir);
int iser_connect(struct iser_conn *ib_conn, int iser_connect(struct iser_conn *iser_conn,
struct sockaddr *src_addr, struct sockaddr *src_addr,
struct sockaddr *dst_addr, struct sockaddr *dst_addr,
int non_blocking); int non_blocking);
int iser_reg_page_vec(struct iser_conn *ib_conn, int iser_reg_page_vec(struct iser_conn *iser_conn,
struct iser_page_vec *page_vec, struct iser_page_vec *page_vec,
struct iser_mem_reg *mem_reg); struct iser_mem_reg *mem_reg);
...@@ -463,9 +464,9 @@ void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task, ...@@ -463,9 +464,9 @@ void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task,
void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task, void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
enum iser_data_dir cmd_dir); enum iser_data_dir cmd_dir);
int iser_post_recvl(struct iser_conn *ib_conn); int iser_post_recvl(struct iser_conn *iser_conn);
int iser_post_recvm(struct iser_conn *ib_conn, int count); int iser_post_recvm(struct iser_conn *iser_conn, int count);
int iser_post_send(struct iser_conn *ib_conn, struct iser_tx_desc *tx_desc); int iser_post_send(struct iser_conn *iser_conn, struct iser_tx_desc *tx_desc);
int iser_dma_map_task_data(struct iscsi_iser_task *iser_task, int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
struct iser_data_buf *data, struct iser_data_buf *data,
...@@ -476,11 +477,12 @@ void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task, ...@@ -476,11 +477,12 @@ void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
struct iser_data_buf *data); struct iser_data_buf *data);
int iser_initialize_task_headers(struct iscsi_task *task, int iser_initialize_task_headers(struct iscsi_task *task,
struct iser_tx_desc *tx_desc); struct iser_tx_desc *tx_desc);
int iser_alloc_rx_descriptors(struct iser_conn *ib_conn, struct iscsi_session *session); int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
int iser_create_fmr_pool(struct iser_conn *ib_conn, unsigned cmds_max); struct iscsi_session *session);
void iser_free_fmr_pool(struct iser_conn *ib_conn); int iser_create_fmr_pool(struct iser_conn *iser_conn, unsigned cmds_max);
int iser_create_fastreg_pool(struct iser_conn *ib_conn, unsigned cmds_max); void iser_free_fmr_pool(struct iser_conn *iser_conn);
void iser_free_fastreg_pool(struct iser_conn *ib_conn); int iser_create_fastreg_pool(struct iser_conn *iser_conn, unsigned cmds_max);
void iser_free_fastreg_pool(struct iser_conn *iser_conn);
u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task, u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
enum iser_data_dir cmd_dir, sector_t *sector); enum iser_data_dir cmd_dir, sector_t *sector);
#endif #endif
...@@ -49,7 +49,7 @@ static int iser_prepare_read_cmd(struct iscsi_task *task) ...@@ -49,7 +49,7 @@ static int iser_prepare_read_cmd(struct iscsi_task *task)
{ {
struct iscsi_iser_task *iser_task = task->dd_data; struct iscsi_iser_task *iser_task = task->dd_data;
struct iser_device *device = iser_task->ib_conn->device; struct iser_device *device = iser_task->iser_conn->device;
struct iser_regd_buf *regd_buf; struct iser_regd_buf *regd_buf;
int err; int err;
struct iser_hdr *hdr = &iser_task->desc.iser_header; struct iser_hdr *hdr = &iser_task->desc.iser_header;
...@@ -103,7 +103,7 @@ iser_prepare_write_cmd(struct iscsi_task *task, ...@@ -103,7 +103,7 @@ iser_prepare_write_cmd(struct iscsi_task *task,
unsigned int edtl) unsigned int edtl)
{ {
struct iscsi_iser_task *iser_task = task->dd_data; struct iscsi_iser_task *iser_task = task->dd_data;
struct iser_device *device = iser_task->ib_conn->device; struct iser_device *device = iser_task->iser_conn->device;
struct iser_regd_buf *regd_buf; struct iser_regd_buf *regd_buf;
int err; int err;
struct iser_hdr *hdr = &iser_task->desc.iser_header; struct iser_hdr *hdr = &iser_task->desc.iser_header;
...@@ -160,10 +160,10 @@ iser_prepare_write_cmd(struct iscsi_task *task, ...@@ -160,10 +160,10 @@ iser_prepare_write_cmd(struct iscsi_task *task,
} }
/* creates a new tx descriptor and adds header regd buffer */ /* creates a new tx descriptor and adds header regd buffer */
static void iser_create_send_desc(struct iser_conn *ib_conn, static void iser_create_send_desc(struct iser_conn *iser_conn,
struct iser_tx_desc *tx_desc) struct iser_tx_desc *tx_desc)
{ {
struct iser_device *device = ib_conn->device; struct iser_device *device = iser_conn->device;
ib_dma_sync_single_for_cpu(device->ib_device, ib_dma_sync_single_for_cpu(device->ib_device,
tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE); tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE);
...@@ -179,103 +179,106 @@ static void iser_create_send_desc(struct iser_conn *ib_conn, ...@@ -179,103 +179,106 @@ static void iser_create_send_desc(struct iser_conn *ib_conn,
} }
} }
static void iser_free_login_buf(struct iser_conn *ib_conn) static void iser_free_login_buf(struct iser_conn *iser_conn)
{ {
if (!ib_conn->login_buf) if (!iser_conn->login_buf)
return; return;
if (ib_conn->login_req_dma) if (iser_conn->login_req_dma)
ib_dma_unmap_single(ib_conn->device->ib_device, ib_dma_unmap_single(iser_conn->device->ib_device,
ib_conn->login_req_dma, iser_conn->login_req_dma,
ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE); ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE);
if (ib_conn->login_resp_dma) if (iser_conn->login_resp_dma)
ib_dma_unmap_single(ib_conn->device->ib_device, ib_dma_unmap_single(iser_conn->device->ib_device,
ib_conn->login_resp_dma, iser_conn->login_resp_dma,
ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE); ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);
kfree(ib_conn->login_buf); kfree(iser_conn->login_buf);
/* make sure we never redo any unmapping */ /* make sure we never redo any unmapping */
ib_conn->login_req_dma = 0; iser_conn->login_req_dma = 0;
ib_conn->login_resp_dma = 0; iser_conn->login_resp_dma = 0;
ib_conn->login_buf = NULL; iser_conn->login_buf = NULL;
} }
static int iser_alloc_login_buf(struct iser_conn *ib_conn) static int iser_alloc_login_buf(struct iser_conn *iser_conn)
{ {
struct iser_device *device; struct iser_device *device;
int req_err, resp_err; int req_err, resp_err;
BUG_ON(ib_conn->device == NULL); BUG_ON(iser_conn->device == NULL);
device = ib_conn->device; device = iser_conn->device;
ib_conn->login_buf = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN + iser_conn->login_buf = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
ISER_RX_LOGIN_SIZE, GFP_KERNEL); ISER_RX_LOGIN_SIZE, GFP_KERNEL);
if (!ib_conn->login_buf) if (!iser_conn->login_buf)
goto out_err; goto out_err;
ib_conn->login_req_buf = ib_conn->login_buf; iser_conn->login_req_buf = iser_conn->login_buf;
ib_conn->login_resp_buf = ib_conn->login_buf + iser_conn->login_resp_buf = iser_conn->login_buf +
ISCSI_DEF_MAX_RECV_SEG_LEN; ISCSI_DEF_MAX_RECV_SEG_LEN;
ib_conn->login_req_dma = ib_dma_map_single(ib_conn->device->ib_device, iser_conn->login_req_dma = ib_dma_map_single(device->ib_device,
(void *)ib_conn->login_req_buf, iser_conn->login_req_buf,
ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE); ISCSI_DEF_MAX_RECV_SEG_LEN,
DMA_TO_DEVICE);
ib_conn->login_resp_dma = ib_dma_map_single(ib_conn->device->ib_device, iser_conn->login_resp_dma = ib_dma_map_single(device->ib_device,
(void *)ib_conn->login_resp_buf, iser_conn->login_resp_buf,
ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE); ISER_RX_LOGIN_SIZE,
DMA_FROM_DEVICE);
req_err = ib_dma_mapping_error(device->ib_device, req_err = ib_dma_mapping_error(device->ib_device,
ib_conn->login_req_dma); iser_conn->login_req_dma);
resp_err = ib_dma_mapping_error(device->ib_device, resp_err = ib_dma_mapping_error(device->ib_device,
ib_conn->login_resp_dma); iser_conn->login_resp_dma);
if (req_err || resp_err) { if (req_err || resp_err) {
if (req_err) if (req_err)
ib_conn->login_req_dma = 0; iser_conn->login_req_dma = 0;
if (resp_err) if (resp_err)
ib_conn->login_resp_dma = 0; iser_conn->login_resp_dma = 0;
goto free_login_buf; goto free_login_buf;
} }
return 0; return 0;
free_login_buf: free_login_buf:
iser_free_login_buf(ib_conn); iser_free_login_buf(iser_conn);
out_err: out_err:
iser_err("unable to alloc or map login buf\n"); iser_err("unable to alloc or map login buf\n");
return -ENOMEM; return -ENOMEM;
} }
int iser_alloc_rx_descriptors(struct iser_conn *ib_conn, struct iscsi_session *session) int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
struct iscsi_session *session)
{ {
int i, j; int i, j;
u64 dma_addr; u64 dma_addr;
struct iser_rx_desc *rx_desc; struct iser_rx_desc *rx_desc;
struct ib_sge *rx_sg; struct ib_sge *rx_sg;
struct iser_device *device = ib_conn->device; struct iser_device *device = iser_conn->device;
ib_conn->qp_max_recv_dtos = session->cmds_max; iser_conn->qp_max_recv_dtos = session->cmds_max;
ib_conn->qp_max_recv_dtos_mask = session->cmds_max - 1; /* cmds_max is 2^N */ iser_conn->qp_max_recv_dtos_mask = session->cmds_max - 1; /* cmds_max is 2^N */
ib_conn->min_posted_rx = ib_conn->qp_max_recv_dtos >> 2; iser_conn->min_posted_rx = iser_conn->qp_max_recv_dtos >> 2;
if (device->iser_alloc_rdma_reg_res(ib_conn, session->scsi_cmds_max)) if (device->iser_alloc_rdma_reg_res(iser_conn, session->scsi_cmds_max))
goto create_rdma_reg_res_failed; goto create_rdma_reg_res_failed;
if (iser_alloc_login_buf(ib_conn)) if (iser_alloc_login_buf(iser_conn))
goto alloc_login_buf_fail; goto alloc_login_buf_fail;
ib_conn->rx_descs = kmalloc(session->cmds_max * iser_conn->rx_descs = kmalloc(session->cmds_max *
sizeof(struct iser_rx_desc), GFP_KERNEL); sizeof(struct iser_rx_desc), GFP_KERNEL);
if (!ib_conn->rx_descs) if (!iser_conn->rx_descs)
goto rx_desc_alloc_fail; goto rx_desc_alloc_fail;
rx_desc = ib_conn->rx_descs; rx_desc = iser_conn->rx_descs;
for (i = 0; i < ib_conn->qp_max_recv_dtos; i++, rx_desc++) { for (i = 0; i < iser_conn->qp_max_recv_dtos; i++, rx_desc++) {
dma_addr = ib_dma_map_single(device->ib_device, (void *)rx_desc, dma_addr = ib_dma_map_single(device->ib_device, (void *)rx_desc,
ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
if (ib_dma_mapping_error(device->ib_device, dma_addr)) if (ib_dma_mapping_error(device->ib_device, dma_addr))
...@@ -289,52 +292,52 @@ int iser_alloc_rx_descriptors(struct iser_conn *ib_conn, struct iscsi_session *s ...@@ -289,52 +292,52 @@ int iser_alloc_rx_descriptors(struct iser_conn *ib_conn, struct iscsi_session *s
rx_sg->lkey = device->mr->lkey; rx_sg->lkey = device->mr->lkey;
} }
ib_conn->rx_desc_head = 0; iser_conn->rx_desc_head = 0;
return 0; return 0;
rx_desc_dma_map_failed: rx_desc_dma_map_failed:
rx_desc = ib_conn->rx_descs; rx_desc = iser_conn->rx_descs;
for (j = 0; j < i; j++, rx_desc++) for (j = 0; j < i; j++, rx_desc++)
ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr, ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
kfree(ib_conn->rx_descs); kfree(iser_conn->rx_descs);
ib_conn->rx_descs = NULL; iser_conn->rx_descs = NULL;
rx_desc_alloc_fail: rx_desc_alloc_fail:
iser_free_login_buf(ib_conn); iser_free_login_buf(iser_conn);
alloc_login_buf_fail: alloc_login_buf_fail:
device->iser_free_rdma_reg_res(ib_conn); device->iser_free_rdma_reg_res(iser_conn);
create_rdma_reg_res_failed: create_rdma_reg_res_failed:
iser_err("failed allocating rx descriptors / data buffers\n"); iser_err("failed allocating rx descriptors / data buffers\n");
return -ENOMEM; return -ENOMEM;
} }
void iser_free_rx_descriptors(struct iser_conn *ib_conn) void iser_free_rx_descriptors(struct iser_conn *iser_conn)
{ {
int i; int i;
struct iser_rx_desc *rx_desc; struct iser_rx_desc *rx_desc;
struct iser_device *device = ib_conn->device; struct iser_device *device = iser_conn->device;
if (!ib_conn->rx_descs) if (!iser_conn->rx_descs)
goto free_login_buf; goto free_login_buf;
if (device->iser_free_rdma_reg_res) if (device->iser_free_rdma_reg_res)
device->iser_free_rdma_reg_res(ib_conn); device->iser_free_rdma_reg_res(iser_conn);
rx_desc = ib_conn->rx_descs; rx_desc = iser_conn->rx_descs;
for (i = 0; i < ib_conn->qp_max_recv_dtos; i++, rx_desc++) for (i = 0; i < iser_conn->qp_max_recv_dtos; i++, rx_desc++)
ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr, ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
kfree(ib_conn->rx_descs); kfree(iser_conn->rx_descs);
/* make sure we never redo any unmapping */ /* make sure we never redo any unmapping */
ib_conn->rx_descs = NULL; iser_conn->rx_descs = NULL;
free_login_buf: free_login_buf:
iser_free_login_buf(ib_conn); iser_free_login_buf(iser_conn);
} }
static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req) static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req)
{ {
struct iser_conn *ib_conn = conn->dd_data; struct iser_conn *iser_conn = conn->dd_data;
struct iscsi_session *session = conn->session; struct iscsi_session *session = conn->session;
iser_dbg("req op %x flags %x\n", req->opcode, req->flags); iser_dbg("req op %x flags %x\n", req->opcode, req->flags);
...@@ -347,18 +350,18 @@ static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req) ...@@ -347,18 +350,18 @@ static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req)
* response) and no posted send buffers left - they must have been * response) and no posted send buffers left - they must have been
* consumed during previous login phases. * consumed during previous login phases.
*/ */
WARN_ON(ib_conn->post_recv_buf_count != 1); WARN_ON(iser_conn->post_recv_buf_count != 1);
WARN_ON(atomic_read(&ib_conn->post_send_buf_count) != 0); WARN_ON(atomic_read(&iser_conn->post_send_buf_count) != 0);
if (session->discovery_sess) { if (session->discovery_sess) {
iser_info("Discovery session, re-using login RX buffer\n"); iser_info("Discovery session, re-using login RX buffer\n");
return 0; return 0;
} else } else
iser_info("Normal session, posting batch of RX %d buffers\n", iser_info("Normal session, posting batch of RX %d buffers\n",
ib_conn->min_posted_rx); iser_conn->min_posted_rx);
/* Initial post receive buffers */ /* Initial post receive buffers */
if (iser_post_recvm(ib_conn, ib_conn->min_posted_rx)) if (iser_post_recvm(iser_conn, iser_conn->min_posted_rx))
return -ENOMEM; return -ENOMEM;
return 0; return 0;
...@@ -370,7 +373,7 @@ static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req) ...@@ -370,7 +373,7 @@ static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req)
int iser_send_command(struct iscsi_conn *conn, int iser_send_command(struct iscsi_conn *conn,
struct iscsi_task *task) struct iscsi_task *task)
{ {
struct iser_conn *ib_conn = conn->dd_data; struct iser_conn *iser_conn = conn->dd_data;
struct iscsi_iser_task *iser_task = task->dd_data; struct iscsi_iser_task *iser_task = task->dd_data;
unsigned long edtl; unsigned long edtl;
int err; int err;
...@@ -383,7 +386,7 @@ int iser_send_command(struct iscsi_conn *conn, ...@@ -383,7 +386,7 @@ int iser_send_command(struct iscsi_conn *conn,
/* build the tx desc regd header and add it to the tx desc dto */ /* build the tx desc regd header and add it to the tx desc dto */
tx_desc->type = ISCSI_TX_SCSI_COMMAND; tx_desc->type = ISCSI_TX_SCSI_COMMAND;
iser_create_send_desc(ib_conn, tx_desc); iser_create_send_desc(iser_conn, tx_desc);
if (hdr->flags & ISCSI_FLAG_CMD_READ) { if (hdr->flags & ISCSI_FLAG_CMD_READ) {
data_buf = &iser_task->data[ISER_DIR_IN]; data_buf = &iser_task->data[ISER_DIR_IN];
...@@ -423,7 +426,7 @@ int iser_send_command(struct iscsi_conn *conn, ...@@ -423,7 +426,7 @@ int iser_send_command(struct iscsi_conn *conn,
iser_task->status = ISER_TASK_STATUS_STARTED; iser_task->status = ISER_TASK_STATUS_STARTED;
err = iser_post_send(ib_conn, tx_desc); err = iser_post_send(iser_conn, tx_desc);
if (!err) if (!err)
return 0; return 0;
...@@ -439,7 +442,7 @@ int iser_send_data_out(struct iscsi_conn *conn, ...@@ -439,7 +442,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
struct iscsi_task *task, struct iscsi_task *task,
struct iscsi_data *hdr) struct iscsi_data *hdr)
{ {
struct iser_conn *ib_conn = conn->dd_data; struct iser_conn *iser_conn = conn->dd_data;
struct iscsi_iser_task *iser_task = task->dd_data; struct iscsi_iser_task *iser_task = task->dd_data;
struct iser_tx_desc *tx_desc = NULL; struct iser_tx_desc *tx_desc = NULL;
struct iser_regd_buf *regd_buf; struct iser_regd_buf *regd_buf;
...@@ -488,7 +491,7 @@ int iser_send_data_out(struct iscsi_conn *conn, ...@@ -488,7 +491,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
itt, buf_offset, data_seg_len); itt, buf_offset, data_seg_len);
err = iser_post_send(ib_conn, tx_desc); err = iser_post_send(iser_conn, tx_desc);
if (!err) if (!err)
return 0; return 0;
...@@ -501,7 +504,7 @@ int iser_send_data_out(struct iscsi_conn *conn, ...@@ -501,7 +504,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
int iser_send_control(struct iscsi_conn *conn, int iser_send_control(struct iscsi_conn *conn,
struct iscsi_task *task) struct iscsi_task *task)
{ {
struct iser_conn *ib_conn = conn->dd_data; struct iser_conn *iser_conn = conn->dd_data;
struct iscsi_iser_task *iser_task = task->dd_data; struct iscsi_iser_task *iser_task = task->dd_data;
struct iser_tx_desc *mdesc = &iser_task->desc; struct iser_tx_desc *mdesc = &iser_task->desc;
unsigned long data_seg_len; unsigned long data_seg_len;
...@@ -510,9 +513,9 @@ int iser_send_control(struct iscsi_conn *conn, ...@@ -510,9 +513,9 @@ int iser_send_control(struct iscsi_conn *conn,
/* build the tx desc regd header and add it to the tx desc dto */ /* build the tx desc regd header and add it to the tx desc dto */
mdesc->type = ISCSI_TX_CONTROL; mdesc->type = ISCSI_TX_CONTROL;
iser_create_send_desc(ib_conn, mdesc); iser_create_send_desc(iser_conn, mdesc);
device = ib_conn->device; device = iser_conn->device;
data_seg_len = ntoh24(task->hdr->dlength); data_seg_len = ntoh24(task->hdr->dlength);
...@@ -524,16 +527,16 @@ int iser_send_control(struct iscsi_conn *conn, ...@@ -524,16 +527,16 @@ int iser_send_control(struct iscsi_conn *conn,
} }
ib_dma_sync_single_for_cpu(device->ib_device, ib_dma_sync_single_for_cpu(device->ib_device,
ib_conn->login_req_dma, task->data_count, iser_conn->login_req_dma, task->data_count,
DMA_TO_DEVICE); DMA_TO_DEVICE);
memcpy(ib_conn->login_req_buf, task->data, task->data_count); memcpy(iser_conn->login_req_buf, task->data, task->data_count);
ib_dma_sync_single_for_device(device->ib_device, ib_dma_sync_single_for_device(device->ib_device,
ib_conn->login_req_dma, task->data_count, iser_conn->login_req_dma, task->data_count,
DMA_TO_DEVICE); DMA_TO_DEVICE);
tx_dsg->addr = ib_conn->login_req_dma; tx_dsg->addr = iser_conn->login_req_dma;
tx_dsg->length = task->data_count; tx_dsg->length = task->data_count;
tx_dsg->lkey = device->mr->lkey; tx_dsg->lkey = device->mr->lkey;
mdesc->num_sge = 2; mdesc->num_sge = 2;
...@@ -542,7 +545,7 @@ int iser_send_control(struct iscsi_conn *conn, ...@@ -542,7 +545,7 @@ int iser_send_control(struct iscsi_conn *conn,
if (task == conn->login_task) { if (task == conn->login_task) {
iser_dbg("op %x dsl %lx, posting login rx buffer\n", iser_dbg("op %x dsl %lx, posting login rx buffer\n",
task->hdr->opcode, data_seg_len); task->hdr->opcode, data_seg_len);
err = iser_post_recvl(ib_conn); err = iser_post_recvl(iser_conn);
if (err) if (err)
goto send_control_error; goto send_control_error;
err = iser_post_rx_bufs(conn, task->hdr); err = iser_post_rx_bufs(conn, task->hdr);
...@@ -550,7 +553,7 @@ int iser_send_control(struct iscsi_conn *conn, ...@@ -550,7 +553,7 @@ int iser_send_control(struct iscsi_conn *conn,
goto send_control_error; goto send_control_error;
} }
err = iser_post_send(ib_conn, mdesc); err = iser_post_send(iser_conn, mdesc);
if (!err) if (!err)
return 0; return 0;
...@@ -564,59 +567,59 @@ int iser_send_control(struct iscsi_conn *conn, ...@@ -564,59 +567,59 @@ int iser_send_control(struct iscsi_conn *conn,
*/ */
void iser_rcv_completion(struct iser_rx_desc *rx_desc, void iser_rcv_completion(struct iser_rx_desc *rx_desc,
unsigned long rx_xfer_len, unsigned long rx_xfer_len,
struct iser_conn *ib_conn) struct iser_conn *iser_conn)
{ {
struct iscsi_hdr *hdr; struct iscsi_hdr *hdr;
u64 rx_dma; u64 rx_dma;
int rx_buflen, outstanding, count, err; int rx_buflen, outstanding, count, err;
/* differentiate between login to all other PDUs */ /* differentiate between login to all other PDUs */
if ((char *)rx_desc == ib_conn->login_resp_buf) { if ((char *)rx_desc == iser_conn->login_resp_buf) {
rx_dma = ib_conn->login_resp_dma; rx_dma = iser_conn->login_resp_dma;
rx_buflen = ISER_RX_LOGIN_SIZE; rx_buflen = ISER_RX_LOGIN_SIZE;
} else { } else {
rx_dma = rx_desc->dma_addr; rx_dma = rx_desc->dma_addr;
rx_buflen = ISER_RX_PAYLOAD_SIZE; rx_buflen = ISER_RX_PAYLOAD_SIZE;
} }
ib_dma_sync_single_for_cpu(ib_conn->device->ib_device, rx_dma, ib_dma_sync_single_for_cpu(iser_conn->device->ib_device, rx_dma,
rx_buflen, DMA_FROM_DEVICE); rx_buflen, DMA_FROM_DEVICE);
hdr = &rx_desc->iscsi_header; hdr = &rx_desc->iscsi_header;
iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode, iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode,
hdr->itt, (int)(rx_xfer_len - ISER_HEADERS_LEN)); hdr->itt, (int)(rx_xfer_len - ISER_HEADERS_LEN));
iscsi_iser_recv(ib_conn->iscsi_conn, hdr, rx_desc->data, iscsi_iser_recv(iser_conn->iscsi_conn, hdr, rx_desc->data,
rx_xfer_len - ISER_HEADERS_LEN); rx_xfer_len - ISER_HEADERS_LEN);
ib_dma_sync_single_for_device(ib_conn->device->ib_device, rx_dma, ib_dma_sync_single_for_device(iser_conn->device->ib_device, rx_dma,
rx_buflen, DMA_FROM_DEVICE); rx_buflen, DMA_FROM_DEVICE);
/* decrementing conn->post_recv_buf_count only --after-- freeing the * /* decrementing conn->post_recv_buf_count only --after-- freeing the *
* task eliminates the need to worry on tasks which are completed in * * task eliminates the need to worry on tasks which are completed in *
* parallel to the execution of iser_conn_term. So the code that waits * * parallel to the execution of iser_conn_term. So the code that waits *
* for the posted rx bufs refcount to become zero handles everything */ * for the posted rx bufs refcount to become zero handles everything */
ib_conn->post_recv_buf_count--; iser_conn->post_recv_buf_count--;
if (rx_dma == ib_conn->login_resp_dma) if (rx_dma == iser_conn->login_resp_dma)
return; return;
outstanding = ib_conn->post_recv_buf_count; outstanding = iser_conn->post_recv_buf_count;
if (outstanding + ib_conn->min_posted_rx <= ib_conn->qp_max_recv_dtos) { if (outstanding + iser_conn->min_posted_rx <= iser_conn->qp_max_recv_dtos) {
count = min(ib_conn->qp_max_recv_dtos - outstanding, count = min(iser_conn->qp_max_recv_dtos - outstanding,
ib_conn->min_posted_rx); iser_conn->min_posted_rx);
err = iser_post_recvm(ib_conn, count); err = iser_post_recvm(iser_conn, count);
if (err) if (err)
iser_err("posting %d rx bufs err %d\n", count, err); iser_err("posting %d rx bufs err %d\n", count, err);
} }
} }
void iser_snd_completion(struct iser_tx_desc *tx_desc, void iser_snd_completion(struct iser_tx_desc *tx_desc,
struct iser_conn *ib_conn) struct iser_conn *iser_conn)
{ {
struct iscsi_task *task; struct iscsi_task *task;
struct iser_device *device = ib_conn->device; struct iser_device *device = iser_conn->device;
if (tx_desc->type == ISCSI_TX_DATAOUT) { if (tx_desc->type == ISCSI_TX_DATAOUT) {
ib_dma_unmap_single(device->ib_device, tx_desc->dma_addr, ib_dma_unmap_single(device->ib_device, tx_desc->dma_addr,
...@@ -625,7 +628,7 @@ void iser_snd_completion(struct iser_tx_desc *tx_desc, ...@@ -625,7 +628,7 @@ void iser_snd_completion(struct iser_tx_desc *tx_desc,
tx_desc = NULL; tx_desc = NULL;
} }
atomic_dec(&ib_conn->post_send_buf_count); atomic_dec(&iser_conn->post_send_buf_count);
if (tx_desc && tx_desc->type == ISCSI_TX_CONTROL) { if (tx_desc && tx_desc->type == ISCSI_TX_CONTROL) {
/* this arithmetic is legal by libiscsi dd_data allocation */ /* this arithmetic is legal by libiscsi dd_data allocation */
...@@ -658,7 +661,7 @@ void iser_task_rdma_init(struct iscsi_iser_task *iser_task) ...@@ -658,7 +661,7 @@ void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task) void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
{ {
struct iser_device *device = iser_task->ib_conn->device; struct iser_device *device = iser_task->iser_conn->device;
int is_rdma_data_aligned = 1; int is_rdma_data_aligned = 1;
int is_rdma_prot_aligned = 1; int is_rdma_prot_aligned = 1;
int prot_count = scsi_prot_sg_count(iser_task->sc); int prot_count = scsi_prot_sg_count(iser_task->sc);
......
...@@ -49,7 +49,7 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task, ...@@ -49,7 +49,7 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
struct iser_data_buf *data_copy, struct iser_data_buf *data_copy,
enum iser_data_dir cmd_dir) enum iser_data_dir cmd_dir)
{ {
struct ib_device *dev = iser_task->ib_conn->device->ib_device; struct ib_device *dev = iser_task->iser_conn->device->ib_device;
struct scatterlist *sgl = (struct scatterlist *)data->buf; struct scatterlist *sgl = (struct scatterlist *)data->buf;
struct scatterlist *sg; struct scatterlist *sg;
char *mem = NULL; char *mem = NULL;
...@@ -116,7 +116,7 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task, ...@@ -116,7 +116,7 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
struct ib_device *dev; struct ib_device *dev;
unsigned long cmd_data_len; unsigned long cmd_data_len;
dev = iser_task->ib_conn->device->ib_device; dev = iser_task->iser_conn->device->ib_device;
ib_dma_unmap_sg(dev, &data_copy->sg_single, 1, ib_dma_unmap_sg(dev, &data_copy->sg_single, 1,
(cmd_dir == ISER_DIR_OUT) ? (cmd_dir == ISER_DIR_OUT) ?
...@@ -322,7 +322,7 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task, ...@@ -322,7 +322,7 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
struct ib_device *dev; struct ib_device *dev;
iser_task->dir[iser_dir] = 1; iser_task->dir[iser_dir] = 1;
dev = iser_task->ib_conn->device->ib_device; dev = iser_task->iser_conn->device->ib_device;
data->dma_nents = ib_dma_map_sg(dev, data->buf, data->size, dma_dir); data->dma_nents = ib_dma_map_sg(dev, data->buf, data->size, dma_dir);
if (data->dma_nents == 0) { if (data->dma_nents == 0) {
...@@ -337,7 +337,7 @@ void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task, ...@@ -337,7 +337,7 @@ void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
{ {
struct ib_device *dev; struct ib_device *dev;
dev = iser_task->ib_conn->device->ib_device; dev = iser_task->iser_conn->device->ib_device;
ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE); ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE);
} }
...@@ -348,7 +348,7 @@ static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task, ...@@ -348,7 +348,7 @@ static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
enum iser_data_dir cmd_dir, enum iser_data_dir cmd_dir,
int aligned_len) int aligned_len)
{ {
struct iscsi_conn *iscsi_conn = iser_task->ib_conn->iscsi_conn; struct iscsi_conn *iscsi_conn = iser_task->iser_conn->iscsi_conn;
iscsi_conn->fmr_unalign_cnt++; iscsi_conn->fmr_unalign_cnt++;
iser_warn("rdma alignment violation (%d/%d aligned) or FMR not supported\n", iser_warn("rdma alignment violation (%d/%d aligned) or FMR not supported\n",
...@@ -377,8 +377,8 @@ static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task, ...@@ -377,8 +377,8 @@ static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task, int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task,
enum iser_data_dir cmd_dir) enum iser_data_dir cmd_dir)
{ {
struct iser_conn *ib_conn = iser_task->ib_conn; struct iser_conn *iser_conn = iser_task->iser_conn;
struct iser_device *device = ib_conn->device; struct iser_device *device = iser_conn->device;
struct ib_device *ibdev = device->ib_device; struct ib_device *ibdev = device->ib_device;
struct iser_data_buf *mem = &iser_task->data[cmd_dir]; struct iser_data_buf *mem = &iser_task->data[cmd_dir];
struct iser_regd_buf *regd_buf; struct iser_regd_buf *regd_buf;
...@@ -418,8 +418,8 @@ int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task, ...@@ -418,8 +418,8 @@ int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task,
(unsigned long)regd_buf->reg.va, (unsigned long)regd_buf->reg.va,
(unsigned long)regd_buf->reg.len); (unsigned long)regd_buf->reg.len);
} else { /* use FMR for multiple dma entries */ } else { /* use FMR for multiple dma entries */
iser_page_vec_build(mem, ib_conn->fmr.page_vec, ibdev); iser_page_vec_build(mem, iser_conn->fmr.page_vec, ibdev);
err = iser_reg_page_vec(ib_conn, ib_conn->fmr.page_vec, err = iser_reg_page_vec(iser_conn, iser_conn->fmr.page_vec,
&regd_buf->reg); &regd_buf->reg);
if (err && err != -EAGAIN) { if (err && err != -EAGAIN) {
iser_data_buf_dump(mem, ibdev); iser_data_buf_dump(mem, ibdev);
...@@ -427,12 +427,12 @@ int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task, ...@@ -427,12 +427,12 @@ int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task,
mem->dma_nents, mem->dma_nents,
ntoh24(iser_task->desc.iscsi_header.dlength)); ntoh24(iser_task->desc.iscsi_header.dlength));
iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n", iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
ib_conn->fmr.page_vec->data_size, iser_conn->fmr.page_vec->data_size,
ib_conn->fmr.page_vec->length, iser_conn->fmr.page_vec->length,
ib_conn->fmr.page_vec->offset); iser_conn->fmr.page_vec->offset);
for (i = 0; i < ib_conn->fmr.page_vec->length; i++) for (i = 0; i < iser_conn->fmr.page_vec->length; i++)
iser_err("page_vec[%d] = 0x%llx\n", i, iser_err("page_vec[%d] = 0x%llx\n", i,
(unsigned long long) ib_conn->fmr.page_vec->pages[i]); (unsigned long long)iser_conn->fmr.page_vec->pages[i]);
} }
if (err) if (err)
return err; return err;
...@@ -533,7 +533,7 @@ iser_reg_sig_mr(struct iscsi_iser_task *iser_task, ...@@ -533,7 +533,7 @@ iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
struct fast_reg_descriptor *desc, struct ib_sge *data_sge, struct fast_reg_descriptor *desc, struct ib_sge *data_sge,
struct ib_sge *prot_sge, struct ib_sge *sig_sge) struct ib_sge *prot_sge, struct ib_sge *sig_sge)
{ {
struct iser_conn *ib_conn = iser_task->ib_conn; struct iser_conn *iser_conn = iser_task->iser_conn;
struct iser_pi_context *pi_ctx = desc->pi_ctx; struct iser_pi_context *pi_ctx = desc->pi_ctx;
struct ib_send_wr sig_wr, inv_wr; struct ib_send_wr sig_wr, inv_wr;
struct ib_send_wr *bad_wr, *wr = NULL; struct ib_send_wr *bad_wr, *wr = NULL;
...@@ -579,7 +579,7 @@ iser_reg_sig_mr(struct iscsi_iser_task *iser_task, ...@@ -579,7 +579,7 @@ iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
else else
wr->next = &sig_wr; wr->next = &sig_wr;
ret = ib_post_send(ib_conn->qp, wr, &bad_wr); ret = ib_post_send(iser_conn->qp, wr, &bad_wr);
if (ret) { if (ret) {
iser_err("reg_sig_mr failed, ret:%d\n", ret); iser_err("reg_sig_mr failed, ret:%d\n", ret);
goto err; goto err;
...@@ -609,8 +609,8 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task, ...@@ -609,8 +609,8 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
struct ib_sge *sge) struct ib_sge *sge)
{ {
struct fast_reg_descriptor *desc = regd_buf->reg.mem_h; struct fast_reg_descriptor *desc = regd_buf->reg.mem_h;
struct iser_conn *ib_conn = iser_task->ib_conn; struct iser_conn *iser_conn = iser_task->iser_conn;
struct iser_device *device = ib_conn->device; struct iser_device *device = iser_conn->device;
struct ib_device *ibdev = device->ib_device; struct ib_device *ibdev = device->ib_device;
struct ib_mr *mr; struct ib_mr *mr;
struct ib_fast_reg_page_list *frpl; struct ib_fast_reg_page_list *frpl;
...@@ -677,7 +677,7 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task, ...@@ -677,7 +677,7 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
else else
wr->next = &fastreg_wr; wr->next = &fastreg_wr;
ret = ib_post_send(ib_conn->qp, wr, &bad_wr); ret = ib_post_send(iser_conn->qp, wr, &bad_wr);
if (ret) { if (ret) {
iser_err("fast registration failed, ret:%d\n", ret); iser_err("fast registration failed, ret:%d\n", ret);
return ret; return ret;
...@@ -700,8 +700,8 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task, ...@@ -700,8 +700,8 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *iser_task, int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *iser_task,
enum iser_data_dir cmd_dir) enum iser_data_dir cmd_dir)
{ {
struct iser_conn *ib_conn = iser_task->ib_conn; struct iser_conn *iser_conn = iser_task->iser_conn;
struct iser_device *device = ib_conn->device; struct iser_device *device = iser_conn->device;
struct ib_device *ibdev = device->ib_device; struct ib_device *ibdev = device->ib_device;
struct iser_data_buf *mem = &iser_task->data[cmd_dir]; struct iser_data_buf *mem = &iser_task->data[cmd_dir];
struct iser_regd_buf *regd_buf = &iser_task->rdma_regd[cmd_dir]; struct iser_regd_buf *regd_buf = &iser_task->rdma_regd[cmd_dir];
...@@ -724,11 +724,11 @@ int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *iser_task, ...@@ -724,11 +724,11 @@ int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *iser_task,
if (mem->dma_nents != 1 || if (mem->dma_nents != 1 ||
scsi_get_prot_op(iser_task->sc) != SCSI_PROT_NORMAL) { scsi_get_prot_op(iser_task->sc) != SCSI_PROT_NORMAL) {
spin_lock_irqsave(&ib_conn->lock, flags); spin_lock_irqsave(&iser_conn->lock, flags);
desc = list_first_entry(&ib_conn->fastreg.pool, desc = list_first_entry(&iser_conn->fastreg.pool,
struct fast_reg_descriptor, list); struct fast_reg_descriptor, list);
list_del(&desc->list); list_del(&desc->list);
spin_unlock_irqrestore(&ib_conn->lock, flags); spin_unlock_irqrestore(&iser_conn->lock, flags);
regd_buf->reg.mem_h = desc; regd_buf->reg.mem_h = desc;
} }
...@@ -791,9 +791,9 @@ int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *iser_task, ...@@ -791,9 +791,9 @@ int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *iser_task,
return 0; return 0;
err_reg: err_reg:
if (desc) { if (desc) {
spin_lock_irqsave(&ib_conn->lock, flags); spin_lock_irqsave(&iser_conn->lock, flags);
list_add_tail(&desc->list, &ib_conn->fastreg.pool); list_add_tail(&desc->list, &iser_conn->fastreg.pool);
spin_unlock_irqrestore(&ib_conn->lock, flags); spin_unlock_irqrestore(&iser_conn->lock, flags);
} }
return err; return err;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册