Commit 63cf1a90 authored by Bart Van Assche, committed by Jason Gunthorpe

IB/srpt: Add RDMA/CM support

Add a parameter for configuring the port on which the ib_srpt driver
listens for incoming RDMA/CM connections, namely
/sys/kernel/config/target/srpt/discovery_auth/rdma_cm_port. The default
value for this parameter is 0, which means "do not listen for incoming
RDMA/CM connections". Add RDMA/CM support to all code that handles
connection state changes. Modify srpt_init_nodeacl() such that ACLs can
be configured for IPv4 and IPv6 addresses.

Note: incoming connection requests are only accepted for ports that
have been enabled; see the "if (!sport->enabled)" check in the
connection request handler and the configfs attribute
/sys/kernel/config/target/srpt/$port/$port/enable.
Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
Parent 63231585
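
A minimal usage sketch, for illustration only, based on the configfs paths mentioned above and in the patch below; the port number 5000 is arbitrary, $port stands for an srpt target port/TPG name, and the initiator IPv4 address is the example given in the srpt_init_nodeacl() comment:

    # listen for incoming RDMA/CM connections on port 5000 (writing 0 disables listening)
    echo 5000 > /sys/kernel/config/target/srpt/discovery_auth/rdma_cm_port
    # authorize an initiator by its IP address (GUID and GID ACLs keep working as before)
    mkdir /sys/kernel/config/target/srpt/$port/$port/acls/192.168.122.76
    # only enabled ports accept incoming connection requests
    echo 1 > /sys/kernel/config/target/srpt/$port/$port/enable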
@@ -41,6 +41,7 @@
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/inet.h>
#include <rdma/ib_cache.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_tcq.h>
@@ -92,6 +93,11 @@ MODULE_PARM_DESC(srpt_service_guid,
" instead of using the node_guid of the first HCA.");
static struct ib_client srpt_client;
/* Protects both rdma_cm_port and rdma_cm_id. */
static DEFINE_MUTEX(rdma_cm_mutex);
/* Port number RDMA/CM will bind to. */
static u16 rdma_cm_port;
static struct rdma_cm_id *rdma_cm_id;
static void srpt_release_cmd(struct se_cmd *se_cmd);
static void srpt_free_ch(struct kref *kref);
static int srpt_queue_status(struct se_cmd *cmd);
@@ -220,6 +226,9 @@ static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch)
switch (event->event) {
case IB_EVENT_COMM_EST:
if (ch->using_rdma_cm)
rdma_notify(ch->rdma_cm.cm_id, event->event);
else
ib_cm_notify(ch->ib_cm.cm_id, event->event);
break;
case IB_EVENT_QP_LAST_WQE_REACHED:
@@ -1060,6 +1069,8 @@ static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp)
struct ib_qp_attr *attr;
int ret;
WARN_ON_ONCE(ch->using_rdma_cm);
attr = kzalloc(sizeof(*attr), GFP_KERNEL);
if (!attr)
return -ENOMEM;
@@ -1099,6 +1110,8 @@ static int srpt_ch_qp_rtr(struct srpt_rdma_ch *ch, struct ib_qp *qp)
int attr_mask;
int ret;
WARN_ON_ONCE(ch->using_rdma_cm);
qp_attr.qp_state = IB_QPS_RTR;
ret = ib_cm_init_qp_attr(ch->ib_cm.cm_id, &qp_attr, &attr_mask);
if (ret)
@@ -1749,19 +1762,34 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
qp_init->cap.max_recv_sge = qp_init->cap.max_send_sge;
}
if (ch->using_rdma_cm) {
ret = rdma_create_qp(ch->rdma_cm.cm_id, sdev->pd, qp_init);
ch->qp = ch->rdma_cm.cm_id->qp;
} else {
ch->qp = ib_create_qp(sdev->pd, qp_init);
if (IS_ERR(ch->qp)) {
if (!IS_ERR(ch->qp)) {
ret = srpt_init_ch_qp(ch, ch->qp);
if (ret)
ib_destroy_qp(ch->qp);
} else {
ret = PTR_ERR(ch->qp);
if (ret == -ENOMEM) {
sq_size /= 2;
if (sq_size >= MIN_SRPT_SQ_SIZE) {
ib_destroy_cq(ch->cq);
goto retry;
}
}
pr_err("failed to create_qp ret= %d\n", ret);
if (ret) {
bool retry = sq_size > MIN_SRPT_SQ_SIZE;
if (retry) {
pr_debug("failed to create queue pair with sq_size = %d (%d) - retrying\n",
sq_size, ret);
ib_free_cq(ch->cq);
sq_size = max(sq_size / 2, MIN_SRPT_SQ_SIZE);
goto retry;
} else {
pr_err("failed to create queue pair with sq_size = %d (%d)\n",
sq_size, ret);
goto err_destroy_cq;
}
}
atomic_set(&ch->sq_wr_avail, qp_init->cap.max_send_wr);
@@ -1769,10 +1797,6 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
__func__, ch->cq->cqe, qp_init->cap.max_send_sge,
qp_init->cap.max_send_wr, ch);
ret = srpt_init_ch_qp(ch, ch->qp);
if (ret)
goto err_destroy_qp;
if (!sdev->use_srq)
for (i = 0; i < ch->rq_size; i++)
srpt_post_recv(sdev, ch, ch->ioctx_recv_ring[i]);
@@ -1781,9 +1805,8 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
kfree(qp_init);
return ret;
err_destroy_qp:
ib_destroy_qp(ch->qp);
err_destroy_cq:
ch->qp = NULL;
ib_free_cq(ch->cq);
goto out;
}
@@ -1852,9 +1875,13 @@ static int srpt_disconnect_ch(struct srpt_rdma_ch *ch)
if (!srpt_set_ch_state(ch, CH_DISCONNECTING))
return -ENOTCONN;
if (ch->using_rdma_cm) {
ret = rdma_disconnect(ch->rdma_cm.cm_id);
} else {
ret = ib_send_cm_dreq(ch->ib_cm.cm_id, NULL, 0);
if (ret < 0)
ret = ib_send_cm_drep(ch->ib_cm.cm_id, NULL, 0);
}
if (ret < 0 && srpt_close_ch(ch))
ret = 0;
@@ -2005,6 +2032,9 @@ static void srpt_release_channel_work(struct work_struct *w)
transport_deregister_session(se_sess);
ch->sess = NULL;
if (ch->using_rdma_cm)
rdma_destroy_id(ch->rdma_cm.cm_id);
else
ib_destroy_cm_id(ch->ib_cm.cm_id);
srpt_destroy_ch_ib(ch);
@@ -2029,26 +2059,33 @@ static void srpt_release_channel_work(struct work_struct *w)
/**
* srpt_cm_req_recv - process the event IB_CM_REQ_RECEIVED
* @cm_id: IB/CM connection identifier.
* @port_num: Port through which the IB/CM REQ message was received.
* @sdev: HCA through which the login request was received.
* @ib_cm_id: IB/CM connection identifier in case of IB/CM.
* @rdma_cm_id: RDMA/CM connection identifier in case of RDMA/CM.
* @port_num: Port through which the REQ message was received.
* @pkey: P_Key of the incoming connection.
* @req: SRP login request.
* @src_addr: GID of the port that submitted the login request.
* @src_addr: GID (IB/CM) or IP address (RDMA/CM) of the port that submitted
* the login request.
*
* Ownership of the cm_id is transferred to the target session if this
* functions returns zero. Otherwise the caller remains the owner of cm_id.
* function returns zero. Otherwise the caller remains the owner of cm_id.
*/
static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
static int srpt_cm_req_recv(struct srpt_device *const sdev,
struct ib_cm_id *ib_cm_id,
struct rdma_cm_id *rdma_cm_id,
u8 port_num, __be16 pkey,
const struct srp_login_req *req,
const char *src_addr)
{
struct srpt_device *sdev = cm_id->context;
struct srpt_port *sport = &sdev->port[port_num - 1];
struct srpt_nexus *nexus;
struct srp_login_rsp *rsp = NULL;
struct srp_login_rej *rej = NULL;
struct ib_cm_rep_param *rep_param = NULL;
union {
struct rdma_conn_param rdma_cm;
struct ib_cm_rep_param ib_cm;
} *rep_param = NULL;
struct srpt_rdma_ch *ch;
char i_port_id[36];
u32 it_iu_len;
@@ -2118,8 +2155,14 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
ch->zw_cqe.done = srpt_zerolength_write_done;
INIT_WORK(&ch->release_work, srpt_release_channel_work);
ch->sport = sport;
ch->ib_cm.cm_id = cm_id;
cm_id->context = ch;
if (ib_cm_id) {
ch->ib_cm.cm_id = ib_cm_id;
ib_cm_id->context = ch;
} else {
ch->using_rdma_cm = true;
ch->rdma_cm.cm_id = rdma_cm_id;
rdma_cm_id->context = ch;
}
/*
* ch->rq_size should be at least as large as the initiator queue
* depth to avoid that the initiator driver has to report QUEUE_FULL
@@ -2230,7 +2273,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
mutex_unlock(&sport->mutex);
ret = srpt_ch_qp_rtr(ch, ch->qp);
ret = ch->using_rdma_cm ? 0 : srpt_ch_qp_rtr(ch, ch->qp);
if (ret) {
rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
pr_err("rejected SRP_LOGIN_REQ because enabling RTR failed (error code = %d)\n",
@@ -2254,25 +2297,38 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
atomic_set(&ch->req_lim_delta, 0);
/* create cm reply */
rep_param->qp_num = ch->qp->qp_num;
rep_param->private_data = (void *)rsp;
rep_param->private_data_len = sizeof(*rsp);
rep_param->rnr_retry_count = 7;
rep_param->flow_control = 1;
rep_param->failover_accepted = 0;
rep_param->srq = 1;
rep_param->responder_resources = 4;
rep_param->initiator_depth = 4;
if (ch->using_rdma_cm) {
rep_param->rdma_cm.private_data = (void *)rsp;
rep_param->rdma_cm.private_data_len = sizeof(*rsp);
rep_param->rdma_cm.rnr_retry_count = 7;
rep_param->rdma_cm.flow_control = 1;
rep_param->rdma_cm.responder_resources = 4;
rep_param->rdma_cm.initiator_depth = 4;
} else {
rep_param->ib_cm.qp_num = ch->qp->qp_num;
rep_param->ib_cm.private_data = (void *)rsp;
rep_param->ib_cm.private_data_len = sizeof(*rsp);
rep_param->ib_cm.rnr_retry_count = 7;
rep_param->ib_cm.flow_control = 1;
rep_param->ib_cm.failover_accepted = 0;
rep_param->ib_cm.srq = 1;
rep_param->ib_cm.responder_resources = 4;
rep_param->ib_cm.initiator_depth = 4;
}
/*
* Hold the sport mutex while accepting a connection to avoid that
* srpt_disconnect_ch() is invoked concurrently with this code.
*/
mutex_lock(&sport->mutex);
if (sport->enabled && ch->state == CH_CONNECTING)
ret = ib_send_cm_rep(cm_id, rep_param);
if (sport->enabled && ch->state == CH_CONNECTING) {
if (ch->using_rdma_cm)
ret = rdma_accept(rdma_cm_id, &rep_param->rdma_cm);
else
ret = ib_send_cm_rep(ib_cm_id, &rep_param->ib_cm);
} else {
ret = -EINVAL;
}
mutex_unlock(&sport->mutex);
switch (ret) {
@@ -2302,7 +2358,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
ch->sport->sdev, ch->rq_size,
ch->max_rsp_size, DMA_TO_DEVICE);
free_ch:
cm_id->context = NULL;
if (ib_cm_id)
ib_cm_id->context = NULL;
kfree(ch);
ch = NULL;
@@ -2315,8 +2372,11 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
rej->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
SRP_BUF_FORMAT_INDIRECT);
ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
(void *)rej, sizeof(*rej));
if (rdma_cm_id)
rdma_reject(rdma_cm_id, rej, sizeof(*rej));
else
ib_send_cm_rej(ib_cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
rej, sizeof(*rej));
out:
kfree(rep_param);
@@ -2335,10 +2395,44 @@ static int srpt_ib_cm_req_recv(struct ib_cm_id *cm_id,
srpt_format_guid(sguid, sizeof(sguid),
&param->primary_path->dgid.global.interface_id);
return srpt_cm_req_recv(cm_id, param->port, param->primary_path->pkey,
return srpt_cm_req_recv(cm_id->context, cm_id, NULL, param->port,
param->primary_path->pkey,
private_data, sguid);
}
static int srpt_rdma_cm_req_recv(struct rdma_cm_id *cm_id,
struct rdma_cm_event *event)
{
struct srpt_device *sdev;
struct srp_login_req req;
const struct srp_login_req_rdma *req_rdma;
char src_addr[40];
sdev = ib_get_client_data(cm_id->device, &srpt_client);
if (!sdev)
return -ECONNREFUSED;
if (event->param.conn.private_data_len < sizeof(*req_rdma))
return -EINVAL;
/* Transform srp_login_req_rdma into srp_login_req. */
req_rdma = event->param.conn.private_data;
memset(&req, 0, sizeof(req));
req.opcode = req_rdma->opcode;
req.tag = req_rdma->tag;
req.req_it_iu_len = req_rdma->req_it_iu_len;
req.req_buf_fmt = req_rdma->req_buf_fmt;
req.req_flags = req_rdma->req_flags;
memcpy(req.initiator_port_id, req_rdma->initiator_port_id, 16);
memcpy(req.target_port_id, req_rdma->target_port_id, 16);
snprintf(src_addr, sizeof(src_addr), "%pIS",
&cm_id->route.addr.src_addr);
return srpt_cm_req_recv(sdev, NULL, cm_id, cm_id->port_num,
cm_id->route.path_rec->pkey, &req, src_addr);
}
static void srpt_cm_rej_recv(struct srpt_rdma_ch *ch,
enum ib_cm_rej_reason reason,
const u8 *private_data,
@@ -2362,14 +2456,14 @@ static void srpt_cm_rej_recv(struct srpt_rdma_ch *ch,
* srpt_cm_rtu_recv - process an IB_CM_RTU_RECEIVED or USER_ESTABLISHED event
* @ch: SRPT RDMA channel.
*
* An IB_CM_RTU_RECEIVED message indicates that the connection is established
* and that the recipient may begin transmitting (RTU = ready to use).
* An RTU (ready to use) message indicates that the connection has been
* established and that the recipient may begin transmitting.
*/
static void srpt_cm_rtu_recv(struct srpt_rdma_ch *ch)
{
int ret;
ret = srpt_ch_qp_rts(ch, ch->qp);
ret = ch->using_rdma_cm ? 0 : srpt_ch_qp_rts(ch, ch->qp);
if (ret < 0) {
pr_err("%s-%d: QP transition to RTS failed\n", ch->sess_name,
ch->qp->qp_num);
@@ -2456,6 +2550,49 @@ static int srpt_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
return ret;
}
static int srpt_rdma_cm_handler(struct rdma_cm_id *cm_id,
struct rdma_cm_event *event)
{
struct srpt_rdma_ch *ch = cm_id->context;
int ret = 0;
switch (event->event) {
case RDMA_CM_EVENT_CONNECT_REQUEST:
ret = srpt_rdma_cm_req_recv(cm_id, event);
break;
case RDMA_CM_EVENT_REJECTED:
srpt_cm_rej_recv(ch, event->status,
event->param.conn.private_data,
event->param.conn.private_data_len);
break;
case RDMA_CM_EVENT_ESTABLISHED:
srpt_cm_rtu_recv(ch);
break;
case RDMA_CM_EVENT_DISCONNECTED:
if (ch->state < CH_DISCONNECTING)
srpt_disconnect_ch(ch);
else
srpt_close_ch(ch);
break;
case RDMA_CM_EVENT_TIMEWAIT_EXIT:
srpt_close_ch(ch);
break;
case RDMA_CM_EVENT_UNREACHABLE:
pr_info("Received CM REP error for ch %s-%d.\n", ch->sess_name,
ch->qp->qp_num);
break;
case RDMA_CM_EVENT_DEVICE_REMOVAL:
case RDMA_CM_EVENT_ADDR_CHANGE:
break;
default:
pr_err("received unrecognized RDMA CM event %d\n",
event->event);
break;
}
return ret;
}
static int srpt_write_pending_status(struct se_cmd *se_cmd)
{
struct srpt_send_ioctx *ioctx;
@@ -2827,7 +2964,7 @@ static void srpt_add_one(struct ib_device *device)
{
struct srpt_device *sdev;
struct srpt_port *sport;
int i;
int i, ret;
pr_debug("device = %p\n", device);
@@ -2851,9 +2988,15 @@ static void srpt_add_one(struct ib_device *device)
if (!srpt_service_guid)
srpt_service_guid = be64_to_cpu(device->node_guid);
if (rdma_port_get_link_layer(device, 1) == IB_LINK_LAYER_INFINIBAND)
sdev->cm_id = ib_create_cm_id(device, srpt_cm_handler, sdev);
if (IS_ERR(sdev->cm_id))
if (IS_ERR(sdev->cm_id)) {
pr_info("ib_create_cm_id() failed: %ld\n",
PTR_ERR(sdev->cm_id));
sdev->cm_id = NULL;
if (!rdma_cm_id)
goto err_ring;
}
/* print out target login information */
pr_debug("Target login info: id_ext=%016llx,ioc_guid=%016llx,"
@@ -2866,8 +3009,14 @@ static void srpt_add_one(struct ib_device *device)
* in the system as service_id; therefore, the target_id will change
* if this HCA is gone bad and replaced by different HCA
*/
if (ib_cm_listen(sdev->cm_id, cpu_to_be64(srpt_service_guid), 0))
ret = sdev->cm_id ?
ib_cm_listen(sdev->cm_id, cpu_to_be64(srpt_service_guid), 0) :
0;
if (ret < 0) {
pr_err("ib_cm_listen() failed: %d (cm_id state = %d)\n", ret,
sdev->cm_id->state);
goto err_cm;
}
INIT_IB_EVENT_HANDLER(&sdev->event_handler, sdev->device,
srpt_event_handler);
@@ -2907,6 +3056,7 @@ static void srpt_add_one(struct ib_device *device)
err_event:
ib_unregister_event_handler(&sdev->event_handler);
err_cm:
if (sdev->cm_id)
ib_destroy_cm_id(sdev->cm_id);
err_ring:
srpt_free_srq(sdev);
@@ -2942,8 +3092,11 @@ static void srpt_remove_one(struct ib_device *device, void *client_data)
for (i = 0; i < sdev->device->phys_port_cnt; i++)
cancel_work_sync(&sdev->port[i].work);
if (sdev->cm_id)
ib_destroy_cm_id(sdev->cm_id);
ib_set_client_data(device, &srpt_client, NULL);
/*
* Unregistering a target must happen after destroying sdev->cm_id
* such that no new SRP_LOGIN_REQ information units can arrive while
@@ -3106,18 +3259,26 @@ static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name)
leading_zero_bytes = 16 - count;
memset(i_port_id, 0, leading_zero_bytes);
ret = hex2bin(i_port_id + leading_zero_bytes, p, count);
if (ret < 0)
pr_debug("hex2bin failed for srpt_parse_i_port_id: %d\n", ret);
out:
return ret;
}
/*
* configfs callback function invoked for
* mkdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
* configfs callback function invoked for mkdir
* /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
*
* i_port_id must be an initiator port GUID, GID or IP address. See also the
* target_alloc_session() calls in this driver. Examples of valid initiator
* port IDs:
* 0x0000000000000000505400fffe4a0b7b
* 0000000000000000505400fffe4a0b7b
* 5054:00ff:fe4a:0b7b
* 192.168.122.76
*/
static int srpt_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
{
struct sockaddr_storage sa;
u64 guid;
u8 i_port_id[16];
int ret;
@@ -3125,6 +3286,9 @@ static int srpt_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
ret = srpt_parse_guid(&guid, name);
if (ret < 0)
ret = srpt_parse_i_port_id(i_port_id, name);
if (ret < 0)
ret = inet_pton_with_scope(&init_net, AF_UNSPEC, name, NULL,
&sa);
if (ret < 0)
pr_err("invalid initiator port ID %s\n", name);
return ret;
@@ -3299,6 +3463,95 @@ static struct configfs_attribute *srpt_tpg_attrib_attrs[] = {
NULL,
};
static struct rdma_cm_id *srpt_create_rdma_id(struct sockaddr *listen_addr)
{
struct rdma_cm_id *rdma_cm_id;
int ret;
rdma_cm_id = rdma_create_id(&init_net, srpt_rdma_cm_handler,
NULL, RDMA_PS_TCP, IB_QPT_RC);
if (IS_ERR(rdma_cm_id)) {
pr_err("RDMA/CM ID creation failed: %ld\n",
PTR_ERR(rdma_cm_id));
goto out;
}
ret = rdma_bind_addr(rdma_cm_id, listen_addr);
if (ret) {
char addr_str[64];
snprintf(addr_str, sizeof(addr_str), "%pISp", listen_addr);
pr_err("Binding RDMA/CM ID to address %s failed: %d\n",
addr_str, ret);
rdma_destroy_id(rdma_cm_id);
rdma_cm_id = ERR_PTR(ret);
goto out;
}
ret = rdma_listen(rdma_cm_id, 128);
if (ret) {
pr_err("rdma_listen() failed: %d\n", ret);
rdma_destroy_id(rdma_cm_id);
rdma_cm_id = ERR_PTR(ret);
}
out:
return rdma_cm_id;
}
static ssize_t srpt_rdma_cm_port_show(struct config_item *item, char *page)
{
return sprintf(page, "%d\n", rdma_cm_port);
}
static ssize_t srpt_rdma_cm_port_store(struct config_item *item,
const char *page, size_t count)
{
struct sockaddr_in addr4 = { .sin_family = AF_INET };
struct sockaddr_in6 addr6 = { .sin6_family = AF_INET6 };
struct rdma_cm_id *new_id = NULL;
u16 val;
int ret;
ret = kstrtou16(page, 0, &val);
if (ret < 0)
return ret;
ret = count;
if (rdma_cm_port == val)
goto out;
if (val) {
addr6.sin6_port = cpu_to_be16(val);
new_id = srpt_create_rdma_id((struct sockaddr *)&addr6);
if (IS_ERR(new_id)) {
addr4.sin_port = cpu_to_be16(val);
new_id = srpt_create_rdma_id((struct sockaddr *)&addr4);
if (IS_ERR(new_id)) {
ret = PTR_ERR(new_id);
goto out;
}
}
}
mutex_lock(&rdma_cm_mutex);
rdma_cm_port = val;
swap(rdma_cm_id, new_id);
mutex_unlock(&rdma_cm_mutex);
if (new_id)
rdma_destroy_id(new_id);
ret = count;
out:
return ret;
}
CONFIGFS_ATTR(srpt_, rdma_cm_port);
static struct configfs_attribute *srpt_da_attrs[] = {
&srpt_attr_rdma_cm_port,
NULL,
};
static ssize_t srpt_tpg_enable_show(struct config_item *item, char *page)
{
struct se_portal_group *se_tpg = to_tpg(item);
@@ -3444,6 +3697,7 @@ static const struct target_core_fabric_ops srpt_template = {
.fabric_drop_tpg = srpt_drop_tpg,
.fabric_init_nodeacl = srpt_init_nodeacl,
.tfc_discovery_attrs = srpt_da_attrs,
.tfc_wwn_attrs = srpt_wwn_attrs,
.tfc_tpg_base_attrs = srpt_tpg_attrs,
.tfc_tpg_attrib_attrs = srpt_tpg_attrib_attrs,
@@ -3497,6 +3751,8 @@ static int __init srpt_init_module(void)
static void __exit srpt_cleanup_module(void)
{
if (rdma_cm_id)
rdma_destroy_id(rdma_cm_id);
ib_unregister_client(&srpt_client);
target_unregister_template(&srpt_template);
}
@@ -42,6 +42,7 @@
#include <rdma/ib_verbs.h>
#include <rdma/ib_sa.h>
#include <rdma/ib_cm.h>
#include <rdma/rdma_cm.h>
#include <rdma/rw.h>
#include <scsi/srp.h>
@@ -261,6 +262,7 @@ enum rdma_ch_state {
* @spinlock: Protects free_list and state.
* @free_list: Head of list with free send I/O contexts.
* @state: channel state. See also enum rdma_ch_state.
* @using_rdma_cm: Whether the RDMA/CM or IB/CM is used for this channel.
* @processing_wait_list: Whether or not cmd_wait_list is being processed.
* @ioctx_ring: Send ring.
* @ioctx_recv_ring: Receive I/O context ring.
@@ -280,6 +282,9 @@ struct srpt_rdma_ch {
struct {
struct ib_cm_id *cm_id;
} ib_cm;
struct {
struct rdma_cm_id *cm_id;
} rdma_cm;
};
struct ib_cq *cq;
struct ib_cqe zw_cqe;
@@ -300,9 +305,10 @@ struct srpt_rdma_ch {
struct list_head list;
struct list_head cmd_wait_list;
uint16_t pkey;
bool using_rdma_cm;
bool processing_wait_list;
struct se_session *sess;
u8 sess_name[24];
u8 sess_name[40];
struct work_struct release_work;
};