/*
 * iSER transport for the Open iSCSI Initiator & iSER transport internals
 *
 * Copyright (C) 2004 Dmitry Yusupov
 * Copyright (C) 2004 Alex Aizman
 * Copyright (C) 2005 Mike Christie
 * based on code maintained by open-iscsi@googlegroups.com
 *
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __ISCSI_ISER_H__
#define __ISCSI_ISER_H__

#include <linux/types.h>
#include <linux/net.h>
#include <linux/printk.h>
#include <scsi/libiscsi.h>
#include <scsi/scsi_transport_iscsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>

#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/mutex.h>
#include <linux/mempool.h>
#include <linux/uio.h>

#include <linux/socket.h>
#include <linux/in.h>
#include <linux/in6.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_fmr_pool.h>
#include <rdma/rdma_cm.h>

#define DRV_NAME	"iser"
#define PFX		DRV_NAME ": "
#define DRV_VER		"1.6"

#define iser_dbg(fmt, arg...)				 \
	do {						 \
		if (unlikely(iser_debug_level > 2))	 \
			printk(KERN_DEBUG PFX "%s: " fmt,\
				__func__ , ## arg);	 \
	} while (0)

#define iser_warn(fmt, arg...)				\
	do {						\
		if (unlikely(iser_debug_level > 0))	\
			pr_warn(PFX "%s: " fmt,		\
				__func__ , ## arg);	\
	} while (0)

#define iser_info(fmt, arg...)				\
	do {						\
		if (unlikely(iser_debug_level > 1))	\
			pr_info(PFX "%s: " fmt,		\
				__func__ , ## arg);	\
	} while (0)

#define iser_err(fmt, arg...) \
	pr_err(PFX "%s: " fmt, __func__ , ## arg)

#define SHIFT_4K	12
#define SIZE_4K	(1ULL << SHIFT_4K)
#define MASK_4K	(~(SIZE_4K-1))

/* Default support is 512KB I/O size */
#define ISER_DEF_MAX_SECTORS		1024
#define ISCSI_ISER_DEF_SG_TABLESIZE	((ISER_DEF_MAX_SECTORS * 512) >> SHIFT_4K)
/* Maximum support is 8MB I/O size */
#define ISCSI_ISER_MAX_SG_TABLESIZE	((16384 * 512) >> SHIFT_4K)
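/* i.e. ISCSI_ISER_DEF_SG_TABLESIZE = (1024 * 512) >> 12 = 128 entries and
 * ISCSI_ISER_MAX_SG_TABLESIZE = (16384 * 512) >> 12 = 2048 entries */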

#define ISER_DEF_XMIT_CMDS_DEFAULT		512
#if ISCSI_DEF_XMIT_CMDS_MAX > ISER_DEF_XMIT_CMDS_DEFAULT
	#define ISER_DEF_XMIT_CMDS_MAX		ISCSI_DEF_XMIT_CMDS_MAX
#else
	#define ISER_DEF_XMIT_CMDS_MAX		ISER_DEF_XMIT_CMDS_DEFAULT
#endif
#define ISER_DEF_CMD_PER_LUN		ISER_DEF_XMIT_CMDS_MAX

/* QP settings */
/* Maximal bounds on received asynchronous PDUs */
#define ISER_MAX_RX_MISC_PDUS		4 /* NOOP_IN(2) , ASYNC_EVENT(2)   */

#define ISER_MAX_TX_MISC_PDUS		6 /* NOOP_OUT(2), TEXT(1),         *
					   * SCSI_TMFUNC(2), LOGOUT(1) */

#define ISER_QP_MAX_RECV_DTOS		(ISER_DEF_XMIT_CMDS_MAX)

#define ISER_MIN_POSTED_RX		(ISER_DEF_XMIT_CMDS_MAX >> 2)

/* The max TX (send) WR supported by the iSER QP is defined by
 * max_send_wr = T * (1 + D) + C, where T is the max number of outstanding
 * SCSI commands, D is how many inflight dataouts we expect at most per
 * command and C covers the misc TX/RX control PDUs. The tx posting &
 * completion handling code supports an -EAGAIN scheme where tx is suspended
 * till the QP has room for more send WRs. D=8 comes from 64K/8K.
 */

#define ISER_INFLIGHT_DATAOUTS		8

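/* e.g. with the default ISER_DEF_XMIT_CMDS_MAX of 512:
 * 512 * (1 + 8) + 6 + 4 = 4618 send WRs */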
#define ISER_QP_MAX_REQ_DTOS		(ISER_DEF_XMIT_CMDS_MAX *    \
					(1 + ISER_INFLIGHT_DATAOUTS) + \
					ISER_MAX_TX_MISC_PDUS        + \
					ISER_MAX_RX_MISC_PDUS)

/* Max registration work requests per command */
#define ISER_MAX_REG_WR_PER_CMD		5

/* For Signature we don't support DATAOUTs so no need to make room for them */
#define ISER_QP_SIG_MAX_REQ_DTOS	(ISER_DEF_XMIT_CMDS_MAX	*       \
					(1 + ISER_MAX_REG_WR_PER_CMD) + \
					ISER_MAX_TX_MISC_PDUS         + \
					ISER_MAX_RX_MISC_PDUS)

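/* How many commands fit in a given send queue depth; the inverse of the
 * ISER_QP_MAX_REQ_DTOS calculation above */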
#define ISER_GET_MAX_XMIT_CMDS(send_wr) ((send_wr			\
					 - ISER_MAX_TX_MISC_PDUS	\
					 - ISER_MAX_RX_MISC_PDUS) /	\
					 (1 + ISER_INFLIGHT_DATAOUTS))

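/* Number of commands between signaled send completions
 * (tracked via sig_count in struct ib_conn) */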
#define ISER_SIGNAL_CMD_COUNT 32

#define ISER_VER			0x10
#define ISER_WSV			0x08
#define ISER_RSV			0x04

/**
 * struct iser_hdr - iSER header
 *
 * @flags:        flags support (zbva, remote_inv)
 * @rsvd:         reserved
 * @write_stag:   write rkey
 * @write_va:     write virtual address
 * @read_stag:    read rkey
 * @read_va:      read virtual address
 */
struct iser_hdr {
	u8      flags;
	u8      rsvd[3];
	__be32  write_stag;
	__be64  write_va;
	__be32  read_stag;
	__be64  read_va;
} __attribute__((packed));

#define ISER_ZBVA_NOT_SUPPORTED		0x80
#define ISER_SEND_W_INV_NOT_SUPPORTED	0x40

struct iser_cm_hdr {
	u8      flags;
	u8      rsvd[3];
} __packed;

/* Constant PDU length calculations */
#define ISER_HEADERS_LEN  (sizeof(struct iser_hdr) + sizeof(struct iscsi_hdr))

#define ISER_RECV_DATA_SEG_LEN	128
#define ISER_RX_PAYLOAD_SIZE	(ISER_HEADERS_LEN + ISER_RECV_DATA_SEG_LEN)
#define ISER_RX_LOGIN_SIZE	(ISER_HEADERS_LEN + ISCSI_DEF_MAX_RECV_SEG_LEN)

/* Length of an object name string */
#define ISER_OBJECT_NAME_SIZE		    64

enum iser_conn_state {
	ISER_CONN_INIT,		   /* descriptor allocd, no conn          */
	ISER_CONN_PENDING,	   /* in the process of being established */
	ISER_CONN_UP,		   /* up and running                      */
	ISER_CONN_TERMINATING,	   /* in the process of being terminated  */
	ISER_CONN_DOWN,		   /* shut down                           */
	ISER_CONN_STATES_NUM
};

enum iser_task_status {
	ISER_TASK_STATUS_INIT = 0,
	ISER_TASK_STATUS_STARTED,
	ISER_TASK_STATUS_COMPLETED
};

enum iser_data_dir {
	ISER_DIR_IN = 0,	   /* to initiator */
	ISER_DIR_OUT,		   /* from initiator */
	ISER_DIRS_NUM
};

/**
 * struct iser_data_buf - iSER data buffer
 *
 * @sg:           pointer to the sg list
 * @size:         num entries of this sg
 * @data_len:     total buffer byte len
 * @dma_nents:    returned by dma_map_sg
 */
struct iser_data_buf {
	struct scatterlist *sg;
	int                size;
	unsigned long      data_len;
	unsigned int       dma_nents;
};

/* fwd declarations */
struct iser_device;
struct iscsi_iser_task;
struct iscsi_endpoint;
struct iser_reg_resources;

/**
 * struct iser_mem_reg - iSER memory registration info
 *
 * @sge:          memory region sg element
 * @rkey:         memory region remote key
 * @mem_h:        pointer to registration context (FMR/Fastreg)
 */
struct iser_mem_reg {
	struct ib_sge	 sge;
	u32		 rkey;
	void		*mem_h;
};

enum iser_desc_type {
	ISCSI_TX_CONTROL,
	ISCSI_TX_SCSI_COMMAND,
	ISCSI_TX_DATAOUT
};

/* Maximum number of work requests per task:
 * Data memory region local invalidate + fast registration
 * Protection memory region local invalidate + fast registration
 * Signature memory region local invalidate + fast registration
 * PDU send
 */
#define ISER_MAX_WRS 7

/**
 * struct iser_tx_desc - iSER TX descriptor
 *
 * @iser_header:   iser header
 * @iscsi_header:  iscsi header
 * @type:          command/control/dataout
 * @dma_addr:      header buffer dma_address
 * @tx_sg:         sg[0] points to iser/iscsi headers
 *                 sg[1] optionally points to either of immediate data
 *                 unsolicited data-out or control
 * @num_sge:       number sges used on this TX task
 * @cqe:           completion handler
 * @mapped:        Is the task header mapped
 * @wr_idx:        Current WR index
 * @wrs:           Array of WRs per task
 * @data_reg:      Data buffer registration details
 * @prot_reg:      Protection buffer registration details
 * @sig_attrs:     Signature attributes
 */
struct iser_tx_desc {
	struct iser_hdr              iser_header;
	struct iscsi_hdr             iscsi_header;
	enum   iser_desc_type        type;
	u64		             dma_addr;
	struct ib_sge		     tx_sg[2];
	int                          num_sge;
	struct ib_cqe		     cqe;
	bool			     mapped;
	u8                           wr_idx;
	union iser_wr {
		struct ib_send_wr		send;
		struct ib_reg_wr		fast_reg;
		struct ib_sig_handover_wr	sig;
	} wrs[ISER_MAX_WRS];
	struct iser_mem_reg          data_reg;
	struct iser_mem_reg          prot_reg;
	struct ib_sig_attrs          sig_attrs;
};

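/* Pad the packed iser_rx_desc below up to 256 bytes */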
#define ISER_RX_PAD_SIZE	(256 - (ISER_RX_PAYLOAD_SIZE + \
				 sizeof(u64) + sizeof(struct ib_sge) + \
				 sizeof(struct ib_cqe)))

/**
 * struct iser_rx_desc - iSER RX descriptor
 *
 * @iser_header:   iser header
 * @iscsi_header:  iscsi header
 * @data:          received data segment
 * @dma_addr:      receive buffer dma address
 * @rx_sg:         ib_sge of receive buffer
 * @cqe:           completion handler
 * @pad:           for sense data TODO: Modify to maximum sense length supported
 */
struct iser_rx_desc {
	struct iser_hdr              iser_header;
	struct iscsi_hdr             iscsi_header;
	char		             data[ISER_RECV_DATA_SEG_LEN];
	u64		             dma_addr;
	struct ib_sge		     rx_sg;
	struct ib_cqe		     cqe;
	char		             pad[ISER_RX_PAD_SIZE];
} __packed;

/**
 * struct iser_login_desc - iSER login descriptor
 *
 * @req:           pointer to login request buffer
 * @rsp:           pointer to login response buffer
 * @req_dma:       DMA address of login request buffer
 * @rsp_dma:       DMA address of login response buffer
 * @sge:           IB sge for login post recv
 * @cqe:           completion handler
 */
struct iser_login_desc {
	void                         *req;
	void                         *rsp;
	u64                          req_dma;
	u64                          rsp_dma;
	struct ib_sge                sge;
	struct ib_cqe		     cqe;
} __attribute__((packed));

struct iser_conn;
struct ib_conn;
struct iscsi_iser_task;

/**
 * struct iser_comp - iSER completion context
 *
 * @cq:         completion queue
 * @active_qps: Number of active QPs attached
 *              to completion context
 */
struct iser_comp {
	struct ib_cq		*cq;
	int                      active_qps;
};

/**
 * struct iser_reg_ops - Memory registration operations
 *     per-device registration schemes
 *
 * @alloc_reg_res:     Allocate registration resources
 * @free_reg_res:      Free registration resources
 * @reg_mem:           Register memory buffers
 * @unreg_mem:         Un-register memory buffers
 * @reg_desc_get:      Get a registration descriptor from the pool
 * @reg_desc_put:      Put a registration descriptor back to the pool
 */
struct iser_reg_ops {
	int            (*alloc_reg_res)(struct ib_conn *ib_conn,
					unsigned cmds_max,
					unsigned int size);
	void           (*free_reg_res)(struct ib_conn *ib_conn);
	int            (*reg_mem)(struct iscsi_iser_task *iser_task,
				  struct iser_data_buf *mem,
				  struct iser_reg_resources *rsc,
				  struct iser_mem_reg *reg);
	void           (*unreg_mem)(struct iscsi_iser_task *iser_task,
				    enum iser_data_dir cmd_dir);
	struct iser_fr_desc * (*reg_desc_get)(struct ib_conn *ib_conn);
	void           (*reg_desc_put)(struct ib_conn *ib_conn,
				       struct iser_fr_desc *desc);
};

/**
 * struct iser_device - iSER device handle
 *
 * @ib_device:     RDMA device
 * @pd:            Protection Domain for this device
 * @mr:            Global DMA memory region
 * @event_handler: IB events handle routine
 * @ig_list:	   entry in devices list
 * @refcount:      Reference counter, dominated by open iser connections
 * @comps_used:    Number of completion contexts used, Min between online
 *                 cpus and device max completion vectors
 * @comps:         Dynamically allocated array of completion handlers
 * @reg_ops:       Registration ops
 */
struct iser_device {
	struct ib_device             *ib_device;
	struct ib_pd	             *pd;
	struct ib_mr	             *mr;
	struct ib_event_handler      event_handler;
	struct list_head             ig_list;
	int                          refcount;
	int			     comps_used;
	struct iser_comp	     *comps;
	const struct iser_reg_ops    *reg_ops;
};

#define ISER_CHECK_GUARD	0xc0
#define ISER_CHECK_REFTAG	0x0f
#define ISER_CHECK_APPTAG	0x30

/**
 * struct iser_reg_resources - Fast registration resources
 *
 * @mr:         memory region
 * @fmr_pool:   pool of fmrs
 * @page_vec:   fast reg page list used by fmr pool
 * @mr_valid:   is mr valid indicator
 */
struct iser_reg_resources {
	union {
		struct ib_mr             *mr;
		struct ib_fmr_pool       *fmr_pool;
	};
	struct iser_page_vec             *page_vec;
	u8				  mr_valid:1;
};

/**
 * struct iser_pi_context - Protection information context
 *
 * @rsc:             protection buffer registration resources
 * @sig_mr:          signature enable memory region
 * @sig_mr_valid:    is sig_mr valid indicator
 * @sig_protected:   is region protected indicator
 */
struct iser_pi_context {
	struct iser_reg_resources	rsc;
	struct ib_mr                   *sig_mr;
	u8                              sig_mr_valid:1;
	u8                              sig_protected:1;
};

/**
 * struct iser_fr_desc - Fast registration descriptor
 *
 * @list:           entry in connection fastreg pool
 * @rsc:            data buffer registration resources
 * @pi_ctx:         protection information context
 */
struct iser_fr_desc {
	struct list_head		  list;
	struct iser_reg_resources	  rsc;
	struct iser_pi_context		 *pi_ctx;
};

/**
 * struct iser_fr_pool: connection fast registration pool
 *
 * @list:                list of fastreg descriptors
 * @lock:                protects fmr/fastreg pool
 * @size:                size of the pool
 */
struct iser_fr_pool {
	struct list_head        list;
	spinlock_t              lock;
	int                     size;
};

/**
 * struct ib_conn - Infiniband related objects
 *
 * @cma_id:              rdma_cm connection manager handle
 * @qp:                  Connection Queue-pair
 * @post_recv_buf_count: post receive counter
 * @sig_count:           send work request signal count
 * @rx_wr:               receive work request for batch posts
 * @device:              reference to iser device
 * @comp:                iser completion context
 * @fr_pool:             connection fast registration pool
 * @pi_support:          Indicate device T10-PI support
 * @last:                last send wr to signal all flush errors were drained
 * @last_cqe:            cqe handler for last wr
 * @reg_cqe:             cqe handler for memory registration completions
 * @last_comp:           completes when all connection completions consumed
 */
struct ib_conn {
	struct rdma_cm_id           *cma_id;
	struct ib_qp	            *qp;
	int                          post_recv_buf_count;
	u8                           sig_count;
	struct ib_recv_wr	     rx_wr[ISER_MIN_POSTED_RX];
	struct iser_device          *device;
	struct iser_comp	    *comp;
	struct iser_fr_pool          fr_pool;
	bool			     pi_support;
	struct ib_send_wr	     last;
	struct ib_cqe		     last_cqe;
	struct ib_cqe		     reg_cqe;
	struct completion	     last_comp;
};

/**
 * struct iser_conn - iSER connection context
 *
 * @ib_conn:          connection RDMA resources
 * @iscsi_conn:       link to matching iscsi connection
 * @ep:               transport handle
 * @state:            connection logical state
 * @qp_max_recv_dtos: maximum number of data outs, corresponds
 *                    to max number of post recvs
 * @qp_max_recv_dtos_mask: (qp_max_recv_dtos - 1)
 * @min_posted_rx:    (qp_max_recv_dtos >> 2)
 * @max_cmds:         maximum cmds allowed for this connection
 * @name:             connection peer portal
 * @release_work:     deferred work for release job
 * @state_mutex:      protects iser connection state
 * @stop_completion:  conn_stop completion
 * @ib_completion:    RDMA cleanup completion
 * @up_completion:    connection establishment completed
 *                    (state is ISER_CONN_UP)
 * @conn_list:        entry in ig conn list
 * @login_desc:       login descriptor
 * @rx_desc_head:     head of rx_descs cyclic buffer
 * @rx_descs:         rx buffers array (cyclic buffer)
 * @num_rx_descs:     number of rx descriptors
 * @scsi_sg_tablesize: scsi host sg_tablesize
 * @scsi_max_sectors: scsi host max sectors
 */
struct iser_conn {
	struct ib_conn		     ib_conn;
	struct iscsi_conn	     *iscsi_conn;
	struct iscsi_endpoint	     *ep;
	enum iser_conn_state	     state;
	unsigned		     qp_max_recv_dtos;
	unsigned		     qp_max_recv_dtos_mask;
	unsigned		     min_posted_rx;
	u16                          max_cmds;
	char 			     name[ISER_OBJECT_NAME_SIZE];
	struct work_struct	     release_work;
	struct mutex		     state_mutex;
	struct completion	     stop_completion;
	struct completion	     ib_completion;
	struct completion	     up_completion;
	struct list_head	     conn_list;
	struct iser_login_desc       login_desc;
	unsigned int 		     rx_desc_head;
	struct iser_rx_desc	     *rx_descs;
	u32                          num_rx_descs;
	unsigned short               scsi_sg_tablesize;
	unsigned int                 scsi_max_sectors;
};

/**
 * struct iscsi_iser_task - iser task context
 *
 * @desc:             TX descriptor
 * @iser_conn:        link to iser connection
 * @status:           current task status
 * @sc:               link to scsi command
 * @command_sent:     indicate if command was sent
 * @dir:              iser data direction
 * @rdma_reg:         task rdma registration desc
 * @data:             iser data buffer desc
 * @prot:             iser protection buffer desc
 */
struct iscsi_iser_task {
	struct iser_tx_desc          desc;
	struct iser_conn	     *iser_conn;
	enum iser_task_status 	     status;
	struct scsi_cmnd	     *sc;
	int                          command_sent;
	int                          dir[ISER_DIRS_NUM];
	struct iser_mem_reg          rdma_reg[ISER_DIRS_NUM];
	struct iser_data_buf         data[ISER_DIRS_NUM];
	struct iser_data_buf         prot[ISER_DIRS_NUM];
};

struct iser_page_vec {
	u64 *pages;
	int npages;
	struct ib_mr fake_mr;
};

/**
 * struct iser_global: iSER global context
 *
 * @device_list_mutex:    protects device_list
 * @device_list:          iser devices global list
 * @connlist_mutex:       protects connlist
 * @connlist:             iser connections global list
 * @desc_cache:           kmem cache for tx dataout
 */
struct iser_global {
	struct mutex      device_list_mutex;
	struct list_head  device_list;
	struct mutex      connlist_mutex;
	struct list_head  connlist;
	struct kmem_cache *desc_cache;
};

extern struct iser_global ig;
extern int iser_debug_level;
extern bool iser_pi_enable;
extern int iser_pi_guard;
extern unsigned int iser_max_sectors;
extern bool iser_always_reg;

int iser_assign_reg_ops(struct iser_device *device);

int iser_send_control(struct iscsi_conn *conn,
		      struct iscsi_task *task);

int iser_send_command(struct iscsi_conn *conn,
		      struct iscsi_task *task);

int iser_send_data_out(struct iscsi_conn *conn,
		       struct iscsi_task *task,
		       struct iscsi_data *hdr);

void iscsi_iser_recv(struct iscsi_conn *conn,
		     struct iscsi_hdr *hdr,
		     char *rx_data,
		     int rx_data_len);

void iser_conn_init(struct iser_conn *iser_conn);

void iser_conn_release(struct iser_conn *iser_conn);

int iser_conn_terminate(struct iser_conn *iser_conn);

void iser_release_work(struct work_struct *work);

void iser_err_comp(struct ib_wc *wc, const char *type);
void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc);
void iser_task_rsp(struct ib_cq *cq, struct ib_wc *wc);
void iser_cmd_comp(struct ib_cq *cq, struct ib_wc *wc);
void iser_ctrl_comp(struct ib_cq *cq, struct ib_wc *wc);
void iser_dataout_comp(struct ib_cq *cq, struct ib_wc *wc);
void iser_reg_comp(struct ib_cq *cq, struct ib_wc *wc);
void iser_last_comp(struct ib_cq *cq, struct ib_wc *wc);

void iser_task_rdma_init(struct iscsi_iser_task *task);

void iser_task_rdma_finalize(struct iscsi_iser_task *task);

void iser_free_rx_descriptors(struct iser_conn *iser_conn);

void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
				     struct iser_data_buf *mem,
				     enum iser_data_dir cmd_dir);

int iser_reg_rdma_mem(struct iscsi_iser_task *task,
		      enum iser_data_dir dir);
void iser_unreg_rdma_mem(struct iscsi_iser_task *task,
			 enum iser_data_dir dir);

int  iser_connect(struct iser_conn *iser_conn,
		  struct sockaddr *src_addr,
		  struct sockaddr *dst_addr,
		  int non_blocking);

void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task,
			enum iser_data_dir cmd_dir);
void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
			    enum iser_data_dir cmd_dir);

int  iser_post_recvl(struct iser_conn *iser_conn);
int  iser_post_recvm(struct iser_conn *iser_conn, int count);
int  iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
		    bool signal);

int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
			   struct iser_data_buf *data,
			   enum iser_data_dir iser_dir,
			   enum dma_data_direction dma_dir);

void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
			      struct iser_data_buf *data,
			      enum dma_data_direction dir);

int  iser_initialize_task_headers(struct iscsi_task *task,
			struct iser_tx_desc *tx_desc);
int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
			      struct iscsi_session *session);
int iser_alloc_fmr_pool(struct ib_conn *ib_conn,
			unsigned cmds_max,
			unsigned int size);
void iser_free_fmr_pool(struct ib_conn *ib_conn);
int iser_alloc_fastreg_pool(struct ib_conn *ib_conn,
			    unsigned cmds_max,
			    unsigned int size);
void iser_free_fastreg_pool(struct ib_conn *ib_conn);
u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
			     enum iser_data_dir cmd_dir, sector_t *sector);
struct iser_fr_desc *
iser_reg_desc_get_fr(struct ib_conn *ib_conn);
void
iser_reg_desc_put_fr(struct ib_conn *ib_conn,
		     struct iser_fr_desc *desc);
struct iser_fr_desc *
iser_reg_desc_get_fmr(struct ib_conn *ib_conn);
void
iser_reg_desc_put_fmr(struct ib_conn *ib_conn,
		      struct iser_fr_desc *desc);
713 714 715 716

static inline struct ib_send_wr *
iser_tx_next_wr(struct iser_tx_desc *tx_desc)
{
	struct ib_send_wr *cur_wr = &tx_desc->wrs[tx_desc->wr_idx].send;
	struct ib_send_wr *last_wr;

	if (tx_desc->wr_idx) {
		last_wr = &tx_desc->wrs[tx_desc->wr_idx - 1].send;
		last_wr->next = cur_wr;
	}
	tx_desc->wr_idx++;

	return cur_wr;
}

static inline struct iser_conn *
to_iser_conn(struct ib_conn *ib_conn)
{
	return container_of(ib_conn, struct iser_conn, ib_conn);
}

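/* Resolve a completion's ib_cqe back to the descriptor embedding it */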
static inline struct iser_rx_desc *
iser_rx(struct ib_cqe *cqe)
{
	return container_of(cqe, struct iser_rx_desc, cqe);
}

static inline struct iser_tx_desc *
iser_tx(struct ib_cqe *cqe)
{
	return container_of(cqe, struct iser_tx_desc, cqe);
}

static inline struct iser_login_desc *
iser_login(struct ib_cqe *cqe)
{
	return container_of(cqe, struct iser_login_desc, cqe);
}

#endif