/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef IB_SRP_H
#define IB_SRP_H

#include <linux/types.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>

#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_sa.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_fmr_pool.h>

enum {
	SRP_PATH_REC_TIMEOUT_MS	= 1000,
	SRP_ABORT_TIMEOUT_MS	= 5000,

	SRP_PORT_REDIRECT	= 1,
	SRP_DLID_REDIRECT	= 2,
	SRP_STALE_CONN		= 3,

	SRP_DEF_SG_TABLESIZE	= 12,

	SRP_DEFAULT_QUEUE_SIZE	= 1 << 6,
	SRP_RSP_SQ_SIZE		= 1,
	SRP_TSK_MGMT_SQ_SIZE	= 1,
	SRP_DEFAULT_CMD_SQ_SIZE = SRP_DEFAULT_QUEUE_SIZE - SRP_RSP_SQ_SIZE -
				  SRP_TSK_MGMT_SQ_SIZE,

	SRP_TAG_NO_REQ		= ~0U,
	SRP_TAG_TSK_MGMT	= 1U << 31,

	SRP_MAX_PAGES_PER_MR	= 512,
};
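
/*
 * Illustrative note (editor's addition, not part of the driver): the send
 * queue budget above is plain arithmetic.  With SRP_DEFAULT_QUEUE_SIZE =
 * 1 << 6 = 64 entries, one entry is reserved for responses and one for
 * task management, so SRP_DEFAULT_CMD_SQ_SIZE = 64 - 1 - 1 = 62 entries
 * remain for SCSI commands.  Similarly, SRP_TAG_TSK_MGMT sets bit 31 so
 * that task-management tags cannot collide with command tags, and
 * SRP_TAG_NO_REQ (all bits set) marks the absence of a request.
 */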

enum srp_target_state {
	SRP_TARGET_SCANNING,
	SRP_TARGET_LIVE,
	SRP_TARGET_REMOVED,
};

enum srp_iu_type {
	SRP_IU_CMD,
	SRP_IU_TSK_MGMT,
	SRP_IU_RSP,
};

/*
 * @mr_page_mask: HCA memory registration page mask.
 * @mr_page_size: HCA memory registration page size.
 * @mr_max_size: Maximum size in bytes of a single FMR / FR registration
 *   request.
 */
struct srp_device {
	struct list_head	dev_list;
	struct ib_device       *dev;
	struct ib_pd	       *pd;
	struct ib_mr	       *global_mr;
	u64			mr_page_mask;
	int			mr_page_size;
	int			mr_max_size;
	int			max_pages_per_mr;
	bool			has_fmr;
	bool			has_fr;
	bool			use_fmr;
	bool			use_fast_reg;
};
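
/*
 * Illustrative sketch (editor's addition, not part of the driver): the
 * pairing assumed here is mr_page_mask == ~(mr_page_size - 1) with
 * mr_page_size a power of two, so a DMA address can be split into an HCA
 * registration page base and an in-page offset along the lines of:
 *
 *	u64 page_base = dma_addr & dev->mr_page_mask;
 *	u64 offset    = dma_addr & ~dev->mr_page_mask;
 *
 * where dev is a struct srp_device pointer; the variable names are
 * hypothetical and used for illustration only.
 */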

struct srp_host {
	struct srp_device      *srp_dev;
	u8			port;
	struct device		dev;
	struct list_head	target_list;
	spinlock_t		target_lock;
	struct completion	released;
	struct list_head	list;
	struct mutex		add_target_mutex;
};

struct srp_request {
	struct scsi_cmnd       *scmnd;
	struct srp_iu	       *cmd;
	union {
		struct ib_pool_fmr **fmr_list;
		struct srp_fr_desc **fr_list;
	};
	u64		       *map_page;
	struct srp_direct_buf  *indirect_desc;
	dma_addr_t		indirect_dma_addr;
	short			nmdesc;
	struct ib_cqe		reg_cqe;
};

/**
 * struct srp_rdma_ch
 * @comp_vector: Completion vector used by this RDMA channel.
 */
struct srp_rdma_ch {
	/* These are RW in the hot path, and commonly used together */
	struct list_head	free_tx;
	spinlock_t		lock;
	s32			req_lim;

	/* These are read-only in the hot path */
	struct srp_target_port *target ____cacheline_aligned_in_smp;
	struct ib_cq	       *send_cq;
	struct ib_cq	       *recv_cq;
	struct ib_qp	       *qp;
	union {
		struct ib_fmr_pool     *fmr_pool;
		struct srp_fr_pool     *fr_pool;
	};

	/* Everything above this point is used in the hot path of
	 * command processing. Try to keep them packed into cachelines.
	 */

	struct completion	done;
	int			status;

	struct ib_sa_path_rec	path;
	struct ib_sa_query     *path_query;
	int			path_query_id;

	struct ib_cm_id	       *cm_id;
	struct srp_iu	      **tx_ring;
	struct srp_iu	      **rx_ring;
	struct srp_request     *req_ring;
	int			max_ti_iu_len;
	int			comp_vector;

	struct completion	tsk_mgmt_done;
	u8			tsk_mgmt_status;
	bool			connected;
};
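
/*
 * Layout note (editor's addition): ____cacheline_aligned_in_smp on
 * @target starts the read-mostly hot-path members on a fresh cacheline,
 * keeping them apart from the frequently written @free_tx, @lock and
 * @req_lim.  On an SMP build this could be sanity-checked in a .c file
 * with a hypothetical compile-time assertion such as:
 *
 *	BUILD_BUG_ON(offsetof(struct srp_rdma_ch, target) %
 *		     SMP_CACHE_BYTES != 0);
 *
 * (not present in the driver itself).
 */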

/**
 * struct srp_target_port
 * @comp_vector: Completion vector used by the first RDMA channel created for
 *   this target port.
 */
struct srp_target_port {
	/* read and written in the hot path */
	spinlock_t		lock;

	/* read only in the hot path */
	struct ib_mr		*global_mr;
	struct srp_rdma_ch	*ch;
	u32			ch_count;
	u32			lkey;
	enum srp_target_state	state;
	unsigned int		max_iu_len;
	unsigned int		cmd_sg_cnt;
	unsigned int		indirect_size;
	bool			allow_ext_sg;

	/* other member variables */
	union ib_gid		sgid;
	__be64			id_ext;
	__be64			ioc_guid;
	__be64			service_id;
	__be64			initiator_ext;
	u16			io_class;
	struct srp_host	       *srp_host;
	struct Scsi_Host       *scsi_host;
	struct srp_rport       *rport;
	char			target_name[32];
	unsigned int		scsi_id;
	unsigned int		sg_tablesize;
	int			mr_pool_size;
	int			mr_per_cmd;
	int			queue_size;
	int			req_ring_size;
	int			comp_vector;
	int			tl_retry_count;

	union ib_gid		orig_dgid;
	__be16			pkey;

	u32			rq_tmo_jiffies;

	int			zero_req_lim;

	struct work_struct	tl_err_work;
	struct work_struct	remove_work;

	struct list_head	list;
	bool			qp_in_error;
};
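
/*
 * Illustrative note (editor's addition): @ch points at an array of
 * @ch_count RDMA channels, so per-channel state is typically reached as:
 *
 *	for (i = 0; i < target->ch_count; i++) {
 *		struct srp_rdma_ch *ch = &target->ch[i];
 *		...
 *	}
 *
 * This pattern is inferred from the struct layout rather than quoted from
 * ib_srp.c.
 */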

struct srp_iu {
	struct list_head	list;
	u64			dma;
	void		       *buf;
	size_t			size;
	enum dma_data_direction	direction;
	struct ib_cqe		cqe;
};

/**
 * struct srp_fr_desc - fast registration work request arguments
 * @entry: Entry in srp_fr_pool.free_list.
 * @mr:    Memory region.
 */
struct srp_fr_desc {
	struct list_head		entry;
	struct ib_mr			*mr;
};

/**
 * struct srp_fr_pool - pool of fast registration descriptors
 *
 * An entry is available for allocation if and only if it occurs in @free_list.
 *
 * @size:      Number of descriptors in this pool.
 * @max_page_list_len: Maximum fast registration work request page list length.
 * @lock:      Protects free_list.
 * @free_list: List of free descriptors.
 * @desc:      Fast registration descriptor pool.
 */
struct srp_fr_pool {
	int			size;
	int			max_page_list_len;
	spinlock_t		lock;
	struct list_head	free_list;
	struct srp_fr_desc	desc[0];
};
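
/*
 * Illustrative sketch (editor's addition): because @desc is a trailing
 * zero-length array, a pool of pool_size descriptors is allocated as one
 * chunk, and a descriptor is usable exactly while it sits on @free_list.
 * A simplified, hypothetical allocation and acquire path (error handling
 * and MR setup omitted; kzalloc() requires <linux/slab.h>) might look
 * like:
 *
 *	pool = kzalloc(sizeof(*pool) +
 *		       pool_size * sizeof(pool->desc[0]), GFP_KERNEL);
 *	spin_lock_init(&pool->lock);
 *	INIT_LIST_HEAD(&pool->free_list);
 *	for (i = 0; i < pool_size; i++)
 *		list_add_tail(&pool->desc[i].entry, &pool->free_list);
 *
 *	spin_lock_irqsave(&pool->lock, flags);
 *	d = list_first_entry_or_null(&pool->free_list,
 *				     struct srp_fr_desc, entry);
 *	if (d)
 *		list_del(&d->entry);
 *	spin_unlock_irqrestore(&pool->lock, flags);
 *
 * See srp_create_fr_pool() in ib_srp.c for the real implementation.
 */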

/**
 * struct srp_map_state - per-request DMA memory mapping state
 * @desc:	    Pointer to the element of the SRP buffer descriptor array
 *		    that is being filled in.
 * @pages:	    Array with DMA addresses of pages being considered for
 *		    memory registration.
 * @base_dma_addr:  DMA address of the first page that has not yet been mapped.
 * @dma_len:	    Number of bytes that will be registered with the next
 *		    FMR or FR memory registration call.
 * @total_len:	    Total number of bytes in the sg-list being mapped.
 * @npages:	    Number of page addresses in the pages[] array.
 * @nmdesc:	    Number of FMR or FR memory descriptors used for mapping.
 * @ndesc:	    Number of SRP buffer descriptors that have been filled in.
 */
struct srp_map_state {
	union {
		struct {
			struct ib_pool_fmr **next;
			struct ib_pool_fmr **end;
		} fmr;
		struct {
			struct srp_fr_desc **next;
			struct srp_fr_desc **end;
		} fr;
		struct {
			void		   **next;
			void		   **end;
		} gen;
	};
	struct srp_direct_buf  *desc;
	union {
		u64			*pages;
		struct scatterlist	*sg;
	};
	dma_addr_t		base_dma_addr;
	u32			dma_len;
	u32			total_len;
	unsigned int		npages;
	unsigned int		nmdesc;
	unsigned int		ndesc;
};
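
/*
 * Illustrative sketch (editor's addition): while a scatterlist is mapped,
 * page addresses accumulate in @pages and @dma_len grows until the HCA's
 * per-MR limit is hit, at which point one FMR/FR registration consumes
 * the batch and one SRP descriptor is written via @desc.  Very roughly,
 * with hypothetical names:
 *
 *	state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
 *	state->dma_len += len;
 *	if (state->npages == dev->max_pages_per_mr)
 *		register_batch(state);
 *
 * where register_batch() stands in for srp_map_finish_fmr() or
 * srp_map_finish_fr() in ib_srp.c, after which @npages and @dma_len are
 * reset for the next batch and @nmdesc and @ndesc have been advanced.
 */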

#endif /* IB_SRP_H */