/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004-2006 Voltaire Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#if !defined(IB_MAD_H)
#define IB_MAD_H

#include <linux/list.h>

#include <rdma/ib_verbs.h>
#include <uapi/rdma/ib_user_mad.h>

/* Management base versions */
#define IB_MGMT_BASE_VERSION			1
#define OPA_MGMT_BASE_VERSION			0x80

#define OPA_SM_CLASS_VERSION			0x80

/* Management classes */
#define IB_MGMT_CLASS_SUBN_LID_ROUTED		0x01
#define IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE	0x81
#define IB_MGMT_CLASS_SUBN_ADM			0x03
#define IB_MGMT_CLASS_PERF_MGMT			0x04
#define IB_MGMT_CLASS_BM			0x05
#define IB_MGMT_CLASS_DEVICE_MGMT		0x06
#define IB_MGMT_CLASS_CM			0x07
#define IB_MGMT_CLASS_SNMP			0x08
#define IB_MGMT_CLASS_DEVICE_ADM		0x10
#define IB_MGMT_CLASS_BOOT_MGMT			0x11
#define IB_MGMT_CLASS_BIS			0x12
#define IB_MGMT_CLASS_CONG_MGMT			0x21
#define IB_MGMT_CLASS_VENDOR_RANGE2_START	0x30
#define IB_MGMT_CLASS_VENDOR_RANGE2_END		0x4F

#define	IB_OPENIB_OUI				(0x001405)

/* Management methods */
#define IB_MGMT_METHOD_GET			0x01
#define IB_MGMT_METHOD_SET			0x02
#define IB_MGMT_METHOD_GET_RESP			0x81
#define IB_MGMT_METHOD_SEND			0x03
#define IB_MGMT_METHOD_TRAP			0x05
#define IB_MGMT_METHOD_REPORT			0x06
#define IB_MGMT_METHOD_REPORT_RESP		0x86
#define IB_MGMT_METHOD_TRAP_REPRESS		0x07

#define IB_MGMT_METHOD_RESP			0x80
#define IB_BM_ATTR_MOD_RESP			cpu_to_be32(1)

#define IB_MGMT_MAX_METHODS			128

/* MAD Status field bit masks */
#define IB_MGMT_MAD_STATUS_SUCCESS			0x0000
#define IB_MGMT_MAD_STATUS_BUSY				0x0001
#define IB_MGMT_MAD_STATUS_REDIRECT_REQD		0x0002
#define IB_MGMT_MAD_STATUS_BAD_VERSION			0x0004
#define IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD		0x0008
#define IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB	0x000c
#define IB_MGMT_MAD_STATUS_INVALID_ATTRIB_VALUE		0x001c

/* RMPP information */
#define IB_MGMT_RMPP_VERSION			1

#define IB_MGMT_RMPP_TYPE_DATA			1
#define IB_MGMT_RMPP_TYPE_ACK			2
#define IB_MGMT_RMPP_TYPE_STOP			3
#define IB_MGMT_RMPP_TYPE_ABORT			4

#define IB_MGMT_RMPP_FLAG_ACTIVE		1
#define IB_MGMT_RMPP_FLAG_FIRST			(1<<1)
#define IB_MGMT_RMPP_FLAG_LAST			(1<<2)

#define IB_MGMT_RMPP_NO_RESPTIME		0x1F

#define	IB_MGMT_RMPP_STATUS_SUCCESS		0
#define	IB_MGMT_RMPP_STATUS_RESX		1
#define	IB_MGMT_RMPP_STATUS_ABORT_MIN		118
#define	IB_MGMT_RMPP_STATUS_T2L			118
#define	IB_MGMT_RMPP_STATUS_BAD_LEN		119
#define	IB_MGMT_RMPP_STATUS_BAD_SEG		120
#define	IB_MGMT_RMPP_STATUS_BADT		121
#define	IB_MGMT_RMPP_STATUS_W2S			122
#define	IB_MGMT_RMPP_STATUS_S2B			123
#define	IB_MGMT_RMPP_STATUS_BAD_STATUS		124
#define	IB_MGMT_RMPP_STATUS_UNV			125
#define	IB_MGMT_RMPP_STATUS_TMR			126
#define	IB_MGMT_RMPP_STATUS_UNSPEC		127
#define	IB_MGMT_RMPP_STATUS_ABORT_MAX		127

#define IB_QP0		0
#define IB_QP1		cpu_to_be32(1)
#define IB_QP1_QKEY	0x80010000
#define IB_QP_SET_QKEY	0x80000000

#define IB_DEFAULT_PKEY_PARTIAL 0x7FFF
#define IB_DEFAULT_PKEY_FULL	0xFFFF

/*
 * Generic trap/notice types
 */
#define IB_NOTICE_TYPE_FATAL	0x80
#define IB_NOTICE_TYPE_URGENT	0x81
#define IB_NOTICE_TYPE_SECURITY	0x82
#define IB_NOTICE_TYPE_SM	0x83
#define IB_NOTICE_TYPE_INFO	0x84

/*
 * Generic trap/notice producers
 */
#define IB_NOTICE_PROD_CA		cpu_to_be16(1)
#define IB_NOTICE_PROD_SWITCH		cpu_to_be16(2)
#define IB_NOTICE_PROD_ROUTER		cpu_to_be16(3)
#define IB_NOTICE_PROD_CLASS_MGR	cpu_to_be16(4)

enum {
	IB_MGMT_MAD_HDR = 24,
	IB_MGMT_MAD_DATA = 232,
	IB_MGMT_RMPP_HDR = 36,
	IB_MGMT_RMPP_DATA = 220,
	IB_MGMT_VENDOR_HDR = 40,
	IB_MGMT_VENDOR_DATA = 216,
	IB_MGMT_SA_HDR = 56,
	IB_MGMT_SA_DATA = 200,
	IB_MGMT_DEVICE_HDR = 64,
	IB_MGMT_DEVICE_DATA = 192,
	IB_MGMT_MAD_SIZE = IB_MGMT_MAD_HDR + IB_MGMT_MAD_DATA,
	OPA_MGMT_MAD_DATA = 2024,
	OPA_MGMT_RMPP_DATA = 2012,
	OPA_MGMT_MAD_SIZE = IB_MGMT_MAD_HDR + OPA_MGMT_MAD_DATA,
};

struct ib_mad_hdr {
	u8	base_version;
	u8	mgmt_class;
	u8	class_version;
	u8	method;
	__be16	status;
	__be16	class_specific;
	__be64	tid;
	__be16	attr_id;
	__be16	resv;
	__be32	attr_mod;
};

struct ib_rmpp_hdr {
	u8	rmpp_version;
	u8	rmpp_type;
	u8	rmpp_rtime_flags;
	u8	rmpp_status;
	__be32	seg_num;
	__be32	paylen_newwin;
};

typedef u64 __bitwise ib_sa_comp_mask;

#define IB_SA_COMP_MASK(n) ((__force ib_sa_comp_mask) cpu_to_be64(1ull << (n)))

/*
 * ib_sa_hdr and ib_sa_mad structures must be packed because they have
 * 64-bit fields that are only 32-bit aligned. 64-bit architectures will
 * lay them out wrong otherwise.  (And unfortunately they are sent on
 * the wire so we can't change the layout)
 */
struct ib_sa_hdr {
	__be64			sm_key;
	__be16			attr_offset;
	__be16			reserved;
	ib_sa_comp_mask		comp_mask;
} __attribute__ ((packed));

struct ib_mad {
	struct ib_mad_hdr	mad_hdr;
	u8			data[IB_MGMT_MAD_DATA];
};

struct opa_mad {
	struct ib_mad_hdr	mad_hdr;
	u8			data[OPA_MGMT_MAD_DATA];
};

struct ib_rmpp_mad {
	struct ib_mad_hdr	mad_hdr;
	struct ib_rmpp_hdr	rmpp_hdr;
	u8			data[IB_MGMT_RMPP_DATA];
};

struct opa_rmpp_mad {
	struct ib_mad_hdr	mad_hdr;
	struct ib_rmpp_hdr	rmpp_hdr;
	u8			data[OPA_MGMT_RMPP_DATA];
};

struct ib_sa_mad {
	struct ib_mad_hdr	mad_hdr;
	struct ib_rmpp_hdr	rmpp_hdr;
	struct ib_sa_hdr	sa_hdr;
	u8			data[IB_MGMT_SA_DATA];
} __attribute__ ((packed));

struct ib_vendor_mad {
	struct ib_mad_hdr	mad_hdr;
	struct ib_rmpp_hdr	rmpp_hdr;
	u8			reserved;
	u8			oui[3];
	u8			data[IB_MGMT_VENDOR_DATA];
};

#define IB_MGMT_CLASSPORTINFO_ATTR_ID	cpu_to_be16(0x0001)

#define IB_CLASS_PORT_INFO_RESP_TIME_MASK	0x1F
#define IB_CLASS_PORT_INFO_RESP_TIME_FIELD_SIZE 5

struct ib_class_port_info {
	u8			base_version;
	u8			class_version;
	__be16			capability_mask;
	/* 27 bits for cap_mask2, 5 bits for resp_time */
	__be32			cap_mask2_resp_time;
	u8			redirect_gid[16];
	__be32			redirect_tcslfl;
	__be16			redirect_lid;
	__be16			redirect_pkey;
	__be32			redirect_qp;
	__be32			redirect_qkey;
	u8			trap_gid[16];
	__be32			trap_tcslfl;
	__be16			trap_lid;
	__be16			trap_pkey;
	__be32			trap_hlqp;
	__be32			trap_qkey;
};

struct opa_class_port_info {
	u8 base_version;
	u8 class_version;
	__be16 cap_mask;
	__be32 cap_mask2_resp_time;

	u8 redirect_gid[16];
	__be32 redirect_tc_fl;
	__be32 redirect_lid;
	__be32 redirect_sl_qp;
	__be32 redirect_qkey;

	u8 trap_gid[16];
	__be32 trap_tc_fl;
	__be32 trap_lid;
	__be32 trap_hl_qp;
	__be32 trap_qkey;

	__be16 trap_pkey;
	__be16 redirect_pkey;

	u8 trap_sl_rsvd;
	u8 reserved[3];
} __packed;

/**
 * ib_get_cpi_resp_time - Returns the resp_time value from
 * cap_mask2_resp_time in ib_class_port_info.
 * @cpi: A struct ib_class_port_info mad.
 */
static inline u8 ib_get_cpi_resp_time(struct ib_class_port_info *cpi)
{
	return (u8)(be32_to_cpu(cpi->cap_mask2_resp_time) &
		    IB_CLASS_PORT_INFO_RESP_TIME_MASK);
}

/**
 * ib_set_cpi_resp_time - Sets the response time in an
 * ib_class_port_info mad.
 * @cpi: A struct ib_class_port_info.
 * @rtime: The response time to set.
 */
static inline void ib_set_cpi_resp_time(struct ib_class_port_info *cpi,
					u8 rtime)
{
	cpi->cap_mask2_resp_time =
		(cpi->cap_mask2_resp_time &
		 cpu_to_be32(~IB_CLASS_PORT_INFO_RESP_TIME_MASK)) |
		cpu_to_be32(rtime & IB_CLASS_PORT_INFO_RESP_TIME_MASK);
}

/**
 * ib_get_cpi_capmask2 - Returns the capmask2 value from
 * cap_mask2_resp_time in ib_class_port_info.
 * @cpi: A struct ib_class_port_info mad.
 */
static inline u32 ib_get_cpi_capmask2(struct ib_class_port_info *cpi)
{
	return (be32_to_cpu(cpi->cap_mask2_resp_time) >>
		IB_CLASS_PORT_INFO_RESP_TIME_FIELD_SIZE);
}

/**
 * ib_set_cpi_capmask2 - Sets the capmask2 in an
 * ib_class_port_info mad.
 * @cpi: A struct ib_class_port_info.
 * @capmask2: The capmask2 to set.
 */
static inline void ib_set_cpi_capmask2(struct ib_class_port_info *cpi,
				       u32 capmask2)
{
	cpi->cap_mask2_resp_time =
		(cpi->cap_mask2_resp_time &
		 cpu_to_be32(IB_CLASS_PORT_INFO_RESP_TIME_MASK)) |
		cpu_to_be32(capmask2 <<
			    IB_CLASS_PORT_INFO_RESP_TIME_FIELD_SIZE);
}

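/*
 * Illustrative sketch (not part of the original header): cap_mask2_resp_time
 * packs the 27-bit CapabilityMask2 above the 5-bit response time value, so
 * both halves should be read and written through the accessors above.  The
 * function name and the values below are made up.
 *
 *	static void example_fill_cpi(struct ib_class_port_info *cpi)
 *	{
 *		ib_set_cpi_resp_time(cpi, 18);
 *		ib_set_cpi_capmask2(cpi, 0x1);
 *		pr_debug("resp_time %u capmask2 0x%x\n",
 *			 ib_get_cpi_resp_time(cpi), ib_get_cpi_capmask2(cpi));
 *	}
 */
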
struct ib_mad_notice_attr {
	u8 generic_type;
	u8 prod_type_msb;
	__be16 prod_type_lsb;
	__be16 trap_num;
	__be16 issuer_lid;
	__be16 toggle_count;

	union {
		struct {
			u8	details[54];
		} raw_data;

		struct {
			__be16	reserved;
			__be16	lid;		/* where violation happened */
			u8	port_num;	/* where violation happened */
		} __packed ntc_129_131;

		struct {
			__be16	reserved;
			__be16	lid;		/* LID where change occurred */
			u8	reserved2;
			u8	local_changes;	/* low bit - local changes */
			__be32	new_cap_mask;	/* new capability mask */
			u8	reserved3;
			u8	change_flags;	/* low 3 bits only */
		} __packed ntc_144;

		struct {
			__be16	reserved;
			__be16	lid;		/* lid where sys guid changed */
			__be16	reserved2;
			__be64	new_sys_guid;
		} __packed ntc_145;

		struct {
			__be16	reserved;
			__be16	lid;
			__be16	dr_slid;
			u8	method;
			u8	reserved2;
			__be16	attr_id;
			__be32	attr_mod;
			__be64	mkey;
			u8	reserved3;
			u8	dr_trunc_hop;
			u8	dr_rtn_path[30];
		} __packed ntc_256;

		struct {
			__be16		reserved;
			__be16		lid1;
			__be16		lid2;
			__be32		key;
			__be32		sl_qp1;	/* SL: high 4 bits */
			__be32		qp2;	/* high 8 bits reserved */
			union ib_gid	gid1;
			union ib_gid	gid2;
		} __packed ntc_257_258;

	} details;
};

/**
 * ib_mad_send_buf - MAD data buffer and work request for sends.
 * @next: A pointer used to chain together MADs for posting.
 * @mad: References an allocated MAD data buffer for MADs that do not have
 *   RMPP active.  For MADs using RMPP, references the common and management
 *   class specific headers.
 * @mad_agent: MAD agent that allocated the buffer.
 * @ah: The address handle to use when sending the MAD.
 * @context: User-controlled context fields.
 * @hdr_len: Indicates the size of the data header of the MAD.  This length
 *   includes the common MAD, RMPP, and class specific headers.
 * @data_len: Indicates the total size of user-transferred data.
 * @seg_count: The number of RMPP segments allocated for this send.
 * @seg_size: Size of the data in each RMPP segment.  This does not include
 *   class specific headers.
 * @seg_rmpp_size: Size of each RMPP segment including the class specific
 *   headers.
 * @timeout_ms: Time to wait for a response.
 * @retries: Number of times to retry a request for a response.  For MADs
 *   using RMPP, this applies per window.  On completion, returns the number
 *   of retries needed to complete the transfer.
 *
 * Users are responsible for initializing the MAD buffer itself, with the
 * exception of any RMPP header.  Additional segment buffer space allocated
 * beyond data_len is padding.
 */
struct ib_mad_send_buf {
	struct ib_mad_send_buf	*next;
	void			*mad;
	struct ib_mad_agent	*mad_agent;
	struct ib_ah		*ah;
	void			*context[2];
	int			hdr_len;
	int			data_len;
	int			seg_count;
	int			seg_size;
	int			seg_rmpp_size;
	int			timeout_ms;
	int			retries;
};

/**
 * ib_response_mad - Returns whether the specified MAD has been generated in
 *   response to a sent request or trap.
 */
int ib_response_mad(const struct ib_mad_hdr *hdr);

/**
 * ib_get_rmpp_resptime - Returns the RMPP response time.
 * @rmpp_hdr: An RMPP header.
 */
static inline u8 ib_get_rmpp_resptime(struct ib_rmpp_hdr *rmpp_hdr)
{
	return rmpp_hdr->rmpp_rtime_flags >> 3;
}

/**
 * ib_get_rmpp_flags - Returns the RMPP flags.
 * @rmpp_hdr: An RMPP header.
 */
static inline u8 ib_get_rmpp_flags(struct ib_rmpp_hdr *rmpp_hdr)
{
	return rmpp_hdr->rmpp_rtime_flags & 0x7;
}

/**
 * ib_set_rmpp_resptime - Sets the response time in an RMPP header.
 * @rmpp_hdr: An RMPP header.
 * @rtime: The response time to set.
 */
static inline void ib_set_rmpp_resptime(struct ib_rmpp_hdr *rmpp_hdr, u8 rtime)
{
	rmpp_hdr->rmpp_rtime_flags = ib_get_rmpp_flags(rmpp_hdr) | (rtime << 3);
}

/**
 * ib_set_rmpp_flags - Sets the flags in an RMPP header.
 * @rmpp_hdr: An RMPP header.
 * @flags: The flags to set.
 */
static inline void ib_set_rmpp_flags(struct ib_rmpp_hdr *rmpp_hdr, u8 flags)
{
	rmpp_hdr->rmpp_rtime_flags = (rmpp_hdr->rmpp_rtime_flags & 0xF8) |
				     (flags & 0x7);
}

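/*
 * Illustrative sketch (not part of the original header): initializing the
 * RMPP header of the first segment of a transfer with the accessors above.
 * The function name is a placeholder.
 *
 *	static void example_init_rmpp(struct ib_rmpp_mad *rmpp_mad)
 *	{
 *		rmpp_mad->rmpp_hdr.rmpp_version = IB_MGMT_RMPP_VERSION;
 *		rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
 *		ib_set_rmpp_resptime(&rmpp_mad->rmpp_hdr,
 *				     IB_MGMT_RMPP_NO_RESPTIME);
 *		ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr,
 *				  IB_MGMT_RMPP_FLAG_ACTIVE |
 *				  IB_MGMT_RMPP_FLAG_FIRST);
 *	}
 */
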
struct ib_mad_agent;
struct ib_mad_send_wc;
struct ib_mad_recv_wc;

/**
 * ib_mad_send_handler - callback handler for a sent MAD.
 * @mad_agent: MAD agent that sent the MAD.
 * @mad_send_wc: Send work completion information on the sent MAD.
 */
typedef void (*ib_mad_send_handler)(struct ib_mad_agent *mad_agent,
				    struct ib_mad_send_wc *mad_send_wc);

/**
 * ib_mad_snoop_handler - Callback handler for snooping sent MADs.
 * @mad_agent: MAD agent that snooped the MAD.
508
 * @send_buf: send MAD data buffer.
L
Linus Torvalds 已提交
509 510 511
 * @mad_send_wc: Work completion information on the sent MAD.  Valid
 *   only for snooping that occurs on a send completion.
 *
512
 * Clients snooping MADs should not modify data referenced by the @send_buf
L
Linus Torvalds 已提交
513 514 515
 * or @mad_send_wc.
 */
typedef void (*ib_mad_snoop_handler)(struct ib_mad_agent *mad_agent,
516
				     struct ib_mad_send_buf *send_buf,
L
Linus Torvalds 已提交
517 518 519 520 521
				     struct ib_mad_send_wc *mad_send_wc);

/**
 * ib_mad_recv_handler - callback handler for a received MAD.
 * @mad_agent: MAD agent requesting the received MAD.
522
 * @send_buf: Send buffer if found, else NULL
L
Linus Torvalds 已提交
523 524 525
 * @mad_recv_wc: Received work completion information on the received MAD.
 *
 * MADs received in response to a send request operation will be handed to
526
 * the user before the send operation completes.  All data buffers given
L
Linus Torvalds 已提交
527 528 529 530 531
 * to registered agents through this routine are owned by the receiving
 * client, except for snooping agents.  Clients snooping MADs should not
 * modify the data referenced by @mad_recv_wc.
 */
typedef void (*ib_mad_recv_handler)(struct ib_mad_agent *mad_agent,
532
				    struct ib_mad_send_buf *send_buf,
L
Linus Torvalds 已提交
533 534 535 536 537 538
				    struct ib_mad_recv_wc *mad_recv_wc);

/**
 * ib_mad_agent - Used to track MAD registration with the access layer.
 * @device: Reference to device registration is on.
 * @qp: Reference to QP used for sending and receiving MADs.
 * @recv_handler: Callback handler for a received MAD.
 * @send_handler: Callback handler for a sent MAD.
 * @snoop_handler: Callback handler for snooped sent MADs.
 * @context: User-specified context associated with this registration.
 * @hi_tid: Access layer assigned transaction ID for this client.
 *   Unsolicited MADs sent by this client will have the upper 32-bits
 *   of their TID set to this value.
 * @flags: registration flags
 * @port_num: Port number on which QP is registered
 * @rmpp_version: If set, indicates the RMPP version used by this agent.
 */
enum {
	IB_MAD_USER_RMPP = IB_USER_MAD_USER_RMPP,
};
struct ib_mad_agent {
	struct ib_device	*device;
	struct ib_qp		*qp;
	ib_mad_recv_handler	recv_handler;
	ib_mad_send_handler	send_handler;
	ib_mad_snoop_handler	snoop_handler;
	void			*context;
	u32			hi_tid;
	u32			flags;
	u8			port_num;
	u8			rmpp_version;
};

/**
 * ib_mad_send_wc - MAD send completion information.
 * @send_buf: Send MAD data buffer associated with the send MAD request.
 * @status: Completion status.
 * @vendor_err: Optional vendor error information returned with a failed
 *   request.
 */
struct ib_mad_send_wc {
	struct ib_mad_send_buf	*send_buf;
	enum ib_wc_status	status;
	u32			vendor_err;
};

/**
 * ib_mad_recv_buf - received MAD buffer information.
 * @list: Reference to next data buffer for a received RMPP MAD.
 * @grh: References a data buffer containing the global route header.
 *   The data referenced by this buffer is only valid if the GRH is
 *   valid.
 * @mad: References the start of the received MAD.
 */
struct ib_mad_recv_buf {
	struct list_head	list;
	struct ib_grh		*grh;
	union {
		struct ib_mad	*mad;
		struct opa_mad	*opa_mad;
	};
};

/**
 * ib_mad_recv_wc - received MAD information.
 * @wc: Completion information for the received data.
 * @recv_buf: Specifies the location of the received data buffer(s).
 * @rmpp_list: Specifies a list of RMPP reassembled received MAD buffers.
 * @mad_len: The length of the received MAD, without duplicated headers.
 * @mad_seg_size: The size of individual MAD segments
 *
 * For a received response, the wr_id contains a pointer to the ib_mad_send_buf
 *   for the corresponding send request.
 */
struct ib_mad_recv_wc {
	struct ib_wc		*wc;
	struct ib_mad_recv_buf	recv_buf;
	struct list_head	rmpp_list;
	int			mad_len;
	size_t			mad_seg_size;
};

/**
 * ib_mad_reg_req - MAD registration request
 * @mgmt_class: Indicates which management class of MADs should be received
 *   by the caller.  This field is only required if the user wishes to
 *   receive unsolicited MADs, otherwise it should be 0.
 * @mgmt_class_version: Indicates which version of MADs for the given
 *   management class to receive.
 * @oui: Indicates IEEE OUI when mgmt_class is a vendor class
 *   in the range from 0x30 to 0x4f. Otherwise not used.
 * @method_mask: The caller will receive unsolicited MADs for any method
 *   where @method_mask = 1.
 *
 */
struct ib_mad_reg_req {
	u8	mgmt_class;
	u8	mgmt_class_version;
	u8	oui[3];
	DECLARE_BITMAP(method_mask, IB_MGMT_MAX_METHODS);
};

/**
 * ib_register_mad_agent - Register to send/receive MADs.
 * @device: The device to register with.
 * @port_num: The port on the specified device to use.
 * @qp_type: Specifies which QP to access.  Must be either
 *   IB_QPT_SMI or IB_QPT_GSI.
 * @mad_reg_req: Specifies which unsolicited MADs should be received
 *   by the caller.  This parameter may be NULL if the caller only
 *   wishes to receive solicited responses.
 * @rmpp_version: If set, indicates that the client will send
 *   and receive MADs that contain the RMPP header for the given version.
 *   If set to 0, indicates that RMPP is not used by this client.
 * @send_handler: The completion callback routine invoked after a send
 *   request has completed.
 * @recv_handler: The completion callback routine invoked for a received
 *   MAD.
 * @context: User specified context associated with the registration.
 * @registration_flags: Registration flags to set for this agent
 */
struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   struct ib_mad_reg_req *mad_reg_req,
					   u8 rmpp_version,
					   ib_mad_send_handler send_handler,
					   ib_mad_recv_handler recv_handler,
662 663
					   void *context,
					   u32 registration_flags);
L
enum ib_mad_snoop_flags {
	/*IB_MAD_SNOOP_POSTED_SENDS	   = 1,*/
	/*IB_MAD_SNOOP_RMPP_SENDS	   = (1<<1),*/
	IB_MAD_SNOOP_SEND_COMPLETIONS	   = (1<<2),
	/*IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS = (1<<3),*/
	IB_MAD_SNOOP_RECVS		   = (1<<4)
	/*IB_MAD_SNOOP_RMPP_RECVS	   = (1<<5),*/
	/*IB_MAD_SNOOP_REDIRECTED_QPS	   = (1<<6)*/
};

/**
 * ib_register_mad_snoop - Register to snoop sent and received MADs.
 * @device: The device to register with.
 * @port_num: The port on the specified device to use.
 * @qp_type: Specifies which QP traffic to snoop.  Must be either
 *   IB_QPT_SMI or IB_QPT_GSI.
 * @mad_snoop_flags: Specifies information where snooping occurs.
 * @send_handler: The callback routine invoked for a snooped send.
 * @recv_handler: The callback routine invoked for a snooped receive.
 * @context: User specified context associated with the registration.
 */
struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   int mad_snoop_flags,
					   ib_mad_snoop_handler snoop_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context);

/**
 * ib_unregister_mad_agent - Unregisters a client from using MAD services.
 * @mad_agent: Corresponding MAD registration request to deregister.
 *
 * After invoking this routine, MAD services are no longer usable by the
 * client on the associated QP.
 */
void ib_unregister_mad_agent(struct ib_mad_agent *mad_agent);

/**
 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
 *   with the registered client.
 * @send_buf: Specifies the information needed to send the MAD(s).
 * @bad_send_buf: Specifies the MAD on which an error was encountered.  This
 *   parameter is optional if only a single MAD is posted.
 *
 * Sent MADs are not guaranteed to complete in the order that they were posted.
 *
 * If the MAD requires RMPP, the data buffer should contain a single copy
 * of the common MAD, RMPP, and class specific headers, followed by the class
 * defined data.  If the class defined data would not divide evenly into
 * RMPP segments, then space must be allocated at the end of the referenced
 * buffer for any required padding.  To indicate the amount of class defined
 * data being transferred, the paylen_newwin field in the RMPP header should
 * be set to the size of the class specific header plus the amount of class
 * defined data being transferred.  The paylen_newwin field should be
 * specified in network-byte order.
 */
int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
		     struct ib_mad_send_buf **bad_send_buf);


/**
 * ib_free_recv_mad - Returns data buffers used to receive a MAD.
 * @mad_recv_wc: Work completion information for a received MAD.
 *
 * Clients receiving MADs through their ib_mad_recv_handler must call this
 * routine to return the work completion buffers to the access layer.
 */
void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc);

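/*
 * Illustrative sketch (not part of the original header): a minimal receive
 * handler matching the ib_mad_recv_handler typedef that inspects the MAD and
 * then returns the receive buffers.  The function name is a placeholder.
 *
 *	static void example_recv_handler(struct ib_mad_agent *mad_agent,
 *					 struct ib_mad_send_buf *send_buf,
 *					 struct ib_mad_recv_wc *mad_recv_wc)
 *	{
 *		const struct ib_mad *mad = mad_recv_wc->recv_buf.mad;
 *
 *		pr_debug("MAD class 0x%x method 0x%x\n",
 *			 mad->mad_hdr.mgmt_class, mad->mad_hdr.method);
 *		ib_free_recv_mad(mad_recv_wc);
 *	}
 */
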
/**
 * ib_cancel_mad - Cancels an outstanding send MAD operation.
 * @mad_agent: Specifies the registration associated with sent MAD.
 * @send_buf: Indicates the MAD to cancel.
 *
 * MADs will be returned to the user through the corresponding
 * ib_mad_send_handler.
 */
void ib_cancel_mad(struct ib_mad_agent *mad_agent,
		   struct ib_mad_send_buf *send_buf);

/**
 * ib_modify_mad - Modifies an outstanding send MAD operation.
 * @mad_agent: Specifies the registration associated with sent MAD.
 * @send_buf: Indicates the MAD to modify.
 * @timeout_ms: New timeout value for sent MAD.
 *
 * This call will reset the timeout value for a sent MAD to the specified
 * value.
 */
int ib_modify_mad(struct ib_mad_agent *mad_agent,
		  struct ib_mad_send_buf *send_buf, u32 timeout_ms);

/**
 * ib_redirect_mad_qp - Registers a QP for MAD services.
 * @qp: Reference to a QP that requires MAD services.
 * @rmpp_version: If set, indicates that the client will send
 *   and receive MADs that contain the RMPP header for the given version.
 *   If set to 0, indicates that RMPP is not used by this client.
 * @send_handler: The completion callback routine invoked after a send
 *   request has completed.
 * @recv_handler: The completion callback routine invoked for a received
 *   MAD.
 * @context: User specified context associated with the registration.
 *
 * Use of this call allows clients to use MAD services, such as RMPP,
 * on user-owned QPs.  After calling this routine, users may send
 * MADs on the specified QP by calling ib_mad_post_send.
 */
struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
					u8 rmpp_version,
					ib_mad_send_handler send_handler,
					ib_mad_recv_handler recv_handler,
					void *context);

/**
 * ib_process_mad_wc - Processes a work completion associated with a
 *   MAD sent or received on a redirected QP.
 * @mad_agent: Specifies the registered MAD service using the redirected QP.
 * @wc: References a work completion associated with a sent or received
 *   MAD segment.
 *
 * This routine is used to complete or continue processing on a MAD request.
 * If the work completion is associated with a send operation, calling
 * this routine is required to continue an RMPP transfer or to wait for a
 * corresponding response, if it is a request.  If the work completion is
 * associated with a receive operation, calling this routine is required to
 * process an inbound or outbound RMPP transfer, or to match a response MAD
 * with its corresponding request.
 */
int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
		      struct ib_wc *wc);

/**
 * ib_create_send_mad - Allocate and initialize a data buffer and work request
 *   for sending a MAD.
 * @mad_agent: Specifies the registered MAD service to associate with the MAD.
 * @remote_qpn: Specifies the QPN of the receiving node.
 * @pkey_index: Specifies which PKey the MAD will be sent using.  This field
 *   is valid only if the remote_qpn is QP 1.
 * @rmpp_active: Indicates if the send will enable RMPP.
 * @hdr_len: Indicates the size of the data header of the MAD.  This length
 *   should include the common MAD header, RMPP header, plus any class
 *   specific header.
 * @data_len: Indicates the size of any user-transferred data.  The call will
 *   automatically adjust the allocated buffer size to account for any
 *   additional padding that may be necessary.
 * @gfp_mask: GFP mask used for the memory allocation.
 * @base_version: Base Version of this MAD
 *
 * This routine allocates a MAD for sending.  The returned MAD send buffer
 * will reference a data buffer usable for sending a MAD, along
 * with an initialized work request structure.  Users may modify the returned
 * MAD data buffer before posting the send.
 *
 * The returned MAD header, class specific headers, and any padding will be
 * cleared.  Users are responsible for initializing the common MAD header,
 * any class specific header, and MAD data area.
 * If @rmpp_active is set, the RMPP header will be initialized for sending.
 */
struct ib_mad_send_buf *ib_create_send_mad(struct ib_mad_agent *mad_agent,
					   u32 remote_qpn, u16 pkey_index,
					   int rmpp_active,
					   int hdr_len, int data_len,
					   gfp_t gfp_mask,
					   u8 base_version);

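/*
 * Illustrative sketch (not part of the original header): allocating and
 * posting a single non-RMPP MAD on QP1, assuming the address handle was
 * created elsewhere.  Filling in the MAD header and payload through msg->mad
 * is omitted, and the timeout/retry values are made up.
 *
 *	static int example_send(struct ib_mad_agent *agent, struct ib_ah *ah)
 *	{
 *		struct ib_mad_send_buf *msg;
 *		int ret;
 *
 *		msg = ib_create_send_mad(agent, 1, 0, 0, IB_MGMT_MAD_HDR,
 *					 IB_MGMT_MAD_DATA, GFP_KERNEL,
 *					 IB_MGMT_BASE_VERSION);
 *		if (IS_ERR(msg))
 *			return PTR_ERR(msg);
 *
 *		msg->ah = ah;
 *		msg->timeout_ms = 100;
 *		msg->retries = 3;
 *
 *		ret = ib_post_send_mad(msg, NULL);
 *		if (ret)
 *			ib_free_send_mad(msg);
 *		return ret;
 *	}
 */
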
/**
 * ib_is_mad_class_rmpp - returns whether given management class
 * supports RMPP.
 * @mgmt_class: management class
 *
 * This routine returns whether the management class supports RMPP.
 */
int ib_is_mad_class_rmpp(u8 mgmt_class);

/**
 * ib_get_mad_data_offset - returns the data offset for a given
 * management class.
 * @mgmt_class: management class
 *
 * This routine returns the data offset in the MAD for the management
 * class requested.
 */
int ib_get_mad_data_offset(u8 mgmt_class);

/**
 * ib_get_rmpp_segment - returns the data buffer for a given RMPP segment.
 * @send_buf: Previously allocated send data buffer.
 * @seg_num: number of segment to return
 *
 * This routine returns a pointer to the data buffer of an RMPP MAD.
 * Users must provide synchronization to @send_buf around this call.
 */
void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num);
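
/*
 * Illustrative sketch (not part of the original header): copying a class
 * payload into the individual RMPP segments of a previously allocated send
 * buffer.  Segment numbers are assumed to be 1-based here, and the function
 * name is a placeholder.
 *
 *	static void example_fill_segments(struct ib_mad_send_buf *msg,
 *					  const u8 *src, size_t len)
 *	{
 *		int i;
 *
 *		for (i = 1; i <= msg->seg_count && len; i++) {
 *			size_t n = min_t(size_t, len, msg->seg_size);
 *
 *			memcpy(ib_get_rmpp_segment(msg, i), src, n);
 *			src += n;
 *			len -= n;
 *		}
 *	}
 */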

/**
 * ib_free_send_mad - Returns data buffers used to send a MAD.
 * @send_buf: Previously allocated send data buffer.
 */
void ib_free_send_mad(struct ib_mad_send_buf *send_buf);

/**
 * ib_mad_kernel_rmpp_agent - Returns whether the agent is performing RMPP.
 * @agent: the agent in question
 * @return: true if agent is performing rmpp, false otherwise.
 */
int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent);

#endif /* IB_MAD_H */