/*
 * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 Intel Corporation.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2009 HNR Consulting. All rights reserved.
 * Copyright (c) 2014 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <rdma/ib_cache.h>

#include "mad_priv.h"
#include "mad_rmpp.h"
#include "smi.h"
#include "opa_smi.h"
#include "agent.h"
#include "core_priv.h"

static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;

module_param_named(send_queue_size, mad_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");
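
/*
 * Example (illustrative; assumes this file is built as the ib_mad module):
 * the work-request queue depths can be tuned at load time, e.g.
 *
 *	modprobe ib_mad send_queue_size=256 recv_queue_size=1024
 */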

static struct list_head ib_mad_port_list;
static u32 ib_mad_client_id;

/* Port list lock */
static DEFINE_SPINLOCK(ib_mad_port_list_lock);

/* Forward declarations */
static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req);
static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
static struct ib_mad_agent_private *find_mad_agent(
					struct ib_mad_port_private *port_priv,
					const struct ib_mad_hdr *mad);
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad);
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
static void timeout_sends(struct work_struct *work);
static void local_completions(struct work_struct *work);
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class);
static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv);
static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
			      struct ib_wc *wc);
static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc);

/*
 * Returns an ib_mad_port_private structure or NULL for a device/port.
 * Assumes ib_mad_port_list_lock is held.
 */
static inline struct ib_mad_port_private *
__ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;

	list_for_each_entry(entry, &ib_mad_port_list, port_list) {
		if (entry->device == device && entry->port_num == port_num)
			return entry;
	}
	return NULL;
}

/*
 * Wrapper function to return an ib_mad_port_private structure or NULL
 * for a device/port, taking the port list lock.
 */
static inline struct ib_mad_port_private *
ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	entry = __ib_get_mad_port(device, port_num);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	return entry;
}

static inline u8 convert_mgmt_class(u8 mgmt_class)
{
	/* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
	return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
		0 : mgmt_class;
}

static int get_spl_qp_index(enum ib_qp_type qp_type)
{
	switch (qp_type) {
	case IB_QPT_SMI:
		return 0;
	case IB_QPT_GSI:
		return 1;
	default:
		return -1;
	}
}

static int vendor_class_index(u8 mgmt_class)
{
	return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
}

static int is_vendor_class(u8 mgmt_class)
{
	if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
	    (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return 0;
	return 1;
}

static int is_vendor_oui(char *oui)
{
	if (oui[0] || oui[1] || oui[2])
		return 1;
	return 0;
}

static int is_vendor_method_in_use(
		struct ib_mad_mgmt_vendor_class *vendor_class,
		struct ib_mad_reg_req *mad_reg_req)
{
	struct ib_mad_mgmt_method_table *method;
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++) {
		if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
			method = vendor_class->method_table[i];
			if (method) {
				if (method_in_use(&method, mad_reg_req))
					return 1;
				else
					break;
			}
		}
	}
	return 0;
}

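/*
 * ib_response_mad - Indicate whether @hdr is a response MAD.  A MAD is
 * treated as a response if the response bit is set in the method, the
 * method is TrapRepress, or (for the BM class) the response bit is set
 * in attr_mod.
 */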
int ib_response_mad(const struct ib_mad_hdr *hdr)
{
	return ((hdr->method & IB_MGMT_METHOD_RESP) ||
		(hdr->method == IB_MGMT_METHOD_TRAP_REPRESS) ||
		((hdr->mgmt_class == IB_MGMT_CLASS_BM) &&
		 (hdr->attr_mod & IB_BM_ATTR_MOD_RESP)));
}
EXPORT_SYMBOL(ib_response_mad);

/*
 * ib_register_mad_agent - Register to send/receive MADs
 */
struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   struct ib_mad_reg_req *mad_reg_req,
					   u8 rmpp_version,
					   ib_mad_send_handler send_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context,
					   u32 registration_flags)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_reg_req *reg_req = NULL;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	struct ib_mad_mgmt_method_table *method;
	int ret2, qpn;
	unsigned long flags;
	u8 mgmt_class, vclass;

	/* Validate parameters */
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1) {
		dev_notice(&device->dev,
			   "ib_register_mad_agent: invalid QP Type %d\n",
			   qp_type);
		goto error1;
	}

	if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) {
		dev_notice(&device->dev,
			   "ib_register_mad_agent: invalid RMPP Version %u\n",
			   rmpp_version);
		goto error1;
	}

	/* Validate MAD registration request if supplied */
	if (mad_reg_req) {
		if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION) {
			dev_notice(&device->dev,
				   "ib_register_mad_agent: invalid Class Version %u\n",
				   mad_reg_req->mgmt_class_version);
			goto error1;
		}
		if (!recv_handler) {
			dev_notice(&device->dev,
				   "ib_register_mad_agent: no recv_handler\n");
			goto error1;
		}
		if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
			/*
			 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
			 * one in this range currently allowed
			 */
			if (mad_reg_req->mgmt_class !=
			    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: Invalid Mgmt Class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		} else if (mad_reg_req->mgmt_class == 0) {
			/*
			 * Class 0 is reserved in IBA and is used for
			 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
			 */
			dev_notice(&device->dev,
				   "ib_register_mad_agent: Invalid Mgmt Class 0\n");
			goto error1;
		} else if (is_vendor_class(mad_reg_req->mgmt_class)) {
			/*
			 * If class is in "new" vendor range,
			 * ensure supplied OUI is not zero
			 */
			if (!is_vendor_oui(mad_reg_req->oui)) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: No OUI specified for class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		}
		/* Make sure class supplied is consistent with RMPP */
		if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
			if (rmpp_version) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: RMPP version for non-RMPP class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		}

		/* Make sure class supplied is consistent with QP type */
		if (qp_type == IB_QPT_SMI) {
			if ((mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
			    (mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: Invalid SM QP type: class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		} else {
			if ((mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
			    (mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: Invalid GS QP type: class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		}
	} else {
		/* No registration request supplied */
		if (!send_handler)
			goto error1;
		if (registration_flags & IB_MAD_USER_RMPP)
			goto error1;
	}

	/* Validate device and port */
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		dev_notice(&device->dev,
			   "ib_register_mad_agent: Invalid port %d\n",
			   port_num);
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}

	/*
	 * Verify the QP requested is supported. For example, Ethernet
	 * devices will not have QP0.
	 */
	if (!port_priv->qp_info[qpn].qp) {
		dev_notice(&device->dev,
			   "ib_register_mad_agent: QP %d not supported\n", qpn);
		ret = ERR_PTR(-EPROTONOSUPPORT);
		goto error1;
	}

	/* Allocate structures */
	mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
	if (!mad_agent_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	if (mad_reg_req) {
		reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL);
		if (!reg_req) {
			ret = ERR_PTR(-ENOMEM);
			goto error3;
		}
	}

	/* Now, fill in the various structures */
	mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
	mad_agent_priv->reg_req = reg_req;
	mad_agent_priv->agent.rmpp_version = rmpp_version;
	mad_agent_priv->agent.device = device;
	mad_agent_priv->agent.recv_handler = recv_handler;
	mad_agent_priv->agent.send_handler = send_handler;
	mad_agent_priv->agent.context = context;
	mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_agent_priv->agent.port_num = port_num;
	mad_agent_priv->agent.flags = registration_flags;
	spin_lock_init(&mad_agent_priv->lock);
	INIT_LIST_HEAD(&mad_agent_priv->send_list);
	INIT_LIST_HEAD(&mad_agent_priv->wait_list);
	INIT_LIST_HEAD(&mad_agent_priv->done_list);
	INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
	INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
	INIT_LIST_HEAD(&mad_agent_priv->local_list);
	INIT_WORK(&mad_agent_priv->local_work, local_completions);
	atomic_set(&mad_agent_priv->refcount, 1);
	init_completion(&mad_agent_priv->comp);

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;

	/*
	 * Make sure MAD registration (if supplied)
	 * is non overlapping with any existing ones
	 */
	if (mad_reg_req) {
		mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
		if (!is_vendor_class(mgmt_class)) {
			class = port_priv->version[mad_reg_req->
						   mgmt_class_version].class;
			if (class) {
				method = class->method_table[mgmt_class];
				if (method) {
					if (method_in_use(&method,
							   mad_reg_req))
						goto error4;
				}
			}
			ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
						  mgmt_class);
		} else {
			/* "New" vendor class range */
			vendor = port_priv->version[mad_reg_req->
						    mgmt_class_version].vendor;
			if (vendor) {
				vclass = vendor_class_index(mgmt_class);
				vendor_class = vendor->vendor_class[vclass];
				if (vendor_class) {
					if (is_vendor_method_in_use(
							vendor_class,
							mad_reg_req))
						goto error4;
				}
			}
			ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
		}
		if (ret2) {
			ret = ERR_PTR(ret2);
			goto error4;
		}
	}

	/* Add mad agent into port's agent list */
	list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	return &mad_agent_priv->agent;

error4:
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
	kfree(reg_req);
error3:
	kfree(mad_agent_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_agent);
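
/*
 * Example (illustrative sketch, not used by this file): registering a GSI
 * agent for Performance Management class MADs.  The handler and context
 * names are hypothetical.
 *
 *	struct ib_mad_reg_req req = {};
 *	struct ib_mad_agent *agent;
 *
 *	req.mgmt_class = IB_MGMT_CLASS_PERF_MGMT;
 *	req.mgmt_class_version = 1;
 *	set_bit(IB_MGMT_METHOD_GET, req.method_mask);
 *
 *	agent = ib_register_mad_agent(device, port_num, IB_QPT_GSI, &req, 0,
 *				      my_send_handler, my_recv_handler,
 *				      my_context, 0);
 *	if (IS_ERR(agent))
 *		return PTR_ERR(agent);
 */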

static inline int is_snooping_sends(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(/*IB_MAD_SNOOP_POSTED_SENDS |
		 IB_MAD_SNOOP_RMPP_SENDS |*/
		 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
		 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
}

static inline int is_snooping_recvs(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(IB_MAD_SNOOP_RECVS /*|
		 IB_MAD_SNOOP_RMPP_RECVS*/));
}

static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
				struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_snoop_private **new_snoop_table;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	/* Check for empty slot in array. */
	for (i = 0; i < qp_info->snoop_table_size; i++)
		if (!qp_info->snoop_table[i])
			break;

	if (i == qp_info->snoop_table_size) {
		/* Grow table. */
		new_snoop_table = krealloc(qp_info->snoop_table,
					   sizeof mad_snoop_priv *
					   (qp_info->snoop_table_size + 1),
					   GFP_ATOMIC);
		if (!new_snoop_table) {
			i = -ENOMEM;
			goto out;
		}

		qp_info->snoop_table = new_snoop_table;
		qp_info->snoop_table_size++;
	}
	qp_info->snoop_table[i] = mad_snoop_priv;
	atomic_inc(&qp_info->snoop_count);
out:
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
	return i;
}

struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   int mad_snoop_flags,
					   ib_mad_snoop_handler snoop_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret;
	struct ib_mad_snoop_private *mad_snoop_priv;
	int qpn;

	/* Validate parameters */
	if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
	    (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}
	/* Allocate structures */
	mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
	if (!mad_snoop_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	/* Now, fill in the various structures */
	mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
	mad_snoop_priv->agent.device = device;
	mad_snoop_priv->agent.recv_handler = recv_handler;
	mad_snoop_priv->agent.snoop_handler = snoop_handler;
	mad_snoop_priv->agent.context = context;
	mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_snoop_priv->agent.port_num = port_num;
	mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
	init_completion(&mad_snoop_priv->comp);
	mad_snoop_priv->snoop_index = register_snoop_agent(
						&port_priv->qp_info[qpn],
						mad_snoop_priv);
	if (mad_snoop_priv->snoop_index < 0) {
		ret = ERR_PTR(mad_snoop_priv->snoop_index);
		goto error2;
	}

	atomic_set(&mad_snoop_priv->refcount, 1);
	return &mad_snoop_priv->agent;

error2:
	kfree(mad_snoop_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_snoop);

static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	if (atomic_dec_and_test(&mad_agent_priv->refcount))
		complete(&mad_agent_priv->comp);
}

static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
{
	if (atomic_dec_and_test(&mad_snoop_priv->refcount))
		complete(&mad_snoop_priv->comp);
}

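/*
 * Agent teardown relies on the refcount/completion pair above: the final
 * deref_*_agent() call completes ->comp, on which the unregister paths
 * below block via wait_for_completion() before freeing the agent.
 */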
static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_port_private *port_priv;
	unsigned long flags;

	/* Note that we could still be handling received MADs */

	/*
	 * Canceling all sends results in dropping received response
	 * MADs, preventing us from queuing additional work
	 */
	cancel_mads(mad_agent_priv);
	port_priv = mad_agent_priv->qp_info->port_priv;
	cancel_delayed_work(&mad_agent_priv->timed_work);

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	remove_mad_reg_req(mad_agent_priv);
	list_del(&mad_agent_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	flush_workqueue(port_priv->wq);
	ib_cancel_rmpp_recvs(mad_agent_priv);

	deref_mad_agent(mad_agent_priv);
	wait_for_completion(&mad_agent_priv->comp);

	kfree(mad_agent_priv->reg_req);
	kfree(mad_agent_priv);
}

static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_qp_info *qp_info;
	unsigned long flags;

	qp_info = mad_snoop_priv->qp_info;
	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
	atomic_dec(&qp_info->snoop_count);
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);

	deref_snoop_agent(mad_snoop_priv);
	wait_for_completion(&mad_snoop_priv->comp);

	kfree(mad_snoop_priv);
}

/*
 * ib_unregister_mad_agent - Unregisters a client from using MAD services
 */
void ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_snoop_private *mad_snoop_priv;

	/* If the TID is zero, the agent can only snoop. */
	if (mad_agent->hi_tid) {
		mad_agent_priv = container_of(mad_agent,
					      struct ib_mad_agent_private,
					      agent);
		unregister_mad_agent(mad_agent_priv);
	} else {
		mad_snoop_priv = container_of(mad_agent,
					      struct ib_mad_snoop_private,
					      agent);
		unregister_mad_snoop(mad_snoop_priv);
	}
}
EXPORT_SYMBOL(ib_unregister_mad_agent);

static void dequeue_mad(struct ib_mad_list_head *mad_list)
{
	struct ib_mad_queue *mad_queue;
	unsigned long flags;

	BUG_ON(!mad_list->mad_queue);
	mad_queue = mad_list->mad_queue;
	spin_lock_irqsave(&mad_queue->lock, flags);
	list_del(&mad_list->list);
	mad_queue->count--;
	spin_unlock_irqrestore(&mad_queue->lock, flags);
}

static void snoop_send(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_send_buf *send_buf,
		       struct ib_mad_send_wc *mad_send_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
						    send_buf, mad_send_wc);
		deref_snoop_agent(mad_snoop_priv);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

static void snoop_recv(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_recv_wc *mad_recv_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent, NULL,
						   mad_recv_wc);
		deref_snoop_agent(mad_snoop_priv);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

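/*
 * Synthesize a receive work completion for a directed route SMP that is
 * processed locally, making it look as if it had arrived on the wire.
 */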
static void build_smp_wc(struct ib_qp *qp, struct ib_cqe *cqe, u16 slid,
		u16 pkey_index, u8 port_num, struct ib_wc *wc)
{
	memset(wc, 0, sizeof *wc);
	wc->wr_cqe = cqe;
	wc->status = IB_WC_SUCCESS;
	wc->opcode = IB_WC_RECV;
	wc->pkey_index = pkey_index;
	wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
	wc->src_qp = IB_QP0;
	wc->qp = qp;
	wc->slid = slid;
	wc->sl = 0;
	wc->dlid_path_bits = 0;
	wc->port_num = port_num;
}

static size_t mad_priv_size(const struct ib_mad_private *mp)
{
	return sizeof(struct ib_mad_private) + mp->mad_size;
}

static struct ib_mad_private *alloc_mad_private(size_t mad_size, gfp_t flags)
{
	size_t size = sizeof(struct ib_mad_private) + mad_size;
	struct ib_mad_private *ret = kzalloc(size, flags);

	if (ret)
		ret->mad_size = mad_size;

	return ret;
}

static size_t port_mad_size(const struct ib_mad_port_private *port_priv)
{
	return rdma_max_mad_size(port_priv->device, port_priv->port_num);
}

static size_t mad_priv_dma_size(const struct ib_mad_private *mp)
{
	return sizeof(struct ib_grh) + mp->mad_size;
}

/*
 * Return 0 if SMP is to be sent
 * Return 1 if SMP was consumed locally (whether or not solicited)
 * Return < 0 if error
 */
static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
				  struct ib_mad_send_wr_private *mad_send_wr)
{
	int ret = 0;
	struct ib_smp *smp = mad_send_wr->send_buf.mad;
	struct opa_smp *opa_smp = (struct opa_smp *)smp;
	unsigned long flags;
	struct ib_mad_local_private *local;
	struct ib_mad_private *mad_priv;
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent_private *recv_mad_agent = NULL;
	struct ib_device *device = mad_agent_priv->agent.device;
	u8 port_num;
	struct ib_wc mad_wc;
	struct ib_ud_wr *send_wr = &mad_send_wr->send_wr;
	size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv);
	u16 out_mad_pkey_index = 0;
	u16 drslid;
	bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
				    mad_agent_priv->qp_info->port_priv->port_num);

	if (rdma_cap_ib_switch(device) &&
	    smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		port_num = send_wr->port_num;
	else
		port_num = mad_agent_priv->agent.port_num;

	/*
	 * Directed route handling starts if the initial LID routed part of
	 * a request or the ending LID routed part of a response is empty.
	 * If we are at the start of the LID routed part, don't update the
	 * hop_ptr or hop_cnt.  See section 14.2.2, Vol 1 IB spec.
	 */
	if (opa && smp->class_version == OPA_SM_CLASS_VERSION) {
		u32 opa_drslid;

		if ((opa_get_smp_direction(opa_smp)
		     ? opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) ==
		     OPA_LID_PERMISSIVE &&
		     opa_smi_handle_dr_smp_send(opa_smp,
						rdma_cap_ib_switch(device),
						port_num) == IB_SMI_DISCARD) {
			ret = -EINVAL;
			dev_err(&device->dev, "OPA Invalid directed route\n");
			goto out;
		}
		opa_drslid = be32_to_cpu(opa_smp->route.dr.dr_slid);
		if (opa_drslid != be32_to_cpu(OPA_LID_PERMISSIVE) &&
		    opa_drslid & 0xffff0000) {
			ret = -EINVAL;
			dev_err(&device->dev, "OPA Invalid dr_slid 0x%x\n",
			       opa_drslid);
			goto out;
		}
		drslid = (u16)(opa_drslid & 0x0000ffff);

		/* Check to post send on QP or process locally */
		if (opa_smi_check_local_smp(opa_smp, device) == IB_SMI_DISCARD &&
		    opa_smi_check_local_returning_smp(opa_smp, device) == IB_SMI_DISCARD)
			goto out;
	} else {
		if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
		     IB_LID_PERMISSIVE &&
		     smi_handle_dr_smp_send(smp, rdma_cap_ib_switch(device), port_num) ==
		     IB_SMI_DISCARD) {
			ret = -EINVAL;
			dev_err(&device->dev, "Invalid directed route\n");
			goto out;
		}
		drslid = be16_to_cpu(smp->dr_slid);

		/* Check to post send on QP or process locally */
		if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
		    smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
			goto out;
	}

	local = kmalloc(sizeof *local, GFP_ATOMIC);
	if (!local) {
		ret = -ENOMEM;
		goto out;
	}
	local->mad_priv = NULL;
	local->recv_mad_agent = NULL;
	mad_priv = alloc_mad_private(mad_size, GFP_ATOMIC);
	if (!mad_priv) {
		ret = -ENOMEM;
		kfree(local);
		goto out;
	}

	build_smp_wc(mad_agent_priv->agent.qp,
		     send_wr->wr.wr_cqe, drslid,
		     send_wr->pkey_index,
		     send_wr->port_num, &mad_wc);

	if (opa && smp->base_version == OPA_MGMT_BASE_VERSION) {
		mad_wc.byte_len = mad_send_wr->send_buf.hdr_len
					+ mad_send_wr->send_buf.data_len
					+ sizeof(struct ib_grh);
	}

	/* No GRH for DR SMP */
	ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
				  (const struct ib_mad_hdr *)smp, mad_size,
				  (struct ib_mad_hdr *)mad_priv->mad,
				  &mad_size, &out_mad_pkey_index);
	switch (ret) {
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
		if (ib_response_mad((const struct ib_mad_hdr *)mad_priv->mad) &&
		    mad_agent_priv->agent.recv_handler) {
			local->mad_priv = mad_priv;
			local->recv_mad_agent = mad_agent_priv;
			/*
			 * Reference MAD agent until receive
			 * side of local completion handled
			 */
			atomic_inc(&mad_agent_priv->refcount);
		} else
			kfree(mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
		kfree(mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS:
		/* Treat like an incoming receive MAD */
		port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
					    mad_agent_priv->agent.port_num);
		if (port_priv) {
			memcpy(mad_priv->mad, smp, mad_priv->mad_size);
			recv_mad_agent = find_mad_agent(port_priv,
						        (const struct ib_mad_hdr *)mad_priv->mad);
		}
		if (!port_priv || !recv_mad_agent) {
			/*
			 * No receiving agent so drop packet and
			 * generate send completion.
			 */
			kfree(mad_priv);
			break;
		}
		local->mad_priv = mad_priv;
		local->recv_mad_agent = recv_mad_agent;
		break;
	default:
		kfree(mad_priv);
		kfree(local);
		ret = -EINVAL;
		goto out;
	}

	local->mad_send_wr = mad_send_wr;
	if (opa) {
		local->mad_send_wr->send_wr.pkey_index = out_mad_pkey_index;
		local->return_wc_byte_len = mad_size;
	}
	/* Reference MAD agent until send side of local completion handled */
	atomic_inc(&mad_agent_priv->refcount);
	/* Queue local completion to local list */
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	queue_work(mad_agent_priv->qp_info->port_priv->wq,
		   &mad_agent_priv->local_work);
	ret = 1;
out:
	return ret;
}

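/*
 * Worked example (illustrative): for an IB MAD (mad_size 256) with an SA
 * header (hdr_len 56) and data_len 180, seg_size is 200, so the pad is
 * 200 - 180 % 200 = 20 bytes.
 */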
static int get_pad_size(int hdr_len, int data_len, size_t mad_size)
{
	int seg_size, pad;

	seg_size = mad_size - hdr_len;
	if (data_len && seg_size) {
		pad = seg_size - data_len % seg_size;
		return pad == seg_size ? 0 : pad;
	} else
		return seg_size;
}

static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_segment *s, *t;

	list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
		list_del(&s->list);
		kfree(s);
	}
}

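/*
 * Allocate the RMPP segment list for a send: one seg_size-sized segment
 * per chunk of payload, with any padding in the final segment zeroed, and
 * the RMPP header of the message initialized as an active DATA MAD.
 */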
static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
				size_t mad_size, gfp_t gfp_mask)
{
	struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
	struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
	struct ib_rmpp_segment *seg = NULL;
	int left, seg_size, pad;

	send_buf->seg_size = mad_size - send_buf->hdr_len;
	send_buf->seg_rmpp_size = mad_size - IB_MGMT_RMPP_HDR;
	seg_size = send_buf->seg_size;
	pad = send_wr->pad;

	/* Allocate data segments. */
	for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
		seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
		if (!seg) {
			free_send_rmpp_list(send_wr);
			return -ENOMEM;
		}
		seg->num = ++send_buf->seg_count;
		list_add_tail(&seg->list, &send_wr->rmpp_list);
	}

	/* Zero any padding */
	if (pad)
		memset(seg->data + seg_size - pad, 0, pad);

	rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
					  agent.rmpp_version;
	rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);

	send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
					struct ib_rmpp_segment, list);
	send_wr->last_ack_seg = send_wr->cur_seg;
	return 0;
}

int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent)
{
	return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP);
}
EXPORT_SYMBOL(ib_mad_kernel_rmpp_agent);

struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
					    u32 remote_qpn, u16 pkey_index,
					    int rmpp_active,
					    int hdr_len, int data_len,
					    gfp_t gfp_mask,
					    u8 base_version)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	int pad, message_size, ret, size;
	void *buf;
	size_t mad_size;
	bool opa;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
				      agent);

	opa = rdma_cap_opa_mad(mad_agent->device, mad_agent->port_num);

	if (opa && base_version == OPA_MGMT_BASE_VERSION)
		mad_size = sizeof(struct opa_mad);
	else
		mad_size = sizeof(struct ib_mad);

	pad = get_pad_size(hdr_len, data_len, mad_size);
	message_size = hdr_len + data_len + pad;

	if (ib_mad_kernel_rmpp_agent(mad_agent)) {
		if (!rmpp_active && message_size > mad_size)
			return ERR_PTR(-EINVAL);
	} else
		if (rmpp_active || message_size > mad_size)
			return ERR_PTR(-EINVAL);

	size = rmpp_active ? hdr_len : mad_size;
	buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	mad_send_wr = buf + size;
	INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
	mad_send_wr->send_buf.mad = buf;
	mad_send_wr->send_buf.hdr_len = hdr_len;
	mad_send_wr->send_buf.data_len = data_len;
	mad_send_wr->pad = pad;

	mad_send_wr->mad_agent_priv = mad_agent_priv;
	mad_send_wr->sg_list[0].length = hdr_len;
	mad_send_wr->sg_list[0].lkey = mad_agent->qp->pd->local_dma_lkey;

	/* OPA MADs don't have to be the full 2048 bytes */
	if (opa && base_version == OPA_MGMT_BASE_VERSION &&
	    data_len < mad_size - hdr_len)
		mad_send_wr->sg_list[1].length = data_len;
	else
		mad_send_wr->sg_list[1].length = mad_size - hdr_len;

	mad_send_wr->sg_list[1].lkey = mad_agent->qp->pd->local_dma_lkey;

	mad_send_wr->mad_list.cqe.done = ib_mad_send_done;

	mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;
	mad_send_wr->send_wr.wr.sg_list = mad_send_wr->sg_list;
	mad_send_wr->send_wr.wr.num_sge = 2;
	mad_send_wr->send_wr.wr.opcode = IB_WR_SEND;
	mad_send_wr->send_wr.wr.send_flags = IB_SEND_SIGNALED;
	mad_send_wr->send_wr.remote_qpn = remote_qpn;
	mad_send_wr->send_wr.remote_qkey = IB_QP_SET_QKEY;
	mad_send_wr->send_wr.pkey_index = pkey_index;

	if (rmpp_active) {
		ret = alloc_send_rmpp_list(mad_send_wr, mad_size, gfp_mask);
		if (ret) {
			kfree(buf);
			return ERR_PTR(ret);
		}
	}

	mad_send_wr->send_buf.mad_agent = mad_agent;
	atomic_inc(&mad_agent_priv->refcount);
	return &mad_send_wr->send_buf;
}
EXPORT_SYMBOL(ib_create_send_mad);
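
/*
 * Example (illustrative sketch): allocating a simple, non-RMPP IB MAD for
 * an agent registered earlier.  The argument names are hypothetical.
 *
 *	struct ib_mad_send_buf *msg;
 *
 *	msg = ib_create_send_mad(agent, remote_qpn, pkey_index, 0,
 *				 IB_MGMT_MAD_HDR,
 *				 sizeof(struct ib_mad) - IB_MGMT_MAD_HDR,
 *				 GFP_KERNEL, IB_MGMT_BASE_VERSION);
 *	if (IS_ERR(msg))
 *		return PTR_ERR(msg);
 */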

int ib_get_mad_data_offset(u8 mgmt_class)
{
	if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
		return IB_MGMT_SA_HDR;
	else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
		 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
		 (mgmt_class == IB_MGMT_CLASS_BIS))
		return IB_MGMT_DEVICE_HDR;
	else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
		 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return IB_MGMT_VENDOR_HDR;
	else
		return IB_MGMT_MAD_HDR;
}
EXPORT_SYMBOL(ib_get_mad_data_offset);

int ib_is_mad_class_rmpp(u8 mgmt_class)
{
	if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_BIS) ||
	    ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
	     (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
		return 1;
	return 0;
}
EXPORT_SYMBOL(ib_is_mad_class_rmpp);

void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct list_head *list;

	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
				   send_buf);
	list = &mad_send_wr->cur_seg->list;

	if (mad_send_wr->cur_seg->num < seg_num) {
		list_for_each_entry(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
				break;
	} else if (mad_send_wr->cur_seg->num > seg_num) {
		list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
				break;
	}
	return mad_send_wr->cur_seg->data;
}
EXPORT_SYMBOL(ib_get_rmpp_segment);

static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
{
	if (mad_send_wr->send_buf.seg_count)
		return ib_get_rmpp_segment(&mad_send_wr->send_buf,
					   mad_send_wr->seg_num);
	else
		return mad_send_wr->send_buf.mad +
		       mad_send_wr->send_buf.hdr_len;
}

void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;

	mad_agent_priv = container_of(send_buf->mad_agent,
				      struct ib_mad_agent_private, agent);
	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
				   send_buf);

	free_send_rmpp_list(mad_send_wr);
	kfree(send_buf->mad);
	deref_mad_agent(mad_agent_priv);
}
EXPORT_SYMBOL(ib_free_send_mad);

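/*
 * DMA-map the header and payload and post the work request, parking it on
 * the overflow list if the send queue is already at max_active.
 */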
int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_qp_info *qp_info;
	struct list_head *list;
	struct ib_send_wr *bad_send_wr;
	struct ib_mad_agent *mad_agent;
	struct ib_sge *sge;
	unsigned long flags;
	int ret;

	/* Set WR ID to find mad_send_wr upon completion */
	qp_info = mad_send_wr->mad_agent_priv->qp_info;
	mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;
	mad_send_wr->mad_list.cqe.done = ib_mad_send_done;
	mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;

	mad_agent = mad_send_wr->send_buf.mad_agent;
	sge = mad_send_wr->sg_list;
	sge[0].addr = ib_dma_map_single(mad_agent->device,
					mad_send_wr->send_buf.mad,
					sge[0].length,
					DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[0].addr)))
		return -ENOMEM;

	mad_send_wr->header_mapping = sge[0].addr;

	sge[1].addr = ib_dma_map_single(mad_agent->device,
					ib_get_payload(mad_send_wr),
					sge[1].length,
					DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[1].addr))) {
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->header_mapping,
				    sge[0].length, DMA_TO_DEVICE);
		return -ENOMEM;
	}
	mad_send_wr->payload_mapping = sge[1].addr;

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
		ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr.wr,
				   &bad_send_wr);
		list = &qp_info->send_queue.list;
	} else {
		ret = 0;
		list = &qp_info->overflow_list;
	}

	if (!ret) {
		qp_info->send_queue.count++;
		list_add_tail(&mad_send_wr->mad_list.list, list);
	}
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
	if (ret) {
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->header_mapping,
				    sge[0].length, DMA_TO_DEVICE);
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->payload_mapping,
				    sge[1].length, DMA_TO_DEVICE);
	}
	return ret;
}

/*
 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
 *  with the registered client
 */
int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
		     struct ib_mad_send_buf **bad_send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_buf *next_send_buf;
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long flags;
	int ret = -EINVAL;

	/* Walk list of send WRs and post each on send list */
	for (; send_buf; send_buf = next_send_buf) {

		mad_send_wr = container_of(send_buf,
					   struct ib_mad_send_wr_private,
					   send_buf);
		mad_agent_priv = mad_send_wr->mad_agent_priv;

		if (!send_buf->mad_agent->send_handler ||
		    (send_buf->timeout_ms &&
		     !send_buf->mad_agent->recv_handler)) {
			ret = -EINVAL;
			goto error;
		}

		if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
			if (mad_agent_priv->agent.rmpp_version) {
				ret = -EINVAL;
				goto error;
			}
		}

		/*
		 * Save pointer to next work request to post in case the
		 * current one completes, and the user modifies the work
		 * request associated with the completion
		 */
		next_send_buf = send_buf->next;
		mad_send_wr->send_wr.ah = send_buf->ah;

		if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
		    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
			ret = handle_outgoing_dr_smp(mad_agent_priv,
						     mad_send_wr);
			if (ret < 0)		/* error */
				goto error;
			else if (ret == 1)	/* locally consumed */
				continue;
		}

		mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
		/* Timeout will be updated after send completes */
		mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
		mad_send_wr->max_retries = send_buf->retries;
		mad_send_wr->retries_left = send_buf->retries;
		send_buf->retries = 0;
		/* Reference for work request to QP + response */
		mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
		mad_send_wr->status = IB_WC_SUCCESS;

		/* Reference MAD agent until send completes */
		atomic_inc(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_agent_priv->send_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
			ret = ib_send_rmpp_mad(mad_send_wr);
			if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
				ret = ib_send_mad(mad_send_wr);
		} else
			ret = ib_send_mad(mad_send_wr);
		if (ret < 0) {
			/* Fail send request */
			spin_lock_irqsave(&mad_agent_priv->lock, flags);
			list_del(&mad_send_wr->agent_list);
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			atomic_dec(&mad_agent_priv->refcount);
			goto error;
		}
	}
	return 0;
error:
	if (bad_send_buf)
		*bad_send_buf = send_buf;
	return ret;
}
EXPORT_SYMBOL(ib_post_send_mad);
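
/*
 * Example (illustrative sketch, continuing the ib_create_send_mad sketch
 * above; "ah" is a previously created address handle):
 *
 *	struct ib_mad_send_buf *bad;
 *	int ret;
 *
 *	msg->ah = ah;
 *	msg->timeout_ms = 100;
 *	msg->retries = 3;
 *	ret = ib_post_send_mad(msg, &bad);
 *	if (ret)
 *		ib_free_send_mad(bad);
 */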

/*
 * ib_free_recv_mad - Returns data buffers used to receive
 *  a MAD to the access layer
 */
void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *priv;
	struct list_head free_list;

	INIT_LIST_HEAD(&free_list);
	list_splice_init(&mad_recv_wc->rmpp_list, &free_list);

	list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
					&free_list, list) {
		mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
					   recv_buf);
		mad_priv_hdr = container_of(mad_recv_wc,
					    struct ib_mad_private_header,
					    recv_wc);
		priv = container_of(mad_priv_hdr, struct ib_mad_private,
				    header);
		kfree(priv);
	}
}
EXPORT_SYMBOL(ib_free_recv_mad);

struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
					u8 rmpp_version,
					ib_mad_send_handler send_handler,
					ib_mad_recv_handler recv_handler,
					void *context)
{
	return ERR_PTR(-EINVAL);	/* XXX: for now */
}
EXPORT_SYMBOL(ib_redirect_mad_qp);

int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
		      struct ib_wc *wc)
{
	dev_err(&mad_agent->device->dev,
		"ib_process_mad_wc() not implemented yet\n");
	return 0;
}
EXPORT_SYMBOL(ib_process_mad_wc);

static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req)
{
	int i;

	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) {
		if ((*method)->agent[i]) {
			pr_err("Method %d already in use\n", i);
			return -EINVAL;
		}
	}
	return 0;
}

static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
{
	/* Allocate management method table */
	*method = kzalloc(sizeof **method, GFP_ATOMIC);
	return (*method) ? 0 : (-ENOMEM);
}

/*
 * Check to see if there are any methods still in use
 */
static int check_method_table(struct ib_mad_mgmt_method_table *method)
{
	int i;

	for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
		if (method->agent[i])
			return 1;
	return 0;
}

/*
 * Check to see if there are any method tables for this class still in use
 */
static int check_class_table(struct ib_mad_mgmt_class_table *class)
{
	int i;

	for (i = 0; i < MAX_MGMT_CLASS; i++)
		if (class->method_table[i])
			return 1;
	return 0;
}

static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
{
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++)
		if (vendor_class->method_table[i])
			return 1;
	return 0;
}

static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
			   const char *oui)
{
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++)
		/* Is there matching OUI for this vendor class ? */
		if (!memcmp(vendor_class->oui[i], oui, 3))
			return i;

	return -1;
}

static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
{
	int i;

	for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
		if (vendor->vendor_class[i])
			return 1;

	return 0;
}

static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
				     struct ib_mad_agent_private *agent)
{
	int i;

	/* Remove any methods for this mad agent */
	for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
		if (method->agent[i] == agent) {
			method->agent[i] = NULL;
		}
	}
}

static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_class_table **class;
	struct ib_mad_mgmt_method_table **method;
	int i, ret;

	port_priv = agent_priv->qp_info->port_priv;
	class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
	if (!*class) {
		/* Allocate management class table for "new" class version */
		*class = kzalloc(sizeof **class, GFP_ATOMIC);
		if (!*class) {
			ret = -ENOMEM;
			goto error1;
		}

		/* Allocate method table for this management class */
		method = &(*class)->method_table[mgmt_class];
		if ((ret = allocate_method_table(method)))
			goto error2;
	} else {
		method = &(*class)->method_table[mgmt_class];
		if (!*method) {
			/* Allocate method table for this management class */
			if ((ret = allocate_method_table(method)))
				goto error1;
		}
	}

	/* Now, make sure methods are not already in use */
	if (method_in_use(method, mad_reg_req))
		goto error3;

	/* Finally, add in methods being registered */
	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
		(*method)->agent[i] = agent_priv;

	return 0;

error3:
	/* Remove any methods for this mad agent */
	remove_methods_mad_agent(*method, agent_priv);
	/* Now, check to see if there are any methods in use */
	if (!check_method_table(*method)) {
		/* If not, release management method table */
		kfree(*method);
		*method = NULL;
	}
	ret = -EINVAL;
	goto error1;
error2:
	kfree(*class);
	*class = NULL;
error1:
	return ret;
}

static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_vendor_class_table **vendor_table;
	struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
	struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
	struct ib_mad_mgmt_method_table **method;
	int i, ret = -ENOMEM;
	u8 vclass;

	/* "New" vendor (with OUI) class */
	vclass = vendor_class_index(mad_reg_req->mgmt_class);
	port_priv = agent_priv->qp_info->port_priv;
	vendor_table = &port_priv->version[
				mad_reg_req->mgmt_class_version].vendor;
	if (!*vendor_table) {
		/* Allocate mgmt vendor class table for "new" class version */
		vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
		if (!vendor)
			goto error1;

		*vendor_table = vendor;
	}
	if (!(*vendor_table)->vendor_class[vclass]) {
		/* Allocate table for this management vendor class */
		vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
		if (!vendor_class)
			goto error2;

		(*vendor_table)->vendor_class[vclass] = vendor_class;
	}
	for (i = 0; i < MAX_MGMT_OUI; i++) {
		/* Is there matching OUI for this vendor class ? */
		if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
			    mad_reg_req->oui, 3)) {
			method = &(*vendor_table)->vendor_class[
						vclass]->method_table[i];
			BUG_ON(!*method);
			goto check_in_use;
		}
	}
	for (i = 0; i < MAX_MGMT_OUI; i++) {
		/* OUI slot available ? */
		if (!is_vendor_oui((*vendor_table)->vendor_class[
				vclass]->oui[i])) {
			method = &(*vendor_table)->vendor_class[
				vclass]->method_table[i];
			BUG_ON(*method);
			/* Allocate method table for this OUI */
			if ((ret = allocate_method_table(method)))
				goto error3;
			memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
			       mad_reg_req->oui, 3);
			goto check_in_use;
		}
	}
	dev_err(&agent_priv->agent.device->dev, "All OUI slots in use\n");
	goto error3;

check_in_use:
	/* Now, make sure methods are not already in use */
	if (method_in_use(method, mad_reg_req))
		goto error4;

	/* Finally, add in methods being registered */
	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
		(*method)->agent[i] = agent_priv;

	return 0;

error4:
	/* Remove any methods for this mad agent */
	remove_methods_mad_agent(*method, agent_priv);
	/* Now, check to see if there are any methods in use */
	if (!check_method_table(*method)) {
		/* If not, release management method table */
		kfree(*method);
		*method = NULL;
	}
	ret = -EINVAL;
error3:
	if (vendor_class) {
		(*vendor_table)->vendor_class[vclass] = NULL;
		kfree(vendor_class);
	}
error2:
	if (vendor) {
		*vendor_table = NULL;
		kfree(vendor);
	}
error1:
	return ret;
}

static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_method_table *method;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	int index;
	u8 mgmt_class;

	/*
	 * Was MAD registration request supplied
	 * with original registration ?
	 */
	if (!agent_priv->reg_req)
		goto out;

	port_priv = agent_priv->qp_info->port_priv;
	mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
	class = port_priv->version[
			agent_priv->reg_req->mgmt_class_version].class;
	if (!class)
		goto vendor_check;

	method = class->method_table[mgmt_class];
	if (method) {
		/* Remove any methods for this mad agent */
		remove_methods_mad_agent(method, agent_priv);
		/* Now, check to see if there are any methods still in use */
		if (!check_method_table(method)) {
			/* If not, release management method table */
			kfree(method);
			class->method_table[mgmt_class] = NULL;
			/* Any management classes left ? */
			if (!check_class_table(class)) {
				/* If not, release management class table */
				kfree(class);
				port_priv->version[
					agent_priv->reg_req->
					mgmt_class_version].class = NULL;
			}
		}
	}

vendor_check:
	if (!is_vendor_class(mgmt_class))
		goto out;

	/* normalize mgmt_class to vendor range 2 */
	mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
	vendor = port_priv->version[
			agent_priv->reg_req->mgmt_class_version].vendor;

	if (!vendor)
		goto out;

	vendor_class = vendor->vendor_class[mgmt_class];
	if (vendor_class) {
		index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
		if (index < 0)
			goto out;
		method = vendor_class->method_table[index];
		if (method) {
			/* Remove any methods for this mad agent */
			remove_methods_mad_agent(method, agent_priv);
			/*
			 * Now, check to see if there are
			 * any methods still in use
			 */
			if (!check_method_table(method)) {
				/* If not, release management method table */
				kfree(method);
				vendor_class->method_table[index] = NULL;
				memset(vendor_class->oui[index], 0, 3);
				/* Any OUIs left ? */
				if (!check_vendor_class(vendor_class)) {
					/* If not, release vendor class table */
					kfree(vendor_class);
					vendor->vendor_class[mgmt_class] = NULL;
					/* Any other vendor classes left ? */
					if (!check_vendor_table(vendor)) {
						kfree(vendor);
						port_priv->version[
							agent_priv->reg_req->
							mgmt_class_version].
							vendor = NULL;
					}
				}
			}
		}
	}

out:
	return;
}

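/*
 * Find the agent an incoming MAD should be delivered to.  Responses are
 * routed by the high 32 bits of the transaction ID; requests by class
 * version, management class, method, and (for vendor classes) OUI.
 */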
static struct ib_mad_agent_private *
find_mad_agent(struct ib_mad_port_private *port_priv,
	       const struct ib_mad_hdr *mad_hdr)
{
	struct ib_mad_agent_private *mad_agent = NULL;
	unsigned long flags;

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	if (ib_response_mad(mad_hdr)) {
		u32 hi_tid;
		struct ib_mad_agent_private *entry;

		/*
		 * Routing is based on high 32 bits of transaction ID
		 * of MAD.
		 */
		hi_tid = be64_to_cpu(mad_hdr->tid) >> 32;
		list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
			if (entry->agent.hi_tid == hi_tid) {
				mad_agent = entry;
				break;
			}
		}
	} else {
		struct ib_mad_mgmt_class_table *class;
		struct ib_mad_mgmt_method_table *method;
		struct ib_mad_mgmt_vendor_class_table *vendor;
		struct ib_mad_mgmt_vendor_class *vendor_class;
		const struct ib_vendor_mad *vendor_mad;
		int index;

		/*
		 * Routing is based on version, class, and method
		 * For "newer" vendor MADs, also based on OUI
		 */
		if (mad_hdr->class_version >= MAX_MGMT_VERSION)
			goto out;
		if (!is_vendor_class(mad_hdr->mgmt_class)) {
			class = port_priv->version[
					mad_hdr->class_version].class;
			if (!class)
				goto out;
			if (convert_mgmt_class(mad_hdr->mgmt_class) >=
			    ARRAY_SIZE(class->method_table))
				goto out;
			method = class->method_table[convert_mgmt_class(
							mad_hdr->mgmt_class)];
			if (method)
				mad_agent = method->agent[mad_hdr->method &
							  ~IB_MGMT_METHOD_RESP];
		} else {
			vendor = port_priv->version[
					mad_hdr->class_version].vendor;
			if (!vendor)
				goto out;
			vendor_class = vendor->vendor_class[vendor_class_index(
			if (!vendor)
				goto out;
			vendor_class = vendor->vendor_class[vendor_class_index(
						mad_hdr->mgmt_class)];
			if (!vendor_class)
				goto out;
			/* Find matching OUI */
			vendor_mad = (const struct ib_vendor_mad *)mad_hdr;
			index = find_vendor_oui(vendor_class, vendor_mad->oui);
			if (index == -1)
				goto out;
			method = vendor_class->method_table[index];
			if (method) {
				mad_agent = method->agent[mad_hdr->method &
							  ~IB_MGMT_METHOD_RESP];
			}
		}
	}

	if (mad_agent) {
		if (mad_agent->agent.recv_handler)
			atomic_inc(&mad_agent->refcount);
		else {
			dev_notice(&port_priv->device->dev,
				   "No receive handler for client %p on port %d\n",
				   &mad_agent->agent, port_priv->port_num);
			mad_agent = NULL;
		}
	}
out:
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	return mad_agent;
}

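/*
 * Basic sanity checks on a received MAD: the base version must be
 * understood, SMP classes are only valid on QP0 and all other classes
 * only on QP1, and CM attributes other than ClassPortInfo must use the
 * Send method.
 */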
static int validate_mad(const struct ib_mad_hdr *mad_hdr,
			const struct ib_mad_qp_info *qp_info,
			bool opa)
{
	int valid = 0;
	u32 qp_num = qp_info->qp->qp_num;

	/* Make sure MAD base version is understood */
	if (mad_hdr->base_version != IB_MGMT_BASE_VERSION &&
	    (!opa || mad_hdr->base_version != OPA_MGMT_BASE_VERSION)) {
		pr_err("MAD received with unsupported base version %d %s\n",
		       mad_hdr->base_version, opa ? "(opa)" : "");
		goto out;
	}

	/* Filter SMI packets sent to other than QP0 */
	if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
	    (mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
		if (qp_num == 0)
			valid = 1;
	} else {
		/* CM attributes other than ClassPortInfo only use Send method */
		if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_CM) &&
		    (mad_hdr->attr_id != IB_MGMT_CLASSPORTINFO_ATTR_ID) &&
		    (mad_hdr->method != IB_MGMT_METHOD_SEND))
			goto out;
		/* Filter GSI packets sent to QP0 */
		if (qp_num != 0)
			valid = 1;
	}

out:
	return valid;
}

static int is_rmpp_data_mad(const struct ib_mad_agent_private *mad_agent_priv,
			    const struct ib_mad_hdr *mad_hdr)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
	return !mad_agent_priv->agent.rmpp_version ||
		!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) ||
		!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
				    IB_MGMT_RMPP_FLAG_ACTIVE) ||
		(rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
}

static inline int rcv_has_same_class(const struct ib_mad_send_wr_private *wr,
				     const struct ib_mad_recv_wc *rwc)
{
	return ((struct ib_mad_hdr *)(wr->send_buf.mad))->mgmt_class ==
		rwc->recv_buf.mad->mad_hdr.mgmt_class;
}

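/*
 * Check whether a received MAD and a queued send refer to the same remote
 * port: compare GRH GIDs when present, otherwise LIDs (masked by the
 * port's LMC for request/response pairs).
 */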
static inline int rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv,
				   const struct ib_mad_send_wr_private *wr,
				   const struct ib_mad_recv_wc *rwc)
{
	struct rdma_ah_attr attr;
	u8 send_resp, rcv_resp;
	union ib_gid sgid;
	struct ib_device *device = mad_agent_priv->agent.device;
	u8 port_num = mad_agent_priv->agent.port_num;
	u8 lmc;

	send_resp = ib_response_mad((struct ib_mad_hdr *)wr->send_buf.mad);
	rcv_resp = ib_response_mad(&rwc->recv_buf.mad->mad_hdr);

	if (send_resp == rcv_resp)
		/* both requests, or both responses. GIDs different */
		return 0;

	if (ib_query_ah(wr->send_buf.ah, &attr))
		/* Assume not equal, to avoid false positives. */
		return 0;

	if (!!(attr.ah_flags & IB_AH_GRH) !=
	    !!(rwc->wc->wc_flags & IB_WC_GRH))
		/* one has GID, other does not.  Assume different */
		return 0;

	if (!send_resp && rcv_resp) {
		/* is request/response. */
		if (!(attr.ah_flags & IB_AH_GRH)) {
			if (ib_get_cached_lmc(device, port_num, &lmc))
				return 0;
			return (!lmc || !((attr.src_path_bits ^
					   rwc->wc->dlid_path_bits) &
					  ((1 << lmc) - 1)));
		} else {
			if (ib_get_cached_gid(device, port_num,
					      attr.grh.sgid_index, &sgid, NULL))
				return 0;
			return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw,
				       16);
		}
	}

	if (!(attr.ah_flags & IB_AH_GRH))
		return attr.dlid == rwc->wc->slid;
	else
		return !memcmp(attr.grh.dgid.raw, rwc->recv_buf.grh->sgid.raw,
			       16);
}

static inline int is_direct(u8 class)
{
	return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE);
}

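/*
 * Find the send WR that a received response completes.  The send list is
 * searched as well as the wait list, since a response can arrive before
 * the send completion has been processed.
 */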
struct ib_mad_send_wr_private*
ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv,
		 const struct ib_mad_recv_wc *wc)
{
	struct ib_mad_send_wr_private *wr;
	const struct ib_mad_hdr *mad_hdr;

	mad_hdr = &wc->recv_buf.mad->mad_hdr;

	list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) {
		if ((wr->tid == mad_hdr->tid) &&
		    rcv_has_same_class(wr, wc) &&
		    /*
		     * Don't check GID for direct routed MADs.
		     * These might have permissive LIDs.
		     */
		    (is_direct(mad_hdr->mgmt_class) ||
		     rcv_has_same_gid(mad_agent_priv, wr, wc)))
			return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
	}

	/*
	 * It's possible to receive the response before we've
	 * been notified that the send has completed
	 */
	list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
		if (is_rmpp_data_mad(mad_agent_priv, wr->send_buf.mad) &&
		    wr->tid == mad_hdr->tid &&
		    wr->timeout &&
		    rcv_has_same_class(wr, wc) &&
		    /*
		     * Don't check GID for direct routed MADs.
		     * These might have permissive LIDs.
		     */
		    (is_direct(mad_hdr->mgmt_class) ||
		     rcv_has_same_gid(mad_agent_priv, wr, wc)))
			/* Verify request has not been canceled */
			return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
	}
	return NULL;
}

void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
{
	mad_send_wr->timeout = 0;
	if (mad_send_wr->refcount == 1)
		list_move_tail(&mad_send_wr->agent_list,
			      &mad_send_wr->mad_agent_priv->done_list);
}

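/*
 * Deliver a received MAD to its agent: reassemble RMPP segments when the
 * kernel handles RMPP for this agent, match responses to outstanding
 * requests, and invoke the agent's receive handler.
 */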
static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
				 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags;

	INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
	list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
	if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
		mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
						      mad_recv_wc);
		if (!mad_recv_wc) {
			deref_mad_agent(mad_agent_priv);
			return;
		}
	}

	/* Complete corresponding request */
	if (ib_response_mad(&mad_recv_wc->recv_buf.mad->mad_hdr)) {
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
		if (!mad_send_wr) {
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			if (!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)
			   && ib_is_mad_class_rmpp(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class)
			   && (ib_get_rmpp_flags(&((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr)
					& IB_MGMT_RMPP_FLAG_ACTIVE)) {
				/* user rmpp is in effect
				 * and this is an active RMPP MAD
				 */
				mad_agent_priv->agent.recv_handler(
						&mad_agent_priv->agent, NULL,
						mad_recv_wc);
				atomic_dec(&mad_agent_priv->refcount);
			} else {
				/* not user rmpp, revert to normal behavior and
				 * drop the mad */
				ib_free_recv_mad(mad_recv_wc);
				deref_mad_agent(mad_agent_priv);
				return;
			}
		} else {
			ib_mark_mad_done(mad_send_wr);
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

			/* Defined behavior is to complete response before request */
			mad_agent_priv->agent.recv_handler(
					&mad_agent_priv->agent,
					&mad_send_wr->send_buf,
					mad_recv_wc);
			atomic_dec(&mad_agent_priv->refcount);

			mad_send_wc.status = IB_WC_SUCCESS;
			mad_send_wc.vendor_err = 0;
			mad_send_wc.send_buf = &mad_send_wr->send_buf;
			ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
		}
	} else {
		mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, NULL,
						   mad_recv_wc);
		deref_mad_agent(mad_agent_priv);
	}
}

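/*
 * Process a directed route SMP received on an IB port: handle it locally,
 * discard it, or (on a switch) forward it toward the port indicated by
 * the DR path.
 */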
static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv,
				     const struct ib_mad_qp_info *qp_info,
				     const struct ib_wc *wc,
				     int port_num,
				     struct ib_mad_private *recv,
				     struct ib_mad_private *response)
{
	enum smi_forward_action retsmi;
	struct ib_smp *smp = (struct ib_smp *)recv->mad;

	if (smi_handle_dr_smp_recv(smp,
				   rdma_cap_ib_switch(port_priv->device),
				   port_num,
				   port_priv->device->phys_port_cnt) ==
				   IB_SMI_DISCARD)
		return IB_SMI_DISCARD;

	retsmi = smi_check_forward_dr_smp(smp);
	if (retsmi == IB_SMI_LOCAL)
		return IB_SMI_HANDLE;

	if (retsmi == IB_SMI_SEND) { /* don't forward */
		if (smi_handle_dr_smp_send(smp,
					   rdma_cap_ib_switch(port_priv->device),
					   port_num) == IB_SMI_DISCARD)
			return IB_SMI_DISCARD;

		if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD)
			return IB_SMI_DISCARD;
	} else if (rdma_cap_ib_switch(port_priv->device)) {
		/* forward case for switches */
		memcpy(response, recv, mad_priv_size(response));
		response->header.recv_wc.wc = &response->header.wc;
		response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
		response->header.recv_wc.recv_buf.grh = &response->grh;

		agent_send_response((const struct ib_mad_hdr *)response->mad,
				    &response->grh, wc,
				    port_priv->device,
				    smi_get_fwd_port(smp),
				    qp_info->qp->qp_num,
				    response->mad_size,
				    false);

		return IB_SMI_DISCARD;
	}
	return IB_SMI_HANDLE;
}

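/*
 * Turn an unclaimed Get/Set request into a GetResp with status
 * "unsupported method/attribute" so the sender fails fast instead of
 * waiting for a timeout.
 */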
static bool generate_unmatched_resp(const struct ib_mad_private *recv,
				    struct ib_mad_private *response,
				    size_t *resp_len, bool opa)
{
	const struct ib_mad_hdr *recv_hdr = (const struct ib_mad_hdr *)recv->mad;
	struct ib_mad_hdr *resp_hdr = (struct ib_mad_hdr *)response->mad;

	if (recv_hdr->method == IB_MGMT_METHOD_GET ||
	    recv_hdr->method == IB_MGMT_METHOD_SET) {
		memcpy(response, recv, mad_priv_size(response));
		response->header.recv_wc.wc = &response->header.wc;
		response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
		response->header.recv_wc.recv_buf.grh = &response->grh;
		resp_hdr->method = IB_MGMT_METHOD_GET_RESP;
		resp_hdr->status = cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB);
		if (recv_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
			resp_hdr->status |= IB_SMP_DIRECTION;

		if (opa && recv_hdr->base_version == OPA_MGMT_BASE_VERSION) {
			if (recv_hdr->mgmt_class ==
			    IB_MGMT_CLASS_SUBN_LID_ROUTED ||
			    recv_hdr->mgmt_class ==
			    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
				*resp_len = opa_get_smp_header_size(
							(struct opa_smp *)recv->mad);
			else
				*resp_len = sizeof(struct ib_mad_hdr);
		}

		return true;
	} else {
		return false;
	}
}

static enum smi_action
handle_opa_smi(struct ib_mad_port_private *port_priv,
	       struct ib_mad_qp_info *qp_info,
	       struct ib_wc *wc,
	       int port_num,
	       struct ib_mad_private *recv,
	       struct ib_mad_private *response)
{
	enum smi_forward_action retsmi;
	struct opa_smp *smp = (struct opa_smp *)recv->mad;

	if (opa_smi_handle_dr_smp_recv(smp,
				   rdma_cap_ib_switch(port_priv->device),
				   port_num,
				   port_priv->device->phys_port_cnt) ==
				   IB_SMI_DISCARD)
		return IB_SMI_DISCARD;

	retsmi = opa_smi_check_forward_dr_smp(smp);
	if (retsmi == IB_SMI_LOCAL)
		return IB_SMI_HANDLE;

	if (retsmi == IB_SMI_SEND) { /* don't forward */
		if (opa_smi_handle_dr_smp_send(smp,
					   rdma_cap_ib_switch(port_priv->device),
					   port_num) == IB_SMI_DISCARD)
			return IB_SMI_DISCARD;

		if (opa_smi_check_local_smp(smp, port_priv->device) ==
		    IB_SMI_DISCARD)
			return IB_SMI_DISCARD;

	} else if (rdma_cap_ib_switch(port_priv->device)) {
		/* forward case for switches */
		memcpy(response, recv, mad_priv_size(response));
		response->header.recv_wc.wc = &response->header.wc;
		response->header.recv_wc.recv_buf.opa_mad =
				(struct opa_mad *)response->mad;
		response->header.recv_wc.recv_buf.grh = &response->grh;

		agent_send_response((const struct ib_mad_hdr *)response->mad,
				    &response->grh, wc,
				    port_priv->device,
				    opa_smi_get_fwd_port(smp),
				    qp_info->qp->qp_num,
				    recv->header.wc.byte_len,
				    true);

		return IB_SMI_DISCARD;
	}

	return IB_SMI_HANDLE;
}

static enum smi_action
handle_smi(struct ib_mad_port_private *port_priv,
	   struct ib_mad_qp_info *qp_info,
	   struct ib_wc *wc,
	   int port_num,
	   struct ib_mad_private *recv,
	   struct ib_mad_private *response,
	   bool opa)
{
	struct ib_mad_hdr *mad_hdr = (struct ib_mad_hdr *)recv->mad;

	if (opa && mad_hdr->base_version == OPA_MGMT_BASE_VERSION &&
	    mad_hdr->class_version == OPA_SM_CLASS_VERSION)
		return handle_opa_smi(port_priv, qp_info, wc, port_num, recv,
				      response);

	return handle_ib_smi(port_priv, qp_info, wc, port_num, recv, response);
}

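/*
 * Receive completion handler: unmap the receive buffer, validate the MAD,
 * give SMI handling and the driver's process_mad a chance to consume it,
 * dispatch it to the matching agent, and repost a receive WR.
 */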
static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_mad_port_private *port_priv = cq->cq_context;
	struct ib_mad_list_head *mad_list =
		container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *recv, *response = NULL;
	struct ib_mad_agent_private *mad_agent;
	int port_num;
	int ret = IB_MAD_RESULT_SUCCESS;
	size_t mad_size;
	u16 resp_mad_pkey_index = 0;
	bool opa;

	if (list_empty_careful(&port_priv->port_list))
		return;

	if (wc->status != IB_WC_SUCCESS) {
		/*
		 * Receive errors indicate that the QP has entered the error
		 * state - error handling/shutdown code will cleanup
		 */
		return;
	}

	qp_info = mad_list->mad_queue->qp_info;
	dequeue_mad(mad_list);

	opa = rdma_cap_opa_mad(qp_info->port_priv->device,
			       qp_info->port_priv->port_num);

	mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
				    mad_list);
	recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
	ib_dma_unmap_single(port_priv->device,
			    recv->header.mapping,
			    mad_priv_dma_size(recv),
			    DMA_FROM_DEVICE);

	/* Setup MAD receive work completion from "normal" work completion */
	recv->header.wc = *wc;
	recv->header.recv_wc.wc = &recv->header.wc;

	if (opa && ((struct ib_mad_hdr *)(recv->mad))->base_version == OPA_MGMT_BASE_VERSION) {
		recv->header.recv_wc.mad_len = wc->byte_len - sizeof(struct ib_grh);
		recv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
	} else {
		recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
		recv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
	}

	recv->header.recv_wc.recv_buf.mad = (struct ib_mad *)recv->mad;
	recv->header.recv_wc.recv_buf.grh = &recv->grh;

	if (atomic_read(&qp_info->snoop_count))
		snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);

	/* Validate MAD */
	if (!validate_mad((const struct ib_mad_hdr *)recv->mad, qp_info, opa))
		goto out;

	mad_size = recv->mad_size;
	response = alloc_mad_private(mad_size, GFP_KERNEL);
	if (!response)
		goto out;

	if (rdma_cap_ib_switch(port_priv->device))
		port_num = wc->port_num;
	else
		port_num = port_priv->port_num;

	if (((struct ib_mad_hdr *)recv->mad)->mgmt_class ==
	    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		if (handle_smi(port_priv, qp_info, wc, port_num, recv,
			       response, opa)
		    == IB_SMI_DISCARD)
			goto out;
	}

	/* Give driver "right of first refusal" on incoming MAD */
	if (port_priv->device->process_mad) {
		ret = port_priv->device->process_mad(port_priv->device, 0,
						     port_priv->port_num,
						     wc, &recv->grh,
						     (const struct ib_mad_hdr *)recv->mad,
						     recv->mad_size,
						     (struct ib_mad_hdr *)response->mad,
						     &mad_size, &resp_mad_pkey_index);

		if (opa)
			wc->pkey_index = resp_mad_pkey_index;

		if (ret & IB_MAD_RESULT_SUCCESS) {
			if (ret & IB_MAD_RESULT_CONSUMED)
				goto out;
			if (ret & IB_MAD_RESULT_REPLY) {
				agent_send_response((const struct ib_mad_hdr *)response->mad,
						    &recv->grh, wc,
						    port_priv->device,
						    port_num,
						    qp_info->qp->qp_num,
						    mad_size, opa);
				goto out;
			}
		}
	}

	mad_agent = find_mad_agent(port_priv, (const struct ib_mad_hdr *)recv->mad);
	if (mad_agent) {
		ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
		/*
		 * recv is freed up in error cases in ib_mad_complete_recv
		 * or via recv_handler in ib_mad_complete_recv()
		 */
		recv = NULL;
	} else if ((ret & IB_MAD_RESULT_SUCCESS) &&
		   generate_unmatched_resp(recv, response, &mad_size, opa)) {
		agent_send_response((const struct ib_mad_hdr *)response->mad, &recv->grh, wc,
				    port_priv->device, port_num,
				    qp_info->qp->qp_num, mad_size, opa);
	}

out:
	/* Post another receive request for this QP */
	if (response) {
		ib_mad_post_receive_mads(qp_info, response);
		kfree(recv);
	} else
		ib_mad_post_receive_mads(qp_info, recv);
}

static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long delay;

	if (list_empty(&mad_agent_priv->wait_list)) {
		cancel_delayed_work(&mad_agent_priv->timed_work);
	} else {
		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
					 struct ib_mad_send_wr_private,
					 agent_list);

		if (time_after(mad_agent_priv->timeout,
			       mad_send_wr->timeout)) {
			mad_agent_priv->timeout = mad_send_wr->timeout;
			delay = mad_send_wr->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
					 &mad_agent_priv->timed_work, delay);
		}
	}
}

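/*
 * Queue a sent MAD on the wait list, which is kept sorted by expiration
 * time so that the timeout worker only needs to examine the head.
 */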
static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *temp_mad_send_wr;
	struct list_head *list_item;
	unsigned long delay;

	mad_agent_priv = mad_send_wr->mad_agent_priv;
	list_del(&mad_send_wr->agent_list);

	delay = mad_send_wr->timeout;
	mad_send_wr->timeout += jiffies;

	if (delay) {
		list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
			temp_mad_send_wr = list_entry(list_item,
						struct ib_mad_send_wr_private,
						agent_list);
			if (time_after(mad_send_wr->timeout,
				       temp_mad_send_wr->timeout))
				break;
		}
	} else {
		list_item = &mad_agent_priv->wait_list;
	}
	list_add(&mad_send_wr->agent_list, list_item);

	/* Reschedule a work item if we have a shorter timeout */
	if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list)
		mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
				 &mad_agent_priv->timed_work, delay);
}

void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
			  int timeout_ms)
{
	mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
	wait_for_response(mad_send_wr);
}

/*
 * Process a send work completion
 */
void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
			     struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_mad_agent_private	*mad_agent_priv;
	unsigned long			flags;
	int				ret;

	mad_agent_priv = mad_send_wr->mad_agent_priv;
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
		ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
		if (ret == IB_RMPP_RESULT_CONSUMED)
			goto done;
	} else
		ret = IB_RMPP_RESULT_UNHANDLED;

	if (mad_send_wc->status != IB_WC_SUCCESS &&
	    mad_send_wr->status == IB_WC_SUCCESS) {
		mad_send_wr->status = mad_send_wc->status;
		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
	}

	if (--mad_send_wr->refcount > 0) {
		if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
		    mad_send_wr->status == IB_WC_SUCCESS) {
			wait_for_response(mad_send_wr);
		}
		goto done;
	}

	/* Remove send from MAD agent and notify client of completion */
	list_del(&mad_send_wr->agent_list);
	adjust_timeout(mad_agent_priv);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

	if (mad_send_wr->status != IB_WC_SUCCESS )
		mad_send_wc->status = mad_send_wr->status;
	if (ret == IB_RMPP_RESULT_INTERNAL)
		ib_rmpp_send_handler(mad_send_wc);
	else
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   mad_send_wc);

	/* Release reference on agent taken when sending */
	deref_mad_agent(mad_agent_priv);
	return;
done:
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}

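/*
 * Send completion handler: unmap the request buffers, complete the send
 * toward its agent, and post any send that was parked on the overflow
 * list.
 */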
static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_mad_port_private *port_priv = cq->cq_context;
	struct ib_mad_list_head *mad_list =
		container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
	struct ib_mad_send_wr_private	*mad_send_wr, *queued_send_wr;
	struct ib_mad_qp_info		*qp_info;
	struct ib_mad_queue		*send_queue;
	struct ib_send_wr		*bad_send_wr;
	struct ib_mad_send_wc		mad_send_wc;
	unsigned long flags;
	int ret;

	if (list_empty_careful(&port_priv->port_list))
		return;

	if (wc->status != IB_WC_SUCCESS) {
		if (!ib_mad_send_error(port_priv, wc))
			return;
	}

	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
				   mad_list);
	send_queue = mad_list->mad_queue;
	qp_info = send_queue->qp_info;

retry:
	ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
			    mad_send_wr->header_mapping,
			    mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
	ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
			    mad_send_wr->payload_mapping,
			    mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
	queued_send_wr = NULL;
	spin_lock_irqsave(&send_queue->lock, flags);
	list_del(&mad_list->list);

	/* Move queued send to the send queue */
	if (send_queue->count-- > send_queue->max_active) {
		mad_list = container_of(qp_info->overflow_list.next,
					struct ib_mad_list_head, list);
		queued_send_wr = container_of(mad_list,
					struct ib_mad_send_wr_private,
					mad_list);
		list_move_tail(&mad_list->list, &send_queue->list);
	}
	spin_unlock_irqrestore(&send_queue->lock, flags);

	mad_send_wc.send_buf = &mad_send_wr->send_buf;
	mad_send_wc.status = wc->status;
	mad_send_wc.vendor_err = wc->vendor_err;
	if (atomic_read(&qp_info->snoop_count))
		snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
			   IB_MAD_SNOOP_SEND_COMPLETIONS);
	ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);

	if (queued_send_wr) {
		ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr.wr,
				   &bad_send_wr);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"ib_post_send failed: %d\n", ret);
			mad_send_wr = queued_send_wr;
			wc->status = IB_WC_LOC_QP_OP_ERR;
			goto retry;
		}
	}
}

static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_list_head *mad_list;
	unsigned long flags;

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
		mad_send_wr = container_of(mad_list,
					   struct ib_mad_send_wr_private,
					   mad_list);
		mad_send_wr->retry = 1;
	}
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
}

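/*
 * Handle a send completion error.  Flushed sends are reposted once; any
 * other error means the QP dropped to SQE, so move it back to RTS and
 * mark the queued sends for retry.
 */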
static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
		struct ib_wc *wc)
{
	struct ib_mad_list_head *mad_list =
		container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
	struct ib_mad_qp_info *qp_info = mad_list->mad_queue->qp_info;
	struct ib_mad_send_wr_private *mad_send_wr;
	int ret;

	/*
	 * Send errors will transition the QP to SQE - move
	 * QP to RTS and repost flushed work requests
	 */
	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
				   mad_list);
	if (wc->status == IB_WC_WR_FLUSH_ERR) {
		if (mad_send_wr->retry) {
			/* Repost send */
			struct ib_send_wr *bad_send_wr;

			mad_send_wr->retry = 0;
			ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr.wr,
					&bad_send_wr);
			if (!ret)
				return false;
		}
	} else {
		struct ib_qp_attr *attr;

		/* Transition QP to RTS and fail offending send */
		attr = kmalloc(sizeof *attr, GFP_KERNEL);
		if (attr) {
			attr->qp_state = IB_QPS_RTS;
			attr->cur_qp_state = IB_QPS_SQE;
			ret = ib_modify_qp(qp_info->qp, attr,
					   IB_QP_STATE | IB_QP_CUR_STATE);
			kfree(attr);
			if (ret)
				dev_err(&port_priv->device->dev,
					"%s - ib_modify_qp to RTS: %d\n",
					__func__, ret);
			else
				mark_sends_for_retry(qp_info);
		}
	}

	return true;
}

static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
{
	unsigned long flags;
	struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	struct list_head cancel_list;

	INIT_LIST_HEAD(&cancel_list);

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
				 &mad_agent_priv->send_list, agent_list) {
		if (mad_send_wr->status == IB_WC_SUCCESS) {
			mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
			mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
		}
	}

	/* Empty wait list to prevent receives from finding a request */
	list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

	/* Report all cancelled requests */
	mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
	mad_send_wc.vendor_err = 0;

	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
				 &cancel_list, agent_list) {
		mad_send_wc.send_buf = &mad_send_wr->send_buf;
		list_del(&mad_send_wr->agent_list);
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);
		atomic_dec(&mad_agent_priv->refcount);
	}
}

static struct ib_mad_send_wr_private*
find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
	     struct ib_mad_send_buf *send_buf)
{
	struct ib_mad_send_wr_private *mad_send_wr;

	list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
			    agent_list) {
		if (&mad_send_wr->send_buf == send_buf)
			return mad_send_wr;
	}

	list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
			    agent_list) {
		if (is_rmpp_data_mad(mad_agent_priv,
				     mad_send_wr->send_buf.mad) &&
		    &mad_send_wr->send_buf == send_buf)
			return mad_send_wr;
	}
	return NULL;
}

int ib_modify_mad(struct ib_mad_agent *mad_agent,
		  struct ib_mad_send_buf *send_buf, u32 timeout_ms)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long flags;
	int active;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
				      agent);
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
	if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		return -EINVAL;
	}

	active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
	if (!timeout_ms) {
		mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
	}

	mad_send_wr->send_buf.timeout_ms = timeout_ms;
	if (active)
		mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
	else
		ib_reset_mad_timeout(mad_send_wr, timeout_ms);

	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	return 0;
}
EXPORT_SYMBOL(ib_modify_mad);

void ib_cancel_mad(struct ib_mad_agent *mad_agent,
		   struct ib_mad_send_buf *send_buf)
{
	ib_modify_mad(mad_agent, send_buf, 0);
}
EXPORT_SYMBOL(ib_cancel_mad);

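/*
 * Worker for MADs that were handled locally (sent to the local port):
 * deliver the receive side to the destination agent, then complete the
 * send back to the originator.
 */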
static void local_completions(struct work_struct *work)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_local_private *local;
	struct ib_mad_agent_private *recv_mad_agent;
	unsigned long flags;
	int free_mad;
	struct ib_wc wc;
	struct ib_mad_send_wc mad_send_wc;
	bool opa;

	mad_agent_priv =
		container_of(work, struct ib_mad_agent_private, local_work);

	opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
			       mad_agent_priv->qp_info->port_priv->port_num);

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	while (!list_empty(&mad_agent_priv->local_list)) {
		local = list_entry(mad_agent_priv->local_list.next,
				   struct ib_mad_local_private,
				   completion_list);
		list_del(&local->completion_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		free_mad = 0;
		if (local->mad_priv) {
			u8 base_version;
			recv_mad_agent = local->recv_mad_agent;
			if (!recv_mad_agent) {
				dev_err(&mad_agent_priv->agent.device->dev,
					"No receive MAD agent for local completion\n");
				free_mad = 1;
				goto local_send_completion;
			}

			/*
			 * Defined behavior is to complete response
			 * before request
			 */
			build_smp_wc(recv_mad_agent->agent.qp,
				     local->mad_send_wr->send_wr.wr.wr_cqe,
				     be16_to_cpu(IB_LID_PERMISSIVE),
				     local->mad_send_wr->send_wr.pkey_index,
				     recv_mad_agent->agent.port_num, &wc);

			local->mad_priv->header.recv_wc.wc = &wc;

			base_version = ((struct ib_mad_hdr *)(local->mad_priv->mad))->base_version;
			if (opa && base_version == OPA_MGMT_BASE_VERSION) {
				local->mad_priv->header.recv_wc.mad_len = local->return_wc_byte_len;
				local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
			} else {
				local->mad_priv->header.recv_wc.mad_len = sizeof(struct ib_mad);
				local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
			}

			INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
			list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
				 &local->mad_priv->header.recv_wc.rmpp_list);
			local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
			local->mad_priv->header.recv_wc.recv_buf.mad =
						(struct ib_mad *)local->mad_priv->mad;
			if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
				snoop_recv(recv_mad_agent->qp_info,
					  &local->mad_priv->header.recv_wc,
					   IB_MAD_SNOOP_RECVS);
			recv_mad_agent->agent.recv_handler(
						&recv_mad_agent->agent,
						&local->mad_send_wr->send_buf,
						&local->mad_priv->header.recv_wc);
			spin_lock_irqsave(&recv_mad_agent->lock, flags);
			atomic_dec(&recv_mad_agent->refcount);
			spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
		}

local_send_completion:
		/* Complete send */
		mad_send_wc.status = IB_WC_SUCCESS;
		mad_send_wc.vendor_err = 0;
		mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
		if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
			snoop_send(mad_agent_priv->qp_info,
				   &local->mad_send_wr->send_buf,
				   &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);

		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		atomic_dec(&mad_agent_priv->refcount);
		if (free_mad)
			kfree(local->mad_priv);
		kfree(local);
	}
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}

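/*
 * Retry a timed-out send if it has retries left.  Returns 0 when the MAD
 * has been requeued; nonzero means the caller should complete it with an
 * error status.
 */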
static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
{
	int ret;

	if (!mad_send_wr->retries_left)
		return -ETIMEDOUT;

	mad_send_wr->retries_left--;
	mad_send_wr->send_buf.retries++;

	mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);

	if (ib_mad_kernel_rmpp_agent(&mad_send_wr->mad_agent_priv->agent)) {
		ret = ib_retry_rmpp(mad_send_wr);
		switch (ret) {
		case IB_RMPP_RESULT_UNHANDLED:
			ret = ib_send_mad(mad_send_wr);
			break;
		case IB_RMPP_RESULT_CONSUMED:
			ret = 0;
			break;
		default:
			ret = -ECOMM;
			break;
		}
	} else
		ret = ib_send_mad(mad_send_wr);

	if (!ret) {
		mad_send_wr->refcount++;
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_send_wr->mad_agent_priv->send_list);
	}
	return ret;
}

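/*
 * Delayed work that expires the wait list: entries whose timeout lies in
 * the future reschedule the work, the rest are retried or completed with
 * IB_WC_RESP_TIMEOUT_ERR.
 */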
static void timeout_sends(struct work_struct *work)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags, delay;

	mad_agent_priv = container_of(work, struct ib_mad_agent_private,
				      timed_work.work);
	mad_send_wc.vendor_err = 0;

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	while (!list_empty(&mad_agent_priv->wait_list)) {
		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
					 struct ib_mad_send_wr_private,
					 agent_list);

		if (time_after(mad_send_wr->timeout, jiffies)) {
			delay = mad_send_wr->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			queue_delayed_work(mad_agent_priv->qp_info->
					   port_priv->wq,
					   &mad_agent_priv->timed_work, delay);
			break;
		}

		list_del(&mad_send_wr->agent_list);
		if (mad_send_wr->status == IB_WC_SUCCESS &&
		    !retry_send(mad_send_wr))
			continue;

		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		if (mad_send_wr->status == IB_WC_SUCCESS)
			mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
		else
			mad_send_wc.status = mad_send_wr->status;
		mad_send_wc.send_buf = &mad_send_wr->send_buf;
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);

		atomic_dec(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
	}
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}

/*
 * Allocate receive MADs and post receive WRs for them
 */
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad)
{
	unsigned long flags;
	int post, ret;
	struct ib_mad_private *mad_priv;
	struct ib_sge sg_list;
	struct ib_recv_wr recv_wr, *bad_recv_wr;
	struct ib_mad_queue *recv_queue = &qp_info->recv_queue;

	/* Initialize common scatter list fields */
	sg_list.lkey = qp_info->port_priv->pd->local_dma_lkey;

	/* Initialize common receive WR fields */
	recv_wr.next = NULL;
	recv_wr.sg_list = &sg_list;
	recv_wr.num_sge = 1;

	do {
		/* Allocate and map receive buffer */
		if (mad) {
			mad_priv = mad;
			mad = NULL;
		} else {
			mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv),
						     GFP_ATOMIC);
			if (!mad_priv) {
				ret = -ENOMEM;
				break;
			}
		}
		sg_list.length = mad_priv_dma_size(mad_priv);
		sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
						 &mad_priv->grh,
						 mad_priv_dma_size(mad_priv),
						 DMA_FROM_DEVICE);
		if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
						  sg_list.addr))) {
			ret = -ENOMEM;
			break;
		}
		mad_priv->header.mapping = sg_list.addr;
		mad_priv->header.mad_list.mad_queue = recv_queue;
		mad_priv->header.mad_list.cqe.done = ib_mad_recv_done;
		recv_wr.wr_cqe = &mad_priv->header.mad_list.cqe;

		/* Post receive WR */
		spin_lock_irqsave(&recv_queue->lock, flags);
		post = (++recv_queue->count < recv_queue->max_active);
		list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
		spin_unlock_irqrestore(&recv_queue->lock, flags);
		ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
		if (ret) {
			spin_lock_irqsave(&recv_queue->lock, flags);
			list_del(&mad_priv->header.mad_list.list);
			recv_queue->count--;
			spin_unlock_irqrestore(&recv_queue->lock, flags);
			ib_dma_unmap_single(qp_info->port_priv->device,
					    mad_priv->header.mapping,
					    mad_priv_dma_size(mad_priv),
					    DMA_FROM_DEVICE);
			kfree(mad_priv);
			dev_err(&qp_info->port_priv->device->dev,
				"ib_post_recv failed: %d\n", ret);
			break;
		}
	} while (post);

	return ret;
}

/*
 * Return all the posted receive MADs
 */
static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
{
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *recv;
	struct ib_mad_list_head *mad_list;

	if (!qp_info->qp)
		return;

	while (!list_empty(&qp_info->recv_queue.list)) {

		mad_list = list_entry(qp_info->recv_queue.list.next,
				      struct ib_mad_list_head, list);
		mad_priv_hdr = container_of(mad_list,
					    struct ib_mad_private_header,
					    mad_list);
		recv = container_of(mad_priv_hdr, struct ib_mad_private,
				    header);

		/* Remove from posted receive MAD list */
		list_del(&mad_list->list);

		ib_dma_unmap_single(qp_info->port_priv->device,
				    recv->header.mapping,
				    mad_priv_dma_size(recv),
				    DMA_FROM_DEVICE);
		kfree(recv);
	}

	qp_info->recv_queue.count = 0;
}

/*
 * Start the port
 */
static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
{
	int ret, i;
	struct ib_qp_attr *attr;
	struct ib_qp *qp;
	u16 pkey_index;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_pkey(port_priv->device, port_priv->port_num,
			   IB_DEFAULT_PKEY_FULL, &pkey_index);
	if (ret)
		pkey_index = 0;

	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
		qp = port_priv->qp_info[i].qp;
		if (!qp)
			continue;

		/*
		 * PKey index for QP1 is irrelevant but
		 * one is needed for the Reset to Init transition
		 */
		attr->qp_state = IB_QPS_INIT;
		attr->pkey_index = pkey_index;
		attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE |
					     IB_QP_PKEY_INDEX | IB_QP_QKEY);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't change QP%d state to INIT: %d\n",
				i, ret);
			goto out;
		}

		attr->qp_state = IB_QPS_RTR;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't change QP%d state to RTR: %d\n",
				i, ret);
			goto out;
		}

		attr->qp_state = IB_QPS_RTS;
		attr->sq_psn = IB_MAD_SEND_Q_PSN;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't change QP%d state to RTS: %d\n",
				i, ret);
			goto out;
		}
	}

	ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
	if (ret) {
		dev_err(&port_priv->device->dev,
			"Failed to request completion notification: %d\n",
			ret);
		goto out;
	}

	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
		if (!port_priv->qp_info[i].qp)
			continue;

		ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't post receive WRs\n");
			goto out;
		}
	}
out:
	kfree(attr);
	return ret;
}

static void qp_event_handler(struct ib_event *event, void *qp_context)
{
	struct ib_mad_qp_info	*qp_info = qp_context;

	/* It's worse than that! He's dead, Jim! */
	dev_err(&qp_info->port_priv->device->dev,
		"Fatal error (%d) on MAD QP (%d)\n",
		event->event, qp_info->qp->qp_num);
}

static void init_mad_queue(struct ib_mad_qp_info *qp_info,
			   struct ib_mad_queue *mad_queue)
{
	mad_queue->qp_info = qp_info;
	mad_queue->count = 0;
	spin_lock_init(&mad_queue->lock);
	INIT_LIST_HEAD(&mad_queue->list);
}

static void init_mad_qp(struct ib_mad_port_private *port_priv,
			struct ib_mad_qp_info *qp_info)
{
	qp_info->port_priv = port_priv;
	init_mad_queue(qp_info, &qp_info->send_queue);
	init_mad_queue(qp_info, &qp_info->recv_queue);
	INIT_LIST_HEAD(&qp_info->overflow_list);
	spin_lock_init(&qp_info->snoop_lock);
	qp_info->snoop_table = NULL;
	qp_info->snoop_table_size = 0;
	atomic_set(&qp_info->snoop_count, 0);
}

static int create_mad_qp(struct ib_mad_qp_info *qp_info,
			 enum ib_qp_type qp_type)
{
	struct ib_qp_init_attr	qp_init_attr;
	int ret;

	memset(&qp_init_attr, 0, sizeof qp_init_attr);
	qp_init_attr.send_cq = qp_info->port_priv->cq;
	qp_init_attr.recv_cq = qp_info->port_priv->cq;
	qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	qp_init_attr.cap.max_send_wr = mad_sendq_size;
	qp_init_attr.cap.max_recv_wr = mad_recvq_size;
	qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
	qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
	qp_init_attr.qp_type = qp_type;
	qp_init_attr.port_num = qp_info->port_priv->port_num;
	qp_init_attr.qp_context = qp_info;
	qp_init_attr.event_handler = qp_event_handler;
	qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
	if (IS_ERR(qp_info->qp)) {
		dev_err(&qp_info->port_priv->device->dev,
			"Couldn't create ib_mad QP%d\n",
			get_spl_qp_index(qp_type));
		ret = PTR_ERR(qp_info->qp);
		goto error;
	}
	/* Use minimum queue sizes unless the CQ is resized */
	qp_info->send_queue.max_active = mad_sendq_size;
	qp_info->recv_queue.max_active = mad_recvq_size;
	return 0;

error:
	return ret;
}

static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
{
	if (!qp_info->qp)
		return;

	ib_destroy_qp(qp_info->qp);
	kfree(qp_info->snoop_table);
}

/*
 * Open the port
 * Create the QP, PD, MR, and CQ if needed
 */
static int ib_mad_port_open(struct ib_device *device,
			    int port_num)
{
	int ret, cq_size;
	struct ib_mad_port_private *port_priv;
	unsigned long flags;
	char name[sizeof "ib_mad123"];
	int has_smi;

	if (WARN_ON(rdma_max_mad_size(device, port_num) < IB_MGMT_MAD_SIZE))
		return -EFAULT;

	if (WARN_ON(rdma_cap_opa_mad(device, port_num) &&
		    rdma_max_mad_size(device, port_num) < OPA_MGMT_MAD_SIZE))
		return -EFAULT;

	/* Create new device info */
	port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
	if (!port_priv)
		return -ENOMEM;

	port_priv->device = device;
	port_priv->port_num = port_num;
	spin_lock_init(&port_priv->reg_lock);
	INIT_LIST_HEAD(&port_priv->agent_list);
	init_mad_qp(port_priv, &port_priv->qp_info[0]);
	init_mad_qp(port_priv, &port_priv->qp_info[1]);

	cq_size = mad_sendq_size + mad_recvq_size;
	has_smi = rdma_cap_ib_smi(device, port_num);
	if (has_smi)
		cq_size *= 2;

	port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0,
			IB_POLL_WORKQUEUE);
	if (IS_ERR(port_priv->cq)) {
		dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
		ret = PTR_ERR(port_priv->cq);
		goto error3;
	}

	port_priv->pd = ib_alloc_pd(device, 0);
	if (IS_ERR(port_priv->pd)) {
		dev_err(&device->dev, "Couldn't create ib_mad PD\n");
		ret = PTR_ERR(port_priv->pd);
		goto error4;
	}

	if (has_smi) {
		ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
		if (ret)
			goto error6;
	}
	ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
	if (ret)
		goto error7;

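	/* Ordered per-port workqueue for deferred MAD work (e.g. send timeouts) */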
	snprintf(name, sizeof name, "ib_mad%d", port_num);
	port_priv->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
	if (!port_priv->wq) {
		ret = -ENOMEM;
		goto error8;
	}

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	list_add_tail(&port_priv->port_list, &ib_mad_port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	ret = ib_mad_port_start(port_priv);
	if (ret) {
		dev_err(&device->dev, "Couldn't start port\n");
		goto error9;
	}

	return 0;

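/* Error unwind: release everything acquired so far, newest first */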
error9:
	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	list_del_init(&port_priv->port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	destroy_workqueue(port_priv->wq);
error8:
	destroy_mad_qp(&port_priv->qp_info[1]);
error7:
	destroy_mad_qp(&port_priv->qp_info[0]);
error6:
	ib_dealloc_pd(port_priv->pd);
error4:
	ib_free_cq(port_priv->cq);
	cleanup_recv_queue(&port_priv->qp_info[1]);
	cleanup_recv_queue(&port_priv->qp_info[0]);
error3:
	kfree(port_priv);

	return ret;
}

/*
 * Close the port
 * If there are no classes using the port, free the port
 * resources (CQ, PD, QP, workqueue) and remove the port's info structure
 */
static int ib_mad_port_close(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *port_priv;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	port_priv = __ib_get_mad_port(device, port_num);
	if (!port_priv) {
		spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
		dev_err(&device->dev, "Port %d not found\n", port_num);
		return -ENODEV;
	}
	list_del_init(&port_priv->port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

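	/* Tear down in reverse order of ib_mad_port_open() */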
	destroy_workqueue(port_priv->wq);
	destroy_mad_qp(&port_priv->qp_info[1]);
	destroy_mad_qp(&port_priv->qp_info[0]);
	ib_dealloc_pd(port_priv->pd);
	ib_free_cq(port_priv->cq);
	cleanup_recv_queue(&port_priv->qp_info[1]);
	cleanup_recv_queue(&port_priv->qp_info[0]);
	/* XXX: Handle deallocation of MAD registration tables */

	kfree(port_priv);

	return 0;
}

static void ib_mad_init_device(struct ib_device *device)
{
	int start, i;

	start = rdma_start_port(device);

	for (i = start; i <= rdma_end_port(device); i++) {
		if (!rdma_cap_ib_mad(device, i))
			continue;

		if (ib_mad_port_open(device, i)) {
			dev_err(&device->dev, "Couldn't open port %d\n", i);
			goto error;
		}
		if (ib_agent_port_open(device, i)) {
			dev_err(&device->dev,
				"Couldn't open port %d for agents\n", i);
			goto error_agent;
		}
	}
	return;

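/*
 * error_agent closes the MAD port just opened for port i; error then
 * unwinds every earlier port that was fully opened.
 */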
error_agent:
	if (ib_mad_port_close(device, i))
		dev_err(&device->dev, "Couldn't close port %d\n", i);

error:
	while (--i >= start) {
		if (!rdma_cap_ib_mad(device, i))
			continue;

		if (ib_agent_port_close(device, i))
			dev_err(&device->dev,
				"Couldn't close port %d for agents\n", i);
		if (ib_mad_port_close(device, i))
			dev_err(&device->dev, "Couldn't close port %d\n", i);
	}
}

static void ib_mad_remove_device(struct ib_device *device, void *client_data)
{
	int i;

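	/* Agents are closed before the MAD port beneath them */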
	for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
		if (!rdma_cap_ib_mad(device, i))
			continue;

		if (ib_agent_port_close(device, i))
			dev_err(&device->dev,
				"Couldn't close port %d for agents\n", i);
		if (ib_mad_port_close(device, i))
			dev_err(&device->dev, "Couldn't close port %d\n", i);
	}
}

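/* Core client: add/remove callbacks run for every IB device */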
static struct ib_client mad_client = {
	.name   = "mad",
	.add = ib_mad_init_device,
	.remove = ib_mad_remove_device
};

int ib_mad_init(void)
{
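	/* Clamp the queue-size module parameters to the supported range */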
	mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
	mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);

	mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE);
	mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);

	INIT_LIST_HEAD(&ib_mad_port_list);

	if (ib_register_client(&mad_client)) {
		pr_err("Couldn't register ib_mad client\n");
		return -EINVAL;
	}

	return 0;
}

void ib_mad_cleanup(void)
{
	ib_unregister_client(&mad_client);
}