/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/security.h>
#include <linux/notifier.h>
#include <linux/hashtable.h>
#include <rdma/rdma_netlink.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include "core_priv.h"
#include "restrack.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("core kernel InfiniBand API");
MODULE_LICENSE("Dual BSD/GPL");

struct workqueue_struct *ib_comp_wq;
struct workqueue_struct *ib_comp_unbound_wq;
struct workqueue_struct *ib_wq;
EXPORT_SYMBOL_GPL(ib_wq);

/*
 * Each of the three rwsem locks (devices, clients, client_data) protects the
 * xarray of the same name. Specifically it allows the caller to assert that
 * the MARK will/will not be changing under the lock, and for devices and
 * clients, that the value in the xarray is still a valid pointer. Change of
 * the MARK is linked to the object state, so holding the lock and testing the
 * MARK also asserts that the contained object is in a certain state.
 *
 * This is used to build a two stage register/unregister flow where objects
 * can continue to be in the xarray even though they are still in progress to
 * register/unregister.
 *
 * The xarray itself provides additional locking, and restartable iteration,
 * which is also relied on.
 *
 * Locks should not be nested, with the exception of client_data, which is
 * allowed to nest under the read side of the other two locks.
 *
 * The devices_rwsem also protects the device name list, any change or
 * assignment of device name must also hold the write side to guarantee unique
 * names.
 */
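
/*
 * For example (a sketch; use_device() stands in for caller logic), a
 * reader that must only see registered devices takes the read side and
 * iterates the marked entries, as ib_policy_change_task() does below:
 *
 *	down_read(&devices_rwsem);
 *	xa_for_each_marked (&devices, index, device, DEVICE_REGISTERED)
 *		use_device(device);
 *	up_read(&devices_rwsem);
 */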

/*
 * devices contains devices that have had their names assigned. The
 * devices may not be registered. Users that care about the registration
 * status need to call ib_device_try_get() on the device to ensure it is
 * registered, and keep it registered, for the required duration.
 */
static DEFINE_XARRAY_FLAGS(devices, XA_FLAGS_ALLOC);
static DECLARE_RWSEM(devices_rwsem);
#define DEVICE_REGISTERED XA_MARK_1

static LIST_HEAD(client_list);
#define CLIENT_REGISTERED XA_MARK_1
static DEFINE_XARRAY_FLAGS(clients, XA_FLAGS_ALLOC);
static DECLARE_RWSEM(clients_rwsem);

/*
 * If client_data is registered then the corresponding client must also still
 * be registered.
 */
#define CLIENT_DATA_REGISTERED XA_MARK_1
/*
 * xarray has this behavior where it won't iterate over NULL values stored in
 * allocated arrays.  So we need our own iterator to see all values stored in
 * the array. This does the same thing as xa_for_each except that it also
 * returns NULL valued entries if the array is allocating. Simplified to only
 * work on simple xarrays.
 */
static void *xan_find_marked(struct xarray *xa, unsigned long *indexp,
			     xa_mark_t filter)
{
	XA_STATE(xas, xa, *indexp);
	void *entry;

	rcu_read_lock();
	do {
		entry = xas_find_marked(&xas, ULONG_MAX, filter);
		if (xa_is_zero(entry))
			break;
	} while (xas_retry(&xas, entry));
	rcu_read_unlock();

	if (entry) {
		*indexp = xas.xa_index;
		if (xa_is_zero(entry))
			return NULL;
		return entry;
	}
	return XA_ERROR(-ENOENT);
}
#define xan_for_each_marked(xa, index, entry, filter)                          \
	for (index = 0, entry = xan_find_marked(xa, &(index), filter);         \
	     !xa_is_err(entry);                                                \
	     (index)++, entry = xan_find_marked(xa, &(index), filter))
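
/*
 * Example usage (a sketch; handle() is a placeholder): walk every marked
 * slot, including NULL client_data values that plain xa_for_each would
 * skip, exactly as ib_get_net_dev_by_params() does below:
 *
 *	unsigned long index;
 *	void *client_data;
 *
 *	xan_for_each_marked (&device->client_data, index, client_data,
 *			     CLIENT_DATA_REGISTERED)
 *		handle(index, client_data);
 */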

/* RCU hash table mapping netdevice pointers to struct ib_port_data */
static DEFINE_SPINLOCK(ndev_hash_lock);
static DECLARE_HASHTABLE(ndev_hash, 5);

static void free_netdevs(struct ib_device *ib_dev);
static void ib_unregister_work(struct work_struct *work);
static void __ib_unregister_device(struct ib_device *device);
static int ib_security_change(struct notifier_block *nb, unsigned long event,
			      void *lsm_data);
static void ib_policy_change_task(struct work_struct *work);
static DECLARE_WORK(ib_policy_change_work, ib_policy_change_task);

static struct notifier_block ibdev_lsm_nb = {
	.notifier_call = ib_security_change,
};

/* Pointer to the RCU head at the start of the ib_port_data array */
struct ib_port_data_rcu {
	struct rcu_head rcu_head;
	struct ib_port_data pdata[];
};

static int ib_device_check_mandatory(struct ib_device *device)
{
#define IB_MANDATORY_FUNC(x) { offsetof(struct ib_device_ops, x), #x }
	static const struct {
		size_t offset;
		char  *name;
	} mandatory_table[] = {
		IB_MANDATORY_FUNC(query_device),
		IB_MANDATORY_FUNC(query_port),
		IB_MANDATORY_FUNC(query_pkey),
		IB_MANDATORY_FUNC(alloc_pd),
		IB_MANDATORY_FUNC(dealloc_pd),
		IB_MANDATORY_FUNC(create_qp),
		IB_MANDATORY_FUNC(modify_qp),
		IB_MANDATORY_FUNC(destroy_qp),
		IB_MANDATORY_FUNC(post_send),
		IB_MANDATORY_FUNC(post_recv),
		IB_MANDATORY_FUNC(create_cq),
		IB_MANDATORY_FUNC(destroy_cq),
		IB_MANDATORY_FUNC(poll_cq),
		IB_MANDATORY_FUNC(req_notify_cq),
		IB_MANDATORY_FUNC(get_dma_mr),
		IB_MANDATORY_FUNC(dereg_mr),
		IB_MANDATORY_FUNC(get_port_immutable)
	};
	int i;

	device->kverbs_provider = true;
	for (i = 0; i < ARRAY_SIZE(mandatory_table); ++i) {
		if (!*(void **) ((void *) &device->ops +
				 mandatory_table[i].offset)) {
			device->kverbs_provider = false;
			break;
		}
	}

	return 0;
}

/*
 * Caller must perform ib_device_put() to return the device reference count
 * when ib_device_get_by_index() returns valid device pointer.
 */
struct ib_device *ib_device_get_by_index(u32 index)
{
	struct ib_device *device;

	down_read(&devices_rwsem);
	device = xa_load(&devices, index);
	if (device) {
		if (!ib_device_try_get(device))
			device = NULL;
	}
	up_read(&devices_rwsem);
	return device;
}

/**
 * ib_device_put - Release IB device reference
 * @device: device whose reference to be released
 *
 * ib_device_put() releases reference to the IB device to allow it to be
 * unregistered and eventually free.
 */
void ib_device_put(struct ib_device *device)
{
	if (refcount_dec_and_test(&device->refcount))
		complete(&device->unreg_completion);
}
EXPORT_SYMBOL(ib_device_put);
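
/*
 * Sketch of the lookup/release pairing described above (use_device() is a
 * placeholder for the caller's logic):
 *
 *	struct ib_device *device = ib_device_get_by_index(index);
 *
 *	if (device) {
 *		use_device(device);
 *		ib_device_put(device);
 *	}
 */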

static struct ib_device *__ib_device_get_by_name(const char *name)
{
	struct ib_device *device;
	unsigned long index;

	xa_for_each (&devices, index, device)
		if (!strcmp(name, dev_name(&device->dev)))
			return device;

	return NULL;
}

/**
 * ib_device_get_by_name - Find an IB device by name
 * @name: The name to look for
 * @driver_id: The driver ID that must match (RDMA_DRIVER_UNKNOWN matches all)
 *
 * Find and hold an ib_device by its name. The caller must call
 * ib_device_put() on the returned pointer.
 */
struct ib_device *ib_device_get_by_name(const char *name,
					enum rdma_driver_id driver_id)
{
	struct ib_device *device;

	down_read(&devices_rwsem);
	device = __ib_device_get_by_name(name);
	if (device && driver_id != RDMA_DRIVER_UNKNOWN &&
	    device->driver_id != driver_id)
		device = NULL;

	if (device) {
		if (!ib_device_try_get(device))
			device = NULL;
	}
	up_read(&devices_rwsem);
	return device;
}
EXPORT_SYMBOL(ib_device_get_by_name);

int ib_device_rename(struct ib_device *ibdev, const char *name)
{
	int ret;

	down_write(&devices_rwsem);
	if (!strcmp(name, dev_name(&ibdev->dev))) {
		ret = 0;
		goto out;
	}

	if (__ib_device_get_by_name(name)) {
		ret = -EEXIST;
		goto out;
	}

	ret = device_rename(&ibdev->dev, name);
	if (ret)
		goto out;
	strlcpy(ibdev->name, name, IB_DEVICE_NAME_MAX);
out:
	up_write(&devices_rwsem);
	return ret;
}

static int alloc_name(struct ib_device *ibdev, const char *name)
{
	struct ib_device *device;
	unsigned long index;
	struct ida inuse;
	int rc;
	int i;

	lockdep_assert_held_exclusive(&devices_rwsem);
	ida_init(&inuse);
	xa_for_each (&devices, index, device) {
		char buf[IB_DEVICE_NAME_MAX];

		if (sscanf(dev_name(&device->dev), name, &i) != 1)
			continue;
		if (i < 0 || i >= INT_MAX)
			continue;
		snprintf(buf, sizeof buf, name, i);
		if (strcmp(buf, dev_name(&device->dev)) != 0)
			continue;

		rc = ida_alloc_range(&inuse, i, i, GFP_KERNEL);
		if (rc < 0)
			goto out;
	}

	rc = ida_alloc(&inuse, GFP_KERNEL);
	if (rc < 0)
		goto out;

	rc = dev_set_name(&ibdev->dev, name, rc);
out:
	ida_destroy(&inuse);
	return rc;
}
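
/*
 * Illustration: if "mlx5_0" and "mlx5_2" already exist, then for the
 * pattern "mlx5_%d" the loop above reserves indexes 0 and 2 in the ida,
 * ida_alloc() returns 1 as the lowest free index, and the device becomes
 * "mlx5_1". (The names here are purely illustrative.)
 */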

static void ib_device_release(struct device *device)
{
	struct ib_device *dev = container_of(device, struct ib_device, dev);

	free_netdevs(dev);
	WARN_ON(refcount_read(&dev->refcount));
	ib_cache_release_one(dev);
	ib_security_release_port_pkey_list(dev);
	xa_destroy(&dev->client_data);
	if (dev->port_data)
		kfree_rcu(container_of(dev->port_data, struct ib_port_data_rcu,
				       pdata[0]),
			  rcu_head);
	kfree_rcu(dev, rcu_head);
}

static int ib_device_uevent(struct device *device,
			    struct kobj_uevent_env *env)
{
	if (add_uevent_var(env, "NAME=%s", dev_name(device)))
		return -ENOMEM;

	/*
	 * It would be nice to pass the node GUID with the event...
	 */

	return 0;
}

static struct class ib_class = {
	.name    = "infiniband",
	.dev_release = ib_device_release,
	.dev_uevent = ib_device_uevent,
};

/**
 * _ib_alloc_device - allocate an IB device struct
 * @size:size of structure to allocate
 *
 * Low-level drivers should use ib_alloc_device() to allocate &struct
 * ib_device.  @size is the size of the structure to be allocated,
 * including any private data used by the low-level driver.
 * ib_dealloc_device() must be used to free structures allocated with
 * ib_alloc_device().
 */
struct ib_device *_ib_alloc_device(size_t size)
{
	struct ib_device *device;

	if (WARN_ON(size < sizeof(struct ib_device)))
		return NULL;

	device = kzalloc(size, GFP_KERNEL);
	if (!device)
		return NULL;

	if (rdma_restrack_init(device)) {
		kfree(device);
		return NULL;
	}

	device->dev.class = &ib_class;
	device->groups[0] = &ib_dev_attr_group;
	device->dev.groups = device->groups;
	device_initialize(&device->dev);

	INIT_LIST_HEAD(&device->event_handler_list);
	spin_lock_init(&device->event_handler_lock);
	mutex_init(&device->unregistration_lock);
	/*
	 * client_data needs to be an allocating xarray because we don't want
	 * our mark to be destroyed if the user stores NULL in the client data.
	 */
	xa_init_flags(&device->client_data, XA_FLAGS_ALLOC);
	init_rwsem(&device->client_data_rwsem);
	INIT_LIST_HEAD(&device->port_list);
	init_completion(&device->unreg_completion);
	INIT_WORK(&device->unregistration_work, ib_unregister_work);

	return device;
}
EXPORT_SYMBOL(_ib_alloc_device);

/**
 * ib_dealloc_device - free an IB device struct
 * @device:structure to free
 *
 * Free a structure allocated with ib_alloc_device().
 */
void ib_dealloc_device(struct ib_device *device)
{
	if (device->ops.dealloc_driver)
		device->ops.dealloc_driver(device);

	/*
	 * ib_unregister_driver() requires all devices to remain in the xarray
	 * while their ops are callable. The last op we call is dealloc_driver
	 * above.  This is needed to create a fence on op callbacks prior to
	 * allowing the driver module to unload.
	 */
	down_write(&devices_rwsem);
	if (xa_load(&devices, device->index) == device)
		xa_erase(&devices, device->index);
	up_write(&devices_rwsem);

	/* Expedite releasing netdev references */
	free_netdevs(device);

	WARN_ON(!xa_empty(&device->client_data));
	WARN_ON(refcount_read(&device->refcount));
	rdma_restrack_clean(device);
	/* Balances with device_initialize */
	put_device(&device->dev);
}
EXPORT_SYMBOL(ib_dealloc_device);

/*
 * add_client_context() and remove_client_context() must be safe against
 * parallel calls on the same device - registration/unregistration of both the
 * device and client can be occurring in parallel.
 *
 * The routines need to be a fence: any caller must not return until the add
 * or remove is fully completed.
 */
static int add_client_context(struct ib_device *device,
			      struct ib_client *client)
{
	int ret = 0;

	if (!device->kverbs_provider && !client->no_kverbs_req)
		return 0;

	down_write(&device->client_data_rwsem);
	/*
	 * Another caller to add_client_context got here first and has already
	 * completely initialized context.
	 */
	if (xa_get_mark(&device->client_data, client->client_id,
		    CLIENT_DATA_REGISTERED))
		goto out;

	ret = xa_err(xa_store(&device->client_data, client->client_id, NULL,
			      GFP_KERNEL));
	if (ret)
		goto out;
	downgrade_write(&device->client_data_rwsem);
	if (client->add)
		client->add(device);

	/* Readers shall not see a client until add has been completed */
	xa_set_mark(&device->client_data, client->client_id,
		    CLIENT_DATA_REGISTERED);
	up_read(&device->client_data_rwsem);
	return 0;

out:
	up_write(&device->client_data_rwsem);
	return ret;
}

static void remove_client_context(struct ib_device *device,
				  unsigned int client_id)
{
	struct ib_client *client;
	void *client_data;

	down_write(&device->client_data_rwsem);
	if (!xa_get_mark(&device->client_data, client_id,
			 CLIENT_DATA_REGISTERED)) {
		up_write(&device->client_data_rwsem);
		return;
	}
	client_data = xa_load(&device->client_data, client_id);
	xa_clear_mark(&device->client_data, client_id, CLIENT_DATA_REGISTERED);
	client = xa_load(&clients, client_id);
	downgrade_write(&device->client_data_rwsem);

	/*
	 * Notice we cannot be holding any exclusive locks when calling the
	 * remove callback as the remove callback can recurse back into any
	 * public functions in this module and thus try for any locks those
	 * functions take.
	 *
	 * For this reason clients and drivers should not call the
	 * unregistration functions while holding any locks.
	 *
	 * It is tempting to drop the client_data_rwsem too, but this is required
	 * to ensure that unregister_client does not return until all clients
	 * are completely unregistered, which is required to avoid module
	 * unloading races.
	 */
	if (client->remove)
		client->remove(device, client_data);

	xa_erase(&device->client_data, client_id);
	up_read(&device->client_data_rwsem);
}

static int alloc_port_data(struct ib_device *device)
{
	struct ib_port_data_rcu *pdata_rcu;
	unsigned int port;

	if (device->port_data)
		return 0;

	/* This can only be called once the physical port range is defined */
	if (WARN_ON(!device->phys_port_cnt))
		return -EINVAL;

	/*
	 * device->port_data is indexed directly by the port number to make
	 * access to this data as efficient as possible.
	 *
	 * Therefore port_data is declared as a 1 based array with potential
	 * empty slots at the beginning.
	 */
	pdata_rcu = kzalloc(struct_size(pdata_rcu, pdata,
					rdma_end_port(device) + 1),
			    GFP_KERNEL);
	if (!pdata_rcu)
		return -ENOMEM;
	/*
	 * The rcu_head is put in front of the port data array and the stored
	 * pointer is adjusted since we never need to see that member until
	 * kfree_rcu.
	 */
	device->port_data = pdata_rcu->pdata;

	rdma_for_each_port (device, port) {
		struct ib_port_data *pdata = &device->port_data[port];

		pdata->ib_dev = device;
		spin_lock_init(&pdata->pkey_list_lock);
		INIT_LIST_HEAD(&pdata->pkey_list);
		spin_lock_init(&pdata->netdev_lock);
		INIT_HLIST_NODE(&pdata->ndev_hash_link);
	}
	return 0;
}
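
/*
 * Resulting layout (illustrative; the first valid port is usually 1, so
 * pdata[0] is normally an empty slot):
 *
 *	pdata_rcu: [ rcu_head ][ pdata[0] ][ pdata[1] ] ... [ pdata[N] ]
 *	                        ^
 *	                        device->port_data
 *
 * Freeing recovers the rcu_head with container_of(), as done in
 * ib_device_release().
 */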

static int verify_immutable(const struct ib_device *dev, u8 port)
{
	return WARN_ON(!rdma_cap_ib_mad(dev, port) &&
			    rdma_max_mad_size(dev, port) != 0);
}

static int setup_port_data(struct ib_device *device)
{
	unsigned int port;
	int ret;

	ret = alloc_port_data(device);
	if (ret)
		return ret;

	rdma_for_each_port (device, port) {
		struct ib_port_data *pdata = &device->port_data[port];

		ret = device->ops.get_port_immutable(device, port,
						     &pdata->immutable);
		if (ret)
			return ret;

		if (verify_immutable(device, port))
			return -EINVAL;
	}
	return 0;
}

void ib_get_device_fw_str(struct ib_device *dev, char *str)
{
	if (dev->ops.get_dev_fw_str)
		dev->ops.get_dev_fw_str(dev, str);
	else
		str[0] = '\0';
}
EXPORT_SYMBOL(ib_get_device_fw_str);

static void ib_policy_change_task(struct work_struct *work)
{
	struct ib_device *dev;
	unsigned long index;

	down_read(&devices_rwsem);
	xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) {
		unsigned int i;

		rdma_for_each_port (dev, i) {
			u64 sp;
			int ret = ib_get_cached_subnet_prefix(dev,
							      i,
							      &sp);

			WARN_ONCE(ret,
				  "ib_get_cached_subnet_prefix err: %d, this should never happen here\n",
				  ret);
			if (!ret)
				ib_security_cache_change(dev, i, sp);
		}
	}
	up_read(&devices_rwsem);
}

static int ib_security_change(struct notifier_block *nb, unsigned long event,
			      void *lsm_data)
{
	if (event != LSM_POLICY_CHANGE)
		return NOTIFY_DONE;

	schedule_work(&ib_policy_change_work);
	ib_mad_agent_security_change();

	return NOTIFY_OK;
}

/*
 * Assign the unique string device name and the unique device index. This is
 * undone by ib_dealloc_device.
 */
static int assign_name(struct ib_device *device, const char *name)
{
	static u32 last_id;
	int ret;

	down_write(&devices_rwsem);
	/* Assign a unique name to the device */
	if (strchr(name, '%'))
		ret = alloc_name(device, name);
	else
		ret = dev_set_name(&device->dev, name);
	if (ret)
		goto out;

	if (__ib_device_get_by_name(dev_name(&device->dev))) {
		ret = -ENFILE;
		goto out;
	}
	strlcpy(device->name, dev_name(&device->dev), IB_DEVICE_NAME_MAX);

	/* Cyclically allocate a user visible ID for the device */
	device->index = last_id;
	ret = xa_alloc(&devices, &device->index, INT_MAX, device, GFP_KERNEL);
	if (ret == -ENOSPC) {
		device->index = 0;
		ret = xa_alloc(&devices, &device->index, INT_MAX, device,
			       GFP_KERNEL);
	}
	if (ret)
		goto out;
	last_id = device->index + 1;

	ret = 0;

out:
	up_write(&devices_rwsem);
	return ret;
}

static void setup_dma_device(struct ib_device *device)
{
	struct device *parent = device->dev.parent;

	WARN_ON_ONCE(device->dma_device);
	if (device->dev.dma_ops) {
		/*
		 * The caller provided custom DMA operations. Copy the
		 * DMA-related fields that are used by e.g. dma_alloc_coherent()
		 * into device->dev.
		 */
		device->dma_device = &device->dev;
		if (!device->dev.dma_mask) {
			if (parent)
				device->dev.dma_mask = parent->dma_mask;
			else
				WARN_ON_ONCE(true);
		}
		if (!device->dev.coherent_dma_mask) {
			if (parent)
				device->dev.coherent_dma_mask =
					parent->coherent_dma_mask;
			else
				WARN_ON_ONCE(true);
		}
	} else {
		/*
		 * The caller did not provide custom DMA operations. Use the
		 * DMA mapping operations of the parent device.
		 */
		WARN_ON_ONCE(!parent);
		device->dma_device = parent;
	}
}

/*
 * setup_device() allocates memory and sets up data that requires calling the
 * device ops, this is the only reason these actions are not done during
 * ib_alloc_device. It is undone by ib_dealloc_device().
 */
static int setup_device(struct ib_device *device)
{
	struct ib_udata uhw = {.outlen = 0, .inlen = 0};
	int ret;

	setup_dma_device(device);

	ret = ib_device_check_mandatory(device);
	if (ret)
		return ret;

	ret = setup_port_data(device);
	if (ret) {
		dev_warn(&device->dev, "Couldn't create per-port data\n");
		return ret;
	}

	memset(&device->attrs, 0, sizeof(device->attrs));
	ret = device->ops.query_device(device, &device->attrs, &uhw);
	if (ret) {
		dev_warn(&device->dev,
			 "Couldn't query the device attributes\n");
		return ret;
	}

	return 0;
}

static void disable_device(struct ib_device *device)
{
	struct ib_client *client;

	WARN_ON(!refcount_read(&device->refcount));

	down_write(&devices_rwsem);
	xa_clear_mark(&devices, device->index, DEVICE_REGISTERED);
	up_write(&devices_rwsem);

	down_read(&clients_rwsem);
	list_for_each_entry_reverse(client, &client_list, list)
		remove_client_context(device, client->client_id);
	up_read(&clients_rwsem);

	/* Pairs with refcount_set in enable_device */
	ib_device_put(device);
	wait_for_completion(&device->unreg_completion);

	/* Expedite removing unregistered pointers from the hash table */
	free_netdevs(device);
}

/*
 * An enabled device is visible to all clients and to all the public facing
 * APIs that return a device pointer. This always returns with a new get, even
 * if it fails.
 */
static int enable_device_and_get(struct ib_device *device)
{
	struct ib_client *client;
	unsigned long index;
	int ret = 0;

	/*
	 * One ref belongs to the xa and the other belongs to this
	 * thread. This is needed to guard against parallel unregistration.
	 */
	refcount_set(&device->refcount, 2);
	down_write(&devices_rwsem);
	xa_set_mark(&devices, device->index, DEVICE_REGISTERED);

	/*
	 * By using downgrade_write() we ensure that no other thread can clear
	 * DEVICE_REGISTERED while we are completing the client setup.
	 */
	downgrade_write(&devices_rwsem);

	if (device->ops.enable_driver) {
		ret = device->ops.enable_driver(device);
		if (ret)
			goto out;
	}

	down_read(&clients_rwsem);
	xa_for_each_marked (&clients, index, client, CLIENT_REGISTERED) {
		ret = add_client_context(device, client);
		if (ret)
			break;
	}
	up_read(&clients_rwsem);

out:
	up_read(&devices_rwsem);
	return ret;
}

/**
 * ib_register_device - Register an IB device with IB core
 * @device:Device to register
 *
 * Low-level drivers use ib_register_device() to register their
 * devices with the IB core.  All registered clients will receive a
 * callback for each device that is added. @device must be allocated
 * with ib_alloc_device().
 *
 * If the driver uses ops.dealloc_driver and calls any ib_unregister_device()
 * asynchronously then the device pointer may be freed as soon as this
 * function returns.
 */
int ib_register_device(struct ib_device *device, const char *name)
{
	int ret;

	ret = assign_name(device, name);
	if (ret)
		return ret;

	ret = setup_device(device);
	if (ret)
		return ret;

	ret = ib_cache_setup_one(device);
	if (ret) {
		dev_warn(&device->dev,
			 "Couldn't set up InfiniBand P_Key/GID cache\n");
		return ret;
	}

	ib_device_register_rdmacg(device);

	ret = device_add(&device->dev);
	if (ret)
		goto cg_cleanup;

	ret = ib_device_register_sysfs(device);
	if (ret) {
		dev_warn(&device->dev,
			 "Couldn't register device with driver model\n");
		goto dev_cleanup;
	}

	ret = enable_device_and_get(device);
	if (ret) {
		void (*dealloc_fn)(struct ib_device *);

		/*
		 * If we hit this error flow then we don't want to
		 * automatically dealloc the device since the caller is
		 * expected to call ib_dealloc_device() after
		 * ib_register_device() fails. This is tricky due to the
		 * possibility for a parallel unregistration along with this
		 * error flow. Since we have a refcount here we know any
		 * parallel flow is stopped in disable_device and will see the
		 * NULL pointers, causing the responsibility to
		 * ib_dealloc_device() to revert back to this thread.
		 */
		dealloc_fn = device->ops.dealloc_driver;
		device->ops.dealloc_driver = NULL;
		ib_device_put(device);
		__ib_unregister_device(device);
		device->ops.dealloc_driver = dealloc_fn;
		return ret;
	}
	ib_device_put(device);

	return 0;

dev_cleanup:
	device_del(&device->dev);
cg_cleanup:
	ib_device_unregister_rdmacg(device);
	ib_cache_cleanup_one(device);
	return ret;
}
EXPORT_SYMBOL(ib_register_device);
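
/*
 * Typical driver flow, as a sketch only (my_dev, my_ops and "my%d" are
 * hypothetical; ib_alloc_device() is the type-safe wrapper around
 * _ib_alloc_device() above):
 *
 *	struct my_dev *mdev = ib_alloc_device(my_dev, ibdev);
 *
 *	ib_set_device_ops(&mdev->ibdev, &my_ops);
 *	mdev->ibdev.phys_port_cnt = 1;
 *	ret = ib_register_device(&mdev->ibdev, "my%d");
 *	if (ret)
 *		ib_dealloc_device(&mdev->ibdev);
 */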

/* Callers must hold a get on the device. */
static void __ib_unregister_device(struct ib_device *ib_dev)
{
	/*
	 * We have a registration lock so that all the calls to unregister are
	 * fully fenced, once any unregister returns the device is truly
	 * unregistered even if multiple callers are unregistering it at the
	 * same time. This also interacts with the registration flow and
	 * provides sane semantics if register and unregister are racing.
	 */
	mutex_lock(&ib_dev->unregistration_lock);
	if (!refcount_read(&ib_dev->refcount))
		goto out;

	disable_device(ib_dev);
	ib_device_unregister_sysfs(ib_dev);
	device_del(&ib_dev->dev);
	ib_device_unregister_rdmacg(ib_dev);
	ib_cache_cleanup_one(ib_dev);

	/*
	 * Drivers using the new flow may not call ib_dealloc_device except
	 * in error unwind prior to registration success.
	 */
	if (ib_dev->ops.dealloc_driver) {
		WARN_ON(kref_read(&ib_dev->dev.kobj.kref) <= 1);
		ib_dealloc_device(ib_dev);
	}
out:
	mutex_unlock(&ib_dev->unregistration_lock);
}

/**
 * ib_unregister_device - Unregister an IB device
 * @ib_dev: The device to unregister
 *
 * Unregister an IB device.  All clients will receive a remove callback.
 *
 * Callers should call this routine only once, and protect against races with
 * registration. Typically it should only be called as part of a remove
 * callback in an implementation of driver core's struct device_driver and
 * related.
 *
 * If ops.dealloc_driver is used then ib_dev will be freed upon return from
 * this function.
 */
void ib_unregister_device(struct ib_device *ib_dev)
{
	get_device(&ib_dev->dev);
	__ib_unregister_device(ib_dev);
	put_device(&ib_dev->dev);
}
EXPORT_SYMBOL(ib_unregister_device);

/**
 * ib_unregister_device_and_put - Unregister a device while holding a 'get'
 * @ib_dev: The device to unregister
 *
 * This is the same as ib_unregister_device(), except it includes an internal
 * ib_device_put() that should match a 'get' obtained by the caller.
 *
 * It is safe to call this routine concurrently from multiple threads while
 * holding the 'get'. When the function returns the device is fully
 * unregistered.
 *
 * Drivers using this flow MUST use the driver_unregister callback to clean up
 * their resources associated with the device and dealloc it.
 */
void ib_unregister_device_and_put(struct ib_device *ib_dev)
{
	WARN_ON(!ib_dev->ops.dealloc_driver);
	get_device(&ib_dev->dev);
	ib_device_put(ib_dev);
	__ib_unregister_device(ib_dev);
	put_device(&ib_dev->dev);
}
EXPORT_SYMBOL(ib_unregister_device_and_put);

/**
 * ib_unregister_driver - Unregister all IB devices for a driver
 * @driver_id: The driver to unregister
 *
 * This implements a fence for device unregistration. It only returns once all
 * devices associated with the driver_id have fully completed their
 * unregistration and returned from ib_unregister_device*().
 *
 * If devices are not yet unregistered, it goes ahead and starts unregistering
 * them.
 *
 * This does not block creation of new devices with the given driver_id, that
 * is the responsibility of the caller.
 */
void ib_unregister_driver(enum rdma_driver_id driver_id)
{
	struct ib_device *ib_dev;
	unsigned long index;

	down_read(&devices_rwsem);
	xa_for_each (&devices, index, ib_dev) {
		if (ib_dev->driver_id != driver_id)
			continue;

		get_device(&ib_dev->dev);
		up_read(&devices_rwsem);

		WARN_ON(!ib_dev->ops.dealloc_driver);
		__ib_unregister_device(ib_dev);

		put_device(&ib_dev->dev);
		down_read(&devices_rwsem);
	}
	up_read(&devices_rwsem);
}
EXPORT_SYMBOL(ib_unregister_driver);

static void ib_unregister_work(struct work_struct *work)
{
	struct ib_device *ib_dev =
		container_of(work, struct ib_device, unregistration_work);

	__ib_unregister_device(ib_dev);
	put_device(&ib_dev->dev);
}

/**
 * ib_unregister_device_queued - Unregister a device using a work queue
 * @ib_dev: The device to unregister
 *
 * This schedules an asynchronous unregistration using a WQ for the device. A
 * driver should use this to avoid holding locks while doing unregistration,
 * such as holding the RTNL lock.
 *
 * Drivers using this API must use ib_unregister_driver before module unload
 * to ensure that all scheduled unregistrations have completed.
 */
void ib_unregister_device_queued(struct ib_device *ib_dev)
{
	WARN_ON(!refcount_read(&ib_dev->refcount));
	WARN_ON(!ib_dev->ops.dealloc_driver);
	get_device(&ib_dev->dev);
	if (!queue_work(system_unbound_wq, &ib_dev->unregistration_work))
		put_device(&ib_dev->dev);
}
EXPORT_SYMBOL(ib_unregister_device_queued);
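
/*
 * Usage sketch: queue the unregistration from a context that holds locks
 * (for example a netdev notifier under RTNL), then fence everything at
 * module unload (RDMA_DRIVER_MY is hypothetical):
 *
 *	ib_unregister_device_queued(ib_dev);
 *	...
 *	static void __exit my_exit(void)
 *	{
 *		ib_unregister_driver(RDMA_DRIVER_MY);
 *	}
 */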

static int assign_client_id(struct ib_client *client)
{
	int ret;

	down_write(&clients_rwsem);
	/*
	 * The add/remove callbacks must be called in FIFO/LIFO order. To
	 * achieve this we assign client_ids so they are sorted in
	 * registration order, and retain a linked list we can reverse iterate
	 * to get the LIFO order. The extra linked list can go away if xarray
	 * learns to reverse iterate.
	 */
	if (list_empty(&client_list))
		client->client_id = 0;
	else
		client->client_id =
			list_last_entry(&client_list, struct ib_client, list)
				->client_id;
	ret = xa_alloc(&clients, &client->client_id, INT_MAX, client,
		       GFP_KERNEL);
	if (ret)
		goto out;

	xa_set_mark(&clients, client->client_id, CLIENT_REGISTERED);
	list_add_tail(&client->list, &client_list);

out:
	up_write(&clients_rwsem);
	return ret;
}

/**
 * ib_register_client - Register an IB client
 * @client:Client to register
 *
 * Upper level users of the IB drivers can use ib_register_client() to
 * register callbacks for IB device addition and removal.  When an IB
 * device is added, each registered client's add method will be called
 * (in the order the clients were registered), and when a device is
 * removed, each client's remove method will be called (in the reverse
 * order that clients were registered).  In addition, when
 * ib_register_client() is called, the client will receive an add
 * callback for all devices already registered.
 */
int ib_register_client(struct ib_client *client)
{
	struct ib_device *device;
	unsigned long index;
	int ret;

	ret = assign_client_id(client);
	if (ret)
		return ret;

	down_read(&devices_rwsem);
	xa_for_each_marked (&devices, index, device, DEVICE_REGISTERED) {
		ret = add_client_context(device, client);
		if (ret) {
			up_read(&devices_rwsem);
			ib_unregister_client(client);
			return ret;
		}
	}
	up_read(&devices_rwsem);
	return 0;
}
EXPORT_SYMBOL(ib_register_client);
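
/*
 * A minimal client, for illustration only (all my_* names are
 * hypothetical):
 *
 *	static void my_add(struct ib_device *device)
 *	{
 *		ib_set_client_data(device, &my_client, my_alloc(device));
 *	}
 *
 *	static void my_remove(struct ib_device *device, void *client_data)
 *	{
 *		my_free(client_data);
 *	}
 *
 *	static struct ib_client my_client = {
 *		.name   = "my_client",
 *		.add    = my_add,
 *		.remove = my_remove,
 *	};
 *
 * ib_register_client(&my_client) then invokes my_add() for every device
 * already registered, and ib_unregister_client() undoes it in reverse.
 */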

/**
 * ib_unregister_client - Unregister an IB client
 * @client:Client to unregister
 *
 * Upper level users use ib_unregister_client() to remove their client
 * registration.  When ib_unregister_client() is called, the client
 * will receive a remove callback for each IB device still registered.
 *
 * This is a full fence, once it returns no client callbacks will be called,
 * or are running in another thread.
 */
void ib_unregister_client(struct ib_client *client)
{
	struct ib_device *device;
	unsigned long index;

	down_write(&clients_rwsem);
	xa_clear_mark(&clients, client->client_id, CLIENT_REGISTERED);
	up_write(&clients_rwsem);
	/*
	 * Every device still known must be serialized to make sure we are
	 * done with the client callbacks before we return.
	 */
	down_read(&devices_rwsem);
	xa_for_each (&devices, index, device)
		remove_client_context(device, client->client_id);
	up_read(&devices_rwsem);

	down_write(&clients_rwsem);
	list_del(&client->list);
	xa_erase(&clients, client->client_id);
	up_write(&clients_rwsem);
}
EXPORT_SYMBOL(ib_unregister_client);

/**
 * ib_set_client_data - Set IB client context
 * @device:Device to set context for
 * @client:Client to set context for
 * @data:Context to set
 *
 * ib_set_client_data() sets client context data that can be retrieved with
 * ib_get_client_data(). This can only be called while the client is
 * registered to the device, once the ib_client remove() callback returns this
 * cannot be called.
 */
void ib_set_client_data(struct ib_device *device, struct ib_client *client,
			void *data)
{
	void *rc;

	if (WARN_ON(IS_ERR(data)))
		data = NULL;

	rc = xa_store(&device->client_data, client->client_id, data,
		      GFP_KERNEL);
	WARN_ON(xa_is_err(rc));
}
EXPORT_SYMBOL(ib_set_client_data);

/**
 * ib_register_event_handler - Register an IB event handler
 * @event_handler:Handler to register
 *
 * ib_register_event_handler() registers an event handler that will be
 * called back when asynchronous IB events occur (as defined in
 * chapter 11 of the InfiniBand Architecture Specification).  This
 * callback may occur in interrupt context.
 */
void ib_register_event_handler(struct ib_event_handler *event_handler)
{
	unsigned long flags;

	spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
	list_add_tail(&event_handler->list,
		      &event_handler->device->event_handler_list);
	spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);
}
EXPORT_SYMBOL(ib_register_event_handler);

/**
 * ib_unregister_event_handler - Unregister an event handler
 * @event_handler:Handler to unregister
 *
 * Unregister an event handler registered with
 * ib_register_event_handler().
 */
void ib_unregister_event_handler(struct ib_event_handler *event_handler)
{
	unsigned long flags;

	spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
	list_del(&event_handler->list);
	spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);
}
EXPORT_SYMBOL(ib_unregister_event_handler);

/**
 * ib_dispatch_event - Dispatch an asynchronous event
 * @event:Event to dispatch
 *
 * Low-level drivers must call ib_dispatch_event() to dispatch the
 * event to all registered event handlers when an asynchronous event
 * occurs.
 */
void ib_dispatch_event(struct ib_event *event)
{
	unsigned long flags;
	struct ib_event_handler *handler;

	spin_lock_irqsave(&event->device->event_handler_lock, flags);

	list_for_each_entry(handler, &event->device->event_handler_list, list)
		handler->handler(handler, event);

	spin_unlock_irqrestore(&event->device->event_handler_lock, flags);
}
EXPORT_SYMBOL(ib_dispatch_event);

/**
 * ib_query_port - Query IB port attributes
 * @device:Device to query
 * @port_num:Port number to query
 * @port_attr:Port attributes
 *
 * ib_query_port() returns the attributes of a port through the
 * @port_attr pointer.
 */
int ib_query_port(struct ib_device *device,
		  u8 port_num,
		  struct ib_port_attr *port_attr)
{
	union ib_gid gid;
	int err;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	memset(port_attr, 0, sizeof(*port_attr));
	err = device->ops.query_port(device, port_num, port_attr);
	if (err || port_attr->subnet_prefix)
		return err;

	if (rdma_port_get_link_layer(device, port_num) != IB_LINK_LAYER_INFINIBAND)
		return 0;

	err = device->ops.query_gid(device, port_num, 0, &gid);
	if (err)
		return err;

	port_attr->subnet_prefix = be64_to_cpu(gid.global.subnet_prefix);
	return 0;
}
EXPORT_SYMBOL(ib_query_port);

static void add_ndev_hash(struct ib_port_data *pdata)
{
	unsigned long flags;

	might_sleep();

	spin_lock_irqsave(&ndev_hash_lock, flags);
	if (hash_hashed(&pdata->ndev_hash_link)) {
		hash_del_rcu(&pdata->ndev_hash_link);
		spin_unlock_irqrestore(&ndev_hash_lock, flags);
		/*
		 * We cannot do hash_add_rcu after a hash_del_rcu until the
		 * grace period
		 */
		synchronize_rcu();
		spin_lock_irqsave(&ndev_hash_lock, flags);
	}
	if (pdata->netdev)
		hash_add_rcu(ndev_hash, &pdata->ndev_hash_link,
			     (uintptr_t)pdata->netdev);
	spin_unlock_irqrestore(&ndev_hash_lock, flags);
}

/**
 * ib_device_set_netdev - Associate the ib_dev with an underlying net_device
 * @ib_dev: Device to modify
 * @ndev: net_device to affiliate, may be NULL
 * @port: IB port the net_device is connected to
 *
 * Drivers should use this to link the ib_device to a netdev so the netdev
 * shows up in interfaces like ib_enum_roce_netdev. Only one netdev may be
 * affiliated with any port.
 *
 * The caller must ensure that the given ndev is not unregistered or
 * unregistering, and that either the ib_device is unregistered or
 * ib_device_set_netdev() is called with NULL when the ndev sends a
 * NETDEV_UNREGISTER event.
 */
int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
			 unsigned int port)
{
	struct net_device *old_ndev;
	struct ib_port_data *pdata;
	unsigned long flags;
	int ret;

	/*
	 * Drivers wish to call this before ib_register_driver, so we have to
	 * setup the port data early.
	 */
	ret = alloc_port_data(ib_dev);
	if (ret)
		return ret;

	if (!rdma_is_port_valid(ib_dev, port))
		return -EINVAL;

	pdata = &ib_dev->port_data[port];
	spin_lock_irqsave(&pdata->netdev_lock, flags);
	old_ndev = rcu_dereference_protected(
		pdata->netdev, lockdep_is_held(&pdata->netdev_lock));
	if (old_ndev == ndev) {
		spin_unlock_irqrestore(&pdata->netdev_lock, flags);
		return 0;
	}

	if (ndev)
		dev_hold(ndev);
	rcu_assign_pointer(pdata->netdev, ndev);
	spin_unlock_irqrestore(&pdata->netdev_lock, flags);

	add_ndev_hash(pdata);
	if (old_ndev)
		dev_put(old_ndev);

	return 0;
}
EXPORT_SYMBOL(ib_device_set_netdev);

static void free_netdevs(struct ib_device *ib_dev)
{
	unsigned long flags;
	unsigned int port;

	rdma_for_each_port (ib_dev, port) {
		struct ib_port_data *pdata = &ib_dev->port_data[port];
		struct net_device *ndev;

		spin_lock_irqsave(&pdata->netdev_lock, flags);
		ndev = rcu_dereference_protected(
			pdata->netdev, lockdep_is_held(&pdata->netdev_lock));
		if (ndev) {
			spin_lock(&ndev_hash_lock);
			hash_del_rcu(&pdata->ndev_hash_link);
			spin_unlock(&ndev_hash_lock);

			/*
			 * If this is the last dev_put there is still a
			 * synchronize_rcu before the netdev is kfreed, so we
			 * can continue to rely on unlocked pointer
			 * comparisons after the put
			 */
			rcu_assign_pointer(pdata->netdev, NULL);
			dev_put(ndev);
		}
		spin_unlock_irqrestore(&pdata->netdev_lock, flags);
	}
}

struct net_device *ib_device_get_netdev(struct ib_device *ib_dev,
					unsigned int port)
{
	struct ib_port_data *pdata;
	struct net_device *res;

	if (!rdma_is_port_valid(ib_dev, port))
		return NULL;

	pdata = &ib_dev->port_data[port];

	/*
	 * New drivers should use ib_device_set_netdev() not the legacy
	 * get_netdev().
	 */
	if (ib_dev->ops.get_netdev)
		res = ib_dev->ops.get_netdev(ib_dev, port);
	else {
		spin_lock(&pdata->netdev_lock);
		res = rcu_dereference_protected(
			pdata->netdev, lockdep_is_held(&pdata->netdev_lock));
		if (res)
			dev_hold(res);
		spin_unlock(&pdata->netdev_lock);
	}

	/*
	 * If we are starting to unregister expedite things by preventing
	 * propagation of an unregistering netdev.
	 */
	if (res && res->reg_state != NETREG_REGISTERED) {
		dev_put(res);
		return NULL;
	}

	return res;
}

/**
 * ib_device_get_by_netdev - Find an IB device associated with a netdev
 * @ndev: netdev to locate
 * @driver_id: The driver ID that must match (RDMA_DRIVER_UNKNOWN matches all)
 *
 * Find and hold an ib_device that is associated with a netdev via
 * ib_device_set_netdev(). The caller must call ib_device_put() on the
 * returned pointer.
 */
struct ib_device *ib_device_get_by_netdev(struct net_device *ndev,
					  enum rdma_driver_id driver_id)
{
	struct ib_device *res = NULL;
	struct ib_port_data *cur;

	rcu_read_lock();
	hash_for_each_possible_rcu (ndev_hash, cur, ndev_hash_link,
				    (uintptr_t)ndev) {
		if (rcu_access_pointer(cur->netdev) == ndev &&
		    (driver_id == RDMA_DRIVER_UNKNOWN ||
		     cur->ib_dev->driver_id == driver_id) &&
		    ib_device_try_get(cur->ib_dev)) {
			res = cur->ib_dev;
			break;
		}
	}
	rcu_read_unlock();

	return res;
}
EXPORT_SYMBOL(ib_device_get_by_netdev);
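
/*
 * Lookup sketch: resolve the RDMA device affiliated with a netdev and
 * drop the reference when done (use_device() is a placeholder):
 *
 *	struct ib_device *ibdev =
 *		ib_device_get_by_netdev(ndev, RDMA_DRIVER_UNKNOWN);
 *
 *	if (ibdev) {
 *		use_device(ibdev);
 *		ib_device_put(ibdev);
 *	}
 */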

/**
 * ib_enum_roce_netdev - enumerate all RoCE ports
 * @ib_dev : IB device we want to query
 * @filter: Should we call the callback?
 * @filter_cookie: Cookie passed to filter
 * @cb: Callback to call for each found RoCE ports
 * @cookie: Cookie passed back to the callback
 *
 * Enumerates all of the physical RoCE ports of ib_dev
 * which are related to netdevice and calls callback() on each
 * device for which filter() function returns non zero.
 */
void ib_enum_roce_netdev(struct ib_device *ib_dev,
			 roce_netdev_filter filter,
			 void *filter_cookie,
			 roce_netdev_callback cb,
			 void *cookie)
{
	unsigned int port;

	rdma_for_each_port (ib_dev, port)
		if (rdma_protocol_roce(ib_dev, port)) {
			struct net_device *idev =
				ib_device_get_netdev(ib_dev, port);

			if (filter(ib_dev, port, idev, filter_cookie))
				cb(ib_dev, port, idev, cookie);

			if (idev)
				dev_put(idev);
		}
}

/**
 * ib_enum_all_roce_netdevs - enumerate all RoCE devices
 * @filter: Should we call the callback?
 * @filter_cookie: Cookie passed to filter
 * @cb: Callback to call for each found RoCE ports
 * @cookie: Cookie passed back to the callback
 *
 * Enumerates all RoCE devices' physical ports which are related
 * to netdevices and calls callback() on each device for which
 * filter() function returns non zero.
 */
void ib_enum_all_roce_netdevs(roce_netdev_filter filter,
			      void *filter_cookie,
			      roce_netdev_callback cb,
			      void *cookie)
{
	struct ib_device *dev;
	unsigned long index;

	down_read(&devices_rwsem);
	xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED)
		ib_enum_roce_netdev(dev, filter, filter_cookie, cb, cookie);
	up_read(&devices_rwsem);
}

/**
 * ib_enum_all_devs - enumerate all ib_devices
 * @cb: Callback to call for each found ib_device
 *
 * Enumerates all ib_devices and calls callback() on each device.
 */
int ib_enum_all_devs(nldev_callback nldev_cb, struct sk_buff *skb,
		     struct netlink_callback *cb)
{
	unsigned long index;
	struct ib_device *dev;
	unsigned int idx = 0;
	int ret = 0;

	down_read(&devices_rwsem);
	xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) {
		ret = nldev_cb(dev, skb, cb, idx);
		if (ret)
			break;
		idx++;
	}
	up_read(&devices_rwsem);
	return ret;
}

/**
 * ib_query_pkey - Get P_Key table entry
 * @device:Device to query
 * @port_num:Port number to query
 * @index:P_Key table index to query
 * @pkey:Returned P_Key
 *
 * ib_query_pkey() fetches the specified P_Key table entry.
 */
int ib_query_pkey(struct ib_device *device,
		  u8 port_num, u16 index, u16 *pkey)
{
	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	return device->ops.query_pkey(device, port_num, index, pkey);
}
EXPORT_SYMBOL(ib_query_pkey);

/**
 * ib_modify_device - Change IB device attributes
 * @device:Device to modify
 * @device_modify_mask:Mask of attributes to change
 * @device_modify:New attribute values
 *
 * ib_modify_device() changes a device's attributes as specified by
 * the @device_modify_mask and @device_modify structure.
 */
int ib_modify_device(struct ib_device *device,
		     int device_modify_mask,
		     struct ib_device_modify *device_modify)
{
	if (!device->ops.modify_device)
		return -ENOSYS;

	return device->ops.modify_device(device, device_modify_mask,
					 device_modify);
}
EXPORT_SYMBOL(ib_modify_device);

/**
 * ib_modify_port - Modifies the attributes for the specified port.
 * @device: The device to modify.
 * @port_num: The number of the port to modify.
 * @port_modify_mask: Mask used to specify which attributes of the port
 *   to change.
 * @port_modify: New attribute values for the port.
 *
 * ib_modify_port() changes a port's attributes as specified by the
 * @port_modify_mask and @port_modify structure.
 */
int ib_modify_port(struct ib_device *device,
		   u8 port_num, int port_modify_mask,
		   struct ib_port_modify *port_modify)
{
	int rc;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	if (device->ops.modify_port)
		rc = device->ops.modify_port(device, port_num,
					     port_modify_mask,
					     port_modify);
	else
		rc = rdma_protocol_roce(device, port_num) ? 0 : -ENOSYS;
	return rc;
}
EXPORT_SYMBOL(ib_modify_port);

/**
 * ib_find_gid - Returns the port number and GID table index where
 *   a specified GID value occurs. It searches only the IB link layer.
 * @device: The device to query.
 * @gid: The GID value to search for.
 * @port_num: The port number of the device where the GID value was found.
 * @index: The index into the GID table where the GID was found.  This
 *   parameter may be NULL.
 */
int ib_find_gid(struct ib_device *device, union ib_gid *gid,
		u8 *port_num, u16 *index)
{
	union ib_gid tmp_gid;
	unsigned int port;
	int ret, i;

	rdma_for_each_port (device, port) {
		if (!rdma_protocol_ib(device, port))
			continue;

		for (i = 0; i < device->port_data[port].immutable.gid_tbl_len;
		     ++i) {
			ret = rdma_query_gid(device, port, i, &tmp_gid);
			if (ret)
				return ret;
			if (!memcmp(&tmp_gid, gid, sizeof *gid)) {
				*port_num = port;
				if (index)
					*index = i;
				return 0;
			}
		}
	}

	return -ENOENT;
}
EXPORT_SYMBOL(ib_find_gid);

/**
 * ib_find_pkey - Returns the PKey table index where a specified
 *   PKey value occurs.
 * @device: The device to query.
 * @port_num: The port number of the device to search for the PKey.
 * @pkey: The PKey value to search for.
 * @index: The index into the PKey table where the PKey was found.
 */
int ib_find_pkey(struct ib_device *device,
		 u8 port_num, u16 pkey, u16 *index)
{
	int ret, i;
	u16 tmp_pkey;
	int partial_ix = -1;

	for (i = 0; i < device->port_data[port_num].immutable.pkey_tbl_len;
	     ++i) {
		ret = ib_query_pkey(device, port_num, i, &tmp_pkey);
		if (ret)
			return ret;
		if ((pkey & 0x7fff) == (tmp_pkey & 0x7fff)) {
			/* if there is a full-member pkey, take it. */
			if (tmp_pkey & 0x8000) {
				*index = i;
				return 0;
			}
			if (partial_ix < 0)
				partial_ix = i;
		}
	}

	/* no full-member; if a limited one exists, take it */
	if (partial_ix >= 0) {
		*index = partial_ix;
		return 0;
	}
	return -ENOENT;
}
EXPORT_SYMBOL(ib_find_pkey);
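
/*
 * Worked example: the high bit of a P_Key encodes full membership, so a
 * search for pkey 0x8001 matches table entries 0x8001 (full member) and
 * 0x0001 (limited member), since both agree in the low 15 bits. The
 * full-member entry wins; the limited one is only returned as a fallback.
 */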

/**
 * ib_get_net_dev_by_params() - Return the appropriate net_dev
 * for a received CM request
 * @dev:	An RDMA device on which the request has been received.
 * @port:	Port number on the RDMA device.
 * @pkey:	The Pkey the request came on.
 * @gid:	A GID that the net_dev uses to communicate.
 * @addr:	Contains the IP address that the request specified as its
 *		destination.
 */
struct net_device *ib_get_net_dev_by_params(struct ib_device *dev,
					    u8 port,
					    u16 pkey,
					    const union ib_gid *gid,
					    const struct sockaddr *addr)
{
	struct net_device *net_dev = NULL;
	unsigned long index;
	void *client_data;

	if (!rdma_protocol_ib(dev, port))
		return NULL;

	/*
	 * Holding the read side guarantees that the client will not become
	 * unregistered while we are calling get_net_dev_by_params()
	 */
	down_read(&dev->client_data_rwsem);
	xan_for_each_marked (&dev->client_data, index, client_data,
			     CLIENT_DATA_REGISTERED) {
		struct ib_client *client = xa_load(&clients, index);

		if (!client || !client->get_net_dev_by_params)
			continue;

		net_dev = client->get_net_dev_by_params(dev, port, pkey, gid,
							addr, client_data);
		if (net_dev)
			break;
	}
	up_read(&dev->client_data_rwsem);

	return net_dev;
}
EXPORT_SYMBOL(ib_get_net_dev_by_params);

void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
{
K
Kamal Heib 已提交
1732
	struct ib_device_ops *dev_ops = &dev->ops;
K
Kamal Heib 已提交
1733 1734 1735 1736 1737 1738 1739
#define SET_DEVICE_OP(ptr, name)                                               \
	do {                                                                   \
		if (ops->name)                                                 \
			if (!((ptr)->name))				       \
				(ptr)->name = ops->name;                       \
	} while (0)

1740 1741
#define SET_OBJ_SIZE(ptr, name) SET_DEVICE_OP(ptr, size_##name)

K
Kamal Heib 已提交
1742
	SET_DEVICE_OP(dev_ops, add_gid);
1743
	SET_DEVICE_OP(dev_ops, advise_mr);
K
Kamal Heib 已提交
1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 1754 1755 1756 1757 1758 1759 1760 1761 1762 1763 1764
	SET_DEVICE_OP(dev_ops, alloc_dm);
	SET_DEVICE_OP(dev_ops, alloc_fmr);
	SET_DEVICE_OP(dev_ops, alloc_hw_stats);
	SET_DEVICE_OP(dev_ops, alloc_mr);
	SET_DEVICE_OP(dev_ops, alloc_mw);
	SET_DEVICE_OP(dev_ops, alloc_pd);
	SET_DEVICE_OP(dev_ops, alloc_rdma_netdev);
	SET_DEVICE_OP(dev_ops, alloc_ucontext);
	SET_DEVICE_OP(dev_ops, alloc_xrcd);
	SET_DEVICE_OP(dev_ops, attach_mcast);
	SET_DEVICE_OP(dev_ops, check_mr_status);
	SET_DEVICE_OP(dev_ops, create_ah);
	SET_DEVICE_OP(dev_ops, create_counters);
	SET_DEVICE_OP(dev_ops, create_cq);
	SET_DEVICE_OP(dev_ops, create_flow);
	SET_DEVICE_OP(dev_ops, create_flow_action_esp);
	SET_DEVICE_OP(dev_ops, create_qp);
	SET_DEVICE_OP(dev_ops, create_rwq_ind_table);
	SET_DEVICE_OP(dev_ops, create_srq);
	SET_DEVICE_OP(dev_ops, create_wq);
	SET_DEVICE_OP(dev_ops, dealloc_dm);
1765
	SET_DEVICE_OP(dev_ops, dealloc_driver);
K
Kamal Heib 已提交
1766 1767 1768 1769 1770 1771 1772 1773 1774 1775 1776 1777 1778 1779 1780 1781 1782 1783 1784 1785
	SET_DEVICE_OP(dev_ops, dealloc_fmr);
	SET_DEVICE_OP(dev_ops, dealloc_mw);
	SET_DEVICE_OP(dev_ops, dealloc_pd);
	SET_DEVICE_OP(dev_ops, dealloc_ucontext);
	SET_DEVICE_OP(dev_ops, dealloc_xrcd);
	SET_DEVICE_OP(dev_ops, del_gid);
	SET_DEVICE_OP(dev_ops, dereg_mr);
	SET_DEVICE_OP(dev_ops, destroy_ah);
	SET_DEVICE_OP(dev_ops, destroy_counters);
	SET_DEVICE_OP(dev_ops, destroy_cq);
	SET_DEVICE_OP(dev_ops, destroy_flow);
	SET_DEVICE_OP(dev_ops, destroy_flow_action);
	SET_DEVICE_OP(dev_ops, destroy_qp);
	SET_DEVICE_OP(dev_ops, destroy_rwq_ind_table);
	SET_DEVICE_OP(dev_ops, destroy_srq);
	SET_DEVICE_OP(dev_ops, destroy_wq);
	SET_DEVICE_OP(dev_ops, detach_mcast);
	SET_DEVICE_OP(dev_ops, disassociate_ucontext);
	SET_DEVICE_OP(dev_ops, drain_rq);
	SET_DEVICE_OP(dev_ops, drain_sq);
1786
	SET_DEVICE_OP(dev_ops, enable_driver);
1787
	SET_DEVICE_OP(dev_ops, fill_res_entry);
K
Kamal Heib 已提交
1788 1789 1790 1791 1792 1793 1794 1795 1796
	SET_DEVICE_OP(dev_ops, get_dev_fw_str);
	SET_DEVICE_OP(dev_ops, get_dma_mr);
	SET_DEVICE_OP(dev_ops, get_hw_stats);
	SET_DEVICE_OP(dev_ops, get_link_layer);
	SET_DEVICE_OP(dev_ops, get_netdev);
	SET_DEVICE_OP(dev_ops, get_port_immutable);
	SET_DEVICE_OP(dev_ops, get_vector_affinity);
	SET_DEVICE_OP(dev_ops, get_vf_config);
	SET_DEVICE_OP(dev_ops, get_vf_stats);
1797
	SET_DEVICE_OP(dev_ops, init_port);
K
Kamal Heib 已提交
1798 1799 1800 1801 1802 1803 1804 1805 1806 1807 1808 1809 1810 1811 1812 1813 1814 1815 1816 1817 1818 1819 1820 1821 1822 1823 1824 1825 1826 1827 1828 1829 1830 1831 1832
	SET_DEVICE_OP(dev_ops, map_mr_sg);
	SET_DEVICE_OP(dev_ops, map_phys_fmr);
	SET_DEVICE_OP(dev_ops, mmap);
	SET_DEVICE_OP(dev_ops, modify_ah);
	SET_DEVICE_OP(dev_ops, modify_cq);
	SET_DEVICE_OP(dev_ops, modify_device);
	SET_DEVICE_OP(dev_ops, modify_flow_action_esp);
	SET_DEVICE_OP(dev_ops, modify_port);
	SET_DEVICE_OP(dev_ops, modify_qp);
	SET_DEVICE_OP(dev_ops, modify_srq);
	SET_DEVICE_OP(dev_ops, modify_wq);
	SET_DEVICE_OP(dev_ops, peek_cq);
	SET_DEVICE_OP(dev_ops, poll_cq);
	SET_DEVICE_OP(dev_ops, post_recv);
	SET_DEVICE_OP(dev_ops, post_send);
	SET_DEVICE_OP(dev_ops, post_srq_recv);
	SET_DEVICE_OP(dev_ops, process_mad);
	SET_DEVICE_OP(dev_ops, query_ah);
	SET_DEVICE_OP(dev_ops, query_device);
	SET_DEVICE_OP(dev_ops, query_gid);
	SET_DEVICE_OP(dev_ops, query_pkey);
	SET_DEVICE_OP(dev_ops, query_port);
	SET_DEVICE_OP(dev_ops, query_qp);
	SET_DEVICE_OP(dev_ops, query_srq);
	SET_DEVICE_OP(dev_ops, rdma_netdev_get_params);
	SET_DEVICE_OP(dev_ops, read_counters);
	SET_DEVICE_OP(dev_ops, reg_dm_mr);
	SET_DEVICE_OP(dev_ops, reg_user_mr);
	SET_DEVICE_OP(dev_ops, req_ncomp_notif);
	SET_DEVICE_OP(dev_ops, req_notify_cq);
	SET_DEVICE_OP(dev_ops, rereg_user_mr);
	SET_DEVICE_OP(dev_ops, resize_cq);
	SET_DEVICE_OP(dev_ops, set_vf_guid);
	SET_DEVICE_OP(dev_ops, set_vf_link_state);
	SET_DEVICE_OP(dev_ops, unmap_fmr);
1833 1834

	SET_OBJ_SIZE(dev_ops, ib_pd);
1835
	SET_OBJ_SIZE(dev_ops, ib_ucontext);
K
Kamal Heib 已提交
1836 1837 1838
}
EXPORT_SYMBOL(ib_set_device_ops);
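
/*
 * Usage sketch (hypothetical driver, not part of this file): providers
 * normally declare a static const ops table and install it during device
 * setup. Because SET_DEVICE_OP() never overwrites an op that is already
 * set, more specific tables must be applied before generic ones.
 *
 *	static const struct ib_device_ops mydrv_dev_ops = {
 *		.query_device = mydrv_query_device,
 *		.query_port = mydrv_query_port,
 *		.get_port_immutable = mydrv_port_immutable,
 *	};
 *
 *	ib_set_device_ops(ibdev, &mydrv_dev_ops);
 */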

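/*
 * Netlink callbacks for the RDMA_NL_LS (local service) protocol. Each
 * operation requires administrator permission (RDMA_NL_ADMIN_PERM).
 */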
static const struct rdma_nl_cbs ibnl_ls_cb_table[RDMA_NL_LS_NUM_OPS] = {
	[RDMA_NL_LS_OP_RESOLVE] = {
		.doit = ib_nl_handle_resolve_resp,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NL_LS_OP_SET_TIMEOUT] = {
		.doit = ib_nl_handle_set_timeout,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NL_LS_OP_IP_RESOLVE] = {
		.doit = ib_nl_handle_ip_res_resp,
		.flags = RDMA_NL_ADMIN_PERM,
	},
};

static int __init ib_core_init(void)
{
	int ret;

	ib_wq = alloc_workqueue("infiniband", 0, 0);
	if (!ib_wq)
		return -ENOMEM;

	ib_comp_wq = alloc_workqueue("ib-comp-wq",
			WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!ib_comp_wq) {
		ret = -ENOMEM;
		goto err;
	}

	ib_comp_unbound_wq =
		alloc_workqueue("ib-comp-unb-wq",
				WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM |
				WQ_SYSFS, WQ_UNBOUND_MAX_ACTIVE);
	if (!ib_comp_unbound_wq) {
		ret = -ENOMEM;
		goto err_comp;
	}

	ret = class_register(&ib_class);
	if (ret) {
		pr_warn("Couldn't create InfiniBand device class\n");
		goto err_comp_unbound;
	}

	ret = rdma_nl_init();
	if (ret) {
		pr_warn("Couldn't init IB netlink interface: err %d\n", ret);
		goto err_sysfs;
	}

	ret = addr_init();
	if (ret) {
		pr_warn("Couldn't init IB address resolution\n");
		goto err_ibnl;
	}

	ret = ib_mad_init();
	if (ret) {
		pr_warn("Couldn't init IB MAD\n");
		goto err_addr;
	}

	ret = ib_sa_init();
	if (ret) {
		pr_warn("Couldn't init SA\n");
		goto err_mad;
	}

	ret = register_lsm_notifier(&ibdev_lsm_nb);
	if (ret) {
		pr_warn("Couldn't register LSM notifier. ret %d\n", ret);
		goto err_sa;
	}

	nldev_init();
	rdma_nl_register(RDMA_NL_LS, ibnl_ls_cb_table);
	roce_gid_mgmt_init();

	return 0;

err_sa:
	ib_sa_cleanup();
err_mad:
	ib_mad_cleanup();
err_addr:
	addr_cleanup();
err_ibnl:
	rdma_nl_exit();
err_sysfs:
	class_unregister(&ib_class);
err_comp_unbound:
	destroy_workqueue(ib_comp_unbound_wq);
err_comp:
	destroy_workqueue(ib_comp_wq);
err:
	destroy_workqueue(ib_wq);
	return ret;
}

static void __exit ib_core_cleanup(void)
{
	roce_gid_mgmt_cleanup();
	nldev_exit();
	rdma_nl_unregister(RDMA_NL_LS);
	unregister_lsm_notifier(&ibdev_lsm_nb);
	ib_sa_cleanup();
	ib_mad_cleanup();
	addr_cleanup();
	rdma_nl_exit();
	class_unregister(&ib_class);
	destroy_workqueue(ib_comp_unbound_wq);
	destroy_workqueue(ib_comp_wq);
	/* Make sure that any pending umem accounting work is done. */
	destroy_workqueue(ib_wq);
	flush_workqueue(system_unbound_wq);
	WARN_ON(!xa_empty(&clients));
	WARN_ON(!xa_empty(&devices));
}

MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_LS, 4);

subsys_initcall(ib_core_init);
module_exit(ib_core_cleanup);