/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <net/addrconf.h>

#include <rdma/ib_cache.h>

#include "core_priv.h"

struct ib_pkey_cache {
	int             table_len;
	u16             table[0];
};

struct ib_update_work {
	struct work_struct work;
	struct ib_device  *device;
	u8                 port_num;
	bool		   enforce_security;
};

union ib_gid zgid;
EXPORT_SYMBOL(zgid);

enum gid_attr_find_mask {
	GID_ATTR_FIND_MASK_GID          = 1UL << 0,
	GID_ATTR_FIND_MASK_NETDEV	= 1UL << 1,
	GID_ATTR_FIND_MASK_DEFAULT	= 1UL << 2,
	GID_ATTR_FIND_MASK_GID_TYPE	= 1UL << 3,
};

enum gid_table_entry_props {
	GID_TABLE_ENTRY_INVALID		= 1UL << 0,
	GID_TABLE_ENTRY_DEFAULT		= 1UL << 1,
};

struct ib_gid_table_entry {
	unsigned long	    props;
	union ib_gid        gid;
	struct ib_gid_attr  attr;
	void		   *context;
};

struct ib_gid_table {
	int                  sz;
	/* In RoCE, adding a GID to the table requires:
	 * (a) checking whether this GID already exists,
	 * (b) finding a free slot, and
	 * (c) writing the new GID.
	 *
	 * Deletion requires a different set of operations:
	 * (a) find the GID,
	 * (b) delete it.
	 */
	/* Any writer to data_vec must hold this lock and the write side of
	 * rwlock. Readers must hold only rwlock. All writers must be in a
	 * sleepable context.
	 */
	struct mutex         lock;
	/* rwlock protects data_vec[ix].props. */
	rwlock_t	     rwlock;
	struct ib_gid_table_entry *data_vec;
};

static void dispatch_gid_change_event(struct ib_device *ib_dev, u8 port)
{
	struct ib_event event;

	event.device		= ib_dev;
	event.element.port_num	= port;
	event.event		= IB_EVENT_GID_CHANGE;

	ib_dispatch_event(&event);
}

static const char * const gid_type_str[] = {
	[IB_GID_TYPE_IB]	= "IB/RoCE v1",
	[IB_GID_TYPE_ROCE_UDP_ENCAP]	= "RoCE v2",
};

const char *ib_cache_gid_type_str(enum ib_gid_type gid_type)
{
	if (gid_type < ARRAY_SIZE(gid_type_str) && gid_type_str[gid_type])
		return gid_type_str[gid_type];

	return "Invalid GID type";
}
EXPORT_SYMBOL(ib_cache_gid_type_str);

int ib_cache_gid_parse_type_str(const char *buf)
{
	unsigned int i;
	size_t len;
	int err = -EINVAL;

	len = strlen(buf);
	if (len == 0)
		return -EINVAL;

	if (buf[len - 1] == '\n')
		len--;

	for (i = 0; i < ARRAY_SIZE(gid_type_str); ++i)
		if (gid_type_str[i] && !strncmp(buf, gid_type_str[i], len) &&
		    len == strlen(gid_type_str[i])) {
			err = i;
			break;
		}

	return err;
}
EXPORT_SYMBOL(ib_cache_gid_parse_type_str);
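
/*
 * A minimal usage sketch (hypothetical caller such as a sysfs "store"
 * handler; the literal string is an assumption): map a user-supplied
 * type name back to an enum value, then print it via the reverse mapping.
 *
 *	int type = ib_cache_gid_parse_type_str("RoCE v2\n");
 *
 *	if (type < 0)
 *		return type;	(-EINVAL on unknown strings)
 *	pr_info("selected GID type: %s\n", ib_cache_gid_type_str(type));
 */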

static void del_roce_gid(struct ib_device *device, u8 port_num,
			 struct ib_gid_table *table, int ix)
{
	pr_debug("%s device=%s port=%d index=%d gid %pI6\n", __func__,
		 device->name, port_num, ix,
		 table->data_vec[ix].gid.raw);

	if (rdma_cap_roce_gid_table(device, port_num))
		device->del_gid(&table->data_vec[ix].attr,
				&table->data_vec[ix].context);
	dev_put(table->data_vec[ix].attr.ndev);
}

static int add_roce_gid(struct ib_gid_table *table,
			const union ib_gid *gid,
			const struct ib_gid_attr *attr)
{
	struct ib_gid_table_entry *entry;
	int ix = attr->index;
	int ret = 0;

	if (!attr->ndev) {
		pr_err("%s NULL netdev device=%s port=%d index=%d\n",
		       __func__, attr->device->name, attr->port_num,
		       attr->index);
		return -EINVAL;
	}

	entry = &table->data_vec[ix];
	if ((entry->props & GID_TABLE_ENTRY_INVALID) == 0) {
		WARN(1, "GID table corruption device=%s port=%d index=%d\n",
		     attr->device->name, attr->port_num,
		     attr->index);
		return -EINVAL;
	}

	if (rdma_cap_roce_gid_table(attr->device, attr->port_num)) {
		ret = attr->device->add_gid(gid, attr, &entry->context);
		if (ret) {
			pr_err("%s GID add failed device=%s port=%d index=%d\n",
			       __func__, attr->device->name, attr->port_num,
			       attr->index);
			goto add_err;
		}
	}
	dev_hold(attr->ndev);

add_err:
	if (!ret)
		pr_debug("%s device=%s port=%d index=%d gid %pI6\n", __func__,
			 attr->device->name, attr->port_num, ix, gid->raw);
	return ret;
}

/**
 * add_modify_gid - Add or modify GID table entry
 *
 * @table:	GID table in which GID to be added or modified
 * @gid:	GID content
 * @attr:	Attributes of the GID
 *
 * Returns 0 on success or an appropriate error code. It accepts zero
 * GID addition for non-RoCE ports on HCAs that report zero GIDs as
 * valid entries. However, such zero GIDs are not added to the cache.
 */
static int add_modify_gid(struct ib_gid_table *table,
			  const union ib_gid *gid,
			  const struct ib_gid_attr *attr)
{
	int ret;

	if (rdma_protocol_roce(attr->device, attr->port_num)) {
		ret = add_roce_gid(table, gid, attr);
		if (ret)
			return ret;
	} else {
		/*
		 * Some HCAs report multiple GID entries with only one
		 * valid GID, and leave the remaining entries as zero GIDs.
		 * Ignore such behavior for the IB link layer: don't fail
		 * the call, but don't add such entries to the GID cache.
		 */
		if (!memcmp(gid, &zgid, sizeof(*gid)))
			return 0;
	}

	lockdep_assert_held(&table->lock);
	memcpy(&table->data_vec[attr->index].gid, gid, sizeof(*gid));
	memcpy(&table->data_vec[attr->index].attr, attr, sizeof(*attr));

	write_lock_irq(&table->rwlock);
	table->data_vec[attr->index].props &= ~GID_TABLE_ENTRY_INVALID;
	write_unlock_irq(&table->rwlock);
	return 0;
}
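
/*
 * Ordering note: add_modify_gid() fills in the GID and attributes first
 * and only then clears GID_TABLE_ENTRY_INVALID under rwlock, so readers
 * either see a fully formed entry or skip it as invalid.
 */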

/**
 * del_gid - Delete GID table entry
 *
 * @ib_dev:	IB device whose GID entry to be deleted
 * @port:	Port number of the IB device
 * @table:	GID table of the IB device for a port
 * @ix:		GID entry index to delete
 *
 */
static void del_gid(struct ib_device *ib_dev, u8 port,
		    struct ib_gid_table *table, int ix)
{
	lockdep_assert_held(&table->lock);
	write_lock_irq(&table->rwlock);
	table->data_vec[ix].props |= GID_TABLE_ENTRY_INVALID;
	write_unlock_irq(&table->rwlock);

	if (rdma_protocol_roce(ib_dev, port))
		del_roce_gid(ib_dev, port, table, ix);
	memcpy(&table->data_vec[ix].gid, &zgid, sizeof(zgid));
	memset(&table->data_vec[ix].attr, 0, sizeof(table->data_vec[ix].attr));
	table->data_vec[ix].context = NULL;
}

/* The rwlock must be read-locked, or table->lock must be held. */
static int find_gid(struct ib_gid_table *table, const union ib_gid *gid,
		    const struct ib_gid_attr *val, bool default_gid,
		    unsigned long mask, int *pempty)
{
	int i = 0;
	int found = -1;
	int empty = pempty ? -1 : 0;

	while (i < table->sz && (found < 0 || empty < 0)) {
		struct ib_gid_table_entry *data = &table->data_vec[i];
		struct ib_gid_attr *attr = &data->attr;
		int curr_index = i;

		i++;

		/* find_gid() is used during GID addition, where it is
		 * expected to return a free (non-duplicate) entry slot.
		 * A free slot is looked up and returned only when pempty
		 * is set.
		 */
		if (pempty && empty < 0) {
			if (data->props & GID_TABLE_ENTRY_INVALID &&
			    (default_gid ==
			     !!(data->props & GID_TABLE_ENTRY_DEFAULT))) {
				/*
				 * Found an invalid (free) entry; allocate it.
				 * If a default GID is requested, then our
				 * found slot must be one of the DEFAULT
				 * reserved slots or we fail.
				 * This ensures that only DEFAULT reserved
				 * slots are used for default property GIDs.
				 */
				empty = curr_index;
			}
		}

		/*
		 * find_gid() is also used to find a valid entry during
		 * lookup, where validity must be checked. The free-slot
		 * search above therefore runs first, so that an INVALID
		 * entry can still be recorded as empty while being
		 * skipped here.
		 */
		if (data->props & GID_TABLE_ENTRY_INVALID)
			continue;

		if (found >= 0)
			continue;

		if (mask & GID_ATTR_FIND_MASK_GID_TYPE &&
		    attr->gid_type != val->gid_type)
			continue;

		if (mask & GID_ATTR_FIND_MASK_GID &&
		    memcmp(gid, &data->gid, sizeof(*gid)))
			continue;

		if (mask & GID_ATTR_FIND_MASK_NETDEV &&
		    attr->ndev != val->ndev)
			continue;

		if (mask & GID_ATTR_FIND_MASK_DEFAULT &&
		    !!(data->props & GID_TABLE_ENTRY_DEFAULT) !=
		    default_gid)
			continue;

		found = curr_index;
	}

	if (pempty)
		*pempty = empty;

	return found;
}

static void make_default_gid(struct  net_device *dev, union ib_gid *gid)
{
	gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
	addrconf_ifid_eui48(&gid->raw[8], dev);
}
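
/*
 * A default RoCE GID is the port's IPv6 link-local address: the fe80::/64
 * prefix combined with a modified EUI-64 interface ID that
 * addrconf_ifid_eui48() derives from the netdev's MAC address.
 */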

static int __ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
			      union ib_gid *gid, struct ib_gid_attr *attr,
			      unsigned long mask, bool default_gid)
{
	struct ib_gid_table *table;
	int ret = 0;
	int empty;
	int ix;

	/* Do not allow adding zero GIDs, per IB spec version 1.3,
	 * section 4.1.1 point (6) and sections 12.7.10 and 12.7.20.
	 */
	if (!memcmp(gid, &zgid, sizeof(*gid)))
		return -EINVAL;

	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

	mutex_lock(&table->lock);

	ix = find_gid(table, gid, attr, default_gid, mask, &empty);
	if (ix >= 0)
		goto out_unlock;

	if (empty < 0) {
		ret = -ENOSPC;
		goto out_unlock;
	}
	attr->device = ib_dev;
	attr->index = empty;
	attr->port_num = port;
	ret = add_modify_gid(table, gid, attr);
	if (!ret)
		dispatch_gid_change_event(ib_dev, port);

out_unlock:
	mutex_unlock(&table->lock);
	if (ret)
		pr_warn("%s: unable to add gid %pI6 error=%d\n",
			__func__, gid->raw, ret);
	return ret;
}

int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
		     union ib_gid *gid, struct ib_gid_attr *attr)
{
	struct net_device *idev;
	unsigned long mask;
	int ret;

	if (ib_dev->get_netdev) {
		idev = ib_dev->get_netdev(ib_dev, port);
		if (idev && attr->ndev != idev) {
			union ib_gid default_gid;

			/* Adding default GIDs is not permitted */
			make_default_gid(idev, &default_gid);
			if (!memcmp(gid, &default_gid, sizeof(*gid))) {
				dev_put(idev);
				return -EPERM;
			}
		}
		if (idev)
			dev_put(idev);
	}

	mask = GID_ATTR_FIND_MASK_GID |
	       GID_ATTR_FIND_MASK_GID_TYPE |
	       GID_ATTR_FIND_MASK_NETDEV;

	ret = __ib_cache_gid_add(ib_dev, port, gid, attr, mask, false);
	return ret;
}
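
/*
 * A minimal usage sketch (hypothetical RoCE driver context; "ndev" and
 * "my_gid" are assumptions, not names from this file):
 *
 *	struct ib_gid_attr attr = {
 *		.ndev		= ndev,
 *		.gid_type	= IB_GID_TYPE_ROCE_UDP_ENCAP,
 *	};
 *
 *	if (ib_cache_gid_add(ib_dev, 1, &my_gid, &attr))
 *		pr_warn("GID was not cached\n");
 *
 * Duplicate additions are absorbed: find_gid() returns the existing index
 * before a free slot is consumed, and the call succeeds without a new entry.
 */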

int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
		     union ib_gid *gid, struct ib_gid_attr *attr)
{
	struct ib_gid_table *table;
	int ret = 0;
	int ix;

	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

	mutex_lock(&table->lock);

	ix = find_gid(table, gid, attr, false,
		      GID_ATTR_FIND_MASK_GID	  |
		      GID_ATTR_FIND_MASK_GID_TYPE |
		      GID_ATTR_FIND_MASK_NETDEV,
		      NULL);
	if (ix < 0) {
		ret = -EINVAL;
		goto out_unlock;
	}

	del_gid(ib_dev, port, table, ix);
	dispatch_gid_change_event(ib_dev, port);

out_unlock:
	mutex_unlock(&table->lock);
	if (ret)
		pr_debug("%s: can't delete gid %pI6 error=%d\n",
			 __func__, gid->raw, ret);
	return ret;
}

int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
				     struct net_device *ndev)
{
	struct ib_gid_table *table;
	int ix;
	bool deleted = false;

	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

	mutex_lock(&table->lock);

	for (ix = 0; ix < table->sz; ix++) {
		if (table->data_vec[ix].attr.ndev == ndev) {
			del_gid(ib_dev, port, table, ix);
			deleted = true;
		}
	}

	mutex_unlock(&table->lock);

	if (deleted)
		dispatch_gid_change_event(ib_dev, port);

	return 0;
}

static int __ib_cache_gid_get(struct ib_device *ib_dev, u8 port, int index,
			      union ib_gid *gid, struct ib_gid_attr *attr)
{
	struct ib_gid_table *table;

	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

	if (index < 0 || index >= table->sz)
		return -EINVAL;

	if (table->data_vec[index].props & GID_TABLE_ENTRY_INVALID)
		return -EAGAIN;

	memcpy(gid, &table->data_vec[index].gid, sizeof(*gid));
	if (attr) {
		memcpy(attr, &table->data_vec[index].attr, sizeof(*attr));
		if (attr->ndev)
			dev_hold(attr->ndev);
	}

	return 0;
}

static int _ib_cache_gid_table_find(struct ib_device *ib_dev,
				    const union ib_gid *gid,
				    const struct ib_gid_attr *val,
				    unsigned long mask,
				    u8 *port, u16 *index)
{
	struct ib_gid_table *table;
	u8 p;
	int local_index;
	unsigned long flags;

	for (p = 0; p < ib_dev->phys_port_cnt; p++) {
		table = ib_dev->cache.ports[p].gid;
		read_lock_irqsave(&table->rwlock, flags);
		local_index = find_gid(table, gid, val, false, mask, NULL);
		if (local_index >= 0) {
			if (index)
				*index = local_index;
			if (port)
				*port = p + rdma_start_port(ib_dev);
			read_unlock_irqrestore(&table->rwlock, flags);
			return 0;
		}
		read_unlock_irqrestore(&table->rwlock, flags);
	}

	return -ENOENT;
}

static int ib_cache_gid_find(struct ib_device *ib_dev,
			     const union ib_gid *gid,
			     enum ib_gid_type gid_type,
			     struct net_device *ndev, u8 *port,
			     u16 *index)
{
	unsigned long mask = GID_ATTR_FIND_MASK_GID |
			     GID_ATTR_FIND_MASK_GID_TYPE;
	struct ib_gid_attr gid_attr_val = {.ndev = ndev, .gid_type = gid_type};

	if (ndev)
		mask |= GID_ATTR_FIND_MASK_NETDEV;

	return _ib_cache_gid_table_find(ib_dev, gid, &gid_attr_val,
					mask, port, index);
}

/**
 * ib_find_cached_gid_by_port - Returns the GID table index where a specified
 * GID value occurs. It searches for the specified GID value in the local
 * software cache.
 * @device: The device to query.
 * @gid: The GID value to search for.
 * @gid_type: The GID type to search for.
 * @port_num: The port number of the device where the GID value should be
 *   searched.
 * @ndev: In RoCE, the net device of the device. Null means ignore.
 * @index: The index into the cached GID table where the GID was found. This
 *   parameter may be NULL.
 */
int ib_find_cached_gid_by_port(struct ib_device *ib_dev,
			       const union ib_gid *gid,
			       enum ib_gid_type gid_type,
			       u8 port, struct net_device *ndev,
			       u16 *index)
{
	int local_index;
	struct ib_gid_table *table;
	unsigned long mask = GID_ATTR_FIND_MASK_GID |
			     GID_ATTR_FIND_MASK_GID_TYPE;
	struct ib_gid_attr val = {.ndev = ndev, .gid_type = gid_type};
	unsigned long flags;

	if (!rdma_is_port_valid(ib_dev, port))
		return -ENOENT;

	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

	if (ndev)
		mask |= GID_ATTR_FIND_MASK_NETDEV;

	read_lock_irqsave(&table->rwlock, flags);
	local_index = find_gid(table, gid, &val, false, mask, NULL);
	if (local_index >= 0) {
		if (index)
			*index = local_index;
		read_unlock_irqrestore(&table->rwlock, flags);
		return 0;
	}

	read_unlock_irqrestore(&table->rwlock, flags);
	return -ENOENT;
}
EXPORT_SYMBOL(ib_find_cached_gid_by_port);
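
/*
 * A minimal lookup sketch (hypothetical caller; "target_gid" is an
 * assumption): resolve a GID to its table index on port 1, passing a
 * NULL netdev so the netdev check is skipped.
 *
 *	u16 index;
 *
 *	if (!ib_find_cached_gid_by_port(ib_dev, &target_gid,
 *					IB_GID_TYPE_ROCE_UDP_ENCAP,
 *					1, NULL, &index))
 *		pr_info("GID cached at index %u\n", index);
 */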

/**
 * ib_cache_gid_find_by_filter - Returns the GID table index where a specified
 * GID value occurs
 * @device: The device to query.
 * @gid: The GID value to search for.
 * @port_num: The port number of the device where the GID value could be
 *   searched.
 * @filter: The filter function is executed on any matching GID in the table.
 *   If the filter function returns true, the corresponding index is returned,
 *   otherwise, we continue searching the GID table. It's guaranteed that
 *   while filter is executed, ndev field is valid and the structure won't
 *   change. filter is executed in an atomic context. filter must not be NULL.
 * @index: The index into the cached GID table where the GID was found. This
 *   parameter may be NULL.
 *
 * ib_cache_gid_find_by_filter() searches for the specified GID value
 * of which the filter function returns true in the port's GID table.
 * This function is only supported on RoCE ports.
 *
 */
static int ib_cache_gid_find_by_filter(struct ib_device *ib_dev,
				       const union ib_gid *gid,
				       u8 port,
				       bool (*filter)(const union ib_gid *,
						      const struct ib_gid_attr *,
						      void *),
				       void *context,
				       u16 *index)
{
	struct ib_gid_table *table;
	unsigned int i;
	unsigned long flags;
	bool found = false;

	if (!rdma_is_port_valid(ib_dev, port) ||
	    !rdma_protocol_roce(ib_dev, port))
		return -EPROTONOSUPPORT;

	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

	read_lock_irqsave(&table->rwlock, flags);
	for (i = 0; i < table->sz; i++) {
		struct ib_gid_attr attr;

		if (table->data_vec[i].props & GID_TABLE_ENTRY_INVALID)
			continue;

		if (memcmp(gid, &table->data_vec[i].gid, sizeof(*gid)))
			continue;

		memcpy(&attr, &table->data_vec[i].attr, sizeof(attr));

		if (filter(gid, &attr, context)) {
			found = true;
			if (index)
				*index = i;
			break;
		}
	}
	read_unlock_irqrestore(&table->rwlock, flags);

	if (!found)
		return -ENOENT;
	return 0;
}

static struct ib_gid_table *alloc_gid_table(int sz)
{
	struct ib_gid_table *table =
		kzalloc(sizeof(struct ib_gid_table), GFP_KERNEL);
	int i;

	if (!table)
		return NULL;

	table->data_vec = kcalloc(sz, sizeof(*table->data_vec), GFP_KERNEL);
	if (!table->data_vec)
		goto err_free_table;

	mutex_init(&table->lock);

	table->sz = sz;
	rwlock_init(&table->rwlock);

	/* Mark all entries as invalid so that the allocator can hand
	 * out one of the invalid (free) entries.
	 */
	for (i = 0; i < sz; i++)
		table->data_vec[i].props |= GID_TABLE_ENTRY_INVALID;
	return table;

err_free_table:
	kfree(table);
	return NULL;
}

static void release_gid_table(struct ib_gid_table *table)
{
	if (table) {
		kfree(table->data_vec);
		kfree(table);
	}
}

static void cleanup_gid_table_port(struct ib_device *ib_dev, u8 port,
				   struct ib_gid_table *table)
{
	int i;
	bool deleted = false;

	if (!table)
		return;

	mutex_lock(&table->lock);
	for (i = 0; i < table->sz; ++i) {
		if (memcmp(&table->data_vec[i].gid, &zgid,
			   sizeof(table->data_vec[i].gid))) {
			del_gid(ib_dev, port, table, i);
			deleted = true;
		}
	}
	mutex_unlock(&table->lock);

	if (deleted)
		dispatch_gid_change_event(ib_dev, port);
}

void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
				  struct net_device *ndev,
				  unsigned long gid_type_mask,
				  enum ib_cache_gid_default_mode mode)
{
	union ib_gid gid;
	struct ib_gid_attr gid_attr;
	struct ib_gid_table *table;
	unsigned int gid_type;
	unsigned long mask;

	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

	make_default_gid(ndev, &gid);
	memset(&gid_attr, 0, sizeof(gid_attr));
	gid_attr.ndev = ndev;

	for (gid_type = 0; gid_type < IB_GID_TYPE_SIZE; ++gid_type) {
		if (1UL << gid_type & ~gid_type_mask)
			continue;

		gid_attr.gid_type = gid_type;

		if (mode == IB_CACHE_GID_DEFAULT_MODE_SET) {
			mask = GID_ATTR_FIND_MASK_GID_TYPE |
			       GID_ATTR_FIND_MASK_DEFAULT;
			__ib_cache_gid_add(ib_dev, port, &gid,
					   &gid_attr, mask, true);
		} else if (mode == IB_CACHE_GID_DEFAULT_MODE_DELETE) {
			ib_cache_gid_del(ib_dev, port, &gid, &gid_attr);
		}
	}
}

static int gid_table_reserve_default(struct ib_device *ib_dev, u8 port,
				     struct ib_gid_table *table)
{
	unsigned int i;
	unsigned long roce_gid_type_mask;
	unsigned int num_default_gids;
	unsigned int current_gid = 0;

	roce_gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
	num_default_gids = hweight_long(roce_gid_type_mask);
	for (i = 0; i < num_default_gids && i < table->sz; i++) {
		struct ib_gid_table_entry *entry =
			&table->data_vec[i];

		entry->props |= GID_TABLE_ENTRY_DEFAULT;
		current_gid = find_next_bit(&roce_gid_type_mask,
					    BITS_PER_LONG,
					    current_gid);
		entry->attr.gid_type = current_gid++;
	}

	return 0;
}
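
/*
 * One slot per supported RoCE GID type is reserved up front and marked
 * GID_TABLE_ENTRY_DEFAULT; find_gid() hands these slots out only for
 * default-GID additions, so ordinary GIDs cannot consume them.
 */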

static int _gid_table_setup_one(struct ib_device *ib_dev)
{
	u8 port;
	struct ib_gid_table *table;
	int err = 0;

	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
		u8 rdma_port = port + rdma_start_port(ib_dev);

		table = alloc_gid_table(
				ib_dev->port_immutable[rdma_port].gid_tbl_len);
		if (!table) {
			err = -ENOMEM;
			goto rollback_table_setup;
		}

		err = gid_table_reserve_default(ib_dev,
						port + rdma_start_port(ib_dev),
						table);
		if (err)
			goto rollback_table_setup;
		ib_dev->cache.ports[port].gid = table;
	}

	return 0;

rollback_table_setup:
	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
		table = ib_dev->cache.ports[port].gid;

		cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
				       table);
		release_gid_table(table);
	}

	return err;
}

static void gid_table_release_one(struct ib_device *ib_dev)
{
	struct ib_gid_table *table;
	u8 port;

	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
		table = ib_dev->cache.ports[port].gid;
		release_gid_table(table);
		ib_dev->cache.ports[port].gid = NULL;
	}
}

static void gid_table_cleanup_one(struct ib_device *ib_dev)
{
	struct ib_gid_table *table;
	u8 port;

	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
		table = ib_dev->cache.ports[port].gid;
		cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
				       table);
	}
}

static int gid_table_setup_one(struct ib_device *ib_dev)
{
	int err;

	err = _gid_table_setup_one(ib_dev);

	if (err)
		return err;

	rdma_roce_rescan_device(ib_dev);

	return err;
}

int ib_get_cached_gid(struct ib_device *device,
		      u8                port_num,
		      int               index,
		      union ib_gid     *gid,
		      struct ib_gid_attr *gid_attr)
{
	int res;
	unsigned long flags;
	struct ib_gid_table *table;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	table = device->cache.ports[port_num - rdma_start_port(device)].gid;
	read_lock_irqsave(&table->rwlock, flags);
	res = __ib_cache_gid_get(device, port_num, index, gid, gid_attr);
	read_unlock_irqrestore(&table->rwlock, flags);

	return res;
}
EXPORT_SYMBOL(ib_get_cached_gid);
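
/*
 * Note for callers: when gid_attr is supplied and the entry has a netdev,
 * __ib_cache_gid_get() takes a reference on attr->ndev via dev_hold();
 * the caller is expected to drop it with dev_put() when done.
 */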

/**
 * ib_find_cached_gid - Returns the port number and GID table index where
 *   a specified GID value occurs.
 * @device: The device to query.
 * @gid: The GID value to search for.
 * @gid_type: The GID type to search for.
 * @ndev: In RoCE, the net device of the device. NULL means ignore.
 * @port_num: The port number of the device where the GID value was found.
 * @index: The index into the cached GID table where the GID was found.  This
 *   parameter may be NULL.
 *
 * ib_find_cached_gid() searches for the specified GID value in
 * the local software cache.
 */
int ib_find_cached_gid(struct ib_device *device,
		       const union ib_gid *gid,
		       enum ib_gid_type gid_type,
		       struct net_device *ndev,
		       u8               *port_num,
		       u16              *index)
{
	return ib_cache_gid_find(device, gid, gid_type, ndev, port_num, index);
}
EXPORT_SYMBOL(ib_find_cached_gid);

int ib_find_gid_by_filter(struct ib_device *device,
			  const union ib_gid *gid,
			  u8 port_num,
			  bool (*filter)(const union ib_gid *gid,
					 const struct ib_gid_attr *,
					 void *),
			  void *context, u16 *index)
{
	/* Only the RoCE GID table supports a filter function */
	if (!rdma_protocol_roce(device, port_num) && filter)
		return -EPROTONOSUPPORT;

	return ib_cache_gid_find_by_filter(device, gid,
					   port_num, filter,
					   context, index);
}

int ib_get_cached_pkey(struct ib_device *device,
		       u8                port_num,
		       int               index,
		       u16              *pkey)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int ret = 0;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;

	if (index < 0 || index >= cache->table_len)
		ret = -EINVAL;
	else
		*pkey = cache->table[index];

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_pkey);

int ib_get_cached_subnet_prefix(struct ib_device *device,
				u8                port_num,
				u64              *sn_pfx)
{
	unsigned long flags;
	int p;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	p = port_num - rdma_start_port(device);
	read_lock_irqsave(&device->cache.lock, flags);
	*sn_pfx = device->cache.ports[p].subnet_prefix;
	read_unlock_irqrestore(&device->cache.lock, flags);

	return 0;
}
EXPORT_SYMBOL(ib_get_cached_subnet_prefix);

int ib_find_cached_pkey(struct ib_device *device,
			u8                port_num,
			u16               pkey,
			u16              *index)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int i;
	int ret = -ENOENT;
	int partial_ix = -1;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;

	*index = -1;

	for (i = 0; i < cache->table_len; ++i)
		if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) {
			if (cache->table[i] & 0x8000) {
				*index = i;
				ret = 0;
				break;
			} else
				partial_ix = i;
		}

	if (ret && partial_ix >= 0) {
		*index = partial_ix;
		ret = 0;
	}

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_find_cached_pkey);
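
/*
 * The search above masks with 0x7fff because bit 15 of a P_Key is the
 * membership bit (1 = full member, 0 = partial member). A full-membership
 * match wins immediately; a partial-member slot is only remembered in
 * partial_ix and used when no full match exists.
 */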

int ib_find_exact_cached_pkey(struct ib_device *device,
			      u8                port_num,
			      u16               pkey,
			      u16              *index)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int i;
	int ret = -ENOENT;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;

	*index = -1;

	for (i = 0; i < cache->table_len; ++i)
		if (cache->table[i] == pkey) {
			*index = i;
			ret = 0;
			break;
		}

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_find_exact_cached_pkey);

int ib_get_cached_lmc(struct ib_device *device,
		      u8                port_num,
		      u8                *lmc)
{
	unsigned long flags;
	int ret = 0;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);
	*lmc = device->cache.ports[port_num - rdma_start_port(device)].lmc;
	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_lmc);

int ib_get_cached_port_state(struct ib_device   *device,
			     u8                  port_num,
			     enum ib_port_state *port_state)
{
	unsigned long flags;
	int ret = 0;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);
	*port_state = device->cache.ports[port_num
		- rdma_start_port(device)].port_state;
	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_port_state);

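/*
 * For IB and OPA ports the GID table is read once from the device via
 * query_gid() and mirrored into the cache below; RoCE ports are populated
 * instead through add/del events driven by the roce_gid_mgmt machinery.
 */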
static int config_non_roce_gid_cache(struct ib_device *device,
				     u8 port, int gid_tbl_len)
{
	struct ib_gid_attr gid_attr = {};
	struct ib_gid_table *table;
	union ib_gid gid;
	int ret = 0;
	int i;

	gid_attr.device = device;
	gid_attr.port_num = port;
	table = device->cache.ports[port - rdma_start_port(device)].gid;

	mutex_lock(&table->lock);
	for (i = 0; i < gid_tbl_len; ++i) {
		if (!device->query_gid)
			continue;
		ret = device->query_gid(device, port, i, &gid);
		if (ret) {
			pr_warn("query_gid failed (%d) for %s (index %d)\n",
				ret, device->name, i);
			goto err;
		}
		gid_attr.index = i;
		add_modify_gid(table, &gid, &gid_attr);
	}
err:
	mutex_unlock(&table->lock);
	return ret;
}

static void ib_cache_update(struct ib_device *device,
			    u8                port,
			    bool	      enforce_security)
{
	struct ib_port_attr       *tprops = NULL;
	struct ib_pkey_cache      *pkey_cache = NULL, *old_pkey_cache;
	int                        i;
	int                        ret;
	struct ib_gid_table	  *table;

	if (!rdma_is_port_valid(device, port))
		return;

	table = device->cache.ports[port - rdma_start_port(device)].gid;

	tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
	if (!tprops)
		return;

	ret = ib_query_port(device, port, tprops);
	if (ret) {
		pr_warn("ib_query_port failed (%d) for %s\n",
			ret, device->name);
		goto err;
	}

	if (!rdma_protocol_roce(device, port)) {
		ret = config_non_roce_gid_cache(device, port,
						tprops->gid_tbl_len);
		if (ret)
			goto err;
	}

	pkey_cache = kmalloc(sizeof *pkey_cache + tprops->pkey_tbl_len *
			     sizeof *pkey_cache->table, GFP_KERNEL);
	if (!pkey_cache)
		goto err;

	pkey_cache->table_len = tprops->pkey_tbl_len;

	for (i = 0; i < pkey_cache->table_len; ++i) {
		ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
		if (ret) {
			pr_warn("ib_query_pkey failed (%d) for %s (index %d)\n",
				ret, device->name, i);
			goto err;
		}
	}

	write_lock_irq(&device->cache.lock);

	old_pkey_cache = device->cache.ports[port -
		rdma_start_port(device)].pkey;

	device->cache.ports[port - rdma_start_port(device)].pkey = pkey_cache;
	device->cache.ports[port - rdma_start_port(device)].lmc = tprops->lmc;
	device->cache.ports[port - rdma_start_port(device)].port_state =
		tprops->state;

	device->cache.ports[port - rdma_start_port(device)].subnet_prefix =
							tprops->subnet_prefix;
	write_unlock_irq(&device->cache.lock);

	if (enforce_security)
		ib_security_cache_change(device,
					 port,
					 tprops->subnet_prefix);

	kfree(old_pkey_cache);
	kfree(tprops);
	return;

err:
	kfree(pkey_cache);
	kfree(tprops);
}

static void ib_cache_task(struct work_struct *_work)
{
	struct ib_update_work *work =
		container_of(_work, struct ib_update_work, work);

	ib_cache_update(work->device,
			work->port_num,
			work->enforce_security);
	kfree(work);
}

static void ib_cache_event(struct ib_event_handler *handler,
			   struct ib_event *event)
{
	struct ib_update_work *work;

	if (event->event == IB_EVENT_PORT_ERR    ||
	    event->event == IB_EVENT_PORT_ACTIVE ||
	    event->event == IB_EVENT_LID_CHANGE  ||
	    event->event == IB_EVENT_PKEY_CHANGE ||
	    event->event == IB_EVENT_SM_CHANGE   ||
	    event->event == IB_EVENT_CLIENT_REREGISTER ||
	    event->event == IB_EVENT_GID_CHANGE) {
		work = kmalloc(sizeof *work, GFP_ATOMIC);
		if (work) {
			INIT_WORK(&work->work, ib_cache_task);
			work->device   = event->device;
			work->port_num = event->element.port_num;
			if (event->event == IB_EVENT_PKEY_CHANGE ||
			    event->event == IB_EVENT_GID_CHANGE)
				work->enforce_security = true;
			else
				work->enforce_security = false;

			queue_work(ib_wq, &work->work);
		}
	}
}
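
/*
 * Cache refreshes are deferred to ib_wq because ib_cache_event() may run
 * in atomic context, while ib_cache_update() sleeps: it allocates with
 * GFP_KERNEL and queries the device.
 */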

int ib_cache_setup_one(struct ib_device *device)
{
	int p;
	int err;

	rwlock_init(&device->cache.lock);

	device->cache.ports =
		kzalloc(sizeof(*device->cache.ports) *
			(rdma_end_port(device) - rdma_start_port(device) + 1),
			GFP_KERNEL);
	if (!device->cache.ports)
		return -ENOMEM;

	err = gid_table_setup_one(device);
	if (err) {
		kfree(device->cache.ports);
		device->cache.ports = NULL;
		return err;
	}

	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
		ib_cache_update(device, p + rdma_start_port(device), true);

	INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
			      device, ib_cache_event);
	ib_register_event_handler(&device->cache.event_handler);
	return 0;
}

void ib_cache_release_one(struct ib_device *device)
{
	int p;

	/*
	 * The release function frees all the cache elements.
	 * This function should be called as part of freeing
	 * all the device's resources when the cache could no
	 * longer be accessed.
	 */
	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
		kfree(device->cache.ports[p].pkey);

	gid_table_release_one(device);
	kfree(device->cache.ports);
}

void ib_cache_cleanup_one(struct ib_device *device)
{
	/* The cleanup function unregisters the event handler,
	 * waits for all in-progress workqueue elements and cleans
	 * up the GID cache. This function should be called after
	 * the device was removed from the devices list and all
	 * clients were removed, so the cache exists but is
	 * non-functional and shouldn't be updated anymore.
	 */
	ib_unregister_event_handler(&device->cache.event_handler);
	flush_workqueue(ib_wq);
	gid_table_cleanup_one(device);
}

void __init ib_cache_setup(void)
{
	roce_gid_mgmt_init();
}

void __exit ib_cache_cleanup(void)
{
	roce_gid_mgmt_cleanup();
}