/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <net/addrconf.h>

#include <rdma/ib_cache.h>

#include "core_priv.h"

struct ib_pkey_cache {
	int             table_len;
	u16             table[0];
};

struct ib_update_work {
	struct work_struct work;
	struct ib_device  *device;
	u8                 port_num;
	bool		   enforce_security;
};

union ib_gid zgid;
EXPORT_SYMBOL(zgid);

enum gid_attr_find_mask {
	GID_ATTR_FIND_MASK_GID          = 1UL << 0,
	GID_ATTR_FIND_MASK_NETDEV	= 1UL << 1,
	GID_ATTR_FIND_MASK_DEFAULT	= 1UL << 2,
	GID_ATTR_FIND_MASK_GID_TYPE	= 1UL << 3,
};

enum gid_table_entry_props {
	GID_TABLE_ENTRY_INVALID		= 1UL << 0,
	GID_TABLE_ENTRY_DEFAULT		= 1UL << 1,
};

struct ib_gid_table_entry {
	unsigned long	    props;
	union ib_gid        gid;
	struct ib_gid_attr  attr;
	void		   *context;
};

struct ib_gid_table {
	int                  sz;
	/* In RoCE, adding a GID to the table requires:
	 * (a) Find out if this GID already exists.
	 * (b) Find a free space.
	 * (c) Write the new GID.
	 *
	 * Deleting requires a different set of operations:
	 * (a) Find the GID.
	 * (b) Delete it.
	 */
	/* Any writer to data_vec must hold this lock and the write side of
	 * rwlock. Readers must hold only rwlock. All writers must be in a
	 * sleepable context.
	 */
	struct mutex         lock;
	/* rwlock protects data_vec[ix]->props. */
	rwlock_t	     rwlock;
	struct ib_gid_table_entry *data_vec;
};

static void dispatch_gid_change_event(struct ib_device *ib_dev, u8 port)
{
	struct ib_event event;

	event.device		= ib_dev;
	event.element.port_num	= port;
	event.event		= IB_EVENT_GID_CHANGE;

	ib_dispatch_event(&event);
}

static const char * const gid_type_str[] = {
	[IB_GID_TYPE_IB]	= "IB/RoCE v1",
	[IB_GID_TYPE_ROCE_UDP_ENCAP]	= "RoCE v2",
};

const char *ib_cache_gid_type_str(enum ib_gid_type gid_type)
{
	if (gid_type < ARRAY_SIZE(gid_type_str) && gid_type_str[gid_type])
		return gid_type_str[gid_type];

	return "Invalid GID type";
}
EXPORT_SYMBOL(ib_cache_gid_type_str);

int ib_cache_gid_parse_type_str(const char *buf)
{
	unsigned int i;
	size_t len;
	int err = -EINVAL;

	len = strlen(buf);
	if (len == 0)
		return -EINVAL;

	if (buf[len - 1] == '\n')
		len--;

	for (i = 0; i < ARRAY_SIZE(gid_type_str); ++i)
		if (gid_type_str[i] && !strncmp(buf, gid_type_str[i], len) &&
		    len == strlen(gid_type_str[i])) {
			err = i;
			break;
		}

	return err;
}
EXPORT_SYMBOL(ib_cache_gid_parse_type_str);
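
/*
 * Illustrative usage sketch (not part of the original file): parsing a GID
 * type string, e.g. one written from user space; a trailing newline is
 * tolerated by the parser above.
 *
 *	int gid_type = ib_cache_gid_parse_type_str("RoCE v2\n");
 *
 *	if (gid_type < 0)
 *		pr_err("unknown GID type string\n");
 *	else
 *		pr_info("parsed GID type: %s\n",
 *			ib_cache_gid_type_str(gid_type));
 */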

static void del_roce_gid(struct ib_device *device, u8 port_num,
			 struct ib_gid_table *table, int ix)
{
	pr_debug("%s device=%s port=%d index=%d gid %pI6\n", __func__,
		 device->name, port_num, ix,
		 table->data_vec[ix].gid.raw);

	if (rdma_cap_roce_gid_table(device, port_num))
		device->del_gid(&table->data_vec[ix].attr,
				&table->data_vec[ix].context);
	dev_put(table->data_vec[ix].attr.ndev);
}

static int add_roce_gid(struct ib_gid_table *table,
			const union ib_gid *gid,
			const struct ib_gid_attr *attr)
{
	struct ib_gid_table_entry *entry;
	int ix = attr->index;
	int ret = 0;

	if (!attr->ndev) {
		pr_err("%s NULL netdev device=%s port=%d index=%d\n",
		       __func__, attr->device->name, attr->port_num,
		       attr->index);
		return -EINVAL;
	}

	entry = &table->data_vec[ix];
	if ((entry->props & GID_TABLE_ENTRY_INVALID) == 0) {
		WARN(1, "GID table corruption device=%s port=%d index=%d\n",
		     attr->device->name, attr->port_num,
		     attr->index);
		return -EINVAL;
	}

	if (rdma_cap_roce_gid_table(attr->device, attr->port_num)) {
		ret = attr->device->add_gid(gid, attr, &entry->context);
		if (ret) {
			pr_err("%s GID add failed device=%s port=%d index=%d\n",
			       __func__, attr->device->name, attr->port_num,
			       attr->index);
			goto add_err;
		}
	}
	dev_hold(attr->ndev);

add_err:
	if (!ret)
		pr_debug("%s device=%s port=%d index=%d gid %pI6\n", __func__,
			 attr->device->name, attr->port_num, ix, gid->raw);
	return ret;
}

/**
 * add_modify_gid - Add or modify GID table entry
 *
 * @table:	GID table in which the GID is to be added or modified
 * @gid:	GID content
 * @attr:	Attributes of the GID
 *
 * Returns 0 on success or an appropriate error code. Zero GID addition
 * is accepted for non-RoCE ports whose HCAs report zero GIDs as valid
 * entries; such zero GIDs are not added to the cache, however.
 */
static int add_modify_gid(struct ib_gid_table *table,
			  const union ib_gid *gid,
			  const struct ib_gid_attr *attr)
{
	int ret;

	if (rdma_protocol_roce(attr->device, attr->port_num)) {
		ret = add_roce_gid(table, gid, attr);
		if (ret)
			return ret;
	} else {
		/*
		 * Some HCAs report multiple GID entries with only one
		 * valid GID; the remaining entries are zero GIDs.
		 * Tolerate this on the IB link layer: don't fail the
		 * call, but don't add such an entry to the GID cache.
		 */
		if (!memcmp(gid, &zgid, sizeof(*gid)))
			return 0;
	}

	lockdep_assert_held(&table->lock);
	memcpy(&table->data_vec[attr->index].gid, gid, sizeof(*gid));
	memcpy(&table->data_vec[attr->index].attr, attr, sizeof(*attr));

	write_lock_irq(&table->rwlock);
	table->data_vec[attr->index].props &= ~GID_TABLE_ENTRY_INVALID;
	write_unlock_irq(&table->rwlock);
	return 0;
}

/**
 * del_gid - Delete GID table entry
 *
 * @ib_dev:	IB device whose GID entry is to be deleted
 * @port:	Port number of the IB device
 * @table:	GID table of the IB device for a port
 * @ix:		GID entry index to delete
 *
 */
static void del_gid(struct ib_device *ib_dev, u8 port,
		    struct ib_gid_table *table, int ix)
{
	lockdep_assert_held(&table->lock);
	write_lock_irq(&table->rwlock);
	table->data_vec[ix].props |= GID_TABLE_ENTRY_INVALID;
	write_unlock_irq(&table->rwlock);

	if (rdma_protocol_roce(ib_dev, port))
		del_roce_gid(ib_dev, port, table, ix);
	memcpy(&table->data_vec[ix].gid, &zgid, sizeof(zgid));
	memset(&table->data_vec[ix].attr, 0, sizeof(table->data_vec[ix].attr));
	table->data_vec[ix].context = NULL;
}

/* rwlock should be read-locked, or the table->lock mutex should be held */
static int find_gid(struct ib_gid_table *table, const union ib_gid *gid,
		    const struct ib_gid_attr *val, bool default_gid,
		    unsigned long mask, int *pempty)
{
	int i = 0;
	int found = -1;
	int empty = pempty ? -1 : 0;

	while (i < table->sz && (found < 0 || empty < 0)) {
		struct ib_gid_table_entry *data = &table->data_vec[i];
		struct ib_gid_attr *attr = &data->attr;
		int curr_index = i;

		i++;

		/* find_gid() is used during GID addition, where it is
		 * expected to return a free (non-duplicate) entry slot.
		 * A free slot is searched for, and returned, only when
		 * pempty is set.
		 */
		if (pempty && empty < 0) {
			if (data->props & GID_TABLE_ENTRY_INVALID) {
				/* Found an invalid (free) entry; allocate it */
				if (data->props & GID_TABLE_ENTRY_DEFAULT) {
					if (default_gid)
						empty = curr_index;
				} else {
					empty = curr_index;
				}
			}
		}

		/*
		 * find_gid() is also used to find a valid entry during
		 * lookup, where validity needs to be checked. The free-slot
		 * search above therefore runs first, so that an INVALID
		 * entry is still considered as a free slot before being
		 * skipped here.
		 */
		if (data->props & GID_TABLE_ENTRY_INVALID)
			continue;

		if (found >= 0)
			continue;

		if (mask & GID_ATTR_FIND_MASK_GID_TYPE &&
		    attr->gid_type != val->gid_type)
			continue;

		if (mask & GID_ATTR_FIND_MASK_GID &&
		    memcmp(gid, &data->gid, sizeof(*gid)))
			continue;

		if (mask & GID_ATTR_FIND_MASK_NETDEV &&
		    attr->ndev != val->ndev)
			continue;

		if (mask & GID_ATTR_FIND_MASK_DEFAULT &&
		    !!(data->props & GID_TABLE_ENTRY_DEFAULT) !=
		    default_gid)
			continue;

		found = curr_index;
	}

	if (pempty)
		*pempty = empty;

	return found;
}

static void make_default_gid(struct  net_device *dev, union ib_gid *gid)
{
	gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
	addrconf_ifid_eui48(&gid->raw[8], dev);
}

static int __ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
			      union ib_gid *gid, struct ib_gid_attr *attr,
			      unsigned long mask, bool default_gid)
{
	struct ib_gid_table *table;
	int ret = 0;
	int empty;
	int ix;

	/* Do not allow adding a zero GID, as required by
	 * IB spec version 1.3 section 4.1.1 point (6) and
	 * sections 12.7.10 and 12.7.20.
	 */
	if (!memcmp(gid, &zgid, sizeof(*gid)))
		return -EINVAL;

	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

	mutex_lock(&table->lock);

	ix = find_gid(table, gid, attr, default_gid, mask, &empty);
	if (ix >= 0)
		goto out_unlock;

	if (empty < 0) {
		ret = -ENOSPC;
		goto out_unlock;
	}
	attr->device = ib_dev;
	attr->index = empty;
	attr->port_num = port;
	ret = add_modify_gid(table, gid, attr);
	if (!ret)
		dispatch_gid_change_event(ib_dev, port);

out_unlock:
	mutex_unlock(&table->lock);
	if (ret)
		pr_warn("%s: unable to add gid %pI6 error=%d\n",
			__func__, gid->raw, ret);
	return ret;
}

int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
		     union ib_gid *gid, struct ib_gid_attr *attr)
{
	struct net_device *idev;
	unsigned long mask;
	int ret;

	if (ib_dev->get_netdev) {
		idev = ib_dev->get_netdev(ib_dev, port);
		if (idev && attr->ndev != idev) {
			union ib_gid default_gid;

			/* Adding default GIDs is not permitted */
			make_default_gid(idev, &default_gid);
			if (!memcmp(gid, &default_gid, sizeof(*gid))) {
				dev_put(idev);
				return -EPERM;
			}
		}
		if (idev)
			dev_put(idev);
	}

	mask = GID_ATTR_FIND_MASK_GID |
	       GID_ATTR_FIND_MASK_GID_TYPE |
	       GID_ATTR_FIND_MASK_NETDEV;

	ret = __ib_cache_gid_add(ib_dev, port, gid, attr, mask, false);
	return ret;
}

int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
		     union ib_gid *gid, struct ib_gid_attr *attr)
{
	struct ib_gid_table *table;
	int ret = 0;
	int ix;

	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

	mutex_lock(&table->lock);

	ix = find_gid(table, gid, attr, false,
		      GID_ATTR_FIND_MASK_GID	  |
		      GID_ATTR_FIND_MASK_GID_TYPE |
		      GID_ATTR_FIND_MASK_NETDEV,
		      NULL);
	if (ix < 0) {
		ret = -EINVAL;
		goto out_unlock;
	}

	del_gid(ib_dev, port, table, ix);
	dispatch_gid_change_event(ib_dev, port);

out_unlock:
	mutex_unlock(&table->lock);
	if (ret)
		pr_debug("%s: can't delete gid %pI6 error=%d\n",
			 __func__, gid->raw, ret);
	return ret;
}

int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
				     struct net_device *ndev)
{
	struct ib_gid_table *table;
	int ix;
	bool deleted = false;

	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

	mutex_lock(&table->lock);

	for (ix = 0; ix < table->sz; ix++) {
		if (table->data_vec[ix].attr.ndev == ndev) {
			del_gid(ib_dev, port, table, ix);
			deleted = true;
		}
	}

	mutex_unlock(&table->lock);

	if (deleted)
		dispatch_gid_change_event(ib_dev, port);

	return 0;
}

static int __ib_cache_gid_get(struct ib_device *ib_dev, u8 port, int index,
			      union ib_gid *gid, struct ib_gid_attr *attr)
{
	struct ib_gid_table *table;

	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

	if (index < 0 || index >= table->sz)
		return -EINVAL;

	if (table->data_vec[index].props & GID_TABLE_ENTRY_INVALID)
		return -EAGAIN;

	memcpy(gid, &table->data_vec[index].gid, sizeof(*gid));
	if (attr) {
		memcpy(attr, &table->data_vec[index].attr, sizeof(*attr));
		if (attr->ndev)
			dev_hold(attr->ndev);
	}

	return 0;
}

static int _ib_cache_gid_table_find(struct ib_device *ib_dev,
				    const union ib_gid *gid,
				    const struct ib_gid_attr *val,
				    unsigned long mask,
				    u8 *port, u16 *index)
{
	struct ib_gid_table *table;
	u8 p;
	int local_index;
	unsigned long flags;

	for (p = 0; p < ib_dev->phys_port_cnt; p++) {
		table = ib_dev->cache.ports[p].gid;
		read_lock_irqsave(&table->rwlock, flags);
		local_index = find_gid(table, gid, val, false, mask, NULL);
		if (local_index >= 0) {
			if (index)
				*index = local_index;
			if (port)
				*port = p + rdma_start_port(ib_dev);
			read_unlock_irqrestore(&table->rwlock, flags);
			return 0;
		}
		read_unlock_irqrestore(&table->rwlock, flags);
	}

	return -ENOENT;
}

static int ib_cache_gid_find(struct ib_device *ib_dev,
			     const union ib_gid *gid,
			     enum ib_gid_type gid_type,
			     struct net_device *ndev, u8 *port,
			     u16 *index)
{
	unsigned long mask = GID_ATTR_FIND_MASK_GID |
			     GID_ATTR_FIND_MASK_GID_TYPE;
	struct ib_gid_attr gid_attr_val = {.ndev = ndev, .gid_type = gid_type};

	if (ndev)
		mask |= GID_ATTR_FIND_MASK_NETDEV;

	return _ib_cache_gid_table_find(ib_dev, gid, &gid_attr_val,
					mask, port, index);
}

/**
 * ib_find_cached_gid_by_port - Returns the GID table index where a specified
 * GID value occurs. It searches for the specified GID value in the local
 * software cache.
 * @device: The device to query.
 * @gid: The GID value to search for.
 * @gid_type: The GID type to search for.
 * @port_num: The port number of the device where the GID value should be
 *   searched.
 * @ndev: In RoCE, the net device of the device. NULL means ignore.
 * @index: The index into the cached GID table where the GID was found. This
 *   parameter may be NULL.
 */
int ib_find_cached_gid_by_port(struct ib_device *ib_dev,
			       const union ib_gid *gid,
			       enum ib_gid_type gid_type,
			       u8 port, struct net_device *ndev,
			       u16 *index)
{
	int local_index;
	struct ib_gid_table *table;
	unsigned long mask = GID_ATTR_FIND_MASK_GID |
			     GID_ATTR_FIND_MASK_GID_TYPE;
	struct ib_gid_attr val = {.ndev = ndev, .gid_type = gid_type};
	unsigned long flags;

	if (!rdma_is_port_valid(ib_dev, port))
		return -ENOENT;

	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

	if (ndev)
		mask |= GID_ATTR_FIND_MASK_NETDEV;

	read_lock_irqsave(&table->rwlock, flags);
	local_index = find_gid(table, gid, &val, false, mask, NULL);
	if (local_index >= 0) {
		if (index)
			*index = local_index;
		read_unlock_irqrestore(&table->rwlock, flags);
		return 0;
	}

	read_unlock_irqrestore(&table->rwlock, flags);
	return -ENOENT;
}
EXPORT_SYMBOL(ib_find_cached_gid_by_port);
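
/*
 * Illustrative usage sketch (not part of the original file): resolving the
 * cached table index of a RoCE v2 GID on a known port, ignoring the netdev.
 * "device", "gid" and "port" are assumed to be provided by the caller.
 *
 *	u16 index;
 *
 *	if (!ib_find_cached_gid_by_port(device, &gid,
 *					IB_GID_TYPE_ROCE_UDP_ENCAP,
 *					port, NULL, &index))
 *		pr_info("GID cached at index %u\n", index);
 */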

/**
 * ib_cache_gid_find_by_filter - Returns the GID table index where a specified
 * GID value occurs
 * @device: The device to query.
 * @gid: The GID value to search for.
 * @port_num: The port number of the device where the GID value could be
 *   searched.
 * @filter: The filter function is executed on any matching GID in the table.
 *   If the filter function returns true, the corresponding index is returned,
 *   otherwise, we continue searching the GID table. It's guaranteed that
 *   while filter is executed, ndev field is valid and the structure won't
 *   change. filter is executed in an atomic context. filter must not be NULL.
 * @index: The index into the cached GID table where the GID was found. This
 *   parameter may be NULL.
 *
 * ib_cache_gid_find_by_filter() searches for the specified GID value
 * of which the filter function returns true in the port's GID table.
 * This function is only supported on RoCE ports.
 *
 */
static int ib_cache_gid_find_by_filter(struct ib_device *ib_dev,
				       const union ib_gid *gid,
				       u8 port,
				       bool (*filter)(const union ib_gid *,
						      const struct ib_gid_attr *,
						      void *),
				       void *context,
				       u16 *index)
{
	struct ib_gid_table *table;
	unsigned int i;
	unsigned long flags;
	bool found = false;

	if (!rdma_is_port_valid(ib_dev, port) ||
	    !rdma_protocol_roce(ib_dev, port))
		return -EPROTONOSUPPORT;

	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

	read_lock_irqsave(&table->rwlock, flags);
	for (i = 0; i < table->sz; i++) {
		struct ib_gid_attr attr;

		if (table->data_vec[i].props & GID_TABLE_ENTRY_INVALID)
			continue;

		if (memcmp(gid, &table->data_vec[i].gid, sizeof(*gid)))
			continue;

		memcpy(&attr, &table->data_vec[i].attr, sizeof(attr));

		if (filter(gid, &attr, context)) {
			found = true;
			if (index)
				*index = i;
			break;
		}
	}
	read_unlock_irqrestore(&table->rwlock, flags);

	if (!found)
		return -ENOENT;
	return 0;
}

static struct ib_gid_table *alloc_gid_table(int sz)
{
	struct ib_gid_table *table =
		kzalloc(sizeof(struct ib_gid_table), GFP_KERNEL);
	int i;

	if (!table)
		return NULL;

	table->data_vec = kcalloc(sz, sizeof(*table->data_vec), GFP_KERNEL);
	if (!table->data_vec)
		goto err_free_table;

	mutex_init(&table->lock);

	table->sz = sz;
	rwlock_init(&table->rwlock);

	/* Mark all entries as invalid so that the allocator can pick
	 * one of the invalid (free) entries.
	 */
	for (i = 0; i < sz; i++)
		table->data_vec[i].props |= GID_TABLE_ENTRY_INVALID;
	return table;

err_free_table:
	kfree(table);
	return NULL;
}

static void release_gid_table(struct ib_gid_table *table)
{
	if (table) {
		kfree(table->data_vec);
		kfree(table);
	}
}

static void cleanup_gid_table_port(struct ib_device *ib_dev, u8 port,
				   struct ib_gid_table *table)
{
	int i;
	bool deleted = false;

	if (!table)
		return;

	mutex_lock(&table->lock);
	for (i = 0; i < table->sz; ++i) {
		if (memcmp(&table->data_vec[i].gid, &zgid,
			   sizeof(table->data_vec[i].gid))) {
			del_gid(ib_dev, port, table, i);
			deleted = true;
		}
	}
	mutex_unlock(&table->lock);

	if (deleted)
		dispatch_gid_change_event(ib_dev, port);
}

void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
				  struct net_device *ndev,
				  unsigned long gid_type_mask,
				  enum ib_cache_gid_default_mode mode)
{
	union ib_gid gid;
	struct ib_gid_attr gid_attr;
	struct ib_gid_table *table;
	unsigned int gid_type;
	unsigned long mask;

	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

	make_default_gid(ndev, &gid);
	memset(&gid_attr, 0, sizeof(gid_attr));
	gid_attr.ndev = ndev;

	for (gid_type = 0; gid_type < IB_GID_TYPE_SIZE; ++gid_type) {
		if (1UL << gid_type & ~gid_type_mask)
			continue;

		gid_attr.gid_type = gid_type;

		if (mode == IB_CACHE_GID_DEFAULT_MODE_SET) {
			mask = GID_ATTR_FIND_MASK_GID_TYPE |
			       GID_ATTR_FIND_MASK_DEFAULT;
			__ib_cache_gid_add(ib_dev, port, &gid,
					   &gid_attr, mask, true);
		} else if (mode == IB_CACHE_GID_DEFAULT_MODE_DELETE) {
			ib_cache_gid_del(ib_dev, port, &gid, &gid_attr);
		}
	}
}

static int gid_table_reserve_default(struct ib_device *ib_dev, u8 port,
				     struct ib_gid_table *table)
{
	unsigned int i;
	unsigned long roce_gid_type_mask;
	unsigned int num_default_gids;
	unsigned int current_gid = 0;

	roce_gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
	num_default_gids = hweight_long(roce_gid_type_mask);
	for (i = 0; i < num_default_gids && i < table->sz; i++) {
		struct ib_gid_table_entry *entry =
			&table->data_vec[i];

		entry->props |= GID_TABLE_ENTRY_DEFAULT;
		current_gid = find_next_bit(&roce_gid_type_mask,
					    BITS_PER_LONG,
					    current_gid);
		entry->attr.gid_type = current_gid++;
	}

	return 0;
}

static int _gid_table_setup_one(struct ib_device *ib_dev)
{
	u8 port;
	struct ib_gid_table *table;
	int err = 0;

	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
		u8 rdma_port = port + rdma_start_port(ib_dev);

		table = alloc_gid_table(
				ib_dev->port_immutable[rdma_port].gid_tbl_len);
		if (!table) {
			err = -ENOMEM;
			goto rollback_table_setup;
		}

		err = gid_table_reserve_default(ib_dev,
						port + rdma_start_port(ib_dev),
						table);
		if (err)
			goto rollback_table_setup;
		ib_dev->cache.ports[port].gid = table;
	}

	return 0;

rollback_table_setup:
	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
		table = ib_dev->cache.ports[port].gid;

		cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
				       table);
		release_gid_table(table);
	}

	return err;
}

static void gid_table_release_one(struct ib_device *ib_dev)
{
	struct ib_gid_table *table;
	u8 port;

	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
		table = ib_dev->cache.ports[port].gid;
		release_gid_table(table);
		ib_dev->cache.ports[port].gid = NULL;
	}
}

static void gid_table_cleanup_one(struct ib_device *ib_dev)
{
	struct ib_gid_table *table;
	u8 port;

	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
		table = ib_dev->cache.ports[port].gid;
		cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
				       table);
	}
}

static int gid_table_setup_one(struct ib_device *ib_dev)
{
	int err;

	err = _gid_table_setup_one(ib_dev);

	if (err)
		return err;

	rdma_roce_rescan_device(ib_dev);

	return err;
}

int ib_get_cached_gid(struct ib_device *device,
		      u8                port_num,
		      int               index,
		      union ib_gid     *gid,
		      struct ib_gid_attr *gid_attr)
{
	int res;
	unsigned long flags;
	struct ib_gid_table *table;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	table = device->cache.ports[port_num - rdma_start_port(device)].gid;
	read_lock_irqsave(&table->rwlock, flags);
	res = __ib_cache_gid_get(device, port_num, index, gid, gid_attr);
	read_unlock_irqrestore(&table->rwlock, flags);

	return res;
}
EXPORT_SYMBOL(ib_get_cached_gid);
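
/*
 * Illustrative usage sketch (not part of the original file): reading entry 0
 * of a port's cached GID table. When gid_attr is non-NULL,
 * __ib_cache_gid_get() takes a reference on attr.ndev, which the caller
 * must drop.
 *
 *	union ib_gid gid;
 *	struct ib_gid_attr attr;
 *
 *	if (!ib_get_cached_gid(device, port_num, 0, &gid, &attr)) {
 *		pr_info("GID %pI6 type %s\n", gid.raw,
 *			ib_cache_gid_type_str(attr.gid_type));
 *		if (attr.ndev)
 *			dev_put(attr.ndev);
 *	}
 */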

/**
 * ib_find_cached_gid - Returns the port number and GID table index where
 *   a specified GID value occurs.
 * @device: The device to query.
 * @gid: The GID value to search for.
 * @gid_type: The GID type to search for.
 * @ndev: In RoCE, the net device of the device. NULL means ignore.
 * @port_num: The port number of the device where the GID value was found.
 * @index: The index into the cached GID table where the GID was found.  This
 *   parameter may be NULL.
 *
 * ib_find_cached_gid() searches for the specified GID value in
 * the local software cache.
 */
int ib_find_cached_gid(struct ib_device *device,
		       const union ib_gid *gid,
		       enum ib_gid_type gid_type,
		       struct net_device *ndev,
		       u8               *port_num,
		       u16              *index)
{
	return ib_cache_gid_find(device, gid, gid_type, ndev, port_num, index);
}
EXPORT_SYMBOL(ib_find_cached_gid);
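
/*
 * Illustrative usage sketch (not part of the original file): searching every
 * port of a device for an IB/RoCE v1 GID; "device" and "gid" are assumed to
 * be provided by the caller.
 *
 *	u8 port;
 *	u16 index;
 *
 *	if (!ib_find_cached_gid(device, &gid, IB_GID_TYPE_IB,
 *				NULL, &port, &index))
 *		pr_info("GID found on port %u at index %u\n", port, index);
 */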

int ib_find_gid_by_filter(struct ib_device *device,
			  const union ib_gid *gid,
			  u8 port_num,
			  bool (*filter)(const union ib_gid *gid,
					 const struct ib_gid_attr *,
					 void *),
			  void *context, u16 *index)
{
	/* Only RoCE GID table supports filter function */
	if (!rdma_protocol_roce(device, port_num) && filter)
		return -EPROTONOSUPPORT;

	return ib_cache_gid_find_by_filter(device, gid,
					   port_num, filter,
					   context, index);
}
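
/*
 * Illustrative usage sketch (not part of the original file): a filter that
 * matches only GIDs bound to a given netdev. The filter runs under the table
 * rwlock in atomic context, so it must not sleep.
 *
 *	static bool match_ndev(const union ib_gid *gid,
 *			       const struct ib_gid_attr *attr, void *context)
 *	{
 *		return attr->ndev == context;
 *	}
 *
 *	u16 index;
 *	int ret = ib_find_gid_by_filter(device, &gid, port,
 *					match_ndev, ndev, &index);
 */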

int ib_get_cached_pkey(struct ib_device *device,
		       u8                port_num,
		       int               index,
		       u16              *pkey)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int ret = 0;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;

	if (index < 0 || index >= cache->table_len)
		ret = -EINVAL;
	else
		*pkey = cache->table[index];

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_pkey);
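
/*
 * Illustrative usage sketch (not part of the original file): reading the
 * P_Key at table index 0 (which commonly holds the default P_Key 0xffff
 * on IB ports).
 *
 *	u16 pkey;
 *
 *	if (!ib_get_cached_pkey(device, port_num, 0, &pkey))
 *		pr_info("pkey[0] = 0x%04x\n", pkey);
 */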

int ib_get_cached_subnet_prefix(struct ib_device *device,
				u8                port_num,
				u64              *sn_pfx)
{
	unsigned long flags;
	int p;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	p = port_num - rdma_start_port(device);
	read_lock_irqsave(&device->cache.lock, flags);
	*sn_pfx = device->cache.ports[p].subnet_prefix;
	read_unlock_irqrestore(&device->cache.lock, flags);

	return 0;
}
EXPORT_SYMBOL(ib_get_cached_subnet_prefix);

int ib_find_cached_pkey(struct ib_device *device,
			u8                port_num,
			u16               pkey,
			u16              *index)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int i;
	int ret = -ENOENT;
	int partial_ix = -1;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;

	*index = -1;

	for (i = 0; i < cache->table_len; ++i)
		if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) {
			if (cache->table[i] & 0x8000) {
				*index = i;
				ret = 0;
				break;
			} else
				partial_ix = i;
		}

	if (ret && partial_ix >= 0) {
		*index = partial_ix;
		ret = 0;
	}

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_find_cached_pkey);
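
/*
 * Illustrative usage sketch (not part of the original file): note that
 * ib_find_cached_pkey() compares only the low 15 bits and prefers a
 * full-membership match (bit 15 set) over a partial-membership one.
 *
 *	u16 index;
 *
 *	if (!ib_find_cached_pkey(device, port_num, 0xffff, &index))
 *		pr_info("default pkey found at index %u\n", index);
 */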

int ib_find_exact_cached_pkey(struct ib_device *device,
			      u8                port_num,
			      u16               pkey,
			      u16              *index)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int i;
	int ret = -ENOENT;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;

	*index = -1;

	for (i = 0; i < cache->table_len; ++i)
		if (cache->table[i] == pkey) {
			*index = i;
			ret = 0;
			break;
		}

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_find_exact_cached_pkey);

int ib_get_cached_lmc(struct ib_device *device,
		      u8                port_num,
		      u8                *lmc)
{
	unsigned long flags;
	int ret = 0;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);
	*lmc = device->cache.ports[port_num - rdma_start_port(device)].lmc;
	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_lmc);

int ib_get_cached_port_state(struct ib_device   *device,
			     u8                  port_num,
			     enum ib_port_state *port_state)
{
	unsigned long flags;
	int ret = 0;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);
	*port_state = device->cache.ports[port_num
		- rdma_start_port(device)].port_state;
	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_port_state);

static int config_non_roce_gid_cache(struct ib_device *device,
				     u8 port, int gid_tbl_len)
{
	struct ib_gid_attr gid_attr = {};
	struct ib_gid_table *table;
	union ib_gid gid;
	int ret = 0;
	int i;

	gid_attr.device = device;
	gid_attr.port_num = port;
	table = device->cache.ports[port - rdma_start_port(device)].gid;

	mutex_lock(&table->lock);
	for (i = 0; i < gid_tbl_len; ++i) {
		if (!device->query_gid)
			continue;
		ret = device->query_gid(device, port, i, &gid);
		if (ret) {
			pr_warn("query_gid failed (%d) for %s (index %d)\n",
				ret, device->name, i);
			goto err;
		}
		gid_attr.index = i;
		add_modify_gid(table, &gid, &gid_attr);
	}
err:
	mutex_unlock(&table->lock);
	return ret;
}

static void ib_cache_update(struct ib_device *device,
			    u8                port,
			    bool	      enforce_security)
{
	struct ib_port_attr       *tprops = NULL;
	struct ib_pkey_cache      *pkey_cache = NULL, *old_pkey_cache;
	int                        i;
	int                        ret;
	struct ib_gid_table	  *table;

	if (!rdma_is_port_valid(device, port))
		return;

	table = device->cache.ports[port - rdma_start_port(device)].gid;

	tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
	if (!tprops)
		return;

	ret = ib_query_port(device, port, tprops);
	if (ret) {
		pr_warn("ib_query_port failed (%d) for %s\n",
			ret, device->name);
		goto err;
	}

	if (!rdma_protocol_roce(device, port)) {
		ret = config_non_roce_gid_cache(device, port,
						tprops->gid_tbl_len);
		if (ret)
			goto err;
	}

	pkey_cache = kmalloc(sizeof *pkey_cache + tprops->pkey_tbl_len *
			     sizeof *pkey_cache->table, GFP_KERNEL);
	if (!pkey_cache)
		goto err;

	pkey_cache->table_len = tprops->pkey_tbl_len;

	for (i = 0; i < pkey_cache->table_len; ++i) {
		ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
		if (ret) {
			pr_warn("ib_query_pkey failed (%d) for %s (index %d)\n",
				ret, device->name, i);
			goto err;
		}
	}

	write_lock_irq(&device->cache.lock);

	old_pkey_cache = device->cache.ports[port -
		rdma_start_port(device)].pkey;

	device->cache.ports[port - rdma_start_port(device)].pkey = pkey_cache;
	device->cache.ports[port - rdma_start_port(device)].lmc = tprops->lmc;
	device->cache.ports[port - rdma_start_port(device)].port_state =
		tprops->state;

	device->cache.ports[port - rdma_start_port(device)].subnet_prefix =
							tprops->subnet_prefix;
	write_unlock_irq(&device->cache.lock);

	if (enforce_security)
		ib_security_cache_change(device,
					 port,
					 tprops->subnet_prefix);

	kfree(old_pkey_cache);
	kfree(tprops);
	return;

err:
	kfree(pkey_cache);
	kfree(tprops);
}

static void ib_cache_task(struct work_struct *_work)
{
	struct ib_update_work *work =
		container_of(_work, struct ib_update_work, work);

	ib_cache_update(work->device,
			work->port_num,
			work->enforce_security);
	kfree(work);
}

static void ib_cache_event(struct ib_event_handler *handler,
			   struct ib_event *event)
{
	struct ib_update_work *work;

	if (event->event == IB_EVENT_PORT_ERR    ||
	    event->event == IB_EVENT_PORT_ACTIVE ||
	    event->event == IB_EVENT_LID_CHANGE  ||
	    event->event == IB_EVENT_PKEY_CHANGE ||
	    event->event == IB_EVENT_SM_CHANGE   ||
	    event->event == IB_EVENT_CLIENT_REREGISTER ||
	    event->event == IB_EVENT_GID_CHANGE) {
		work = kmalloc(sizeof *work, GFP_ATOMIC);
		if (work) {
			INIT_WORK(&work->work, ib_cache_task);
			work->device   = event->device;
			work->port_num = event->element.port_num;
			if (event->event == IB_EVENT_PKEY_CHANGE ||
			    event->event == IB_EVENT_GID_CHANGE)
				work->enforce_security = true;
			else
				work->enforce_security = false;

			queue_work(ib_wq, &work->work);
		}
	}
}

int ib_cache_setup_one(struct ib_device *device)
{
	int p;
	int err;

	rwlock_init(&device->cache.lock);

	device->cache.ports =
		kzalloc(sizeof(*device->cache.ports) *
			(rdma_end_port(device) - rdma_start_port(device) + 1), GFP_KERNEL);
	if (!device->cache.ports)
		return -ENOMEM;

	err = gid_table_setup_one(device);
	if (err) {
		kfree(device->cache.ports);
		device->cache.ports = NULL;
		return err;
	}

	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
		ib_cache_update(device, p + rdma_start_port(device), true);

	INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
			      device, ib_cache_event);
	ib_register_event_handler(&device->cache.event_handler);
	return 0;
}

void ib_cache_release_one(struct ib_device *device)
{
	int p;

	/*
	 * The release function frees all the cache elements.
	 * This function should be called as part of freeing
	 * all the device's resources when the cache could no
	 * longer be accessed.
	 */
	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
		kfree(device->cache.ports[p].pkey);

	gid_table_release_one(device);
	kfree(device->cache.ports);
}

void ib_cache_cleanup_one(struct ib_device *device)
{
	/* The cleanup function unregisters the event handler,
	 * waits for all in-progress workqueue elements and cleans
	 * up the GID cache. This function should be called after
	 * the device was removed from the devices list and all
	 * clients were removed, so the cache exists but is
	 * non-functional and shouldn't be updated anymore.
	 */
	ib_unregister_event_handler(&device->cache.event_handler);
	flush_workqueue(ib_wq);
	gid_table_cleanup_one(device);
}

void __init ib_cache_setup(void)
{
	roce_gid_mgmt_init();
}

void __exit ib_cache_cleanup(void)
{
	roce_gid_mgmt_cleanup();
}