/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <net/addrconf.h>

#include <rdma/ib_cache.h>

#include "core_priv.h"

struct ib_pkey_cache {
	int             table_len;
	u16             table[0];
};

struct ib_update_work {
	struct work_struct work;
	struct ib_device  *device;
	u8                 port_num;
};

union ib_gid zgid;
EXPORT_SYMBOL(zgid);

static const struct ib_gid_attr zattr;

enum gid_attr_find_mask {
	GID_ATTR_FIND_MASK_GID          = 1UL << 0,
	GID_ATTR_FIND_MASK_NETDEV	= 1UL << 1,
	GID_ATTR_FIND_MASK_DEFAULT	= 1UL << 2,
	GID_ATTR_FIND_MASK_GID_TYPE	= 1UL << 3,
};

enum gid_table_entry_props {
	GID_TABLE_ENTRY_INVALID		= 1UL << 0,
	GID_TABLE_ENTRY_DEFAULT		= 1UL << 1,
};

enum gid_table_write_action {
	GID_TABLE_WRITE_ACTION_ADD,
	GID_TABLE_WRITE_ACTION_DEL,
	/* MODIFY only updates the GID table. Currently only used by
	 * ib_cache_update.
	 */
	GID_TABLE_WRITE_ACTION_MODIFY
};

struct ib_gid_table_entry {
	unsigned long	    props;
	union ib_gid        gid;
	struct ib_gid_attr  attr;
	void		   *context;
};

struct ib_gid_table {
	int                  sz;
	/* In RoCE, adding a GID to the table requires:
	 * (a) Find if this GID already exists.
	 * (b) Find a free space.
	 * (c) Write the new GID
	 *
	 * Delete requires different set of operations:
	 * (a) Find the GID
	 * (b) Delete it.
	 *
	 * Add/delete should be carried out atomically.
	 * This is done by locking this mutex from multiple
	 * writers. We don't need this lock for IB, as the MAD
	 * layer replaces all entries. All data_vec entries
	 * are locked by this lock.
	 */
	struct mutex         lock;
	/* This lock protects the table entries from being
	 * read and written simultaneously.
	 */
	rwlock_t	     rwlock;
	struct ib_gid_table_entry *data_vec;
};
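
/*
 * Illustrative sketch (not a separate API) of the write-side locking
 * order described above, as used by the RoCE add/del paths below:
 *
 *	mutex_lock(&table->lock);		serialize writers
 *	write_lock_irq(&table->rwlock);		exclude readers
 *	ix = find_gid(...);			find duplicate/free slot
 *	add_gid(...) or del_gid(...);		may drop rwlock to sleep
 *	write_unlock_irq(&table->rwlock);
 *	mutex_unlock(&table->lock);
 */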

static void dispatch_gid_change_event(struct ib_device *ib_dev, u8 port)
{
	if (rdma_cap_roce_gid_table(ib_dev, port)) {
		struct ib_event event;

		event.device		= ib_dev;
		event.element.port_num	= port;
		event.event		= IB_EVENT_GID_CHANGE;

		ib_dispatch_event(&event);
	}
}

static const char * const gid_type_str[] = {
	[IB_GID_TYPE_IB]	= "IB/RoCE v1",
	[IB_GID_TYPE_ROCE_UDP_ENCAP]	= "RoCE v2",
};

const char *ib_cache_gid_type_str(enum ib_gid_type gid_type)
{
	if (gid_type < ARRAY_SIZE(gid_type_str) && gid_type_str[gid_type])
		return gid_type_str[gid_type];

	return "Invalid GID type";
}
EXPORT_SYMBOL(ib_cache_gid_type_str);

int ib_cache_gid_parse_type_str(const char *buf)
{
	unsigned int i;
	size_t len;
	int err = -EINVAL;

	len = strlen(buf);
	if (len == 0)
		return -EINVAL;

	if (buf[len - 1] == '\n')
		len--;

	for (i = 0; i < ARRAY_SIZE(gid_type_str); ++i)
		if (gid_type_str[i] && !strncmp(buf, gid_type_str[i], len) &&
		    len == strlen(gid_type_str[i])) {
			err = i;
			break;
		}

	return err;
}
EXPORT_SYMBOL(ib_cache_gid_parse_type_str);
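
/*
 * Example (hypothetical caller such as a sysfs store handler) of
 * parsing a user-supplied GID type string; a trailing newline is
 * tolerated:
 *
 *	int gid_type = ib_cache_gid_parse_type_str("RoCE v2\n");
 *
 *	if (gid_type < 0)
 *		return gid_type;	(-EINVAL for an unknown string)
 */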

/* This function expects that rwlock is held for writing in all cases
 * and that the table mutex (lock) is additionally held in the
 * sleepable (RoCE) cases.
 */
static int write_gid(struct ib_device *ib_dev, u8 port,
		     struct ib_gid_table *table, int ix,
		     const union ib_gid *gid,
		     const struct ib_gid_attr *attr,
		     enum gid_table_write_action action,
		     bool  default_gid)
	__releases(&table->rwlock) __acquires(&table->rwlock)
{
	int ret = 0;
	struct net_device *old_net_dev;

	/* In the rdma_cap_roce_gid_table() case, this function must be
	 * protected by a sleepable lock (the table mutex).
	 */

	if (rdma_cap_roce_gid_table(ib_dev, port)) {
		table->data_vec[ix].props |= GID_TABLE_ENTRY_INVALID;
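		/* The entry was just marked invalid, so readers skip it
		 * while the rwlock is dropped around the sleepable
		 * provider callbacks below.
		 */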
		write_unlock_irq(&table->rwlock);
		/* GID_TABLE_WRITE_ACTION_MODIFY currently isn't supported by
		 * RoCE providers and thus only updates the cache.
		 */
		if (action == GID_TABLE_WRITE_ACTION_ADD)
			ret = ib_dev->add_gid(ib_dev, port, ix, gid, attr,
					      &table->data_vec[ix].context);
		else if (action == GID_TABLE_WRITE_ACTION_DEL)
			ret = ib_dev->del_gid(ib_dev, port, ix,
					      &table->data_vec[ix].context);
		write_lock_irq(&table->rwlock);
	}

	old_net_dev = table->data_vec[ix].attr.ndev;
	if (old_net_dev && old_net_dev != attr->ndev)
		dev_put(old_net_dev);
	/* if modify_gid failed, just delete the old gid */
	if (ret || action == GID_TABLE_WRITE_ACTION_DEL) {
		gid = &zgid;
		attr = &zattr;
		table->data_vec[ix].context = NULL;
	}
	if (default_gid)
		table->data_vec[ix].props |= GID_TABLE_ENTRY_DEFAULT;
	memcpy(&table->data_vec[ix].gid, gid, sizeof(*gid));
	memcpy(&table->data_vec[ix].attr, attr, sizeof(*attr));
	if (table->data_vec[ix].attr.ndev &&
	    table->data_vec[ix].attr.ndev != old_net_dev)
		dev_hold(table->data_vec[ix].attr.ndev);

	table->data_vec[ix].props &= ~GID_TABLE_ENTRY_INVALID;

	return ret;
}

static int add_gid(struct ib_device *ib_dev, u8 port,
		   struct ib_gid_table *table, int ix,
		   const union ib_gid *gid,
		   const struct ib_gid_attr *attr,
		   bool  default_gid) {
	return write_gid(ib_dev, port, table, ix, gid, attr,
			 GID_TABLE_WRITE_ACTION_ADD, default_gid);
}

static int modify_gid(struct ib_device *ib_dev, u8 port,
		      struct ib_gid_table *table, int ix,
		      const union ib_gid *gid,
		      const struct ib_gid_attr *attr,
		      bool  default_gid) {
	return write_gid(ib_dev, port, table, ix, gid, attr,
			 GID_TABLE_WRITE_ACTION_MODIFY, default_gid);
}

static int del_gid(struct ib_device *ib_dev, u8 port,
		   struct ib_gid_table *table, int ix,
		   bool  default_gid) {
	return write_gid(ib_dev, port, table, ix, &zgid, &zattr,
			 GID_TABLE_WRITE_ACTION_DEL, default_gid);
}

/* rwlock should be read locked */
static int find_gid(struct ib_gid_table *table, const union ib_gid *gid,
		    const struct ib_gid_attr *val, bool default_gid,
		    unsigned long mask, int *pempty)
{
	int i = 0;
	int found = -1;
	int empty = pempty ? -1 : 0;

	while (i < table->sz && (found < 0 || empty < 0)) {
		struct ib_gid_table_entry *data = &table->data_vec[i];
		struct ib_gid_attr *attr = &data->attr;
		int curr_index = i;

		i++;

		if (data->props & GID_TABLE_ENTRY_INVALID)
			continue;

		if (empty < 0)
			if (!memcmp(&data->gid, &zgid, sizeof(*gid)) &&
			    !memcmp(attr, &zattr, sizeof(*attr)) &&
			    !data->props)
				empty = curr_index;

		if (found >= 0)
			continue;

		if (mask & GID_ATTR_FIND_MASK_GID_TYPE &&
		    attr->gid_type != val->gid_type)
			continue;

		if (mask & GID_ATTR_FIND_MASK_GID &&
		    memcmp(gid, &data->gid, sizeof(*gid)))
			continue;

		if (mask & GID_ATTR_FIND_MASK_NETDEV &&
		    attr->ndev != val->ndev)
			continue;

		if (mask & GID_ATTR_FIND_MASK_DEFAULT &&
		    !!(data->props & GID_TABLE_ENTRY_DEFAULT) !=
		    default_gid)
			continue;

		found = curr_index;
	}

	if (pempty)
		*pempty = empty;

	return found;
}
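
/*
 * Callers combine GID_ATTR_FIND_MASK_* bits to pick which fields must
 * match. For instance, the add path below looks a GID up by value, GID
 * type and netdev while also learning the first free slot:
 *
 *	ix = find_gid(table, gid, attr, false, GID_ATTR_FIND_MASK_GID |
 *		      GID_ATTR_FIND_MASK_GID_TYPE |
 *		      GID_ATTR_FIND_MASK_NETDEV, &empty);
 */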

static void make_default_gid(struct  net_device *dev, union ib_gid *gid)
{
	gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
	addrconf_ifid_eui48(&gid->raw[8], dev);
}

int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
		     union ib_gid *gid, struct ib_gid_attr *attr)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	int ix;
	int ret = 0;
	struct net_device *idev;
	int empty;

	table = ports_table[port - rdma_start_port(ib_dev)];

	if (!memcmp(gid, &zgid, sizeof(*gid)))
		return -EINVAL;

	if (ib_dev->get_netdev) {
		idev = ib_dev->get_netdev(ib_dev, port);
		if (idev && attr->ndev != idev) {
			union ib_gid default_gid;

			/* Adding default GIDs is not permitted */
			make_default_gid(idev, &default_gid);
			if (!memcmp(gid, &default_gid, sizeof(*gid))) {
				dev_put(idev);
				return -EPERM;
			}
		}
		if (idev)
			dev_put(idev);
	}

	mutex_lock(&table->lock);
	write_lock_irq(&table->rwlock);

	ix = find_gid(table, gid, attr, false, GID_ATTR_FIND_MASK_GID |
		      GID_ATTR_FIND_MASK_GID_TYPE |
		      GID_ATTR_FIND_MASK_NETDEV, &empty);
	if (ix >= 0)
		goto out_unlock;

	if (empty < 0) {
		ret = -ENOSPC;
		goto out_unlock;
	}

	ret = add_gid(ib_dev, port, table, empty, gid, attr, false);
	if (!ret)
		dispatch_gid_change_event(ib_dev, port);

out_unlock:
	write_unlock_irq(&table->rwlock);
	mutex_unlock(&table->lock);
	return ret;
}

int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
		     union ib_gid *gid, struct ib_gid_attr *attr)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	int ix;

	table = ports_table[port - rdma_start_port(ib_dev)];

	mutex_lock(&table->lock);
	write_lock_irq(&table->rwlock);

	ix = find_gid(table, gid, attr, false,
		      GID_ATTR_FIND_MASK_GID	  |
		      GID_ATTR_FIND_MASK_GID_TYPE |
		      GID_ATTR_FIND_MASK_NETDEV	  |
		      GID_ATTR_FIND_MASK_DEFAULT,
		      NULL);
	if (ix < 0)
		goto out_unlock;

	if (!del_gid(ib_dev, port, table, ix, false))
		dispatch_gid_change_event(ib_dev, port);

out_unlock:
	write_unlock_irq(&table->rwlock);
	mutex_unlock(&table->lock);
	return 0;
}

int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
				     struct net_device *ndev)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	int ix;
	bool deleted = false;

	table  = ports_table[port - rdma_start_port(ib_dev)];

	mutex_lock(&table->lock);
	write_lock_irq(&table->rwlock);

	for (ix = 0; ix < table->sz; ix++)
		if (table->data_vec[ix].attr.ndev == ndev)
			if (!del_gid(ib_dev, port, table, ix, false))
				deleted = true;

	write_unlock_irq(&table->rwlock);
	mutex_unlock(&table->lock);

	if (deleted)
		dispatch_gid_change_event(ib_dev, port);

	return 0;
}

static int __ib_cache_gid_get(struct ib_device *ib_dev, u8 port, int index,
			      union ib_gid *gid, struct ib_gid_attr *attr)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;

	table = ports_table[port - rdma_start_port(ib_dev)];

	if (index < 0 || index >= table->sz)
		return -EINVAL;

	if (table->data_vec[index].props & GID_TABLE_ENTRY_INVALID)
		return -EAGAIN;

	memcpy(gid, &table->data_vec[index].gid, sizeof(*gid));
	if (attr) {
		memcpy(attr, &table->data_vec[index].attr, sizeof(*attr));
		if (attr->ndev)
			dev_hold(attr->ndev);
	}

	return 0;
}

static int _ib_cache_gid_table_find(struct ib_device *ib_dev,
				    const union ib_gid *gid,
				    const struct ib_gid_attr *val,
				    unsigned long mask,
				    u8 *port, u16 *index)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	u8 p;
	int local_index;
	unsigned long flags;

	for (p = 0; p < ib_dev->phys_port_cnt; p++) {
		table = ports_table[p];
		read_lock_irqsave(&table->rwlock, flags);
		local_index = find_gid(table, gid, val, false, mask, NULL);
		if (local_index >= 0) {
			if (index)
				*index = local_index;
			if (port)
				*port = p + rdma_start_port(ib_dev);
			read_unlock_irqrestore(&table->rwlock, flags);
			return 0;
		}
		read_unlock_irqrestore(&table->rwlock, flags);
	}

	return -ENOENT;
}

static int ib_cache_gid_find(struct ib_device *ib_dev,
			     const union ib_gid *gid,
			     enum ib_gid_type gid_type,
			     struct net_device *ndev, u8 *port,
			     u16 *index)
{
	unsigned long mask = GID_ATTR_FIND_MASK_GID |
			     GID_ATTR_FIND_MASK_GID_TYPE;
	struct ib_gid_attr gid_attr_val = {.ndev = ndev, .gid_type = gid_type};

	if (ndev)
		mask |= GID_ATTR_FIND_MASK_NETDEV;

	return _ib_cache_gid_table_find(ib_dev, gid, &gid_attr_val,
					mask, port, index);
}

int ib_find_cached_gid_by_port(struct ib_device *ib_dev,
			       const union ib_gid *gid,
			       enum ib_gid_type gid_type,
			       u8 port, struct net_device *ndev,
			       u16 *index)
{
	int local_index;
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	unsigned long mask = GID_ATTR_FIND_MASK_GID |
			     GID_ATTR_FIND_MASK_GID_TYPE;
	struct ib_gid_attr val = {.ndev = ndev, .gid_type = gid_type};
	unsigned long flags;

	if (port < rdma_start_port(ib_dev) ||
	    port > rdma_end_port(ib_dev))
		return -ENOENT;

	table = ports_table[port - rdma_start_port(ib_dev)];

	if (ndev)
		mask |= GID_ATTR_FIND_MASK_NETDEV;

	read_lock_irqsave(&table->rwlock, flags);
	local_index = find_gid(table, gid, &val, false, mask, NULL);
	if (local_index >= 0) {
		if (index)
			*index = local_index;
		read_unlock_irqrestore(&table->rwlock, flags);
		return 0;
	}

	read_unlock_irqrestore(&table->rwlock, flags);
	return -ENOENT;
}
EXPORT_SYMBOL(ib_find_cached_gid_by_port);

/**
 * ib_cache_gid_find_by_filter - Returns the GID table index where a specified
 * GID value occurs
 * @device: The device to query.
 * @gid: The GID value to search for.
 * @port_num: The port number of the device where the GID value could be
 *   searched.
 * @filter: The filter function is executed on any matching GID in the table.
 *   If the filter function returns true, the corresponding index is returned,
 *   otherwise, we continue searching the GID table. It's guaranteed that
 *   while filter is executed, ndev field is valid and the structure won't
 *   change. filter is executed in an atomic context. filter must not be NULL.
 * @index: The index into the cached GID table where the GID was found.  This
 *   parameter may be NULL.
 *
 * ib_cache_gid_find_by_filter() searches for the specified GID value
 * of which the filter function returns true in the port's GID table.
 * This function is only supported on RoCE ports.
 *
 */
static int ib_cache_gid_find_by_filter(struct ib_device *ib_dev,
				       const union ib_gid *gid,
				       u8 port,
				       bool (*filter)(const union ib_gid *,
						      const struct ib_gid_attr *,
						      void *),
				       void *context,
				       u16 *index)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	unsigned int i;
	unsigned long flags;
	bool found = false;

	if (!ports_table)
		return -EOPNOTSUPP;

	if (port < rdma_start_port(ib_dev) ||
	    port > rdma_end_port(ib_dev) ||
	    !rdma_protocol_roce(ib_dev, port))
		return -EPROTONOSUPPORT;

	table = ports_table[port - rdma_start_port(ib_dev)];

	read_lock_irqsave(&table->rwlock, flags);
	for (i = 0; i < table->sz; i++) {
		struct ib_gid_attr attr;

		if (table->data_vec[i].props & GID_TABLE_ENTRY_INVALID)
			goto next;

		if (memcmp(gid, &table->data_vec[i].gid, sizeof(*gid)))
			goto next;

		memcpy(&attr, &table->data_vec[i].attr, sizeof(attr));

		if (filter(gid, &attr, context))
			found = true;

next:
		if (found)
			break;
	}
	read_unlock_irqrestore(&table->rwlock, flags);

	if (!found)
		return -ENOENT;

	if (index)
		*index = i;
	return 0;
}

static struct ib_gid_table *alloc_gid_table(int sz)
{
	struct ib_gid_table *table =
		kzalloc(sizeof(struct ib_gid_table), GFP_KERNEL);

	if (!table)
		return NULL;

	table->data_vec = kcalloc(sz, sizeof(*table->data_vec), GFP_KERNEL);
	if (!table->data_vec)
		goto err_free_table;

	mutex_init(&table->lock);

	table->sz = sz;
	rwlock_init(&table->rwlock);

	return table;

err_free_table:
	kfree(table);
	return NULL;
}

static void release_gid_table(struct ib_gid_table *table)
{
	if (table) {
		kfree(table->data_vec);
		kfree(table);
	}
}

static void cleanup_gid_table_port(struct ib_device *ib_dev, u8 port,
				   struct ib_gid_table *table)
{
	int i;
	bool deleted = false;

	if (!table)
		return;

	write_lock_irq(&table->rwlock);
	for (i = 0; i < table->sz; ++i) {
		if (memcmp(&table->data_vec[i].gid, &zgid,
			   sizeof(table->data_vec[i].gid)))
			if (!del_gid(ib_dev, port, table, i,
				     table->data_vec[i].props &
				     GID_TABLE_ENTRY_DEFAULT))
				deleted = true;
	}
	write_unlock_irq(&table->rwlock);

	if (deleted)
		dispatch_gid_change_event(ib_dev, port);
}

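/* For each GID type enabled in gid_type_mask, install ndev's link-local
 * default GID in its reserved slot (IB_CACHE_GID_DEFAULT_MODE_SET) or
 * remove a stale one (IB_CACHE_GID_DEFAULT_MODE_DELETE).
 */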
void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
				  struct net_device *ndev,
				  unsigned long gid_type_mask,
				  enum ib_cache_gid_default_mode mode)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	union ib_gid gid;
	struct ib_gid_attr gid_attr;
	struct ib_gid_attr zattr_type = zattr;
	struct ib_gid_table *table;
	unsigned int gid_type;

	table  = ports_table[port - rdma_start_port(ib_dev)];

	make_default_gid(ndev, &gid);
	memset(&gid_attr, 0, sizeof(gid_attr));
	gid_attr.ndev = ndev;

	for (gid_type = 0; gid_type < IB_GID_TYPE_SIZE; ++gid_type) {
		int ix;
		union ib_gid current_gid;
		struct ib_gid_attr current_gid_attr = {};

		if (1UL << gid_type & ~gid_type_mask)
			continue;

		gid_attr.gid_type = gid_type;

		mutex_lock(&table->lock);
		write_lock_irq(&table->rwlock);
		ix = find_gid(table, NULL, &gid_attr, true,
			      GID_ATTR_FIND_MASK_GID_TYPE |
			      GID_ATTR_FIND_MASK_DEFAULT,
			      NULL);

		/* Couldn't find the reserved default GID slot */
		if (WARN_ON(ix < 0))
			goto release;

		zattr_type.gid_type = gid_type;

		if (!__ib_cache_gid_get(ib_dev, port, ix,
					&current_gid, &current_gid_attr) &&
		    mode == IB_CACHE_GID_DEFAULT_MODE_SET &&
		    !memcmp(&gid, &current_gid, sizeof(gid)) &&
		    !memcmp(&gid_attr, &current_gid_attr, sizeof(gid_attr)))
			goto release;

		if (memcmp(&current_gid, &zgid, sizeof(current_gid)) ||
		    memcmp(&current_gid_attr, &zattr_type,
			   sizeof(current_gid_attr))) {
			if (del_gid(ib_dev, port, table, ix, true)) {
				pr_warn("ib_cache_gid: can't delete index %d for default gid %pI6\n",
					ix, gid.raw);
				goto release;
			} else {
				dispatch_gid_change_event(ib_dev, port);
			}
		}

		if (mode == IB_CACHE_GID_DEFAULT_MODE_SET) {
			if (add_gid(ib_dev, port, table, ix, &gid, &gid_attr, true))
				pr_warn("ib_cache_gid: unable to add default gid %pI6\n",
					gid.raw);
			else
				dispatch_gid_change_event(ib_dev, port);
		}

release:
		if (current_gid_attr.ndev)
			dev_put(current_gid_attr.ndev);
		write_unlock_irq(&table->rwlock);
		mutex_unlock(&table->lock);
	}
}

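/* Reserve the first hweight(roce_gid_type_mask) table entries as
 * default-GID slots, one for each supported RoCE GID type.
 */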
static int gid_table_reserve_default(struct ib_device *ib_dev, u8 port,
				     struct ib_gid_table *table)
{
	unsigned int i;
	unsigned long roce_gid_type_mask;
	unsigned int num_default_gids;
	unsigned int current_gid = 0;

	roce_gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
	num_default_gids = hweight_long(roce_gid_type_mask);
	for (i = 0; i < num_default_gids && i < table->sz; i++) {
		struct ib_gid_table_entry *entry =
			&table->data_vec[i];

		entry->props |= GID_TABLE_ENTRY_DEFAULT;
		current_gid = find_next_bit(&roce_gid_type_mask,
					    BITS_PER_LONG,
					    current_gid);
		entry->attr.gid_type = current_gid++;
	}

	return 0;
}

static int _gid_table_setup_one(struct ib_device *ib_dev)
{
	u8 port;
	struct ib_gid_table **table;
	int err = 0;

	table = kcalloc(ib_dev->phys_port_cnt, sizeof(*table), GFP_KERNEL);

	if (!table) {
		pr_warn("failed to allocate ib gid cache for %s\n",
			ib_dev->name);
		return -ENOMEM;
	}

	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
		u8 rdma_port = port + rdma_start_port(ib_dev);

		table[port] =
			alloc_gid_table(
				ib_dev->port_immutable[rdma_port].gid_tbl_len);
		if (!table[port]) {
			err = -ENOMEM;
			goto rollback_table_setup;
		}

		err = gid_table_reserve_default(ib_dev,
						port + rdma_start_port(ib_dev),
						table[port]);
		if (err)
			goto rollback_table_setup;
	}

	ib_dev->cache.gid_cache = table;
	return 0;

rollback_table_setup:
	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
		cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
				       table[port]);
		release_gid_table(table[port]);
	}

	kfree(table);
	return err;
}

static void gid_table_release_one(struct ib_device *ib_dev)
{
	struct ib_gid_table **table = ib_dev->cache.gid_cache;
	u8 port;

	if (!table)
		return;

	for (port = 0; port < ib_dev->phys_port_cnt; port++)
		release_gid_table(table[port]);

	kfree(table);
	ib_dev->cache.gid_cache = NULL;
}

static void gid_table_cleanup_one(struct ib_device *ib_dev)
{
	struct ib_gid_table **table = ib_dev->cache.gid_cache;
	u8 port;

	if (!table)
		return;

	for (port = 0; port < ib_dev->phys_port_cnt; port++)
		cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
				       table[port]);
}

static int gid_table_setup_one(struct ib_device *ib_dev)
{
	int err;

	err = _gid_table_setup_one(ib_dev);

	if (err)
		return err;

	err = roce_rescan_device(ib_dev);

	if (err) {
		gid_table_cleanup_one(ib_dev);
		gid_table_release_one(ib_dev);
	}

	return err;
}

int ib_get_cached_gid(struct ib_device *device,
		      u8                port_num,
		      int               index,
		      union ib_gid     *gid,
		      struct ib_gid_attr *gid_attr)
{
	int res;
	unsigned long flags;
	struct ib_gid_table **ports_table = device->cache.gid_cache;
	struct ib_gid_table *table;

	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	table = ports_table[port_num - rdma_start_port(device)];

	read_lock_irqsave(&table->rwlock, flags);
	res = __ib_cache_gid_get(device, port_num, index, gid, gid_attr);
	read_unlock_irqrestore(&table->rwlock, flags);

	return res;
}
EXPORT_SYMBOL(ib_get_cached_gid);
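
/*
 * Illustrative (hypothetical) caller: read GID index 0 of port 1 and
 * drop the netdev reference the cache takes on the caller's behalf:
 *
 *	union ib_gid gid;
 *	struct ib_gid_attr attr;
 *
 *	if (!ib_get_cached_gid(device, 1, 0, &gid, &attr) && attr.ndev)
 *		dev_put(attr.ndev);
 */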

int ib_find_cached_gid(struct ib_device *device,
		       const union ib_gid *gid,
		       enum ib_gid_type gid_type,
		       struct net_device *ndev,
		       u8               *port_num,
		       u16              *index)
{
	return ib_cache_gid_find(device, gid, gid_type, ndev, port_num, index);
}
EXPORT_SYMBOL(ib_find_cached_gid);

int ib_find_gid_by_filter(struct ib_device *device,
			  const union ib_gid *gid,
			  u8 port_num,
			  bool (*filter)(const union ib_gid *gid,
					 const struct ib_gid_attr *,
					 void *),
			  void *context, u16 *index)
{
	/* Only RoCE GID table supports filter function */
	if (!rdma_cap_roce_gid_table(device, port_num) && filter)
		return -EPROTONOSUPPORT;

	return ib_cache_gid_find_by_filter(device, gid,
					   port_num, filter,
					   context, index);
}
EXPORT_SYMBOL(ib_find_gid_by_filter);
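
/*
 * Illustrative (hypothetical) filter that accepts only RoCE v2 entries:
 *
 *	static bool match_roce_v2(const union ib_gid *gid,
 *				  const struct ib_gid_attr *attr, void *ctx)
 *	{
 *		return attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
 *	}
 *
 *	err = ib_find_gid_by_filter(device, gid, port, match_roce_v2,
 *				    NULL, &index);
 */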

int ib_get_cached_pkey(struct ib_device *device,
		       u8                port_num,
		       int               index,
		       u16              *pkey)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int ret = 0;

	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

	if (index < 0 || index >= cache->table_len)
		ret = -EINVAL;
	else
		*pkey = cache->table[index];

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_pkey);

int ib_find_cached_pkey(struct ib_device *device,
			u8                port_num,
			u16               pkey,
			u16              *index)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int i;
	int ret = -ENOENT;
	int partial_ix = -1;

	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

	*index = -1;

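	/*
	 * Bit 15 of a P_Key is the membership bit (1 = full member).
	 * Compare only the low 15 bits and prefer a full-member entry,
	 * remembering the first partial-member match as a fallback.
	 */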
	for (i = 0; i < cache->table_len; ++i)
		if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) {
			if (cache->table[i] & 0x8000) {
				*index = i;
				ret = 0;
				break;
			} else
				partial_ix = i;
		}

	if (ret && partial_ix >= 0) {
		*index = partial_ix;
		ret = 0;
	}

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_find_cached_pkey);

int ib_find_exact_cached_pkey(struct ib_device *device,
			      u8                port_num,
			      u16               pkey,
			      u16              *index)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int i;
	int ret = -ENOENT;

	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

	*index = -1;

	for (i = 0; i < cache->table_len; ++i)
		if (cache->table[i] == pkey) {
			*index = i;
			ret = 0;
			break;
		}

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_find_exact_cached_pkey);

int ib_get_cached_lmc(struct ib_device *device,
		      u8                port_num,
		      u8                *lmc)
{
	unsigned long flags;
	int ret = 0;

	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);
	*lmc = device->cache.lmc_cache[port_num - rdma_start_port(device)];
	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_lmc);

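/*
 * Re-read a port's P_Key table, LMC and (for ports without a RoCE GID
 * table) GID table from the device, then swap the fresh copies into the
 * cache under device->cache.lock.
 */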
static void ib_cache_update(struct ib_device *device,
			    u8                port)
{
	struct ib_port_attr       *tprops = NULL;
	struct ib_pkey_cache      *pkey_cache = NULL, *old_pkey_cache;
	struct ib_gid_cache {
		int             table_len;
		union ib_gid    table[0];
	}			  *gid_cache = NULL;
	int                        i;
	int                        ret;
	struct ib_gid_table	  *table;
	struct ib_gid_table	 **ports_table = device->cache.gid_cache;
	bool			   use_roce_gid_table =
					rdma_cap_roce_gid_table(device, port);

	if (port < rdma_start_port(device) || port > rdma_end_port(device))
		return;

	table = ports_table[port - rdma_start_port(device)];

	tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
	if (!tprops)
		return;

	ret = ib_query_port(device, port, tprops);
	if (ret) {
		printk(KERN_WARNING "ib_query_port failed (%d) for %s\n",
		       ret, device->name);
		goto err;
	}

	pkey_cache = kmalloc(sizeof *pkey_cache + tprops->pkey_tbl_len *
			     sizeof *pkey_cache->table, GFP_KERNEL);
	if (!pkey_cache)
		goto err;

	pkey_cache->table_len = tprops->pkey_tbl_len;

	if (!use_roce_gid_table) {
		gid_cache = kmalloc(sizeof(*gid_cache) + tprops->gid_tbl_len *
			    sizeof(*gid_cache->table), GFP_KERNEL);
		if (!gid_cache)
			goto err;

		gid_cache->table_len = tprops->gid_tbl_len;
	}

	for (i = 0; i < pkey_cache->table_len; ++i) {
		ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
		if (ret) {
			printk(KERN_WARNING "ib_query_pkey failed (%d) for %s (index %d)\n",
			       ret, device->name, i);
			goto err;
		}
	}

	if (!use_roce_gid_table) {
		for (i = 0;  i < gid_cache->table_len; ++i) {
			ret = ib_query_gid(device, port, i,
					   gid_cache->table + i, NULL);
			if (ret) {
				printk(KERN_WARNING "ib_query_gid failed (%d) for %s (index %d)\n",
				       ret, device->name, i);
				goto err;
			}
		}
	}

	write_lock_irq(&device->cache.lock);

	old_pkey_cache = device->cache.pkey_cache[port - rdma_start_port(device)];

	device->cache.pkey_cache[port - rdma_start_port(device)] = pkey_cache;
	if (!use_roce_gid_table) {
		write_lock(&table->rwlock);
		for (i = 0; i < gid_cache->table_len; i++) {
			modify_gid(device, port, table, i, gid_cache->table + i,
				   &zattr, false);
		}
		write_unlock(&table->rwlock);
	}

	device->cache.lmc_cache[port - rdma_start_port(device)] = tprops->lmc;

	write_unlock_irq(&device->cache.lock);

	kfree(gid_cache);
	kfree(old_pkey_cache);
	kfree(tprops);
	return;

err:
	kfree(pkey_cache);
	kfree(gid_cache);
	kfree(tprops);
}

static void ib_cache_task(struct work_struct *_work)
{
	struct ib_update_work *work =
		container_of(_work, struct ib_update_work, work);

	ib_cache_update(work->device, work->port_num);
	kfree(work);
}

static void ib_cache_event(struct ib_event_handler *handler,
			   struct ib_event *event)
{
	struct ib_update_work *work;

	if (event->event == IB_EVENT_PORT_ERR    ||
	    event->event == IB_EVENT_PORT_ACTIVE ||
	    event->event == IB_EVENT_LID_CHANGE  ||
	    event->event == IB_EVENT_PKEY_CHANGE ||
	    event->event == IB_EVENT_SM_CHANGE   ||
	    event->event == IB_EVENT_CLIENT_REREGISTER ||
	    event->event == IB_EVENT_GID_CHANGE) {
		work = kmalloc(sizeof *work, GFP_ATOMIC);
		if (work) {
			INIT_WORK(&work->work, ib_cache_task);
			work->device   = event->device;
			work->port_num = event->element.port_num;
			queue_work(ib_wq, &work->work);
		}
	}
}

int ib_cache_setup_one(struct ib_device *device)
{
	int p;
	int err;

	rwlock_init(&device->cache.lock);

	device->cache.pkey_cache =
		kzalloc(sizeof *device->cache.pkey_cache *
			(rdma_end_port(device) - rdma_start_port(device) + 1), GFP_KERNEL);
	device->cache.lmc_cache = kmalloc(sizeof *device->cache.lmc_cache *
					  (rdma_end_port(device) -
					   rdma_start_port(device) + 1),
					  GFP_KERNEL);
	if (!device->cache.pkey_cache ||
	    !device->cache.lmc_cache) {
		printk(KERN_WARNING "Couldn't allocate cache "
		       "for %s\n", device->name);
		return -ENOMEM;
	}

	err = gid_table_setup_one(device);
	if (err)
		/* Allocated memory will be cleaned in the release function */
		return err;

	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
		ib_cache_update(device, p + rdma_start_port(device));

	INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
			      device, ib_cache_event);
	err = ib_register_event_handler(&device->cache.event_handler);
	if (err)
		goto err;

	return 0;

err:
	gid_table_cleanup_one(device);
	return err;
}

void ib_cache_release_one(struct ib_device *device)
{
	int p;

	/*
	 * The release function frees all the cache elements.
	 * This function should be called as part of freeing
	 * all the device's resources when the cache could no
	 * longer be accessed.
	 */
	if (device->cache.pkey_cache)
		for (p = 0;
		     p <= rdma_end_port(device) - rdma_start_port(device); ++p)
			kfree(device->cache.pkey_cache[p]);

	gid_table_release_one(device);
	kfree(device->cache.pkey_cache);
	kfree(device->cache.lmc_cache);
}

void ib_cache_cleanup_one(struct ib_device *device)
{
	/* The cleanup function unregisters the event handler,
	 * waits for all in-progress workqueue elements and cleans
	 * up the GID cache. This function should be called after
	 * the device was removed from the devices list and all
	 * clients were removed, so the cache exists but is
	 * non-functional and shouldn't be updated anymore.
	 */
	ib_unregister_event_handler(&device->cache.event_handler);
	flush_workqueue(ib_wq);
	gid_table_cleanup_one(device);
}

void __init ib_cache_setup(void)
{
	roce_gid_mgmt_init();
}

void __exit ib_cache_cleanup(void)
{
	roce_gid_mgmt_cleanup();
}