/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <net/addrconf.h>

#include <rdma/ib_cache.h>

#include "core_priv.h"

struct ib_pkey_cache {
	int             table_len;
	u16             table[0];
};

struct ib_update_work {
	struct work_struct work;
	struct ib_device  *device;
	u8                 port_num;
};

union ib_gid zgid;
EXPORT_SYMBOL(zgid);

static const struct ib_gid_attr zattr;

enum gid_attr_find_mask {
	GID_ATTR_FIND_MASK_GID          = 1UL << 0,
	GID_ATTR_FIND_MASK_NETDEV	= 1UL << 1,
	GID_ATTR_FIND_MASK_DEFAULT	= 1UL << 2,
	GID_ATTR_FIND_MASK_GID_TYPE	= 1UL << 3,
};

enum gid_table_entry_props {
	GID_TABLE_ENTRY_INVALID		= 1UL << 0,
	GID_TABLE_ENTRY_DEFAULT		= 1UL << 1,
};

enum gid_table_write_action {
	GID_TABLE_WRITE_ACTION_ADD,
	GID_TABLE_WRITE_ACTION_DEL,
	/* MODIFY only updates the GID table. Currently only used by
	 * ib_cache_update.
	 */
	GID_TABLE_WRITE_ACTION_MODIFY
};

struct ib_gid_table_entry {
	unsigned long	    props;
	union ib_gid        gid;
	struct ib_gid_attr  attr;
	void		   *context;
};

struct ib_gid_table {
	int                  sz;
	/* In RoCE, adding a GID to the table requires:
	 * (a) Finding out whether this GID already exists.
	 * (b) Finding a free slot.
	 * (c) Writing the new GID.
	 *
	 * Deletion requires a different set of operations:
	 * (a) Finding the GID.
	 * (b) Deleting it.
	 *
	 * Add/delete must be carried out atomically.
	 * This is done by making all writers take this
	 * mutex. We don't need this lock for IB, as the MAD
	 * layer replaces all entries. All data_vec entries
	 * are protected by this lock.
	 */
	struct mutex         lock;
	/* This lock protects the table entries from being
	 * read and written simultaneously.
	 */
	rwlock_t	     rwlock;
	struct ib_gid_table_entry *data_vec;
};

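/* Notify listeners that the GID table of a RoCE port has changed. */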
static void dispatch_gid_change_event(struct ib_device *ib_dev, u8 port)
{
	if (rdma_cap_roce_gid_table(ib_dev, port)) {
		struct ib_event event;

		event.device		= ib_dev;
		event.element.port_num	= port;
		event.event		= IB_EVENT_GID_CHANGE;

		ib_dispatch_event(&event);
	}
}

static const char * const gid_type_str[] = {
	[IB_GID_TYPE_IB]	= "IB/RoCE v1",
	[IB_GID_TYPE_ROCE_UDP_ENCAP]	= "RoCE v2",
};

const char *ib_cache_gid_type_str(enum ib_gid_type gid_type)
{
	if (gid_type < ARRAY_SIZE(gid_type_str) && gid_type_str[gid_type])
		return gid_type_str[gid_type];

	return "Invalid GID type";
}
EXPORT_SYMBOL(ib_cache_gid_type_str);

int ib_cache_gid_parse_type_str(const char *buf)
{
	unsigned int i;
	size_t len;
	int err = -EINVAL;

	len = strlen(buf);
	if (len == 0)
		return -EINVAL;

	if (buf[len - 1] == '\n')
		len--;

	for (i = 0; i < ARRAY_SIZE(gid_type_str); ++i)
		if (gid_type_str[i] && !strncmp(buf, gid_type_str[i], len) &&
		    len == strlen(gid_type_str[i])) {
			err = i;
			break;
		}

	return err;
}
EXPORT_SYMBOL(ib_cache_gid_parse_type_str);

/* This function expects that rwlock will be write locked in all
 * scenarios and that lock will be locked in sleep-able (RoCE)
 * scenarios.
 */
static int write_gid(struct ib_device *ib_dev, u8 port,
		     struct ib_gid_table *table, int ix,
		     const union ib_gid *gid,
		     const struct ib_gid_attr *attr,
		     enum gid_table_write_action action,
		     bool  default_gid)
	__releases(&table->rwlock) __acquires(&table->rwlock)
{
	int ret = 0;
	struct net_device *old_net_dev;
	enum ib_gid_type old_gid_type;

	/* For ports where rdma_cap_roce_gid_table() is true, this function
	 * must be protected by a sleepable lock.
	 */

	if (rdma_cap_roce_gid_table(ib_dev, port)) {
		table->data_vec[ix].props |= GID_TABLE_ENTRY_INVALID;
		write_unlock_irq(&table->rwlock);
		/* GID_TABLE_WRITE_ACTION_MODIFY currently isn't supported by
		 * RoCE providers and thus only updates the cache.
		 */
		if (action == GID_TABLE_WRITE_ACTION_ADD)
			ret = ib_dev->add_gid(ib_dev, port, ix, gid, attr,
					      &table->data_vec[ix].context);
		else if (action == GID_TABLE_WRITE_ACTION_DEL)
			ret = ib_dev->del_gid(ib_dev, port, ix,
					      &table->data_vec[ix].context);
		write_lock_irq(&table->rwlock);
	}

	old_net_dev = table->data_vec[ix].attr.ndev;
	old_gid_type = table->data_vec[ix].attr.gid_type;
	if (old_net_dev && old_net_dev != attr->ndev)
		dev_put(old_net_dev);
	/* If the driver call failed, or this is a delete, clear the entry */
	if (ret || action == GID_TABLE_WRITE_ACTION_DEL) {
		gid = &zgid;
		attr = &zattr;
		table->data_vec[ix].context = NULL;
	}

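	/* Update the cached entry; on delete, or when the driver call failed,
	 * gid/attr have already been replaced with zgid/zattr above.
	 */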
	memcpy(&table->data_vec[ix].gid, gid, sizeof(*gid));
	memcpy(&table->data_vec[ix].attr, attr, sizeof(*attr));
	if (default_gid) {
		table->data_vec[ix].props |= GID_TABLE_ENTRY_DEFAULT;
		if (action == GID_TABLE_WRITE_ACTION_DEL)
			table->data_vec[ix].attr.gid_type = old_gid_type;
	}
	if (table->data_vec[ix].attr.ndev &&
	    table->data_vec[ix].attr.ndev != old_net_dev)
		dev_hold(table->data_vec[ix].attr.ndev);

	table->data_vec[ix].props &= ~GID_TABLE_ENTRY_INVALID;

	return ret;
}

static int add_gid(struct ib_device *ib_dev, u8 port,
		   struct ib_gid_table *table, int ix,
		   const union ib_gid *gid,
		   const struct ib_gid_attr *attr,
		   bool  default_gid)
{
	return write_gid(ib_dev, port, table, ix, gid, attr,
			 GID_TABLE_WRITE_ACTION_ADD, default_gid);
}

static int modify_gid(struct ib_device *ib_dev, u8 port,
		      struct ib_gid_table *table, int ix,
		      const union ib_gid *gid,
		      const struct ib_gid_attr *attr,
		      bool  default_gid)
{
	return write_gid(ib_dev, port, table, ix, gid, attr,
			 GID_TABLE_WRITE_ACTION_MODIFY, default_gid);
}

static int del_gid(struct ib_device *ib_dev, u8 port,
		   struct ib_gid_table *table, int ix,
		   bool  default_gid)
{
	return write_gid(ib_dev, port, table, ix, &zgid, &zattr,
			 GID_TABLE_WRITE_ACTION_DEL, default_gid);
}

/* rwlock should be read locked */
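/* Returns the index of the first entry that matches every field selected by
 * @mask, or -1 if no entry matches.  When @pempty is non-NULL it is set to
 * the index of the first free slot (-1 if the table is full).
 */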
static int find_gid(struct ib_gid_table *table, const union ib_gid *gid,
		    const struct ib_gid_attr *val, bool default_gid,
		    unsigned long mask, int *pempty)
{
	int i = 0;
	int found = -1;
	int empty = pempty ? -1 : 0;

	while (i < table->sz && (found < 0 || empty < 0)) {
		struct ib_gid_table_entry *data = &table->data_vec[i];
		struct ib_gid_attr *attr = &data->attr;
		int curr_index = i;

		i++;

		if (data->props & GID_TABLE_ENTRY_INVALID)
			continue;

		if (empty < 0)
			if (!memcmp(&data->gid, &zgid, sizeof(*gid)) &&
			    !memcmp(attr, &zattr, sizeof(*attr)) &&
			    !data->props)
				empty = curr_index;

		if (found >= 0)
			continue;

		if (mask & GID_ATTR_FIND_MASK_GID_TYPE &&
		    attr->gid_type != val->gid_type)
			continue;

		if (mask & GID_ATTR_FIND_MASK_GID &&
		    memcmp(gid, &data->gid, sizeof(*gid)))
			continue;

		if (mask & GID_ATTR_FIND_MASK_NETDEV &&
		    attr->ndev != val->ndev)
			continue;

		if (mask & GID_ATTR_FIND_MASK_DEFAULT &&
		    !!(data->props & GID_TABLE_ENTRY_DEFAULT) !=
		    default_gid)
			continue;

		found = curr_index;
	}

	if (pempty)
		*pempty = empty;

	return found;
}

static void make_default_gid(struct net_device *dev, union ib_gid *gid)
{
	gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
	addrconf_ifid_eui48(&gid->raw[8], dev);
}

int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
		     union ib_gid *gid, struct ib_gid_attr *attr)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	int ix;
	int ret = 0;
	struct net_device *idev;
	int empty;

	table = ports_table[port - rdma_start_port(ib_dev)];

	if (!memcmp(gid, &zgid, sizeof(*gid)))
		return -EINVAL;

	if (ib_dev->get_netdev) {
		idev = ib_dev->get_netdev(ib_dev, port);
		if (idev && attr->ndev != idev) {
			union ib_gid default_gid;

			/* Adding default GIDs is not permitted */
			make_default_gid(idev, &default_gid);
			if (!memcmp(gid, &default_gid, sizeof(*gid))) {
				dev_put(idev);
				return -EPERM;
			}
		}
		if (idev)
			dev_put(idev);
	}

	mutex_lock(&table->lock);
	write_lock_irq(&table->rwlock);

	ix = find_gid(table, gid, attr, false, GID_ATTR_FIND_MASK_GID |
		      GID_ATTR_FIND_MASK_GID_TYPE |
		      GID_ATTR_FIND_MASK_NETDEV, &empty);
	if (ix >= 0)
		goto out_unlock;

	if (empty < 0) {
		ret = -ENOSPC;
		goto out_unlock;
	}

	ret = add_gid(ib_dev, port, table, empty, gid, attr, false);
	if (!ret)
		dispatch_gid_change_event(ib_dev, port);

out_unlock:
	write_unlock_irq(&table->rwlock);
	mutex_unlock(&table->lock);
	return ret;
}

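/* Remove the non-default entry matching @gid and @attr from the port's GID
 * table and notify listeners if an entry was actually deleted.
 */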
int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
		     union ib_gid *gid, struct ib_gid_attr *attr)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	int ix;

	table = ports_table[port - rdma_start_port(ib_dev)];

	mutex_lock(&table->lock);
	write_lock_irq(&table->rwlock);

	ix = find_gid(table, gid, attr, false,
		      GID_ATTR_FIND_MASK_GID	  |
		      GID_ATTR_FIND_MASK_GID_TYPE |
		      GID_ATTR_FIND_MASK_NETDEV	  |
		      GID_ATTR_FIND_MASK_DEFAULT,
		      NULL);
	if (ix < 0)
		goto out_unlock;

	if (!del_gid(ib_dev, port, table, ix, false))
		dispatch_gid_change_event(ib_dev, port);

out_unlock:
	write_unlock_irq(&table->rwlock);
	mutex_unlock(&table->lock);
	return 0;
}

int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
				     struct net_device *ndev)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	int ix;
	bool deleted = false;

	table  = ports_table[port - rdma_start_port(ib_dev)];

	mutex_lock(&table->lock);
	write_lock_irq(&table->rwlock);

	for (ix = 0; ix < table->sz; ix++)
		if (table->data_vec[ix].attr.ndev == ndev)
			if (!del_gid(ib_dev, port, table, ix,
				     !!(table->data_vec[ix].props &
					GID_TABLE_ENTRY_DEFAULT)))
				deleted = true;

	write_unlock_irq(&table->rwlock);
	mutex_unlock(&table->lock);

	if (deleted)
		dispatch_gid_change_event(ib_dev, port);

	return 0;
}

static int __ib_cache_gid_get(struct ib_device *ib_dev, u8 port, int index,
			      union ib_gid *gid, struct ib_gid_attr *attr)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;

	table = ports_table[port - rdma_start_port(ib_dev)];

	if (index < 0 || index >= table->sz)
		return -EINVAL;

	if (table->data_vec[index].props & GID_TABLE_ENTRY_INVALID)
		return -EAGAIN;

	memcpy(gid, &table->data_vec[index].gid, sizeof(*gid));
	if (attr) {
		memcpy(attr, &table->data_vec[index].attr, sizeof(*attr));
		if (attr->ndev)
			dev_hold(attr->ndev);
	}

	return 0;
}

static int _ib_cache_gid_table_find(struct ib_device *ib_dev,
				    const union ib_gid *gid,
				    const struct ib_gid_attr *val,
				    unsigned long mask,
				    u8 *port, u16 *index)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	u8 p;
	int local_index;
	unsigned long flags;

	for (p = 0; p < ib_dev->phys_port_cnt; p++) {
		table = ports_table[p];
		read_lock_irqsave(&table->rwlock, flags);
		local_index = find_gid(table, gid, val, false, mask, NULL);
		if (local_index >= 0) {
			if (index)
				*index = local_index;
			if (port)
				*port = p + rdma_start_port(ib_dev);
			read_unlock_irqrestore(&table->rwlock, flags);
			return 0;
		}
		read_unlock_irqrestore(&table->rwlock, flags);
	}

	return -ENOENT;
}

static int ib_cache_gid_find(struct ib_device *ib_dev,
			     const union ib_gid *gid,
			     enum ib_gid_type gid_type,
			     struct net_device *ndev, u8 *port,
			     u16 *index)
{
	unsigned long mask = GID_ATTR_FIND_MASK_GID |
			     GID_ATTR_FIND_MASK_GID_TYPE;
	struct ib_gid_attr gid_attr_val = {.ndev = ndev, .gid_type = gid_type};

	if (ndev)
		mask |= GID_ATTR_FIND_MASK_NETDEV;

	return _ib_cache_gid_table_find(ib_dev, gid, &gid_attr_val,
					mask, port, index);
}

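/* Search a single port's cached GID table for @gid of the given GID type
 * (and, if @ndev is set, bound to that netdevice) and return its index.
 */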
int ib_find_cached_gid_by_port(struct ib_device *ib_dev,
			       const union ib_gid *gid,
			       enum ib_gid_type gid_type,
			       u8 port, struct net_device *ndev,
			       u16 *index)
{
	int local_index;
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	unsigned long mask = GID_ATTR_FIND_MASK_GID |
			     GID_ATTR_FIND_MASK_GID_TYPE;
	struct ib_gid_attr val = {.ndev = ndev, .gid_type = gid_type};
	unsigned long flags;

	if (port < rdma_start_port(ib_dev) ||
	    port > rdma_end_port(ib_dev))
		return -ENOENT;

	table = ports_table[port - rdma_start_port(ib_dev)];

	if (ndev)
		mask |= GID_ATTR_FIND_MASK_NETDEV;

	read_lock_irqsave(&table->rwlock, flags);
	local_index = find_gid(table, gid, &val, false, mask, NULL);
	if (local_index >= 0) {
		if (index)
			*index = local_index;
		read_unlock_irqrestore(&table->rwlock, flags);
		return 0;
	}

	read_unlock_irqrestore(&table->rwlock, flags);
	return -ENOENT;
}
EXPORT_SYMBOL(ib_find_cached_gid_by_port);

/**
 * ib_cache_gid_find_by_filter - Returns the GID table index where a specified
 * GID value occurs
 * @ib_dev: The device to query.
 * @gid: The GID value to search for.
 * @port: The port number of the device where the GID value should be
 *   searched.
 * @filter: The filter function is executed on any matching GID in the table.
 *   If the filter function returns true, the corresponding index is returned;
 *   otherwise, the search of the GID table continues. It's guaranteed that
 *   while filter is executed, the ndev field is valid and the structure won't
 *   change. filter is executed in an atomic context. filter must not be NULL.
 * @context: Opaque private data passed to the filter function.
 * @index: The index into the cached GID table where the GID was found.  This
 *   parameter may be NULL.
 *
 * ib_cache_gid_find_by_filter() searches the port's GID table for the
 * specified GID value and returns the index of the first entry for which
 * the filter function returns true.
 * This function is only supported on RoCE ports.
 */
static int ib_cache_gid_find_by_filter(struct ib_device *ib_dev,
				       const union ib_gid *gid,
				       u8 port,
				       bool (*filter)(const union ib_gid *,
						      const struct ib_gid_attr *,
						      void *),
				       void *context,
				       u16 *index)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	struct ib_gid_table *table;
	unsigned int i;
	unsigned long flags;
	bool found = false;

	if (!ports_table)
		return -EOPNOTSUPP;

	if (port < rdma_start_port(ib_dev) ||
	    port > rdma_end_port(ib_dev) ||
	    !rdma_protocol_roce(ib_dev, port))
		return -EPROTONOSUPPORT;

	table = ports_table[port - rdma_start_port(ib_dev)];

	read_lock_irqsave(&table->rwlock, flags);
	for (i = 0; i < table->sz; i++) {
		struct ib_gid_attr attr;

		if (table->data_vec[i].props & GID_TABLE_ENTRY_INVALID)
			goto next;

		if (memcmp(gid, &table->data_vec[i].gid, sizeof(*gid)))
			goto next;

		memcpy(&attr, &table->data_vec[i].attr, sizeof(attr));

		if (filter(gid, &attr, context))
			found = true;

next:
		if (found)
			break;
	}
	read_unlock_irqrestore(&table->rwlock, flags);

	if (!found)
		return -ENOENT;

	if (index)
		*index = i;
	return 0;
}

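/* Allocate a GID table with @sz entries and initialize its locks. */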
static struct ib_gid_table *alloc_gid_table(int sz)
{
	struct ib_gid_table *table =
		kzalloc(sizeof(struct ib_gid_table), GFP_KERNEL);

	if (!table)
		return NULL;

	table->data_vec = kcalloc(sz, sizeof(*table->data_vec), GFP_KERNEL);
	if (!table->data_vec)
		goto err_free_table;

	mutex_init(&table->lock);

	table->sz = sz;
	rwlock_init(&table->rwlock);

	return table;

err_free_table:
	kfree(table);
	return NULL;
}

static void release_gid_table(struct ib_gid_table *table)
{
	if (table) {
		kfree(table->data_vec);
		kfree(table);
	}
}

static void cleanup_gid_table_port(struct ib_device *ib_dev, u8 port,
				   struct ib_gid_table *table)
{
	int i;
	bool deleted = false;

	if (!table)
		return;

	write_lock_irq(&table->rwlock);
	for (i = 0; i < table->sz; ++i) {
		if (memcmp(&table->data_vec[i].gid, &zgid,
			   sizeof(table->data_vec[i].gid)))
			if (!del_gid(ib_dev, port, table, i,
				     table->data_vec[i].props &
				     GID_TABLE_ENTRY_DEFAULT))
				deleted = true;
	}
	write_unlock_irq(&table->rwlock);

	if (deleted)
		dispatch_gid_change_event(ib_dev, port);
}

void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
				  struct net_device *ndev,
				  unsigned long gid_type_mask,
				  enum ib_cache_gid_default_mode mode)
{
	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
	union ib_gid gid;
	struct ib_gid_attr gid_attr;
	struct ib_gid_attr zattr_type = zattr;
	struct ib_gid_table *table;
	unsigned int gid_type;

	table  = ports_table[port - rdma_start_port(ib_dev)];

	make_default_gid(ndev, &gid);
	memset(&gid_attr, 0, sizeof(gid_attr));
	gid_attr.ndev = ndev;

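	/* Set (or clear, depending on @mode) the default GID of each RoCE GID
	 * type requested in @gid_type_mask, using that type's reserved slot.
	 */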
	for (gid_type = 0; gid_type < IB_GID_TYPE_SIZE; ++gid_type) {
		int ix;
		union ib_gid current_gid;
		struct ib_gid_attr current_gid_attr = {};

		if (1UL << gid_type & ~gid_type_mask)
			continue;

		gid_attr.gid_type = gid_type;

		mutex_lock(&table->lock);
		write_lock_irq(&table->rwlock);
		ix = find_gid(table, NULL, &gid_attr, true,
			      GID_ATTR_FIND_MASK_GID_TYPE |
			      GID_ATTR_FIND_MASK_DEFAULT,
			      NULL);

		/* Couldn't find the default GID location */
		if (WARN_ON(ix < 0))
			goto release;

		zattr_type.gid_type = gid_type;

		if (!__ib_cache_gid_get(ib_dev, port, ix,
					&current_gid, &current_gid_attr) &&
		    mode == IB_CACHE_GID_DEFAULT_MODE_SET &&
		    !memcmp(&gid, &current_gid, sizeof(gid)) &&
		    !memcmp(&gid_attr, &current_gid_attr, sizeof(gid_attr)))
			goto release;

		if (memcmp(&current_gid, &zgid, sizeof(current_gid)) ||
		    memcmp(&current_gid_attr, &zattr_type,
			   sizeof(current_gid_attr))) {
			if (del_gid(ib_dev, port, table, ix, true)) {
				pr_warn("ib_cache_gid: can't delete index %d for default gid %pI6\n",
					ix, gid.raw);
				goto release;
			} else {
				dispatch_gid_change_event(ib_dev, port);
			}
		}

		if (mode == IB_CACHE_GID_DEFAULT_MODE_SET) {
			if (add_gid(ib_dev, port, table, ix, &gid, &gid_attr, true))
				pr_warn("ib_cache_gid: unable to add default gid %pI6\n",
					gid.raw);
			else
				dispatch_gid_change_event(ib_dev, port);
		}

release:
		if (current_gid_attr.ndev)
			dev_put(current_gid_attr.ndev);
		write_unlock_irq(&table->rwlock);
		mutex_unlock(&table->lock);
	}
}

static int gid_table_reserve_default(struct ib_device *ib_dev, u8 port,
				     struct ib_gid_table *table)
{
	unsigned int i;
	unsigned long roce_gid_type_mask;
	unsigned int num_default_gids;
	unsigned int current_gid = 0;

	roce_gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
	num_default_gids = hweight_long(roce_gid_type_mask);
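	/* Reserve one table slot per supported RoCE GID type and mark it as a
	 * default GID entry of that type.
	 */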
	for (i = 0; i < num_default_gids && i < table->sz; i++) {
		struct ib_gid_table_entry *entry =
			&table->data_vec[i];

		entry->props |= GID_TABLE_ENTRY_DEFAULT;
		current_gid = find_next_bit(&roce_gid_type_mask,
					    BITS_PER_LONG,
					    current_gid);
		entry->attr.gid_type = current_gid++;
	}

	return 0;
}

static int _gid_table_setup_one(struct ib_device *ib_dev)
{
	u8 port;
	struct ib_gid_table **table;
	int err = 0;

	table = kcalloc(ib_dev->phys_port_cnt, sizeof(*table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
		u8 rdma_port = port + rdma_start_port(ib_dev);

		table[port] =
			alloc_gid_table(
				ib_dev->port_immutable[rdma_port].gid_tbl_len);
		if (!table[port]) {
			err = -ENOMEM;
			goto rollback_table_setup;
		}

		err = gid_table_reserve_default(ib_dev,
						port + rdma_start_port(ib_dev),
						table[port]);
		if (err)
			goto rollback_table_setup;
	}

	ib_dev->cache.gid_cache = table;
	return 0;

rollback_table_setup:
	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
		cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
				       table[port]);
		release_gid_table(table[port]);
	}

	kfree(table);
	return err;
}

static void gid_table_release_one(struct ib_device *ib_dev)
{
	struct ib_gid_table **table = ib_dev->cache.gid_cache;
	u8 port;

	if (!table)
		return;

	for (port = 0; port < ib_dev->phys_port_cnt; port++)
		release_gid_table(table[port]);

	kfree(table);
	ib_dev->cache.gid_cache = NULL;
}

static void gid_table_cleanup_one(struct ib_device *ib_dev)
{
	struct ib_gid_table **table = ib_dev->cache.gid_cache;
	u8 port;

	if (!table)
		return;

	for (port = 0; port < ib_dev->phys_port_cnt; port++)
		cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
				       table[port]);
}

static int gid_table_setup_one(struct ib_device *ib_dev)
{
	int err;

	err = _gid_table_setup_one(ib_dev);

	if (err)
		return err;

	err = roce_rescan_device(ib_dev);

	if (err) {
		gid_table_cleanup_one(ib_dev);
		gid_table_release_one(ib_dev);
	}

	return err;
}

int ib_get_cached_gid(struct ib_device *device,
		      u8                port_num,
		      int               index,
		      union ib_gid     *gid,
		      struct ib_gid_attr *gid_attr)
{
	int res;
	unsigned long flags;
	struct ib_gid_table **ports_table = device->cache.gid_cache;
	struct ib_gid_table *table;

	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	table = ports_table[port_num - rdma_start_port(device)];

	read_lock_irqsave(&table->rwlock, flags);
	res = __ib_cache_gid_get(device, port_num, index, gid, gid_attr);
	read_unlock_irqrestore(&table->rwlock, flags);

	return res;
}
EXPORT_SYMBOL(ib_get_cached_gid);

int ib_find_cached_gid(struct ib_device *device,
		       const union ib_gid *gid,
		       enum ib_gid_type gid_type,
		       struct net_device *ndev,
		       u8               *port_num,
		       u16              *index)
{
	return ib_cache_gid_find(device, gid, gid_type, ndev, port_num, index);
}
EXPORT_SYMBOL(ib_find_cached_gid);

int ib_find_gid_by_filter(struct ib_device *device,
			  const union ib_gid *gid,
			  u8 port_num,
			  bool (*filter)(const union ib_gid *gid,
					 const struct ib_gid_attr *,
					 void *),
			  void *context, u16 *index)
{
	/* Only RoCE GID table supports filter function */
	if (!rdma_cap_roce_gid_table(device, port_num) && filter)
		return -EPROTONOSUPPORT;

	return ib_cache_gid_find_by_filter(device, gid,
					   port_num, filter,
					   context, index);
}
EXPORT_SYMBOL(ib_find_gid_by_filter);

int ib_get_cached_pkey(struct ib_device *device,
		       u8                port_num,
		       int               index,
		       u16              *pkey)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int ret = 0;

	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

	if (index < 0 || index >= cache->table_len)
		ret = -EINVAL;
	else
		*pkey = cache->table[index];

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_pkey);

int ib_find_cached_pkey(struct ib_device *device,
			u8                port_num,
			u16               pkey,
			u16              *index)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int i;
	int ret = -ENOENT;
	int partial_ix = -1;

	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

	*index = -1;

	for (i = 0; i < cache->table_len; ++i)
		if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) {
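			/* Prefer a full member (bit 15 set); remember a
			 * partial member as a fallback.
			 */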
			if (cache->table[i] & 0x8000) {
				*index = i;
				ret = 0;
				break;
			} else
				partial_ix = i;
		}

	if (ret && partial_ix >= 0) {
		*index = partial_ix;
		ret = 0;
	}

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_find_cached_pkey);

int ib_find_exact_cached_pkey(struct ib_device *device,
			      u8                port_num,
			      u16               pkey,
			      u16              *index)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int i;
	int ret = -ENOENT;

	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

	*index = -1;

	for (i = 0; i < cache->table_len; ++i)
		if (cache->table[i] == pkey) {
			*index = i;
			ret = 0;
			break;
		}

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_find_exact_cached_pkey);

int ib_get_cached_lmc(struct ib_device *device,
		      u8                port_num,
		      u8                *lmc)
{
	unsigned long flags;
	int ret = 0;

	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);
	*lmc = device->cache.lmc_cache[port_num - rdma_start_port(device)];
	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_lmc);

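/* Re-read the port attributes, P_Key table and (for IB ports) the GID table
 * from the device and swap the fresh copies into the cache.
 */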
static void ib_cache_update(struct ib_device *device,
			    u8                port)
{
	struct ib_port_attr       *tprops = NULL;
	struct ib_pkey_cache      *pkey_cache = NULL, *old_pkey_cache;
	struct ib_gid_cache {
		int             table_len;
		union ib_gid    table[0];
	}			  *gid_cache = NULL;
	int                        i;
	int                        ret;
	struct ib_gid_table	  *table;
	struct ib_gid_table	 **ports_table = device->cache.gid_cache;
	bool			   use_roce_gid_table =
					rdma_cap_roce_gid_table(device, port);

	if (port < rdma_start_port(device) || port > rdma_end_port(device))
		return;

	table = ports_table[port - rdma_start_port(device)];

	tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
	if (!tprops)
		return;

	ret = ib_query_port(device, port, tprops);
	if (ret) {
		pr_warn("ib_query_port failed (%d) for %s\n",
			ret, device->name);
		goto err;
	}

	pkey_cache = kmalloc(sizeof *pkey_cache + tprops->pkey_tbl_len *
			     sizeof *pkey_cache->table, GFP_KERNEL);
	if (!pkey_cache)
		goto err;

	pkey_cache->table_len = tprops->pkey_tbl_len;

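	/* RoCE ports maintain their GID table through the GID cache itself,
	 * so only IB ports are queried for GIDs here.
	 */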
	if (!use_roce_gid_table) {
		gid_cache = kmalloc(sizeof(*gid_cache) + tprops->gid_tbl_len *
			    sizeof(*gid_cache->table), GFP_KERNEL);
		if (!gid_cache)
			goto err;

		gid_cache->table_len = tprops->gid_tbl_len;
	}

	for (i = 0; i < pkey_cache->table_len; ++i) {
		ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
		if (ret) {
			pr_warn("ib_query_pkey failed (%d) for %s (index %d)\n",
				ret, device->name, i);
			goto err;
		}
	}

	if (!use_roce_gid_table) {
		for (i = 0;  i < gid_cache->table_len; ++i) {
			ret = ib_query_gid(device, port, i,
					   gid_cache->table + i, NULL);
			if (ret) {
				pr_warn("ib_query_gid failed (%d) for %s (index %d)\n",
					ret, device->name, i);
				goto err;
			}
		}
	}

	write_lock_irq(&device->cache.lock);

	old_pkey_cache = device->cache.pkey_cache[port - rdma_start_port(device)];

	device->cache.pkey_cache[port - rdma_start_port(device)] = pkey_cache;
	if (!use_roce_gid_table) {
		write_lock(&table->rwlock);
		for (i = 0; i < gid_cache->table_len; i++) {
			modify_gid(device, port, table, i, gid_cache->table + i,
				   &zattr, false);
		}
		write_unlock(&table->rwlock);
	}

	device->cache.lmc_cache[port - rdma_start_port(device)] = tprops->lmc;

	write_unlock_irq(&device->cache.lock);

	kfree(gid_cache);
	kfree(old_pkey_cache);
	kfree(tprops);
	return;

err:
	kfree(pkey_cache);
	kfree(gid_cache);
	kfree(tprops);
}

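/* Deferred work item: refresh the cache for the port that raised the event. */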
static void ib_cache_task(struct work_struct *_work)
{
	struct ib_update_work *work =
		container_of(_work, struct ib_update_work, work);

	ib_cache_update(work->device, work->port_num);
	kfree(work);
}

static void ib_cache_event(struct ib_event_handler *handler,
			   struct ib_event *event)
{
	struct ib_update_work *work;

	if (event->event == IB_EVENT_PORT_ERR    ||
	    event->event == IB_EVENT_PORT_ACTIVE ||
	    event->event == IB_EVENT_LID_CHANGE  ||
	    event->event == IB_EVENT_PKEY_CHANGE ||
	    event->event == IB_EVENT_SM_CHANGE   ||
	    event->event == IB_EVENT_CLIENT_REREGISTER ||
	    event->event == IB_EVENT_GID_CHANGE) {
		work = kmalloc(sizeof *work, GFP_ATOMIC);
		if (work) {
			INIT_WORK(&work->work, ib_cache_task);
			work->device   = event->device;
			work->port_num = event->element.port_num;
			queue_work(ib_wq, &work->work);
		}
	}
}

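/* Allocate the per-port caches, populate them once and register an event
 * handler for the port events that require a cache refresh.
 */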
int ib_cache_setup_one(struct ib_device *device)
{
	int p;
	int err;

	rwlock_init(&device->cache.lock);

	device->cache.pkey_cache =
		kzalloc(sizeof *device->cache.pkey_cache *
			(rdma_end_port(device) - rdma_start_port(device) + 1), GFP_KERNEL);
	device->cache.lmc_cache = kmalloc(sizeof *device->cache.lmc_cache *
					  (rdma_end_port(device) -
					   rdma_start_port(device) + 1),
					  GFP_KERNEL);
	if (!device->cache.pkey_cache ||
	    !device->cache.lmc_cache) {
		err = -ENOMEM;
		goto free;
	}

	err = gid_table_setup_one(device);
	if (err)
		goto free;

	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
		ib_cache_update(device, p + rdma_start_port(device));

	INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
			      device, ib_cache_event);
	err = ib_register_event_handler(&device->cache.event_handler);
	if (err)
		goto err;

	return 0;

err:
	gid_table_cleanup_one(device);
free:
	kfree(device->cache.pkey_cache);
	kfree(device->cache.lmc_cache);
	return err;
}

void ib_cache_release_one(struct ib_device *device)
{
	int p;

	/*
	 * The release function frees all the cache elements.
	 * This function should be called as part of freeing
	 * all the device's resources when the cache can no
	 * longer be accessed.
	 */
	if (device->cache.pkey_cache)
		for (p = 0;
		     p <= rdma_end_port(device) - rdma_start_port(device); ++p)
			kfree(device->cache.pkey_cache[p]);

	gid_table_release_one(device);
	kfree(device->cache.pkey_cache);
	kfree(device->cache.lmc_cache);
}

void ib_cache_cleanup_one(struct ib_device *device)
{
	/* The cleanup function unregisters the event handler,
	 * waits for all in-progress workqueue elements and cleans
	 * up the GID cache. This function should be called after
	 * the device was removed from the devices list and all
	 * clients were removed, so the cache exists but is
	 * non-functional and shouldn't be updated anymore.
	 */
	ib_unregister_event_handler(&device->cache.event_handler);
	flush_workqueue(ib_wq);
	gid_table_cleanup_one(device);
}

void __init ib_cache_setup(void)
{
	roce_gid_mgmt_init();
}

void __exit ib_cache_cleanup(void)
{
	roce_gid_mgmt_cleanup();
}