/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <net/addrconf.h>

#include <rdma/ib_cache.h>

#include "core_priv.h"

struct ib_pkey_cache {
	int             table_len;
	u16             table[0];
};

struct ib_update_work {
	struct work_struct work;
	struct ib_device  *device;
	u8                 port_num;
	bool		   enforce_security;
};

union ib_gid zgid;
EXPORT_SYMBOL(zgid);

static const struct ib_gid_attr zattr;

enum gid_attr_find_mask {
	GID_ATTR_FIND_MASK_GID          = 1UL << 0,
	GID_ATTR_FIND_MASK_NETDEV	= 1UL << 1,
	GID_ATTR_FIND_MASK_DEFAULT	= 1UL << 2,
	GID_ATTR_FIND_MASK_GID_TYPE	= 1UL << 3,
};

enum gid_table_entry_props {
	GID_TABLE_ENTRY_INVALID		= 1UL << 0,
	GID_TABLE_ENTRY_DEFAULT		= 1UL << 1,
};

enum gid_table_write_action {
	GID_TABLE_WRITE_ACTION_ADD,
	GID_TABLE_WRITE_ACTION_DEL,
	/* MODIFY only updates the GID table. Currently only used by
	 * ib_cache_update.
	 */
	GID_TABLE_WRITE_ACTION_MODIFY
};

struct ib_gid_table_entry {
	unsigned long	    props;
	union ib_gid        gid;
	struct ib_gid_attr  attr;
	void		   *context;
};

struct ib_gid_table {
	int                  sz;
	/* In RoCE, adding a GID to the table requires:
	 * (a) Check whether this GID already exists.
	 * (b) Find a free slot.
	 * (c) Write the new GID.
	 *
	 * Deleting requires a different set of operations:
	 * (a) Find the GID.
	 * (b) Delete it.
	 *
	 * Add/delete must be carried out atomically.
	 * This is done by taking this mutex in all writers.
	 * We don't need this lock for IB, as the MAD layer
	 * replaces all entries. All data_vec entries are
	 * protected by this lock.
	 **/
	struct mutex         lock;
	/* This lock protects the table entries from being
	 * read and written simultaneously.
	 */
	rwlock_t	     rwlock;
	struct ib_gid_table_entry *data_vec;
};
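
/*
 * Illustrative sketch, not part of the driver: the write-side locking
 * order used below (e.g. in ib_cache_gid_add() and ib_cache_gid_del())
 * is to take the sleepable mutex first and the irq-safe rwlock second:
 *
 *	mutex_lock(&table->lock);
 *	write_lock_irq(&table->rwlock);
 *	... find_gid() / add_gid() / del_gid() ...
 *	write_unlock_irq(&table->rwlock);
 *	mutex_unlock(&table->lock);
 *
 * Readers take only table->rwlock (see ib_get_cached_gid()).
 */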

static void dispatch_gid_change_event(struct ib_device *ib_dev, u8 port)
{
	if (rdma_cap_roce_gid_table(ib_dev, port)) {
		struct ib_event event;

		event.device		= ib_dev;
		event.element.port_num	= port;
		event.event		= IB_EVENT_GID_CHANGE;

		ib_dispatch_event(&event);
	}
}

static const char * const gid_type_str[] = {
	[IB_GID_TYPE_IB]	= "IB/RoCE v1",
	[IB_GID_TYPE_ROCE_UDP_ENCAP]	= "RoCE v2",
};

const char *ib_cache_gid_type_str(enum ib_gid_type gid_type)
{
	if (gid_type < ARRAY_SIZE(gid_type_str) && gid_type_str[gid_type])
		return gid_type_str[gid_type];

	return "Invalid GID type";
}
EXPORT_SYMBOL(ib_cache_gid_type_str);

int ib_cache_gid_parse_type_str(const char *buf)
{
	unsigned int i;
	size_t len;
	int err = -EINVAL;

	len = strlen(buf);
	if (len == 0)
		return -EINVAL;

	if (buf[len - 1] == '\n')
		len--;

	for (i = 0; i < ARRAY_SIZE(gid_type_str); ++i)
		if (gid_type_str[i] && !strncmp(buf, gid_type_str[i], len) &&
		    len == strlen(gid_type_str[i])) {
			err = i;
			break;
		}

	return err;
}
EXPORT_SYMBOL(ib_cache_gid_parse_type_str);
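
/*
 * Illustrative sketch, not part of the driver: for valid types the two
 * helpers above are inverses of each other. A caller such as a sysfs
 * store handler (assumed here) could do:
 *
 *	int t = ib_cache_gid_parse_type_str("RoCE v2\n");
 *
 *	if (t >= 0)
 *		pr_debug("parsed GID type: %s\n", ib_cache_gid_type_str(t));
 *
 * A trailing newline is accepted; unknown strings yield -EINVAL.
 */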

/* This function expects that table->rwlock is held for writing in all
 * scenarios, and that table->lock (the sleepable mutex) is also held in
 * RoCE scenarios.
 */
static int write_gid(struct ib_device *ib_dev, u8 port,
		     struct ib_gid_table *table, int ix,
		     const union ib_gid *gid,
		     const struct ib_gid_attr *attr,
		     enum gid_table_write_action action,
		     bool  default_gid)
	__releases(&table->rwlock) __acquires(&table->rwlock)
{
	int ret = 0;
	struct net_device *old_net_dev;
	enum ib_gid_type old_gid_type;

	/* In the rdma_cap_roce_gid_table() case, this function must be
	 * protected by a sleepable lock.
	 */

	if (rdma_cap_roce_gid_table(ib_dev, port)) {
		table->data_vec[ix].props |= GID_TABLE_ENTRY_INVALID;
		write_unlock_irq(&table->rwlock);
		/* GID_TABLE_WRITE_ACTION_MODIFY currently isn't supported by
		 * RoCE providers and thus only updates the cache.
		 */
		if (action == GID_TABLE_WRITE_ACTION_ADD)
			ret = ib_dev->add_gid(ib_dev, port, ix, gid, attr,
					      &table->data_vec[ix].context);
		else if (action == GID_TABLE_WRITE_ACTION_DEL)
			ret = ib_dev->del_gid(ib_dev, port, ix,
					      &table->data_vec[ix].context);
		write_lock_irq(&table->rwlock);
	}

	old_net_dev = table->data_vec[ix].attr.ndev;
	old_gid_type = table->data_vec[ix].attr.gid_type;
	if (old_net_dev && old_net_dev != attr->ndev)
		dev_put(old_net_dev);
	/* if modify_gid failed, just delete the old gid */
	if (ret || action == GID_TABLE_WRITE_ACTION_DEL) {
		gid = &zgid;
		attr = &zattr;
		table->data_vec[ix].context = NULL;
	}

	memcpy(&table->data_vec[ix].gid, gid, sizeof(*gid));
	memcpy(&table->data_vec[ix].attr, attr, sizeof(*attr));
	if (default_gid) {
		table->data_vec[ix].props |= GID_TABLE_ENTRY_DEFAULT;
		if (action == GID_TABLE_WRITE_ACTION_DEL)
			table->data_vec[ix].attr.gid_type = old_gid_type;
	}
	if (table->data_vec[ix].attr.ndev &&
	    table->data_vec[ix].attr.ndev != old_net_dev)
		dev_hold(table->data_vec[ix].attr.ndev);

	table->data_vec[ix].props &= ~GID_TABLE_ENTRY_INVALID;

	return ret;
}

static int add_gid(struct ib_device *ib_dev, u8 port,
		   struct ib_gid_table *table, int ix,
		   const union ib_gid *gid,
		   const struct ib_gid_attr *attr,
		   bool  default_gid) {
	return write_gid(ib_dev, port, table, ix, gid, attr,
			 GID_TABLE_WRITE_ACTION_ADD, default_gid);
}

static int modify_gid(struct ib_device *ib_dev, u8 port,
		      struct ib_gid_table *table, int ix,
		      const union ib_gid *gid,
		      const struct ib_gid_attr *attr,
		      bool  default_gid) {
	return write_gid(ib_dev, port, table, ix, gid, attr,
			 GID_TABLE_WRITE_ACTION_MODIFY, default_gid);
}

static int del_gid(struct ib_device *ib_dev, u8 port,
		   struct ib_gid_table *table, int ix,
		   bool  default_gid) {
	return write_gid(ib_dev, port, table, ix, &zgid, &zattr,
			 GID_TABLE_WRITE_ACTION_DEL, default_gid);
}

/* rwlock should be read locked */
static int find_gid(struct ib_gid_table *table, const union ib_gid *gid,
		    const struct ib_gid_attr *val, bool default_gid,
		    unsigned long mask, int *pempty)
{
	int i = 0;
	int found = -1;
	int empty = pempty ? -1 : 0;

	while (i < table->sz && (found < 0 || empty < 0)) {
		struct ib_gid_table_entry *data = &table->data_vec[i];
		struct ib_gid_attr *attr = &data->attr;
		int curr_index = i;

		i++;

		if (data->props & GID_TABLE_ENTRY_INVALID)
			continue;

		if (empty < 0)
			if (!memcmp(&data->gid, &zgid, sizeof(*gid)) &&
			    !memcmp(attr, &zattr, sizeof(*attr)) &&
			    !data->props)
				empty = curr_index;

		if (found >= 0)
			continue;

		if (mask & GID_ATTR_FIND_MASK_GID_TYPE &&
		    attr->gid_type != val->gid_type)
			continue;

		if (mask & GID_ATTR_FIND_MASK_GID &&
		    memcmp(gid, &data->gid, sizeof(*gid)))
			continue;

		if (mask & GID_ATTR_FIND_MASK_NETDEV &&
		    attr->ndev != val->ndev)
			continue;

		if (mask & GID_ATTR_FIND_MASK_DEFAULT &&
		    !!(data->props & GID_TABLE_ENTRY_DEFAULT) !=
		    default_gid)
			continue;

		found = curr_index;
	}

	if (pempty)
		*pempty = empty;

	return found;
}

static void make_default_gid(struct  net_device *dev, union ib_gid *gid)
{
	gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
	addrconf_ifid_eui48(&gid->raw[8], dev);
}

int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
		     union ib_gid *gid, struct ib_gid_attr *attr)
{
	struct ib_gid_table *table;
	int ix;
	int ret = 0;
	struct net_device *idev;
	int empty;

	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

	if (!memcmp(gid, &zgid, sizeof(*gid)))
		return -EINVAL;

	if (ib_dev->get_netdev) {
		idev = ib_dev->get_netdev(ib_dev, port);
		if (idev && attr->ndev != idev) {
			union ib_gid default_gid;

			/* Adding default GIDs is not permitted */
			make_default_gid(idev, &default_gid);
			if (!memcmp(gid, &default_gid, sizeof(*gid))) {
				dev_put(idev);
				return -EPERM;
			}
		}
		if (idev)
			dev_put(idev);
	}

	mutex_lock(&table->lock);
	write_lock_irq(&table->rwlock);

	ix = find_gid(table, gid, attr, false, GID_ATTR_FIND_MASK_GID |
		      GID_ATTR_FIND_MASK_GID_TYPE |
		      GID_ATTR_FIND_MASK_NETDEV, &empty);
	if (ix >= 0)
		goto out_unlock;

	if (empty < 0) {
		ret = -ENOSPC;
		goto out_unlock;
	}

	ret = add_gid(ib_dev, port, table, empty, gid, attr, false);
	if (!ret)
		dispatch_gid_change_event(ib_dev, port);

out_unlock:
	write_unlock_irq(&table->rwlock);
	mutex_unlock(&table->lock);
	return ret;
}

int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
		     union ib_gid *gid, struct ib_gid_attr *attr)
{
	struct ib_gid_table *table;
	int ix;

	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

	mutex_lock(&table->lock);
	write_lock_irq(&table->rwlock);

	ix = find_gid(table, gid, attr, false,
		      GID_ATTR_FIND_MASK_GID	  |
		      GID_ATTR_FIND_MASK_GID_TYPE |
		      GID_ATTR_FIND_MASK_NETDEV	  |
		      GID_ATTR_FIND_MASK_DEFAULT,
		      NULL);
	if (ix < 0)
		goto out_unlock;

	if (!del_gid(ib_dev, port, table, ix, false))
		dispatch_gid_change_event(ib_dev, port);

out_unlock:
	write_unlock_irq(&table->rwlock);
	mutex_unlock(&table->lock);
	return 0;
}

int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
				     struct net_device *ndev)
{
	struct ib_gid_table *table;
	int ix;
	bool deleted = false;

	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

	mutex_lock(&table->lock);
	write_lock_irq(&table->rwlock);

	for (ix = 0; ix < table->sz; ix++)
		if (table->data_vec[ix].attr.ndev == ndev)
			if (!del_gid(ib_dev, port, table, ix,
				     !!(table->data_vec[ix].props &
					GID_TABLE_ENTRY_DEFAULT)))
				deleted = true;

	write_unlock_irq(&table->rwlock);
	mutex_unlock(&table->lock);

	if (deleted)
		dispatch_gid_change_event(ib_dev, port);

	return 0;
}

static int __ib_cache_gid_get(struct ib_device *ib_dev, u8 port, int index,
			      union ib_gid *gid, struct ib_gid_attr *attr)
{
	struct ib_gid_table *table;

	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

	if (index < 0 || index >= table->sz)
		return -EINVAL;

	if (table->data_vec[index].props & GID_TABLE_ENTRY_INVALID)
		return -EAGAIN;

	memcpy(gid, &table->data_vec[index].gid, sizeof(*gid));
	if (attr) {
		memcpy(attr, &table->data_vec[index].attr, sizeof(*attr));
		if (attr->ndev)
			dev_hold(attr->ndev);
	}

	return 0;
}

static int _ib_cache_gid_table_find(struct ib_device *ib_dev,
				    const union ib_gid *gid,
				    const struct ib_gid_attr *val,
				    unsigned long mask,
				    u8 *port, u16 *index)
{
	struct ib_gid_table *table;
	u8 p;
	int local_index;
	unsigned long flags;

	for (p = 0; p < ib_dev->phys_port_cnt; p++) {
		table = ib_dev->cache.ports[p].gid;
		read_lock_irqsave(&table->rwlock, flags);
		local_index = find_gid(table, gid, val, false, mask, NULL);
		if (local_index >= 0) {
			if (index)
				*index = local_index;
			if (port)
				*port = p + rdma_start_port(ib_dev);
			read_unlock_irqrestore(&table->rwlock, flags);
			return 0;
		}
		read_unlock_irqrestore(&table->rwlock, flags);
	}

	return -ENOENT;
}

static int ib_cache_gid_find(struct ib_device *ib_dev,
			     const union ib_gid *gid,
			     enum ib_gid_type gid_type,
			     struct net_device *ndev, u8 *port,
			     u16 *index)
{
	unsigned long mask = GID_ATTR_FIND_MASK_GID |
			     GID_ATTR_FIND_MASK_GID_TYPE;
	struct ib_gid_attr gid_attr_val = {.ndev = ndev, .gid_type = gid_type};

	if (ndev)
		mask |= GID_ATTR_FIND_MASK_NETDEV;

	return _ib_cache_gid_table_find(ib_dev, gid, &gid_attr_val,
					mask, port, index);
}

/**
 * ib_find_cached_gid_by_port - Returns the GID table index where a specified
 * GID value occurs. It searches for the specified GID value in the local
 * software cache.
 * @device: The device to query.
 * @gid: The GID value to search for.
 * @gid_type: The GID type to search for.
 * @port_num: The port number of the device where the GID value should be
 *   searched.
 * @ndev: In RoCE, the net device of the device. Null means ignore.
 * @index: The index into the cached GID table where the GID was found. This
 *   parameter may be NULL.
 */
int ib_find_cached_gid_by_port(struct ib_device *ib_dev,
			       const union ib_gid *gid,
			       enum ib_gid_type gid_type,
			       u8 port, struct net_device *ndev,
			       u16 *index)
{
	int local_index;
	struct ib_gid_table *table;
	unsigned long mask = GID_ATTR_FIND_MASK_GID |
			     GID_ATTR_FIND_MASK_GID_TYPE;
	struct ib_gid_attr val = {.ndev = ndev, .gid_type = gid_type};
	unsigned long flags;

	if (!rdma_is_port_valid(ib_dev, port))
		return -ENOENT;

	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

	if (ndev)
		mask |= GID_ATTR_FIND_MASK_NETDEV;

	read_lock_irqsave(&table->rwlock, flags);
	local_index = find_gid(table, gid, &val, false, mask, NULL);
	if (local_index >= 0) {
		if (index)
			*index = local_index;
		read_unlock_irqrestore(&table->rwlock, flags);
		return 0;
	}

	read_unlock_irqrestore(&table->rwlock, flags);
	return -ENOENT;
}
EXPORT_SYMBOL(ib_find_cached_gid_by_port);
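
/*
 * Illustrative sketch, not part of the driver: resolving a GID to its
 * table index on a known port while ignoring the netdev binding. The
 * device, sgid and port_num variables are placeholders:
 *
 *	u16 gid_index;
 *	int ret;
 *
 *	ret = ib_find_cached_gid_by_port(device, &sgid, IB_GID_TYPE_IB,
 *					 port_num, NULL, &gid_index);
 *	if (ret)
 *		return ret;
 *
 * A lookup miss returns -ENOENT. Passing a non-NULL ndev additionally
 * requires the cached entry to be bound to that net_device
 * (GID_ATTR_FIND_MASK_NETDEV).
 */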

/**
 * ib_cache_gid_find_by_filter - Returns the GID table index where a specified
 * GID value occurs
 * @device: The device to query.
 * @gid: The GID value to search for.
 * @port_num: The port number of the device where the GID value could be
 *   searched.
 * @filter: The filter function is executed on any matching GID in the table.
 *   If the filter function returns true, the corresponding index is returned,
 *   otherwise, we continue searching the GID table. It's guaranteed that
 *   while filter is executed, ndev field is valid and the structure won't
 *   change. filter is executed in an atomic context. filter must not be NULL.
 * @index: The index into the cached GID table where the GID was found. This
 *   parameter may be NULL.
 *
 * ib_cache_gid_find_by_filter() searches for the specified GID value
 * of which the filter function returns true in the port's GID table.
 * This function is only supported on RoCE ports.
 *
 */
static int ib_cache_gid_find_by_filter(struct ib_device *ib_dev,
				       const union ib_gid *gid,
				       u8 port,
				       bool (*filter)(const union ib_gid *,
						      const struct ib_gid_attr *,
						      void *),
				       void *context,
				       u16 *index)
{
	struct ib_gid_table *table;
	unsigned int i;
	unsigned long flags;
	bool found = false;


	if (!rdma_is_port_valid(ib_dev, port) ||
	    !rdma_protocol_roce(ib_dev, port))
		return -EPROTONOSUPPORT;

	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

	read_lock_irqsave(&table->rwlock, flags);
	for (i = 0; i < table->sz; i++) {
		struct ib_gid_attr attr;

		if (table->data_vec[i].props & GID_TABLE_ENTRY_INVALID)
			continue;

		if (memcmp(gid, &table->data_vec[i].gid, sizeof(*gid)))
			continue;

		memcpy(&attr, &table->data_vec[i].attr, sizeof(attr));

		if (filter(gid, &attr, context)) {
			found = true;
			if (index)
				*index = i;
			break;
		}
	}
	read_unlock_irqrestore(&table->rwlock, flags);

	if (!found)
		return -ENOENT;
	return 0;
}

static struct ib_gid_table *alloc_gid_table(int sz)
{
	struct ib_gid_table *table =
		kzalloc(sizeof(struct ib_gid_table), GFP_KERNEL);

	if (!table)
		return NULL;

	table->data_vec = kcalloc(sz, sizeof(*table->data_vec), GFP_KERNEL);
	if (!table->data_vec)
		goto err_free_table;

	mutex_init(&table->lock);

	table->sz = sz;
	rwlock_init(&table->rwlock);

	return table;

err_free_table:
	kfree(table);
	return NULL;
}

static void release_gid_table(struct ib_gid_table *table)
{
	if (table) {
		kfree(table->data_vec);
		kfree(table);
	}
}

static void cleanup_gid_table_port(struct ib_device *ib_dev, u8 port,
				   struct ib_gid_table *table)
{
	int i;
	bool deleted = false;

	if (!table)
		return;

	write_lock_irq(&table->rwlock);
	for (i = 0; i < table->sz; ++i) {
		if (memcmp(&table->data_vec[i].gid, &zgid,
			   sizeof(table->data_vec[i].gid)))
			if (!del_gid(ib_dev, port, table, i,
				     !!(table->data_vec[i].props &
					GID_TABLE_ENTRY_DEFAULT)))
				deleted = true;
	}
	write_unlock_irq(&table->rwlock);

	if (deleted)
		dispatch_gid_change_event(ib_dev, port);
}

void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
				  struct net_device *ndev,
				  unsigned long gid_type_mask,
				  enum ib_cache_gid_default_mode mode)
{
	union ib_gid gid;
	struct ib_gid_attr gid_attr;
	struct ib_gid_attr zattr_type = zattr;
	struct ib_gid_table *table;
	unsigned int gid_type;

	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

	make_default_gid(ndev, &gid);
	memset(&gid_attr, 0, sizeof(gid_attr));
	gid_attr.ndev = ndev;

	for (gid_type = 0; gid_type < IB_GID_TYPE_SIZE; ++gid_type) {
		int ix;
		union ib_gid current_gid;
		struct ib_gid_attr current_gid_attr = {};

		if (1UL << gid_type & ~gid_type_mask)
			continue;

		gid_attr.gid_type = gid_type;

		mutex_lock(&table->lock);
		write_lock_irq(&table->rwlock);
		ix = find_gid(table, NULL, &gid_attr, true,
			      GID_ATTR_FIND_MASK_GID_TYPE |
			      GID_ATTR_FIND_MASK_DEFAULT,
			      NULL);

		/* Couldn't find default GID location */
		if (WARN_ON(ix < 0))
			goto release;

		zattr_type.gid_type = gid_type;

		if (!__ib_cache_gid_get(ib_dev, port, ix,
					&current_gid, &current_gid_attr) &&
		    mode == IB_CACHE_GID_DEFAULT_MODE_SET &&
		    !memcmp(&gid, &current_gid, sizeof(gid)) &&
		    !memcmp(&gid_attr, &current_gid_attr, sizeof(gid_attr)))
			goto release;

		if (memcmp(&current_gid, &zgid, sizeof(current_gid)) ||
		    memcmp(&current_gid_attr, &zattr_type,
			   sizeof(current_gid_attr))) {
			if (del_gid(ib_dev, port, table, ix, true)) {
				pr_warn("ib_cache_gid: can't delete index %d for default gid %pI6\n",
					ix, gid.raw);
				goto release;
			} else {
				dispatch_gid_change_event(ib_dev, port);
			}
		}

		if (mode == IB_CACHE_GID_DEFAULT_MODE_SET) {
			if (add_gid(ib_dev, port, table, ix, &gid, &gid_attr, true))
				pr_warn("ib_cache_gid: unable to add default gid %pI6\n",
					gid.raw);
			else
				dispatch_gid_change_event(ib_dev, port);
		}

release:
		if (current_gid_attr.ndev)
			dev_put(current_gid_attr.ndev);
		write_unlock_irq(&table->rwlock);
		mutex_unlock(&table->lock);
	}
}

static int gid_table_reserve_default(struct ib_device *ib_dev, u8 port,
				     struct ib_gid_table *table)
{
	unsigned int i;
	unsigned long roce_gid_type_mask;
	unsigned int num_default_gids;
	unsigned int current_gid = 0;

	roce_gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
	num_default_gids = hweight_long(roce_gid_type_mask);
	for (i = 0; i < num_default_gids && i < table->sz; i++) {
		struct ib_gid_table_entry *entry =
			&table->data_vec[i];

		entry->props |= GID_TABLE_ENTRY_DEFAULT;
		current_gid = find_next_bit(&roce_gid_type_mask,
					    BITS_PER_LONG,
					    current_gid);
		entry->attr.gid_type = current_gid++;
	}

	return 0;
}

static int _gid_table_setup_one(struct ib_device *ib_dev)
{
	u8 port;
	struct ib_gid_table *table;
	int err = 0;

	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
		u8 rdma_port = port + rdma_start_port(ib_dev);

		table =
			alloc_gid_table(
				ib_dev->port_immutable[rdma_port].gid_tbl_len);
		if (!table) {
			err = -ENOMEM;
			goto rollback_table_setup;
		}

		err = gid_table_reserve_default(ib_dev,
						port + rdma_start_port(ib_dev),
						table);
		if (err)
			goto rollback_table_setup;
		ib_dev->cache.ports[port].gid = table;
	}

	return 0;

rollback_table_setup:
	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
		table = ib_dev->cache.ports[port].gid;

		cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
				       table);
		release_gid_table(table);
	}

	return err;
}

static void gid_table_release_one(struct ib_device *ib_dev)
{
	struct ib_gid_table *table;
	u8 port;

	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
		table = ib_dev->cache.ports[port].gid;
		release_gid_table(table);
		ib_dev->cache.ports[port].gid = NULL;
	}
}

static void gid_table_cleanup_one(struct ib_device *ib_dev)
{
	struct ib_gid_table *table;
	u8 port;

	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
		table = ib_dev->cache.ports[port].gid;
		cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
				       table);
	}
}

static int gid_table_setup_one(struct ib_device *ib_dev)
{
	int err;

	err = _gid_table_setup_one(ib_dev);

	if (err)
		return err;

	rdma_roce_rescan_device(ib_dev);

	return err;
}

int ib_get_cached_gid(struct ib_device *device,
		      u8                port_num,
		      int               index,
		      union ib_gid     *gid,
		      struct ib_gid_attr *gid_attr)
{
	int res;
	unsigned long flags;
	struct ib_gid_table *table;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	table = device->cache.ports[port_num - rdma_start_port(device)].gid;
	read_lock_irqsave(&table->rwlock, flags);
	res = __ib_cache_gid_get(device, port_num, index, gid, gid_attr);
	read_unlock_irqrestore(&table->rwlock, flags);

	return res;
}
EXPORT_SYMBOL(ib_get_cached_gid);
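
/*
 * Illustrative sketch, not part of the driver: when a gid_attr is
 * requested, __ib_cache_gid_get() takes a reference on attr->ndev, so
 * the caller is responsible for dropping it:
 *
 *	union ib_gid gid;
 *	struct ib_gid_attr attr;
 *
 *	if (!ib_get_cached_gid(device, port_num, 0, &gid, &attr)) {
 *		... use gid and attr ...
 *		if (attr.ndev)
 *			dev_put(attr.ndev);
 *	}
 *
 * -EAGAIN is returned while the entry is marked GID_TABLE_ENTRY_INVALID,
 * i.e. while an update of that slot is in flight.
 */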

/**
 * ib_find_cached_gid - Returns the port number and GID table index where
 *   a specified GID value occurs.
 * @device: The device to query.
 * @gid: The GID value to search for.
 * @gid_type: The GID type to search for.
 * @ndev: In RoCE, the net device of the device. NULL means ignore.
 * @port_num: The port number of the device where the GID value was found.
 * @index: The index into the cached GID table where the GID was found.  This
 *   parameter may be NULL.
 *
 * ib_find_cached_gid() searches for the specified GID value in
 * the local software cache.
 */
int ib_find_cached_gid(struct ib_device *device,
		       const union ib_gid *gid,
		       enum ib_gid_type gid_type,
		       struct net_device *ndev,
		       u8               *port_num,
		       u16              *index)
{
	return ib_cache_gid_find(device, gid, gid_type, ndev, port_num, index);
}
EXPORT_SYMBOL(ib_find_cached_gid);

int ib_find_gid_by_filter(struct ib_device *device,
			  const union ib_gid *gid,
			  u8 port_num,
			  bool (*filter)(const union ib_gid *gid,
					 const struct ib_gid_attr *,
					 void *),
			  void *context, u16 *index)
{
	/* Only RoCE GID table supports filter function */
	if (!rdma_cap_roce_gid_table(device, port_num) && filter)
		return -EPROTONOSUPPORT;

	return ib_cache_gid_find_by_filter(device, gid,
					   port_num, filter,
					   context, index);
}
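
/*
 * Illustrative sketch, not part of the driver: a filter callback returns
 * true for the entry that should be selected. It runs under the table
 * rwlock (atomic context), so it must not sleep. The helper name below
 * is hypothetical:
 *
 *	static bool match_rocev2_on_ndev(const union ib_gid *gid,
 *					 const struct ib_gid_attr *attr,
 *					 void *context)
 *	{
 *		return attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP &&
 *		       attr->ndev == context;
 *	}
 *
 *	err = ib_find_gid_by_filter(device, &sgid, port_num,
 *				    match_rocev2_on_ndev, ndev, &index);
 */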

int ib_get_cached_pkey(struct ib_device *device,
		       u8                port_num,
		       int               index,
		       u16              *pkey)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int ret = 0;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;

	if (index < 0 || index >= cache->table_len)
		ret = -EINVAL;
	else
		*pkey = cache->table[index];

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_pkey);

int ib_get_cached_subnet_prefix(struct ib_device *device,
				u8                port_num,
				u64              *sn_pfx)
{
	unsigned long flags;
	int p;

	if (port_num < rdma_start_port(device) ||
	    port_num > rdma_end_port(device))
		return -EINVAL;

	p = port_num - rdma_start_port(device);
	read_lock_irqsave(&device->cache.lock, flags);
	*sn_pfx = device->cache.ports[p].subnet_prefix;
	read_unlock_irqrestore(&device->cache.lock, flags);

	return 0;
}
EXPORT_SYMBOL(ib_get_cached_subnet_prefix);

int ib_find_cached_pkey(struct ib_device *device,
			u8                port_num,
			u16               pkey,
			u16              *index)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int i;
	int ret = -ENOENT;
	int partial_ix = -1;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;

	*index = -1;

	for (i = 0; i < cache->table_len; ++i)
		if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) {
			if (cache->table[i] & 0x8000) {
				*index = i;
				ret = 0;
				break;
			} else
				partial_ix = i;
		}

	if (ret && partial_ix >= 0) {
		*index = partial_ix;
		ret = 0;
	}

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_find_cached_pkey);
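
/*
 * Illustrative sketch, not part of the driver: looking up the table index
 * of a P_Key. ib_find_cached_pkey() matches on the low 15 bits and
 * prefers a full-membership entry (bit 15 set) over a limited-membership
 * one:
 *
 *	u16 pkey_index;
 *
 *	if (!ib_find_cached_pkey(device, port_num, 0xffff, &pkey_index))
 *		... pkey_index now holds the matching slot ...
 *
 * Use ib_find_exact_cached_pkey() below when the membership bit must
 * match as well.
 */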

int ib_find_exact_cached_pkey(struct ib_device *device,
			      u8                port_num,
			      u16               pkey,
			      u16              *index)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int i;
	int ret = -ENOENT;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;

	*index = -1;

	for (i = 0; i < cache->table_len; ++i)
		if (cache->table[i] == pkey) {
			*index = i;
			ret = 0;
			break;
		}

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_find_exact_cached_pkey);

int ib_get_cached_lmc(struct ib_device *device,
		      u8                port_num,
		      u8                *lmc)
{
	unsigned long flags;
	int ret = 0;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);
	*lmc = device->cache.ports[port_num - rdma_start_port(device)].lmc;
	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_lmc);

int ib_get_cached_port_state(struct ib_device   *device,
			     u8                  port_num,
			     enum ib_port_state *port_state)
{
	unsigned long flags;
	int ret = 0;

	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);
	*port_state = device->cache.ports[port_num
		- rdma_start_port(device)].port_state;
	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_port_state);

static void ib_cache_update(struct ib_device *device,
			    u8                port,
			    bool	      enforce_security)
{
	struct ib_port_attr       *tprops = NULL;
	struct ib_pkey_cache      *pkey_cache = NULL, *old_pkey_cache;
	struct ib_gid_cache {
		int             table_len;
		union ib_gid    table[0];
	}			  *gid_cache = NULL;
	int                        i;
	int                        ret;
	struct ib_gid_table	  *table;
	bool			   use_roce_gid_table =
					rdma_cap_roce_gid_table(device, port);

	if (!rdma_is_port_valid(device, port))
		return;

	table = device->cache.ports[port - rdma_start_port(device)].gid;

	tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
	if (!tprops)
		return;

	ret = ib_query_port(device, port, tprops);
	if (ret) {
		pr_warn("ib_query_port failed (%d) for %s\n",
			ret, device->name);
		goto err;
	}

	pkey_cache = kmalloc(sizeof *pkey_cache + tprops->pkey_tbl_len *
			     sizeof *pkey_cache->table, GFP_KERNEL);
	if (!pkey_cache)
		goto err;

	pkey_cache->table_len = tprops->pkey_tbl_len;

	if (!use_roce_gid_table) {
		gid_cache = kmalloc(sizeof(*gid_cache) + tprops->gid_tbl_len *
			    sizeof(*gid_cache->table), GFP_KERNEL);
		if (!gid_cache)
			goto err;

		gid_cache->table_len = tprops->gid_tbl_len;
	}

	for (i = 0; i < pkey_cache->table_len; ++i) {
		ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
		if (ret) {
			pr_warn("ib_query_pkey failed (%d) for %s (index %d)\n",
				ret, device->name, i);
			goto err;
		}
	}

	if (!use_roce_gid_table) {
		for (i = 0;  i < gid_cache->table_len; ++i) {
			ret = ib_query_gid(device, port, i,
					   gid_cache->table + i, NULL);
			if (ret) {
				pr_warn("ib_query_gid failed (%d) for %s (index %d)\n",
					ret, device->name, i);
				goto err;
			}
		}
	}

	write_lock_irq(&device->cache.lock);

	old_pkey_cache = device->cache.ports[port -
		rdma_start_port(device)].pkey;

	device->cache.ports[port - rdma_start_port(device)].pkey = pkey_cache;
	if (!use_roce_gid_table) {
		write_lock(&table->rwlock);
		for (i = 0; i < gid_cache->table_len; i++) {
			modify_gid(device, port, table, i, gid_cache->table + i,
				   &zattr, false);
		}
		write_unlock(&table->rwlock);
	}

	device->cache.ports[port - rdma_start_port(device)].lmc = tprops->lmc;
	device->cache.ports[port - rdma_start_port(device)].port_state =
		tprops->state;

	device->cache.ports[port - rdma_start_port(device)].subnet_prefix =
							tprops->subnet_prefix;
	write_unlock_irq(&device->cache.lock);

	if (enforce_security)
		ib_security_cache_change(device,
					 port,
					 tprops->subnet_prefix);

	kfree(gid_cache);
	kfree(old_pkey_cache);
	kfree(tprops);
	return;

err:
	kfree(pkey_cache);
	kfree(gid_cache);
	kfree(tprops);
}

static void ib_cache_task(struct work_struct *_work)
{
	struct ib_update_work *work =
		container_of(_work, struct ib_update_work, work);

	ib_cache_update(work->device,
			work->port_num,
			work->enforce_security);
	kfree(work);
}

static void ib_cache_event(struct ib_event_handler *handler,
			   struct ib_event *event)
{
	struct ib_update_work *work;

	if (event->event == IB_EVENT_PORT_ERR    ||
	    event->event == IB_EVENT_PORT_ACTIVE ||
	    event->event == IB_EVENT_LID_CHANGE  ||
	    event->event == IB_EVENT_PKEY_CHANGE ||
	    event->event == IB_EVENT_SM_CHANGE   ||
	    event->event == IB_EVENT_CLIENT_REREGISTER ||
	    event->event == IB_EVENT_GID_CHANGE) {
		work = kmalloc(sizeof *work, GFP_ATOMIC);
		if (work) {
			INIT_WORK(&work->work, ib_cache_task);
			work->device   = event->device;
			work->port_num = event->element.port_num;
			if (event->event == IB_EVENT_PKEY_CHANGE ||
			    event->event == IB_EVENT_GID_CHANGE)
				work->enforce_security = true;
			else
				work->enforce_security = false;

			queue_work(ib_wq, &work->work);
		}
	}
}

int ib_cache_setup_one(struct ib_device *device)
{
	int p;
	int err;

	rwlock_init(&device->cache.lock);

	device->cache.ports =
		kzalloc(sizeof(*device->cache.ports) *
			(rdma_end_port(device) - rdma_start_port(device) + 1), GFP_KERNEL);
	if (!device->cache.ports)
		return -ENOMEM;

	err = gid_table_setup_one(device);
	if (err) {
		kfree(device->cache.ports);
		device->cache.ports = NULL;
		return err;
	}

	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
		ib_cache_update(device, p + rdma_start_port(device), true);

	INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
			      device, ib_cache_event);
	ib_register_event_handler(&device->cache.event_handler);
	return 0;
}

void ib_cache_release_one(struct ib_device *device)
{
	int p;

	/*
	 * The release function frees all the cache elements.
	 * This function should be called as part of freeing
	 * all the device's resources when the cache could no
	 * longer be accessed.
	 */
	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
		kfree(device->cache.ports[p].pkey);

	gid_table_release_one(device);
	kfree(device->cache.ports);
}

void ib_cache_cleanup_one(struct ib_device *device)
{
	/* The cleanup function unregisters the event handler,
	 * waits for all in-progress workqueue elements and cleans
	 * up the GID cache. This function should be called after
	 * the device was removed from the devices list and all
	 * clients were removed, so the cache exists but is
	 * non-functional and shouldn't be updated anymore.
	 */
	ib_unregister_event_handler(&device->cache.event_handler);
	flush_workqueue(ib_wq);
	gid_table_cleanup_one(device);
}

void __init ib_cache_setup(void)
{
	roce_gid_mgmt_init();
}

void __exit ib_cache_cleanup(void)
{
	roce_gid_mgmt_cleanup();
}