main.c 24.3 KB
Newer Older
1 2
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
S
Simon Horman 已提交
3 4

#include <linux/etherdevice.h>
5
#include <linux/lockdep.h>
S
Simon Horman 已提交
6 7
#include <linux/pci.h>
#include <linux/skbuff.h>
8
#include <linux/vmalloc.h>
S
Simon Horman 已提交
9 10 11
#include <net/devlink.h>
#include <net/dst_metadata.h>

12
#include "main.h"
S
Simon Horman 已提交
13
#include "../nfpcore/nfp_cpp.h"
14
#include "../nfpcore/nfp_nffw.h"
S
Simon Horman 已提交
15 16 17 18 19 20 21 22
#include "../nfpcore/nfp_nsp.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "../nfp_net_repr.h"
#include "../nfp_port.h"
#include "./cmsg.h"

23 24
#define NFP_FLOWER_ALLOWED_VER 0x0001000000010000UL

25 26 27
#define NFP_MIN_INT_PORT_ID	1
#define NFP_MAX_INT_PORT_ID	256

S
Simon Horman 已提交
28 29 30 31 32 33 34 35 36 37
/* String shown for this app in capability/debug output. */
static const char *nfp_flower_extra_cap(struct nfp_app *app, struct nfp_net *nn)
{
	return "FLOWER";
}

/* Flower always operates in switchdev eswitch mode. */
static enum devlink_eswitch_mode eswitch_mode_get(struct nfp_app *app)
{
	return DEVLINK_ESWITCH_MODE_SWITCHDEV;
}

38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93
static int
nfp_flower_lookup_internal_port_id(struct nfp_flower_priv *priv,
				   struct net_device *netdev)
{
	struct net_device *entry;
	int i, id = 0;

	rcu_read_lock();
	idr_for_each_entry(&priv->internal_ports.port_ids, entry, i)
		if (entry == netdev) {
			id = i;
			break;
		}
	rcu_read_unlock();

	return id;
}

/* Return the internal port ID for @netdev, allocating a new one from
 * the IDR on first use.  IDs start at NFP_MIN_INT_PORT_ID so 0 can act
 * as "no ID"; a negative idr_alloc() error code is passed through.
 */
static int
nfp_flower_get_internal_port_id(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_flower_priv *priv = app->priv;
	int id;

	id = nfp_flower_lookup_internal_port_id(priv, netdev);
	if (id > 0)
		return id;

	/* Preload outside the lock so the allocation inside it is atomic. */
	idr_preload(GFP_ATOMIC);
	spin_lock_bh(&priv->internal_ports.lock);
	id = idr_alloc(&priv->internal_ports.port_ids, netdev,
		       NFP_MIN_INT_PORT_ID, NFP_MAX_INT_PORT_ID, GFP_ATOMIC);
	spin_unlock_bh(&priv->internal_ports.lock);
	idr_preload_end();

	return id;
}

/* Map a netdev to the firmware port ID used in control messages.
 * Returns 0 when the netdev cannot be expressed as an NFP port.
 */
u32 nfp_flower_get_port_id_from_netdev(struct nfp_app *app,
				       struct net_device *netdev)
{
	int internal_id;

	if (nfp_netdev_is_nfp_repr(netdev))
		return nfp_repr_get_port_id(netdev);

	if (!nfp_flower_internal_port_can_offload(app, netdev))
		return 0;

	internal_id = nfp_flower_get_internal_port_id(app, netdev);
	if (internal_id < 0)
		return 0;

	return nfp_flower_internal_port_get_port_id(internal_id);
}

94 95 96 97 98 99 100 101 102 103 104 105 106
/* Resolve an internal port ID back to its netdev (NULL if not found). */
static struct net_device *
nfp_flower_get_netdev_from_internal_port_id(struct nfp_app *app, int port_id)
{
	struct nfp_flower_priv *priv = app->priv;
	struct net_device *found;

	rcu_read_lock();
	found = idr_find(&priv->internal_ports.port_ids, port_id);
	rcu_read_unlock();

	return found;
}

107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144
/* Release the internal port ID held by @netdev, if it has one. */
static void
nfp_flower_free_internal_port_id(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_flower_priv *priv = app->priv;
	int port_id;

	port_id = nfp_flower_lookup_internal_port_id(priv, netdev);
	if (!port_id)
		return;

	spin_lock_bh(&priv->internal_ports.lock);
	idr_remove(&priv->internal_ports.port_ids, port_id);
	spin_unlock_bh(&priv->internal_ports.lock);
}

/* Netdev notifier hook: drop the internal port ID when an offloadable
 * internal port is unregistered.  Always lets the event propagate.
 */
static int
nfp_flower_internal_port_event_handler(struct nfp_app *app,
				       struct net_device *netdev,
				       unsigned long event)
{
	if (event != NETDEV_UNREGISTER)
		return NOTIFY_OK;

	if (nfp_flower_internal_port_can_offload(app, netdev))
		nfp_flower_free_internal_port_id(app, netdev);

	return NOTIFY_OK;
}

/* Initialize the internal port ID allocator and its lock. */
static void nfp_flower_internal_port_init(struct nfp_flower_priv *priv)
{
	spin_lock_init(&priv->internal_ports.lock);
	idr_init(&priv->internal_ports.port_ids);
}

/* Release all memory held by the internal port IDR. */
static void nfp_flower_internal_port_cleanup(struct nfp_flower_priv *priv)
{
	idr_destroy(&priv->internal_ports.port_ids);
}

145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209
static struct nfp_flower_non_repr_priv *
nfp_flower_non_repr_priv_lookup(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_flower_non_repr_priv *entry;

	ASSERT_RTNL();

	list_for_each_entry(entry, &priv->non_repr_priv, list)
		if (entry->netdev == netdev)
			return entry;

	return NULL;
}

/* Take an extra reference on a non-repr private entry. */
void
__nfp_flower_non_repr_priv_get(struct nfp_flower_non_repr_priv *non_repr_priv)
{
	non_repr_priv->ref_count++;
}

/* Get (creating on first use) the refcounted non-repr private entry for
 * @netdev.  Returns NULL on allocation failure.
 */
struct nfp_flower_non_repr_priv *
nfp_flower_non_repr_priv_get(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_flower_non_repr_priv *entry;

	entry = nfp_flower_non_repr_priv_lookup(app, netdev);
	if (!entry) {
		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry)
			return NULL;

		entry->netdev = netdev;
		list_add(&entry->list, &priv->non_repr_priv);
	}

	__nfp_flower_non_repr_priv_get(entry);
	return entry;
}

/* Drop one reference; unlink and free the entry when it hits zero. */
void
__nfp_flower_non_repr_priv_put(struct nfp_flower_non_repr_priv *non_repr_priv)
{
	non_repr_priv->ref_count--;
	if (non_repr_priv->ref_count == 0) {
		list_del(&non_repr_priv->list);
		kfree(non_repr_priv);
	}
}

/* Drop a reference on the non-repr entry for @netdev, if one exists. */
void
nfp_flower_non_repr_priv_put(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_flower_non_repr_priv *entry;

	entry = nfp_flower_non_repr_priv_lookup(app, netdev);
	if (entry)
		__nfp_flower_non_repr_priv_put(entry);
}

S
Simon Horman 已提交
210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227
/* Decode a firmware port ID into a representor type, writing the
 * per-type port/vNIC number to @port.  Returns __NFP_REPR_TYPE_MAX for
 * port types without a representor.
 */
static enum nfp_repr_type
nfp_flower_repr_get_type_and_port(struct nfp_app *app, u32 port_id, u8 *port)
{
	switch (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port_id)) {
	case NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT:
		*port = FIELD_GET(NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM,
				  port_id);
		return NFP_REPR_TYPE_PHYS_PORT;

	case NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT:
		*port = FIELD_GET(NFP_FLOWER_CMSG_PORT_VNIC, port_id);
		return FIELD_GET(NFP_FLOWER_CMSG_PORT_VNIC_TYPE, port_id) ==
			       NFP_FLOWER_CMSG_PORT_VNIC_TYPE_PF ?
		       NFP_REPR_TYPE_PF : NFP_REPR_TYPE_VF;
	}

	return __NFP_REPR_TYPE_MAX;
}

/* Translate a firmware port ID to its netdev; caller must be inside an
 * RCU read-side critical section (rcu_dereference is used below).  For
 * "other" (internal) ports @redir_egress is set so the caller knows the
 * traffic must be redirected on egress.  Returns NULL when the ID maps
 * to no known netdev.
 */
static struct net_device *
nfp_flower_dev_get(struct nfp_app *app, u32 port_id, bool *redir_egress)
{
	enum nfp_repr_type repr_type;
	struct nfp_reprs *reprs;
	u8 port = 0;

	/* Check if the port is internal. */
	if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port_id) ==
	    NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT) {
		if (redir_egress)
			*redir_egress = true;
		port = FIELD_GET(NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM, port_id);
		return nfp_flower_get_netdev_from_internal_port_id(app, port);
	}

	repr_type = nfp_flower_repr_get_type_and_port(app, port_id, &port);
	if (repr_type > NFP_REPR_TYPE_MAX)
		return NULL;

	reprs = rcu_dereference(app->reprs[repr_type]);
	if (!reprs)
		return NULL;

	/* Bounds-check before indexing the representor array. */
	if (port >= reprs->num_reprs)
		return NULL;

	return rcu_dereference(reprs->reprs[port]);
}

261 262 263 264 265 266 267 268 269 270 271 272
/* Send a REIFY (@exists true = created, false = destroyed) message to
 * every representor of @type.  Returns the number of messages sent, or
 * a negative errno.  Caller must hold pf->lock.
 */
static int
nfp_flower_reprs_reify(struct nfp_app *app, enum nfp_repr_type type,
		       bool exists)
{
	struct nfp_reprs *reprs;
	int i, err, count = 0;

	reprs = rcu_dereference_protected(app->reprs[type],
					  lockdep_is_held(&app->pf->lock));
	if (!reprs)
		return 0;

	for (i = 0; i < reprs->num_reprs; i++) {
		struct net_device *netdev;

		netdev = nfp_repr_get_locked(app, reprs, i);
		if (!netdev)
			continue;

		err = nfp_flower_cmsg_portreify(netdev_priv(netdev), exists);
		if (err)
			return err;
		count++;
	}

	return count;
}

/* Wait for @tot_repl REIFY replies (counted into @replies by the cmsg
 * path).  Returns 0 on success, -EIO if the firmware does not answer
 * within NFP_FL_REPLY_TIMEOUT.
 */
static int
nfp_flower_wait_repr_reify(struct nfp_app *app, atomic_t *replies, int tot_repl)
{
	struct nfp_flower_priv *priv = app->priv;

	if (!tot_repl)
		return 0;

	lockdep_assert_held(&app->pf->lock);
	if (!wait_event_timeout(priv->reify_wait_queue,
				atomic_read(replies) >= tot_repl,
				NFP_FL_REPLY_TIMEOUT)) {
		nfp_warn(app->cpp, "Not all reprs responded to reify\n");
		return -EIO;
	}

	return 0;
}

309 310
static int
nfp_flower_repr_netdev_open(struct nfp_app *app, struct nfp_repr *repr)
S
Simon Horman 已提交
311 312 313
{
	int err;

314
	err = nfp_flower_cmsg_portmod(repr, true, repr->netdev->mtu, false);
S
Simon Horman 已提交
315 316 317
	if (err)
		return err;

318
	netif_tx_wake_all_queues(repr->netdev);
S
Simon Horman 已提交
319 320 321 322

	return 0;
}

323 324
static int
nfp_flower_repr_netdev_stop(struct nfp_app *app, struct nfp_repr *repr)
S
Simon Horman 已提交
325
{
326
	netif_tx_disable(repr->netdev);
S
Simon Horman 已提交
327

328
	return nfp_flower_cmsg_portmod(repr, false, repr->netdev->mtu, false);
S
Simon Horman 已提交
329 330
}

331 332 333
static void
nfp_flower_repr_netdev_clean(struct nfp_app *app, struct net_device *netdev)
{
334 335 336
	struct nfp_repr *repr = netdev_priv(netdev);

	kfree(repr->app_priv);
337 338
}

339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356
/* Pre-unregister hook: tell the firmware this representor is being
 * destroyed and wait for the single REIFY reply.  Failures only warn —
 * teardown continues regardless.
 */
static void
nfp_flower_repr_netdev_preclean(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	struct nfp_flower_priv *priv = app->priv;
	atomic_t *replies = &priv->reify_replies;
	int err;

	atomic_set(replies, 0);
	err = nfp_flower_cmsg_portreify(repr, false);
	if (err) {
		nfp_warn(app->cpp, "Failed to notify firmware about repr destruction\n");
		return;
	}

	nfp_flower_wait_repr_reify(app, replies, 1);
}

357 358
static void nfp_flower_sriov_disable(struct nfp_app *app)
{
359 360 361 362 363
	struct nfp_flower_priv *priv = app->priv;

	if (!priv->nn)
		return;

364 365 366 367 368 369 370 371 372 373
	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_VF);
}

/* Create @cnt PF or VF vNIC representors and notify the firmware.
 *
 * For each vNIC this allocates a representor netdev, its flower-private
 * data and an nfp_port, registers the set with the app and then runs
 * the REIFY handshake.  On failure every representor created so far is
 * cleaned up.  Caller must hold pf->lock.
 */
static int
nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
			    enum nfp_flower_cmsg_port_vnic_type vnic_type,
			    enum nfp_repr_type repr_type, unsigned int cnt)
{
	u8 nfp_pcie = nfp_cppcore_pcie_unit(app->pf->cpp);
	struct nfp_flower_priv *priv = app->priv;
	atomic_t *replies = &priv->reify_replies;
	struct nfp_flower_repr_priv *repr_priv;
	enum nfp_port_type port_type;
	struct nfp_repr *nfp_repr;
	struct nfp_reprs *reprs;
	int i, err, reify_cnt;
	const u8 queue = 0;

	port_type = repr_type == NFP_REPR_TYPE_PF ? NFP_PORT_PF_PORT :
						    NFP_PORT_VF_PORT;

	reprs = nfp_reprs_alloc(cnt);
	if (!reprs)
		return -ENOMEM;

	for (i = 0; i < cnt; i++) {
		struct net_device *repr;
		struct nfp_port *port;
		u32 port_id;

		repr = nfp_repr_alloc(app);
		if (!repr) {
			err = -ENOMEM;
			goto err_reprs_clean;
		}

		repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL);
		if (!repr_priv) {
			err = -ENOMEM;
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}

		nfp_repr = netdev_priv(repr);
		nfp_repr->app_priv = repr_priv;
		repr_priv->nfp_repr = nfp_repr;

		/* For now we only support 1 PF */
		WARN_ON(repr_type == NFP_REPR_TYPE_PF && i);

		port = nfp_port_alloc(app, port_type, repr);
		if (IS_ERR(port)) {
			err = PTR_ERR(port);
			kfree(repr_priv);
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}
		if (repr_type == NFP_REPR_TYPE_PF) {
			port->pf_id = i;
			port->vnic = priv->nn->dp.ctrl_bar;
		} else {
			port->pf_id = 0;
			port->vf_id = i;
			/* VF config comes from the per-VF slice of the
			 * vf_cfg BAR.
			 */
			port->vnic =
				app->pf->vf_cfg_mem + i * NFP_NET_CFG_BAR_SZ;
		}

		eth_hw_addr_random(repr);

		port_id = nfp_flower_cmsg_pcie_port(nfp_pcie, vnic_type,
						    i, queue);
		err = nfp_repr_init(app, repr,
				    port_id, port, priv->nn->dp.netdev);
		if (err) {
			kfree(repr_priv);
			nfp_port_free(port);
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}

		RCU_INIT_POINTER(reprs->reprs[i], repr);
		nfp_info(app->cpp, "%s%d Representor(%s) created\n",
			 repr_type == NFP_REPR_TYPE_PF ? "PF" : "VF", i,
			 repr->name);
	}

	/* Publish the representors before the REIFY handshake so firmware
	 * replies for them are not dropped.
	 */
	nfp_app_reprs_set(app, repr_type, reprs);

	atomic_set(replies, 0);
	reify_cnt = nfp_flower_reprs_reify(app, repr_type, true);
	if (reify_cnt < 0) {
		err = reify_cnt;
		nfp_warn(app->cpp, "Failed to notify firmware about repr creation\n");
		goto err_reprs_remove;
	}

	err = nfp_flower_wait_repr_reify(app, replies, reify_cnt);
	if (err)
		goto err_reprs_remove;

	return 0;
err_reprs_remove:
	reprs = nfp_app_reprs_set(app, repr_type, NULL);
err_reprs_clean:
	nfp_reprs_clean_and_free(app, reprs);
	return err;
}

static int nfp_flower_sriov_enable(struct nfp_app *app, int num_vfs)
{
474 475 476 477 478
	struct nfp_flower_priv *priv = app->priv;

	if (!priv->nn)
		return 0;

479 480 481 482 483 484 485
	return nfp_flower_spawn_vnic_reprs(app,
					   NFP_FLOWER_CMSG_PORT_VNIC_TYPE_VF,
					   NFP_REPR_TYPE_VF, num_vfs);
}

/* Create a representor for every physical port in the eth table and
 * notify the firmware.
 *
 * Besides the REIFY handshake, a MAC_REPR control message describing
 * all physical ports is sent once the representors are live.  On
 * failure all representors created so far are cleaned up.
 */
static int
nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
{
	struct nfp_eth_table *eth_tbl = app->pf->eth_tbl;
	atomic_t *replies = &priv->reify_replies;
	struct nfp_flower_repr_priv *repr_priv;
	struct nfp_repr *nfp_repr;
	struct sk_buff *ctrl_skb;
	struct nfp_reprs *reprs;
	int err, reify_cnt;
	unsigned int i;

	ctrl_skb = nfp_flower_cmsg_mac_repr_start(app, eth_tbl->count);
	if (!ctrl_skb)
		return -ENOMEM;

	/* Representors are indexed by physical port number, which may be
	 * sparse, hence max_index + 1 slots.
	 */
	reprs = nfp_reprs_alloc(eth_tbl->max_index + 1);
	if (!reprs) {
		err = -ENOMEM;
		goto err_free_ctrl_skb;
	}

	for (i = 0; i < eth_tbl->count; i++) {
		unsigned int phys_port = eth_tbl->ports[i].index;
		struct net_device *repr;
		struct nfp_port *port;
		u32 cmsg_port_id;

		repr = nfp_repr_alloc(app);
		if (!repr) {
			err = -ENOMEM;
			goto err_reprs_clean;
		}

		repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL);
		if (!repr_priv) {
			err = -ENOMEM;
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}

		nfp_repr = netdev_priv(repr);
		nfp_repr->app_priv = repr_priv;
		repr_priv->nfp_repr = nfp_repr;

		port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT, repr);
		if (IS_ERR(port)) {
			err = PTR_ERR(port);
			kfree(repr_priv);
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}
		err = nfp_port_init_phy_port(app->pf, app, port, i);
		if (err) {
			kfree(repr_priv);
			nfp_port_free(port);
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}

		SET_NETDEV_DEV(repr, &priv->nn->pdev->dev);
		nfp_net_get_mac_addr(app->pf, repr, port);

		cmsg_port_id = nfp_flower_cmsg_phys_port(phys_port);
		err = nfp_repr_init(app, repr,
				    cmsg_port_id, port, priv->nn->dp.netdev);
		if (err) {
			kfree(repr_priv);
			nfp_port_free(port);
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}

		nfp_flower_cmsg_mac_repr_add(ctrl_skb, i,
					     eth_tbl->ports[i].nbi,
					     eth_tbl->ports[i].base,
					     phys_port);

		RCU_INIT_POINTER(reprs->reprs[phys_port], repr);
		nfp_info(app->cpp, "Phys Port %d Representor(%s) created\n",
			 phys_port, repr->name);
	}

	nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, reprs);

	/* The REIFY/MAC_REPR control messages should be sent after the MAC
	 * representors are registered using nfp_app_reprs_set().  This is
	 * because the firmware may respond with control messages for the
	 * MAC representors, f.e. to provide the driver with information
	 * about their state, and without registration the driver will drop
	 * any such messages.
	 */
	atomic_set(replies, 0);
	reify_cnt = nfp_flower_reprs_reify(app, NFP_REPR_TYPE_PHYS_PORT, true);
	if (reify_cnt < 0) {
		err = reify_cnt;
		nfp_warn(app->cpp, "Failed to notify firmware about repr creation\n");
		goto err_reprs_remove;
	}

	err = nfp_flower_wait_repr_reify(app, replies, reify_cnt);
	if (err)
		goto err_reprs_remove;

	nfp_ctrl_tx(app->ctrl, ctrl_skb);

	return 0;
err_reprs_remove:
	reprs = nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, NULL);
err_reprs_clean:
	nfp_reprs_clean_and_free(app, reprs);
err_free_ctrl_skb:
	kfree_skb(ctrl_skb);
	return err;
}

600 601
static int nfp_flower_vnic_alloc(struct nfp_app *app, struct nfp_net *nn,
				 unsigned int id)
S
Simon Horman 已提交
602 603 604 605 606 607 608 609
{
	if (id > 0) {
		nfp_warn(app->cpp, "FlowerNIC doesn't support more than one data vNIC\n");
		goto err_invalid_port;
	}

	eth_hw_addr_random(nn->dp.netdev);
	netif_keep_dst(nn->dp.netdev);
610
	nn->vnic_no_name = true;
S
Simon Horman 已提交
611 612 613 614 615 616 617 618

	return 0;

err_invalid_port:
	nn->port = nfp_port_alloc(app, NFP_PORT_INVALID, nn->dp.netdev);
	return PTR_ERR_OR_ZERO(nn->port);
}

619 620 621 622
/* Data vNIC teardown: remove representors (VFs if any, then PF, then
 * physical ports) before forgetting the vNIC pointer.
 */
static void nfp_flower_vnic_clean(struct nfp_app *app, struct nfp_net *nn)
{
	struct nfp_flower_priv *priv = app->priv;

	if (app->pf->num_vfs)
		nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_VF);
	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PF);
	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT);

	priv->nn = NULL;
}

/* Data vNIC init: record the vNIC and spawn physical-port, PF and
 * (if any) VF representors, unwinding in reverse order on failure.
 */
static int nfp_flower_vnic_init(struct nfp_app *app, struct nfp_net *nn)
{
	struct nfp_flower_priv *priv = app->priv;
	int err;

	priv->nn = nn;

	err = nfp_flower_spawn_phy_reprs(app, app->priv);
	if (err)
		goto err_clear_nn;

	err = nfp_flower_spawn_vnic_reprs(app,
					  NFP_FLOWER_CMSG_PORT_VNIC_TYPE_PF,
					  NFP_REPR_TYPE_PF, 1);
	if (err)
		goto err_destroy_reprs_phy;

	if (app->pf->num_vfs) {
		err = nfp_flower_spawn_vnic_reprs(app,
						  NFP_FLOWER_CMSG_PORT_VNIC_TYPE_VF,
						  NFP_REPR_TYPE_VF,
						  app->pf->num_vfs);
		if (err)
			goto err_destroy_reprs_pf;
	}

	return 0;

err_destroy_reprs_pf:
	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PF);
err_destroy_reprs_phy:
	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT);
err_clear_nn:
	priv->nn = NULL;
	return err;
}

668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710
/* Poll the firmware's combined features symbol until the HOST_ACK bit
 * appears, giving up with a warning after ~100ms.  A read error also
 * only warns — feature negotiation is best effort here.
 */
static void nfp_flower_wait_host_bit(struct nfp_app *app)
{
	unsigned long err_at;
	u64 feat;
	int err;

	/* Wait for HOST_ACK flag bit to propagate */
	err_at = jiffies + msecs_to_jiffies(100);
	do {
		feat = nfp_rtsym_read_le(app->pf->rtbl,
					 "_abi_flower_combined_features_global",
					 &err);
		if (time_is_before_eq_jiffies(err_at)) {
			nfp_warn(app->cpp,
				 "HOST_ACK bit not propagated in FW.\n");
			break;
		}
		usleep_range(1000, 2000);
	} while (!err && !(feat & NFP_FL_FEATS_HOST_ACK));

	if (err)
		nfp_warn(app->cpp,
			 "Could not read global features entry from FW\n");
}

/* Negotiate optional features with the firmware.
 *
 * Writes the host-supported feature mask, then probes LAG and flow
 * merge support.  A missing rtsym (-ENOENT) means the firmware simply
 * lacks the feature and is not fatal; any other error is returned.
 */
static int nfp_flower_sync_feature_bits(struct nfp_app *app)
{
	struct nfp_flower_priv *app_priv = app->priv;
	int err;

	/* Tell the firmware of the host supported features. */
	err = nfp_rtsym_write_le(app->pf->rtbl, "_abi_flower_host_mask",
				 app_priv->flower_ext_feats |
				 NFP_FL_FEATS_HOST_ACK);
	if (!err)
		nfp_flower_wait_host_bit(app);
	else if (err != -ENOENT)
		return err;

	/* Tell the firmware that the driver supports lag. */
	err = nfp_rtsym_write_le(app->pf->rtbl,
				 "_abi_flower_balance_sync_enable", 1);
	if (!err) {
		app_priv->flower_en_feats |= NFP_FL_ENABLE_LAG;
		nfp_flower_lag_init(&app_priv->nfp_lag);
	} else if (err == -ENOENT) {
		nfp_warn(app->cpp, "LAG not supported by FW.\n");
	} else {
		return err;
	}

	if (app_priv->flower_ext_feats & NFP_FL_FEATS_FLOW_MOD) {
		/* Tell the firmware that the driver supports flow merging. */
		err = nfp_rtsym_write_le(app->pf->rtbl,
					 "_abi_flower_merge_hint_enable", 1);
		if (!err) {
			app_priv->flower_en_feats |= NFP_FL_ENABLE_FLOW_MERGE;
			nfp_flower_internal_port_init(app_priv);
		} else if (err == -ENOENT) {
			nfp_warn(app->cpp,
				 "Flow merge not supported by FW.\n");
		} else {
			return err;
		}
	} else {
		nfp_warn(app->cpp, "Flow mod/merge not supported by FW.\n");
	}

	return 0;
}

S
Simon Horman 已提交
739 740
/* App init: validate firmware/hardware prerequisites, allocate the
 * flower private state, initialize metadata handling and negotiate
 * optional features with the firmware.
 */
static int nfp_flower_init(struct nfp_app *app)
{
	u64 version, features, ctx_count, num_mems;
	const struct nfp_pf *pf = app->pf;
	struct nfp_flower_priv *app_priv;
	int err;

	if (!pf->eth_tbl) {
		nfp_warn(app->cpp, "FlowerNIC requires eth table\n");
		return -EINVAL;
	}

	if (!pf->mac_stats_bar) {
		nfp_warn(app->cpp, "FlowerNIC requires mac_stats BAR\n");
		return -EINVAL;
	}

	if (!pf->vf_cfg_bar) {
		nfp_warn(app->cpp, "FlowerNIC requires vf_cfg BAR\n");
		return -EINVAL;
	}

	version = nfp_rtsym_read_le(app->pf->rtbl, "hw_flower_version", &err);
	if (err) {
		nfp_warn(app->cpp, "FlowerNIC requires hw_flower_version memory symbol\n");
		return err;
	}

	/* Fall back to a single host context memory unit when the FW does
	 * not advertise a split.
	 */
	num_mems = nfp_rtsym_read_le(app->pf->rtbl, "CONFIG_FC_HOST_CTX_SPLIT",
				     &err);
	if (err) {
		nfp_warn(app->cpp,
			 "FlowerNIC: unsupported host context memory: %d\n",
			 err);
		err = 0;
		num_mems = 1;
	}

	if (!FIELD_FIT(NFP_FL_STAT_ID_MU_NUM, num_mems) || !num_mems) {
		nfp_warn(app->cpp,
			 "FlowerNIC: invalid host context memory: %llu\n",
			 num_mems);
		return -EINVAL;
	}

	/* Default host context count if the FW does not provide one. */
	ctx_count = nfp_rtsym_read_le(app->pf->rtbl, "CONFIG_FC_HOST_CTX_COUNT",
				      &err);
	if (err) {
		nfp_warn(app->cpp,
			 "FlowerNIC: unsupported host context count: %d\n",
			 err);
		err = 0;
		ctx_count = BIT(17);
	}

	/* We need to ensure hardware has enough flower capabilities. */
	if (version != NFP_FLOWER_ALLOWED_VER) {
		nfp_warn(app->cpp, "FlowerNIC: unsupported firmware version\n");
		return -EINVAL;
	}

	app_priv = vzalloc(sizeof(struct nfp_flower_priv));
	if (!app_priv)
		return -ENOMEM;

	app_priv->total_mem_units = num_mems;
	app_priv->active_mem_unit = 0;
	app_priv->stats_ring_size = roundup_pow_of_two(ctx_count);
	app->priv = app_priv;
	app_priv->app = app;
	skb_queue_head_init(&app_priv->cmsg_skbs_high);
	skb_queue_head_init(&app_priv->cmsg_skbs_low);
	INIT_WORK(&app_priv->cmsg_work, nfp_flower_cmsg_process_rx);
	init_waitqueue_head(&app_priv->reify_wait_queue);

	init_waitqueue_head(&app_priv->mtu_conf.wait_q);
	spin_lock_init(&app_priv->mtu_conf.lock);

	err = nfp_flower_metadata_init(app, ctx_count, num_mems);
	if (err)
		goto err_free_app_priv;

	/* Extract the extra features supported by the firmware. */
	features = nfp_rtsym_read_le(app->pf->rtbl,
				     "_abi_flower_extra_features", &err);
	if (err)
		app_priv->flower_ext_feats = 0;
	else
		app_priv->flower_ext_feats = features & NFP_FL_FEATS_HOST;

	err = nfp_flower_sync_feature_bits(app);
	if (err)
		goto err_cleanup;

	err = flow_indr_dev_register(nfp_flower_indr_setup_tc_cb, app);
	if (err)
		goto err_cleanup;

	if (app_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)
		nfp_flower_qos_init(app);

	INIT_LIST_HEAD(&app_priv->indr_block_cb_priv);
	INIT_LIST_HEAD(&app_priv->non_repr_priv);
	app_priv->pre_tun_rule_cnt = 0;

	return 0;

err_cleanup:
	/* Only undo what feature negotiation actually enabled. */
	if (app_priv->flower_en_feats & NFP_FL_ENABLE_LAG)
		nfp_flower_lag_cleanup(&app_priv->nfp_lag);
	nfp_flower_metadata_cleanup(app);
err_free_app_priv:
	vfree(app->priv);
	return err;
}

855 856
/* App teardown: mirror of nfp_flower_init().  Drain queued control
 * messages first, then unwind optional features before freeing the
 * private state.
 */
static void nfp_flower_clean(struct nfp_app *app)
{
	struct nfp_flower_priv *app_priv = app->priv;

	/* Stop control-message processing before tearing anything down. */
	skb_queue_purge(&app_priv->cmsg_skbs_high);
	skb_queue_purge(&app_priv->cmsg_skbs_low);
	flush_work(&app_priv->cmsg_work);

	flow_indr_dev_unregister(nfp_flower_indr_setup_tc_cb, app,
				 nfp_flower_setup_indr_block_cb);

	if (app_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)
		nfp_flower_qos_cleanup(app);

	if (app_priv->flower_en_feats & NFP_FL_ENABLE_LAG)
		nfp_flower_lag_cleanup(&app_priv->nfp_lag);

	if (app_priv->flower_en_feats & NFP_FL_ENABLE_FLOW_MERGE)
		nfp_flower_internal_port_cleanup(app_priv);

	nfp_flower_metadata_cleanup(app);
	vfree(app->priv);
	app->priv = NULL;
}

880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896
/* Read the firmware MTU-ack flag under the MTU config lock. */
static bool nfp_flower_check_ack(struct nfp_flower_priv *app_priv)
{
	bool acked;

	spin_lock_bh(&app_priv->mtu_conf.lock);
	acked = app_priv->mtu_conf.ack;
	spin_unlock_bh(&app_priv->mtu_conf.lock);

	return acked;
}

/* Handle an MTU change on a representor.
 *
 * Physical-port MTU changes must be mirrored to the firmware: the
 * request is recorded under mtu_conf.lock and the function blocks until
 * the firmware acks it, returning -EIO on timeout.  Other port types
 * need no firmware involvement.
 */
static int
nfp_flower_repr_change_mtu(struct nfp_app *app, struct net_device *netdev,
			   int new_mtu)
{
	struct nfp_flower_priv *app_priv = app->priv;
	struct nfp_repr *repr = netdev_priv(netdev);
	int err;

	/* Only need to config FW for physical port MTU change. */
	if (repr->port->type != NFP_PORT_PHYS_PORT)
		return 0;

	if (!(app_priv->flower_ext_feats & NFP_FL_NBI_MTU_SETTING)) {
		nfp_err(app->cpp, "Physical port MTU setting not supported\n");
		return -EINVAL;
	}

	spin_lock_bh(&app_priv->mtu_conf.lock);
	app_priv->mtu_conf.ack = false;
	app_priv->mtu_conf.requested_val = new_mtu;
	app_priv->mtu_conf.portnum = repr->dst->u.port_info.port_id;
	spin_unlock_bh(&app_priv->mtu_conf.lock);

	err = nfp_flower_cmsg_portmod(repr, netif_carrier_ok(netdev), new_mtu,
				      true);
	if (err) {
		/* Undo the request so a late ack is not misinterpreted. */
		spin_lock_bh(&app_priv->mtu_conf.lock);
		app_priv->mtu_conf.requested_val = 0;
		spin_unlock_bh(&app_priv->mtu_conf.lock);
		return err;
	}

	/* Wait for fw to ack the change. */
	if (!wait_event_timeout(app_priv->mtu_conf.wait_q,
				nfp_flower_check_ack(app_priv),
				NFP_FL_REPLY_TIMEOUT)) {
		spin_lock_bh(&app_priv->mtu_conf.lock);
		app_priv->mtu_conf.requested_val = 0;
		spin_unlock_bh(&app_priv->mtu_conf.lock);
		nfp_warn(app->cpp, "MTU change not verified with fw\n");
		return -EIO;
	}

	return 0;
}

937 938
static int nfp_flower_start(struct nfp_app *app)
{
939 940 941
	struct nfp_flower_priv *app_priv = app->priv;
	int err;

942
	if (app_priv->flower_en_feats & NFP_FL_ENABLE_LAG) {
943 944 945 946 947
		err = nfp_flower_lag_reset(&app_priv->nfp_lag);
		if (err)
			return err;
	}

948 949 950 951
	return nfp_tunnel_config_start(app);
}

/* App stop hook: tear down tunnel offload configuration. */
static void nfp_flower_stop(struct nfp_app *app)
{
	nfp_tunnel_config_stop(app);
}

static int
nfp_flower_netdev_event(struct nfp_app *app, struct net_device *netdev,
			unsigned long event, void *ptr)
959
{
960
	struct nfp_flower_priv *app_priv = app->priv;
961
	int ret;
962

963
	if (app_priv->flower_en_feats & NFP_FL_ENABLE_LAG) {
964 965 966 967
		ret = nfp_flower_lag_netdev_event(app_priv, netdev, event, ptr);
		if (ret & NOTIFY_STOP_MASK)
			return ret;
	}
968

969 970
	ret = nfp_flower_internal_port_event_handler(app, netdev, event);
	if (ret & NOTIFY_STOP_MASK)
971 972
		return ret;

973
	return nfp_tunnel_mac_event_handler(app, netdev, event, ptr);
974 975
}

S
Simon Horman 已提交
976 977 978
/* Flower app callbacks registered with the core NFP driver. */
const struct nfp_app_type app_flower = {
	.id		= NFP_APP_FLOWER_NIC,
	.name		= "flower",

	.ctrl_cap_mask	= ~0U,
	.ctrl_has_meta	= true,

	.extra_cap	= nfp_flower_extra_cap,

	.init		= nfp_flower_init,
	.clean		= nfp_flower_clean,

	.repr_change_mtu  = nfp_flower_repr_change_mtu,

	.vnic_alloc	= nfp_flower_vnic_alloc,
	.vnic_init	= nfp_flower_vnic_init,
	.vnic_clean	= nfp_flower_vnic_clean,

	.repr_preclean	= nfp_flower_repr_netdev_preclean,
	.repr_clean	= nfp_flower_repr_netdev_clean,

	.repr_open	= nfp_flower_repr_netdev_open,
	.repr_stop	= nfp_flower_repr_netdev_stop,

	.start		= nfp_flower_start,
	.stop		= nfp_flower_stop,

	.netdev_event	= nfp_flower_netdev_event,

	.ctrl_msg_rx	= nfp_flower_cmsg_rx,

	.sriov_enable	= nfp_flower_sriov_enable,
	.sriov_disable	= nfp_flower_sriov_disable,

	.eswitch_mode_get  = eswitch_mode_get,
	.dev_get	= nfp_flower_dev_get,

	.setup_tc	= nfp_flower_setup_tc,
};