// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/rhashtable.h>
#include <linux/bitops.h>
#include <linux/in6.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_bridge.h>
#include <linux/socket.h>
#include <linux/route.h>
#include <linux/gcd.h>
#include <linux/if_macvlan.h>
#include <linux/refcount.h>
#include <linux/jhash.h>
#include <linux/net_namespace.h>
#include <linux/mutex.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/nexthop.h>
#include <net/fib_rules.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/ipv6.h>
#include <net/fib_notifier.h>
#include <net/switchdev.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_ipip.h"
#include "spectrum_mr.h"
#include "spectrum_mr_tcam.h"
#include "spectrum_router.h"
#include "spectrum_span.h"

struct mlxsw_sp_fib;
struct mlxsw_sp_vr;
struct mlxsw_sp_lpm_tree;
struct mlxsw_sp_rif_ops;

struct mlxsw_sp_rif {
	struct list_head nexthop_list;
	struct list_head neigh_list;
	struct net_device *dev; /* NULL for underlay RIF */
	struct mlxsw_sp_fid *fid;
	unsigned char addr[ETH_ALEN];
	int mtu;
	u16 rif_index;
	u16 vr_id;
	const struct mlxsw_sp_rif_ops *ops;
	struct mlxsw_sp *mlxsw_sp;

	unsigned int counter_ingress;
	bool counter_ingress_valid;
	unsigned int counter_egress;
	bool counter_egress_valid;
};

struct mlxsw_sp_rif_params {
	struct net_device *dev;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
};

struct mlxsw_sp_rif_subport {
	struct mlxsw_sp_rif common;
	refcount_t ref_count;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
};

struct mlxsw_sp_rif_ipip_lb {
	struct mlxsw_sp_rif common;
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
	u16 ul_vr_id; /* Reserved for Spectrum-2. */
	u16 ul_rif_id; /* Reserved for Spectrum. */
};

struct mlxsw_sp_rif_params_ipip_lb {
	struct mlxsw_sp_rif_params common;
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
};

struct mlxsw_sp_rif_ops {
	enum mlxsw_sp_rif_type type;
	size_t rif_size;

	void (*setup)(struct mlxsw_sp_rif *rif,
		      const struct mlxsw_sp_rif_params *params);
	int (*configure)(struct mlxsw_sp_rif *rif);
	void (*deconfigure)(struct mlxsw_sp_rif *rif);
	struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif,
					 struct netlink_ext_ack *extack);
	void (*fdb_del)(struct mlxsw_sp_rif *rif, const char *mac);
};

static struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
			 const struct net_device *dev);
static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree);
static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree);
static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_fib *fib,
				     u8 tree_id);
static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       const struct mlxsw_sp_fib *fib);

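/* Helpers for accessing the directional counter state embedded in a RIF. */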
static unsigned int *
mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
			   enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return &rif->counter_egress;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return &rif->counter_ingress;
	}
	return NULL;
}

static bool
mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return rif->counter_egress_valid;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return rif->counter_ingress_valid;
	}
	return false;
}

static void
mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir,
			       bool valid)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		rif->counter_egress_valid = valid;
		break;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		rif->counter_ingress_valid = valid;
		break;
	}
}

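/* Bind or unbind a counter to/from a RIF in the given direction. The RITR
 * register is queried first, so that the subsequent write only changes the
 * counter fields of the current RIF configuration.
 */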
static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
				     unsigned int counter_index, bool enable,
				     enum mlxsw_sp_rif_counter_dir dir)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	bool is_egress = false;
	int err;

	if (dir == MLXSW_SP_RIF_COUNTER_EGRESS)
		is_egress = true;
	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable,
				    is_egress);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

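/* Read the good-unicast packet count of a RIF counter via the RICNT
 * register.
 */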
int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_rif *rif,
				   enum mlxsw_sp_rif_counter_dir dir, u64 *cnt)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];
	unsigned int *p_counter_index;
	bool valid;
	int err;

	valid = mlxsw_sp_rif_counter_valid_get(rif, dir);
	if (!valid)
		return -EINVAL;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
			     MLXSW_REG_RICNT_OPCODE_NOP);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
	if (err)
		return err;
	*cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
	return 0;
}

static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
				      unsigned int counter_index)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];

	mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
			     MLXSW_REG_RICNT_OPCODE_CLEAR);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
}

int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;
	int err;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
				     p_counter_index);
	if (err)
		return err;

	err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;

	err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
					*p_counter_index, true, dir);
	if (err)
		goto err_counter_edit;
	mlxsw_sp_rif_counter_valid_set(rif, dir, true);
	return 0;

err_counter_edit:
err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	return err;
}

void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;

	if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
		return;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (WARN_ON(!p_counter_index))
		return;
	mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
				  *p_counter_index, false, dir);
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	mlxsw_sp_rif_counter_valid_set(rif, dir, false);
}

static void mlxsw_sp_rif_counters_alloc(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct devlink *devlink;

	devlink = priv_to_devlink(mlxsw_sp->core);
	if (!devlink_dpipe_table_counter_enabled(devlink,
						 MLXSW_SP_DPIPE_TABLE_NAME_ERIF))
		return;
	mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;

	mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

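/* One bit per possible prefix length, 0..128; IPv6 uses all of them, IPv4
 * only the first 33.
 */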
#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1)

struct mlxsw_sp_prefix_usage {
	DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
};

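/* Iterate over all prefix lengths that are marked as used. */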
#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
	for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)

static bool
mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
			 struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static void
mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
			  struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static void
mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
			  unsigned char prefix_len)
{
	set_bit(prefix_len, prefix_usage->b);
}

static void
mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
			    unsigned char prefix_len)
{
	clear_bit(prefix_len, prefix_usage->b);
}

struct mlxsw_sp_fib_key {
	unsigned char addr[sizeof(struct in6_addr)];
	unsigned char prefix_len;
};

enum mlxsw_sp_fib_entry_type {
	MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
	MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
	MLXSW_SP_FIB_ENTRY_TYPE_TRAP,
	MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE,
	MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE,

	/* This is a special case of local delivery, where a packet should be
	 * decapsulated on reception. Note that there is no corresponding ENCAP,
	 * because that's a type of next hop, not of FIB entry. (There can be
	 * several next hops in a REMOTE entry, and some of them may be
	 * encapsulating entries.)
	 */
	MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP,
	MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP,
};

struct mlxsw_sp_nexthop_group;
struct mlxsw_sp_fib_entry;

struct mlxsw_sp_fib_node {
	struct mlxsw_sp_fib_entry *fib_entry;
	struct list_head list;
	struct rhash_head ht_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_fib_key key;
};

struct mlxsw_sp_fib_entry_decap {
	struct mlxsw_sp_ipip_entry *ipip_entry;
	u32 tunnel_index;
};

static struct mlxsw_sp_fib_entry_priv *
mlxsw_sp_fib_entry_priv_create(const struct mlxsw_sp_router_ll_ops *ll_ops)
{
	struct mlxsw_sp_fib_entry_priv *priv;

	if (!ll_ops->fib_entry_priv_size)
		/* No need to have priv */
		return NULL;

	priv = kzalloc(sizeof(*priv) + ll_ops->fib_entry_priv_size, GFP_KERNEL);
	if (!priv)
		return ERR_PTR(-ENOMEM);
	refcount_set(&priv->refcnt, 1);
	return priv;
}

static void
mlxsw_sp_fib_entry_priv_destroy(struct mlxsw_sp_fib_entry_priv *priv)
{
	kfree(priv);
}

static void mlxsw_sp_fib_entry_priv_hold(struct mlxsw_sp_fib_entry_priv *priv)
{
	refcount_inc(&priv->refcnt);
}

static void mlxsw_sp_fib_entry_priv_put(struct mlxsw_sp_fib_entry_priv *priv)
{
	if (!priv || !refcount_dec_and_test(&priv->refcnt))
		return;
	mlxsw_sp_fib_entry_priv_destroy(priv);
}

static void mlxsw_sp_fib_entry_op_ctx_priv_hold(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
						struct mlxsw_sp_fib_entry_priv *priv)
{
	if (!priv)
		return;
	mlxsw_sp_fib_entry_priv_hold(priv);
	list_add(&priv->list, &op_ctx->fib_entry_priv_list);
}

static void mlxsw_sp_fib_entry_op_ctx_priv_put_all(struct mlxsw_sp_fib_entry_op_ctx *op_ctx)
{
	struct mlxsw_sp_fib_entry_priv *priv, *tmp;

	list_for_each_entry_safe(priv, tmp, &op_ctx->fib_entry_priv_list, list)
		mlxsw_sp_fib_entry_priv_put(priv);
	INIT_LIST_HEAD(&op_ctx->fib_entry_priv_list);
}

struct mlxsw_sp_fib_entry {
	struct mlxsw_sp_fib_node *fib_node;
	enum mlxsw_sp_fib_entry_type type;
	struct list_head nexthop_group_node;
	struct mlxsw_sp_nexthop_group *nh_group;
	struct mlxsw_sp_fib_entry_decap decap; /* Valid for decap entries. */
	struct mlxsw_sp_fib_entry_priv *priv;
};

struct mlxsw_sp_fib4_entry {
	struct mlxsw_sp_fib_entry common;
	struct fib_info *fi;
	u32 tb_id;
	u8 tos;
	u8 type;
};

struct mlxsw_sp_fib6_entry {
	struct mlxsw_sp_fib_entry common;
	struct list_head rt6_list;
	unsigned int nrt6;
};

struct mlxsw_sp_rt6 {
	struct list_head list;
	struct fib6_info *rt;
};

struct mlxsw_sp_lpm_tree {
	u8 id; /* tree ID */
	unsigned int ref_count;
	enum mlxsw_sp_l3proto proto;
	unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
	struct mlxsw_sp_prefix_usage prefix_usage;
};

struct mlxsw_sp_fib {
	struct rhashtable ht;
	struct list_head node_list;
	struct mlxsw_sp_vr *vr;
	struct mlxsw_sp_lpm_tree *lpm_tree;
	enum mlxsw_sp_l3proto proto;
	const struct mlxsw_sp_router_ll_ops *ll_ops;
};

struct mlxsw_sp_vr {
	u16 id; /* virtual router ID */
	u32 tb_id; /* kernel fib table id */
	unsigned int rif_count;
	struct mlxsw_sp_fib *fib4;
	struct mlxsw_sp_fib *fib6;
	struct mlxsw_sp_mr_table *mr_table[MLXSW_SP_L3_PROTO_MAX];
	struct mlxsw_sp_rif *ul_rif;
	refcount_t ul_rif_refcnt;
};

static int mlxsw_sp_router_ll_basic_ralta_write(struct mlxsw_sp *mlxsw_sp, char *xralta_pl)
{
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta),
			       xralta_pl + MLXSW_REG_XRALTA_RALTA_OFFSET);
}

static int mlxsw_sp_router_ll_basic_ralst_write(struct mlxsw_sp *mlxsw_sp, char *xralst_pl)
{
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst),
			       xralst_pl + MLXSW_REG_XRALST_RALST_OFFSET);
}

static int mlxsw_sp_router_ll_basic_raltb_write(struct mlxsw_sp *mlxsw_sp, char *xraltb_pl)
{
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
			       xraltb_pl + MLXSW_REG_XRALTB_RALTB_OFFSET);
}

static const struct rhashtable_params mlxsw_sp_fib_ht_params;

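/* Create a FIB instance for the given virtual router and protocol, and
 * bind it to the LPM tree currently shared by all FIBs of that protocol.
 */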
static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp *mlxsw_sp,
						struct mlxsw_sp_vr *vr,
						enum mlxsw_sp_l3proto proto)
{
	const struct mlxsw_sp_router_ll_ops *ll_ops = mlxsw_sp->router->proto_ll_ops[proto];
	struct mlxsw_sp_lpm_tree *lpm_tree;
	struct mlxsw_sp_fib *fib;
	int err;

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[proto];
	fib = kzalloc(sizeof(*fib), GFP_KERNEL);
	if (!fib)
		return ERR_PTR(-ENOMEM);
	err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
	if (err)
		goto err_rhashtable_init;
	INIT_LIST_HEAD(&fib->node_list);
	fib->proto = proto;
	fib->vr = vr;
	fib->lpm_tree = lpm_tree;
	fib->ll_ops = ll_ops;
	mlxsw_sp_lpm_tree_hold(lpm_tree);
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, lpm_tree->id);
	if (err)
		goto err_lpm_tree_bind;
	return fib;

err_lpm_tree_bind:
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
err_rhashtable_init:
	kfree(fib);
	return ERR_PTR(err);
}

static void mlxsw_sp_fib_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_fib *fib)
{
	mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
	mlxsw_sp_lpm_tree_put(mlxsw_sp, fib->lpm_tree);
	WARN_ON(!list_empty(&fib->node_list));
	rhashtable_destroy(&fib->ht);
	kfree(fib);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count == 0)
			return lpm_tree;
	}
	return NULL;
}

static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
				   const struct mlxsw_sp_router_ll_ops *ll_ops,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char xralta_pl[MLXSW_REG_XRALTA_LEN];

	mlxsw_reg_xralta_pack(xralta_pl, true,
			      (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			      lpm_tree->id);
	return ll_ops->ralta_write(mlxsw_sp, xralta_pl);
}

static void mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
				   const struct mlxsw_sp_router_ll_ops *ll_ops,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char xralta_pl[MLXSW_REG_XRALTA_LEN];

	mlxsw_reg_xralta_pack(xralta_pl, false,
			      (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			      lpm_tree->id);
	ll_ops->ralta_write(mlxsw_sp, xralta_pl);
}

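/* Program the tree layout via RALST: the longest used prefix length is the
 * root bin, and each used prefix length is linked to the next shorter used
 * one as its left child.
 */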
static int
mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
				  const struct mlxsw_sp_router_ll_ops *ll_ops,
				  struct mlxsw_sp_prefix_usage *prefix_usage,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char xralst_pl[MLXSW_REG_XRALST_LEN];
	u8 root_bin = 0;
	u8 prefix;
	u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;

	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
		root_bin = prefix;

	mlxsw_reg_xralst_pack(xralst_pl, root_bin, lpm_tree->id);
	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
		if (prefix == 0)
			continue;
		mlxsw_reg_xralst_bin_pack(xralst_pl, prefix, last_prefix,
					  MLXSW_REG_RALST_BIN_NO_CHILD);
		last_prefix = prefix;
	}
	return ll_ops->ralst_write(mlxsw_sp, xralst_pl);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
			 const struct mlxsw_sp_router_ll_ops *ll_ops,
			 struct mlxsw_sp_prefix_usage *prefix_usage,
			 enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int err;

	lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
	if (!lpm_tree)
		return ERR_PTR(-EBUSY);
	lpm_tree->proto = proto;
	err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, ll_ops, lpm_tree);
	if (err)
		return ERR_PTR(err);

	err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, ll_ops, prefix_usage, lpm_tree);
	if (err)
		goto err_left_struct_set;
	memcpy(&lpm_tree->prefix_usage, prefix_usage,
	       sizeof(lpm_tree->prefix_usage));
	memset(&lpm_tree->prefix_ref_count, 0,
	       sizeof(lpm_tree->prefix_ref_count));
	lpm_tree->ref_count = 1;
	return lpm_tree;

err_left_struct_set:
	mlxsw_sp_lpm_tree_free(mlxsw_sp, ll_ops, lpm_tree);
	return ERR_PTR(err);
}

static void mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
				      const struct mlxsw_sp_router_ll_ops *ll_ops,
				      struct mlxsw_sp_lpm_tree *lpm_tree)
{
	mlxsw_sp_lpm_tree_free(mlxsw_sp, ll_ops, lpm_tree);
}

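/* Return a tree that already serves this protocol with an identical prefix
 * usage, taking a reference, or create a new one.
 */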
static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
		      struct mlxsw_sp_prefix_usage *prefix_usage,
		      enum mlxsw_sp_l3proto proto)
{
	const struct mlxsw_sp_router_ll_ops *ll_ops = mlxsw_sp->router->proto_ll_ops[proto];
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count != 0 &&
		    lpm_tree->proto == proto &&
		    mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
					     prefix_usage)) {
			mlxsw_sp_lpm_tree_hold(lpm_tree);
			return lpm_tree;
		}
	}
	return mlxsw_sp_lpm_tree_create(mlxsw_sp, ll_ops, prefix_usage, proto);
}

static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
{
	lpm_tree->ref_count++;
}

static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	const struct mlxsw_sp_router_ll_ops *ll_ops =
				mlxsw_sp->router->proto_ll_ops[lpm_tree->proto];

	if (--lpm_tree->ref_count == 0)
		mlxsw_sp_lpm_tree_destroy(mlxsw_sp, ll_ops, lpm_tree);
}

#define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */

static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
	struct mlxsw_sp_lpm_tree *lpm_tree;
	u64 max_trees;
	int err, i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
		return -EIO;

	max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
	mlxsw_sp->router->lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
	mlxsw_sp->router->lpm.trees = kcalloc(mlxsw_sp->router->lpm.tree_count,
					     sizeof(struct mlxsw_sp_lpm_tree),
					     GFP_KERNEL);
	if (!mlxsw_sp->router->lpm.trees)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
	}

	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
					 MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(lpm_tree)) {
		err = PTR_ERR(lpm_tree);
		goto err_ipv4_tree_get;
	}
	mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4] = lpm_tree;

	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
					 MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(lpm_tree)) {
		err = PTR_ERR(lpm_tree);
		goto err_ipv6_tree_get;
	}
	mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6] = lpm_tree;

	return 0;

err_ipv6_tree_get:
	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
err_ipv4_tree_get:
	kfree(mlxsw_sp->router->lpm.trees);
	return err;
}

static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6];
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);

	kfree(mlxsw_sp->router->lpm.trees);
}

static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
{
	return !!vr->fib4 || !!vr->fib6 ||
	       !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] ||
	       !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_is_used(vr))
			return vr;
	}
	return NULL;
}

static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_fib *fib, u8 tree_id)
{
	char xraltb_pl[MLXSW_REG_XRALTB_LEN];

	mlxsw_reg_xraltb_pack(xraltb_pl, fib->vr->id,
			      (enum mlxsw_reg_ralxx_protocol) fib->proto,
			      tree_id);
	return fib->ll_ops->raltb_write(mlxsw_sp, xraltb_pl);
}

static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       const struct mlxsw_sp_fib *fib)
{
	char xraltb_pl[MLXSW_REG_XRALTB_LEN];

	/* Bind to tree 0, which is the default. */
	mlxsw_reg_xraltb_pack(xraltb_pl, fib->vr->id,
			      (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
	return fib->ll_ops->raltb_write(mlxsw_sp, xraltb_pl);
}

static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
{
	/* For our purpose, squash main, default and local tables into one */
	if (tb_id == RT_TABLE_LOCAL || tb_id == RT_TABLE_DEFAULT)
		tb_id = RT_TABLE_MAIN;
	return tb_id;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
					    u32 tb_id)
{
	struct mlxsw_sp_vr *vr;
	int i;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
			return vr;
	}
	return NULL;
}

int mlxsw_sp_router_tb_id_vr_id(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
				u16 *vr_id)
{
	struct mlxsw_sp_vr *vr;
	int err = 0;

	mutex_lock(&mlxsw_sp->router->lock);
	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr) {
		err = -ESRCH;
		goto out;
	}
	*vr_id = vr->id;
out:
	mutex_unlock(&mlxsw_sp->router->lock);
	return err;
}

static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
					    enum mlxsw_sp_l3proto proto)
{
	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		return vr->fib4;
	case MLXSW_SP_L3_PROTO_IPV6:
		return vr->fib6;
	}
	return NULL;
}

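/* Allocate an unused virtual router and create its IPv4 and IPv6 FIBs and
 * multicast routing tables.
 */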
static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
					      u32 tb_id,
					      struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_mr_table *mr4_table, *mr6_table;
	struct mlxsw_sp_fib *fib4;
	struct mlxsw_sp_fib *fib6;
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
	if (!vr) {
		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported virtual routers");
		return ERR_PTR(-EBUSY);
	}
	fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(fib4))
		return ERR_CAST(fib4);
	fib6 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(fib6)) {
		err = PTR_ERR(fib6);
		goto err_fib6_create;
	}
	mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
					     MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(mr4_table)) {
		err = PTR_ERR(mr4_table);
		goto err_mr4_table_create;
	}
	mr6_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
					     MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(mr6_table)) {
		err = PTR_ERR(mr6_table);
		goto err_mr6_table_create;
	}

	vr->fib4 = fib4;
	vr->fib6 = fib6;
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = mr4_table;
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = mr6_table;
	vr->tb_id = tb_id;
	return vr;

err_mr6_table_create:
	mlxsw_sp_mr_table_destroy(mr4_table);
err_mr4_table_create:
	mlxsw_sp_fib_destroy(mlxsw_sp, fib6);
err_fib6_create:
	mlxsw_sp_fib_destroy(mlxsw_sp, fib4);
	return ERR_PTR(err);
}

static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_vr *vr)
{
	mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]);
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = NULL;
	mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]);
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = NULL;
	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6);
	vr->fib6 = NULL;
	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4);
	vr->fib4 = NULL;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
					   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_vr *vr;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);
	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr)
		vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id, extack);
	return vr;
}

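/* Release a virtual router: destroy it once it holds no RIFs, no routes
 * and no multicast routes.
 */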
static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
{
	if (!vr->rif_count && list_empty(&vr->fib4->node_list) &&
	    list_empty(&vr->fib6->node_list) &&
	    mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]) &&
	    mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]))
		mlxsw_sp_vr_destroy(mlxsw_sp, vr);
}

static bool
mlxsw_sp_vr_lpm_tree_should_replace(struct mlxsw_sp_vr *vr,
				    enum mlxsw_sp_l3proto proto, u8 tree_id)
{
	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);

	if (!mlxsw_sp_vr_is_used(vr))
		return false;
	if (fib->lpm_tree->id == tree_id)
		return true;
	return false;
}

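/* Rebind a FIB to a new LPM tree. The new tree is referenced and bound
 * first, so the virtual router never points at a freed tree.
 */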
static int mlxsw_sp_vr_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib *fib,
					struct mlxsw_sp_lpm_tree *new_tree)
{
	struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
	int err;

	fib->lpm_tree = new_tree;
	mlxsw_sp_lpm_tree_hold(new_tree);
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
	if (err)
		goto err_tree_bind;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
	return 0;

err_tree_bind:
	mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
	fib->lpm_tree = old_tree;
	return err;
}

static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib *fib,
					 struct mlxsw_sp_lpm_tree *new_tree)
{
	enum mlxsw_sp_l3proto proto = fib->proto;
	struct mlxsw_sp_lpm_tree *old_tree;
	u8 old_id, new_id = new_tree->id;
	struct mlxsw_sp_vr *vr;
	int i, err;

	old_tree = mlxsw_sp->router->lpm.proto_trees[proto];
	old_id = old_tree->id;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, old_id))
			continue;
		err = mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
						   mlxsw_sp_vr_fib(vr, proto),
						   new_tree);
		if (err)
			goto err_tree_replace;
	}

	memcpy(new_tree->prefix_ref_count, old_tree->prefix_ref_count,
	       sizeof(new_tree->prefix_ref_count));
	mlxsw_sp->router->lpm.proto_trees[proto] = new_tree;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);

	return 0;

err_tree_replace:
	for (i--; i >= 0; i--) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, new_id))
			continue;
		mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
					     mlxsw_sp_vr_fib(vr, proto),
					     old_tree);
	}
	return err;
}

static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	u64 max_vrs;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
		return -EIO;

	max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
	mlxsw_sp->router->vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
					GFP_KERNEL);
	if (!mlxsw_sp->router->vrs)
		return -ENOMEM;

	for (i = 0; i < max_vrs; i++) {
		vr = &mlxsw_sp->router->vrs[i];
		vr->id = i;
	}

	return 0;
}

static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);

static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
{
	/* At this stage we're guaranteed not to have new incoming
	 * FIB notifications and the work queue is free from FIBs
	 * sitting on top of mlxsw netdevs. However, we can still
	 * have other FIBs queued. Flush the queue before flushing
	 * the device's tables. No need for locks, as we're the only
	 * writer.
	 */
	mlxsw_core_flush_owq();
	mlxsw_sp_router_fib_flush(mlxsw_sp);
	kfree(mlxsw_sp->router->vrs);
}

static struct net_device *
__mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev)
{
	struct ip_tunnel *tun = netdev_priv(ol_dev);
	struct net *net = dev_net(ol_dev);

	return dev_get_by_index_rcu(net, tun->parms.link);
}

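/* Return the table that hosts the underlay routes of a tunnel: the FIB
 * table of the bound (e.g. VRF) device if there is one, RT_TABLE_MAIN
 * otherwise.
 */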
u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
{
	struct net_device *d;
	u32 tb_id;

	rcu_read_lock();
	d = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
	if (d)
		tb_id = l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
	else
		tb_id = RT_TABLE_MAIN;
	rcu_read_unlock();

	return tb_id;
}

static struct mlxsw_sp_rif *
mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
		    const struct mlxsw_sp_rif_params *params,
		    struct netlink_ext_ack *extack);

static struct mlxsw_sp_rif_ipip_lb *
mlxsw_sp_ipip_ol_ipip_lb_create(struct mlxsw_sp *mlxsw_sp,
				enum mlxsw_sp_ipip_type ipipt,
				struct net_device *ol_dev,
				struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_params_ipip_lb lb_params;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_rif *rif;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
	lb_params = (struct mlxsw_sp_rif_params_ipip_lb) {
		.common.dev = ol_dev,
		.common.lag = false,
		.lb_config = ipip_ops->ol_loopback_config(mlxsw_sp, ol_dev),
	};

	rif = mlxsw_sp_rif_create(mlxsw_sp, &lb_params.common, extack);
	if (IS_ERR(rif))
		return ERR_CAST(rif);
	return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
			  enum mlxsw_sp_ipip_type ipipt,
			  struct net_device *ol_dev)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_ipip_entry *ipip_entry;
	struct mlxsw_sp_ipip_entry *ret = NULL;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
	ipip_entry = kzalloc(sizeof(*ipip_entry), GFP_KERNEL);
	if (!ipip_entry)
		return ERR_PTR(-ENOMEM);

	ipip_entry->ol_lb = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp, ipipt,
							    ol_dev, NULL);
	if (IS_ERR(ipip_entry->ol_lb)) {
		ret = ERR_CAST(ipip_entry->ol_lb);
		goto err_ol_ipip_lb_create;
	}

	ipip_entry->ipipt = ipipt;
	ipip_entry->ol_dev = ol_dev;

	switch (ipip_ops->ul_proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		ipip_entry->parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		WARN_ON(1);
		break;
	}

	return ipip_entry;

err_ol_ipip_lb_create:
	kfree(ipip_entry);
	return ret;
}

static void
mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp_ipip_entry *ipip_entry)
{
	mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
	kfree(ipip_entry);
}

static bool
mlxsw_sp_ipip_entry_saddr_matches(struct mlxsw_sp *mlxsw_sp,
				  const enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr saddr,
				  u32 ul_tb_id,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 tun_ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
	union mlxsw_sp_l3addr tun_saddr;

	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
		return false;

	tun_saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
	return tun_ul_tb_id == ul_tb_id &&
	       mlxsw_sp_l3addr_eq(&tun_saddr, &saddr);
}

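/* Set up the decap aspect of a FIB entry: allocate a KVD linear entry for
 * the tunnel and cross-link the FIB entry with its IPIP entry.
 */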
static int
mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_fib_entry *fib_entry,
			      struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 tunnel_index;
	int err;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
				  1, &tunnel_index);
	if (err)
		return err;

	ipip_entry->decap_fib_entry = fib_entry;
	fib_entry->decap.ipip_entry = ipip_entry;
	fib_entry->decap.tunnel_index = tunnel_index;
	return 0;
}

static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_fib_entry *fib_entry)
{
	/* Unlink this node from the IPIP entry that it's the decap entry of. */
	fib_entry->decap.ipip_entry->decap_fib_entry = NULL;
	fib_entry->decap.ipip_entry = NULL;
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
			   1, fib_entry->decap.tunnel_index);
}

static struct mlxsw_sp_fib_node *
mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
			 size_t addr_len, unsigned char prefix_len);
static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_entry *fib_entry);

static void
mlxsw_sp_ipip_entry_demote_decap(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct mlxsw_sp_fib_entry *fib_entry = ipip_entry->decap_fib_entry;

	mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;

	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
}

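/* Turn a trap route into the decap route of the given IPIP entry. If
 * updating the hardware fails, demote the route back to a trap.
 */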
static void
mlxsw_sp_ipip_entry_promote_decap(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_ipip_entry *ipip_entry,
				  struct mlxsw_sp_fib_entry *decap_fib_entry)
{
	if (mlxsw_sp_fib_entry_decap_init(mlxsw_sp, decap_fib_entry,
					  ipip_entry))
		return;
	decap_fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;

	if (mlxsw_sp_fib_entry_update(mlxsw_sp, decap_fib_entry))
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
}

static struct mlxsw_sp_fib_entry *
mlxsw_sp_router_ip2me_fib_entry_find(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
				     enum mlxsw_sp_l3proto proto,
				     const union mlxsw_sp_l3addr *addr,
				     enum mlxsw_sp_fib_entry_type type)
{
	struct mlxsw_sp_fib_node *fib_node;
	unsigned char addr_prefix_len;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_vr *vr;
	const void *addrp;
	size_t addr_len;
	u32 addr4;

	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr)
		return NULL;
	fib = mlxsw_sp_vr_fib(vr, proto);

	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		addr4 = be32_to_cpu(addr->addr4);
		addrp = &addr4;
		addr_len = 4;
		addr_prefix_len = 32;
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
	default:
		WARN_ON(1);
		return NULL;
	}

	fib_node = mlxsw_sp_fib_node_lookup(fib, addrp, addr_len,
					    addr_prefix_len);
	if (!fib_node || fib_node->fib_entry->type != type)
		return NULL;

	return fib_node->fib_entry;
}

/* Given an IPIP entry, find the corresponding decap route. */
static struct mlxsw_sp_fib_entry *
mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct mlxsw_sp_fib_node *fib_node;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	unsigned char saddr_prefix_len;
	union mlxsw_sp_l3addr saddr;
	struct mlxsw_sp_fib *ul_fib;
	struct mlxsw_sp_vr *ul_vr;
	const void *saddrp;
	size_t saddr_len;
	u32 ul_tb_id;
	u32 saddr4;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];

	ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
	ul_vr = mlxsw_sp_vr_find(mlxsw_sp, ul_tb_id);
	if (!ul_vr)
		return NULL;

	ul_fib = mlxsw_sp_vr_fib(ul_vr, ipip_ops->ul_proto);
	saddr = mlxsw_sp_ipip_netdev_saddr(ipip_ops->ul_proto,
					   ipip_entry->ol_dev);

	switch (ipip_ops->ul_proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		saddr4 = be32_to_cpu(saddr.addr4);
		saddrp = &saddr4;
		saddr_len = 4;
		saddr_prefix_len = 32;
		break;
	default:
		WARN_ON(1);
		return NULL;
	}

	fib_node = mlxsw_sp_fib_node_lookup(ul_fib, saddrp, saddr_len,
					    saddr_prefix_len);
	if (!fib_node ||
	    fib_node->fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP)
		return NULL;

	return fib_node->fib_entry;
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_create(struct mlxsw_sp *mlxsw_sp,
			   enum mlxsw_sp_ipip_type ipipt,
			   struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_alloc(mlxsw_sp, ipipt, ol_dev);
	if (IS_ERR(ipip_entry))
		return ipip_entry;

	list_add_tail(&ipip_entry->ipip_list_node,
		      &mlxsw_sp->router->ipip_list);

	return ipip_entry;
}

static void
mlxsw_sp_ipip_entry_destroy(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_ipip_entry *ipip_entry)
{
	list_del(&ipip_entry->ipip_list_node);
	mlxsw_sp_ipip_entry_dealloc(ipip_entry);
}

static bool
mlxsw_sp_ipip_entry_matches_decap(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *ul_dev,
				  enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr ul_dip,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;

	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
		return false;

	return mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, ul_dip,
						 ul_tb_id, ipip_entry);
}

/* Given decap parameters, find the corresponding IPIP entry. */
static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_decap(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *ul_dev,
				  enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr ul_dip)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
			    ipip_list_node)
		if (mlxsw_sp_ipip_entry_matches_decap(mlxsw_sp, ul_dev,
						      ul_proto, ul_dip,
						      ipip_entry))
			return ipip_entry;

	return NULL;
}

static bool mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp *mlxsw_sp,
				      const struct net_device *dev,
				      enum mlxsw_sp_ipip_type *p_type)
{
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	enum mlxsw_sp_ipip_type ipipt;

	for (ipipt = 0; ipipt < MLXSW_SP_IPIP_TYPE_MAX; ++ipipt) {
		ipip_ops = router->ipip_ops_arr[ipipt];
		if (dev->type == ipip_ops->dev_type) {
			if (p_type)
				*p_type = ipipt;
			return true;
		}
	}
	return false;
}

bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp,
				const struct net_device *dev)
{
	return mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_ol_dev(struct mlxsw_sp *mlxsw_sp,
				   const struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
			    ipip_list_node)
		if (ipip_entry->ol_dev == ol_dev)
			return ipip_entry;

	return NULL;
}

static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_ul_dev(const struct mlxsw_sp *mlxsw_sp,
				   const struct net_device *ul_dev,
				   struct mlxsw_sp_ipip_entry *start)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = list_prepare_entry(start, &mlxsw_sp->router->ipip_list,
					ipip_list_node);
	list_for_each_entry_continue(ipip_entry, &mlxsw_sp->router->ipip_list,
				     ipip_list_node) {
		struct net_device *ol_dev = ipip_entry->ol_dev;
		struct net_device *ipip_ul_dev;

		rcu_read_lock();
		ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
		rcu_read_unlock();

		if (ipip_ul_dev == ul_dev)
			return ipip_entry;
	}

	return NULL;
}

bool mlxsw_sp_netdev_is_ipip_ul(struct mlxsw_sp *mlxsw_sp,
				const struct net_device *dev)
{
	bool is_ipip_ul;

	mutex_lock(&mlxsw_sp->router->lock);
	is_ipip_ul = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp, dev, NULL);
	mutex_unlock(&mlxsw_sp->router->lock);

	return is_ipip_ul;
}

static bool mlxsw_sp_netdevice_ipip_can_offload(struct mlxsw_sp *mlxsw_sp,
						const struct net_device *ol_dev,
						enum mlxsw_sp_ipip_type ipipt)
{
	const struct mlxsw_sp_ipip_ops *ops
		= mlxsw_sp->router->ipip_ops_arr[ipipt];

	/* For deciding whether decap should be offloaded, we don't care about
	 * overlay protocol, so ask whether either one is supported.
	 */
	return ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV4) ||
	       ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV6);
}

static int mlxsw_sp_netdevice_ipip_ol_reg_event(struct mlxsw_sp *mlxsw_sp,
						struct net_device *ol_dev)
{
	enum mlxsw_sp_ipip_type ipipt = MLXSW_SP_IPIP_TYPE_MAX;
	struct mlxsw_sp_ipip_entry *ipip_entry;
	enum mlxsw_sp_l3proto ul_proto;
	union mlxsw_sp_l3addr saddr;
	u32 ul_tb_id;

	mlxsw_sp_netdev_ipip_type(mlxsw_sp, ol_dev, &ipipt);
	if (mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev, ipipt)) {
		ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
		ul_proto = mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto;
		saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
		if (!mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
							  saddr, ul_tb_id,
							  NULL)) {
			ipip_entry = mlxsw_sp_ipip_entry_create(mlxsw_sp, ipipt,
								ol_dev);
			if (IS_ERR(ipip_entry))
				return PTR_ERR(ipip_entry);
		}
	}

	return 0;
}

static void mlxsw_sp_netdevice_ipip_ol_unreg_event(struct mlxsw_sp *mlxsw_sp,
						   struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry)
		mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
}

static void
mlxsw_sp_ipip_entry_ol_up_event(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct mlxsw_sp_fib_entry *decap_fib_entry;

	decap_fib_entry = mlxsw_sp_ipip_entry_find_decap(mlxsw_sp, ipip_entry);
	if (decap_fib_entry)
		mlxsw_sp_ipip_entry_promote_decap(mlxsw_sp, ipip_entry,
						  decap_fib_entry);
}

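/* Commit the configuration of an IPIP loopback RIF to the device. Only an
 * IPv4 underlay is currently supported.
 */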
static int
mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif, u16 ul_vr_id,
			u16 ul_rif_id, bool enable)
{
	struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config;
	struct mlxsw_sp_rif *rif = &lb_rif->common;
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	char ritr_pl[MLXSW_REG_RITR_LEN];
	u32 saddr4;

	switch (lb_cf.ul_protocol) {
	case MLXSW_SP_L3_PROTO_IPV4:
		saddr4 = be32_to_cpu(lb_cf.saddr.addr4);
		mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
				    rif->rif_index, rif->vr_id, rif->dev->mtu);
		mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt,
			    MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET,
			    ul_vr_id, ul_rif_id, saddr4, lb_cf.okey);
		break;

	case MLXSW_SP_L3_PROTO_IPV6:
		return -EAFNOSUPPORT;
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

static int mlxsw_sp_netdevice_ipip_ol_update_mtu(struct mlxsw_sp *mlxsw_sp,
						 struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;
	struct mlxsw_sp_rif_ipip_lb *lb_rif;
	int err = 0;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry) {
		lb_rif = ipip_entry->ol_lb;
		err = mlxsw_sp_rif_ipip_lb_op(lb_rif, lb_rif->ul_vr_id,
					      lb_rif->ul_rif_id, true);
		if (err)
			goto out;
		lb_rif->common.mtu = ol_dev->mtu;
	}

out:
	return err;
}

static void mlxsw_sp_netdevice_ipip_ol_up_event(struct mlxsw_sp *mlxsw_sp,
						struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry)
		mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
}

static void
mlxsw_sp_ipip_entry_ol_down_event(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	if (ipip_entry->decap_fib_entry)
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
}

static void mlxsw_sp_netdevice_ipip_ol_down_event(struct mlxsw_sp *mlxsw_sp,
						  struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry)
		mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
}

static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_rif *old_rif,
					 struct mlxsw_sp_rif *new_rif);
static int
mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_ipip_entry *ipip_entry,
				 bool keep_encap,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_ipip_lb *old_lb_rif = ipip_entry->ol_lb;
	struct mlxsw_sp_rif_ipip_lb *new_lb_rif;

	new_lb_rif = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp,
						     ipip_entry->ipipt,
						     ipip_entry->ol_dev,
						     extack);
	if (IS_ERR(new_lb_rif))
		return PTR_ERR(new_lb_rif);
	ipip_entry->ol_lb = new_lb_rif;

	if (keep_encap)
		mlxsw_sp_nexthop_rif_migrate(mlxsw_sp, &old_lb_rif->common,
					     &new_lb_rif->common);

	mlxsw_sp_rif_destroy(&old_lb_rif->common);

	return 0;
}

static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_rif *rif);

/**
 * __mlxsw_sp_ipip_entry_update_tunnel - Update offload related to IPIP entry.
 * @mlxsw_sp: mlxsw_sp.
 * @ipip_entry: IPIP entry.
 * @recreate_loopback: Recreates the associated loopback RIF.
 * @keep_encap: Updates next hops that use the tunnel netdevice. This is only
 *              relevant when recreate_loopback is true.
 * @update_nexthops: Updates next hops, keeping the current loopback RIF. This
 *                   is only relevant when recreate_loopback is false.
 * @extack: extack.
 *
 * Return: Non-zero value on failure.
 */
int __mlxsw_sp_ipip_entry_update_tunnel(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_ipip_entry *ipip_entry,
					bool recreate_loopback,
					bool keep_encap,
					bool update_nexthops,
					struct netlink_ext_ack *extack)
{
	int err;

	/* RIFs can't be edited, so to update loopback, we need to destroy and
	 * recreate it. That creates a window of opportunity where RALUE and
	 * RATR registers end up referencing a RIF that's already gone. RATRs
	 * are handled in mlxsw_sp_ipip_entry_ol_lb_update(), and to take care
	 * of RALUE, demote the decap route back.
	 */
	if (ipip_entry->decap_fib_entry)
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);

	if (recreate_loopback) {
		err = mlxsw_sp_ipip_entry_ol_lb_update(mlxsw_sp, ipip_entry,
						       keep_encap, extack);
		if (err)
			return err;
	} else if (update_nexthops) {
		mlxsw_sp_nexthop_rif_update(mlxsw_sp,
					    &ipip_entry->ol_lb->common);
	}

	if (ipip_entry->ol_dev->flags & IFF_UP)
		mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);

	return 0;
}

static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp,
						struct net_device *ol_dev,
						struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_ipip_entry *ipip_entry =
		mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);

	if (!ipip_entry)
		return 0;

	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
						   true, false, false, extack);
}

static int
mlxsw_sp_netdevice_ipip_ul_vrf_event(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_ipip_entry *ipip_entry,
				     struct net_device *ul_dev,
				     bool *demote_this,
				     struct netlink_ext_ack *extack)
{
	u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
	enum mlxsw_sp_l3proto ul_proto;
	union mlxsw_sp_l3addr saddr;

	/* Moving underlay to a different VRF might cause local address
	 * conflict, and the conflicting tunnels need to be demoted.
	 */
	ul_proto = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto;
	saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
	if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
						 saddr, ul_tb_id,
						 ipip_entry)) {
		*demote_this = true;
		return 0;
	}

	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
						   true, true, false, extack);
}

static int
mlxsw_sp_netdevice_ipip_ul_up_event(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_ipip_entry *ipip_entry,
				    struct net_device *ul_dev)
{
	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
						   false, false, true, NULL);
}

static int
mlxsw_sp_netdevice_ipip_ul_down_event(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_ipip_entry *ipip_entry,
				      struct net_device *ul_dev)
{
	/* A down underlay device causes encapsulated packets to not be
	 * forwarded, but decap still works. So refresh next hops without
	 * touching anything else.
	 */
	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
						   false, false, true, NULL);
}

static int
mlxsw_sp_netdevice_ipip_ol_change_event(struct mlxsw_sp *mlxsw_sp,
					struct net_device *ol_dev,
					struct netlink_ext_ack *extack)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_ipip_entry *ipip_entry;
	int err;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (!ipip_entry)
		/* A change might make a tunnel eligible for offloading, but
		 * that is currently not implemented. What falls to slow path
		 * stays there.
		 */
		return 0;

	/* A change might make a tunnel not eligible for offloading. */
	if (!mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev,
						 ipip_entry->ipipt)) {
		mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
		return 0;
	}

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
	err = ipip_ops->ol_netdev_change(mlxsw_sp, ipip_entry, extack);
	return err;
}

void mlxsw_sp_ipip_entry_demote_tunnel(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct net_device *ol_dev = ipip_entry->ol_dev;

	if (ol_dev->flags & IFF_UP)
		mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
	mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
}

/* The configuration where several tunnels have the same local address in the
 * same underlay table needs special treatment in the HW. That is currently not
 * implemented in the driver. This function finds and demotes the first tunnel
 * with a given source address, except the one passed in the argument
 * `except'.
 */
bool
mlxsw_sp_ipip_demote_tunnel_by_saddr(struct mlxsw_sp *mlxsw_sp,
				     enum mlxsw_sp_l3proto ul_proto,
				     union mlxsw_sp_l3addr saddr,
				     u32 ul_tb_id,
				     const struct mlxsw_sp_ipip_entry *except)
{
	struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;

	list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
				 ipip_list_node) {
		if (ipip_entry != except &&
		    mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, saddr,
						      ul_tb_id, ipip_entry)) {
			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
			return true;
		}
	}

	return false;
}
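
/* Illustrative scenario (a documentation sketch, not driver logic): if two
 * tunnel netdevices are created with the same local address in the same
 * underlay table, e.g. with iproute2:
 *
 *	# ip link add gre1 type gre local 192.0.2.1 remote 198.51.100.1
 *	# ip link add gre2 type gre local 192.0.2.1 remote 198.51.100.2
 *
 * then once the second tunnel becomes eligible for offload, the helper above
 * is expected to demote the first one to the slow path, since the shared
 * local address cannot be disambiguated in HW.
 */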

static void mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(struct mlxsw_sp *mlxsw_sp,
						     struct net_device *ul_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;

	list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
				 ipip_list_node) {
		struct net_device *ol_dev = ipip_entry->ol_dev;
		struct net_device *ipip_ul_dev;

		rcu_read_lock();
		ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
		rcu_read_unlock();
		if (ipip_ul_dev == ul_dev)
			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
	}
}

int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp,
				     struct net_device *ol_dev,
				     unsigned long event,
				     struct netdev_notifier_info *info)
{
	struct netdev_notifier_changeupper_info *chup;
	struct netlink_ext_ack *extack;
	int err = 0;

	mutex_lock(&mlxsw_sp->router->lock);
	switch (event) {
	case NETDEV_REGISTER:
		err = mlxsw_sp_netdevice_ipip_ol_reg_event(mlxsw_sp, ol_dev);
		break;
	case NETDEV_UNREGISTER:
		mlxsw_sp_netdevice_ipip_ol_unreg_event(mlxsw_sp, ol_dev);
		break;
	case NETDEV_UP:
		mlxsw_sp_netdevice_ipip_ol_up_event(mlxsw_sp, ol_dev);
		break;
	case NETDEV_DOWN:
		mlxsw_sp_netdevice_ipip_ol_down_event(mlxsw_sp, ol_dev);
		break;
	case NETDEV_CHANGEUPPER:
		chup = container_of(info, typeof(*chup), info);
		extack = info->extack;
		if (netif_is_l3_master(chup->upper_dev))
			err = mlxsw_sp_netdevice_ipip_ol_vrf_event(mlxsw_sp,
								   ol_dev,
								   extack);
		break;
	case NETDEV_CHANGE:
		extack = info->extack;
		err = mlxsw_sp_netdevice_ipip_ol_change_event(mlxsw_sp,
							      ol_dev, extack);
		break;
	case NETDEV_CHANGEMTU:
		err = mlxsw_sp_netdevice_ipip_ol_update_mtu(mlxsw_sp, ol_dev);
		break;
	}
	mutex_unlock(&mlxsw_sp->router->lock);
	return err;
}

static int
__mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_ipip_entry *ipip_entry,
				   struct net_device *ul_dev,
				   bool *demote_this,
				   unsigned long event,
				   struct netdev_notifier_info *info)
{
	struct netdev_notifier_changeupper_info *chup;
	struct netlink_ext_ack *extack;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		chup = container_of(info, typeof(*chup), info);
		extack = info->extack;
		if (netif_is_l3_master(chup->upper_dev))
			return mlxsw_sp_netdevice_ipip_ul_vrf_event(mlxsw_sp,
								    ipip_entry,
								    ul_dev,
								    demote_this,
								    extack);
		break;

	case NETDEV_UP:
		return mlxsw_sp_netdevice_ipip_ul_up_event(mlxsw_sp, ipip_entry,
							   ul_dev);
	case NETDEV_DOWN:
		return mlxsw_sp_netdevice_ipip_ul_down_event(mlxsw_sp,
							     ipip_entry,
							     ul_dev);
	}
	return 0;
}

int
mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
				 struct net_device *ul_dev,
				 unsigned long event,
				 struct netdev_notifier_info *info)
{
	struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
	int err = 0;

	mutex_lock(&mlxsw_sp->router->lock);
	while ((ipip_entry = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp,
								ul_dev,
								ipip_entry))) {
		struct mlxsw_sp_ipip_entry *prev;
		bool demote_this = false;

		err = __mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, ipip_entry,
							 ul_dev, &demote_this,
							 event, info);
		if (err) {
			mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(mlxsw_sp,
								 ul_dev);
			break;
		}

		if (demote_this) {
			if (list_is_first(&ipip_entry->ipip_list_node,
					  &mlxsw_sp->router->ipip_list))
				prev = NULL;
			else
				/* This can't be cached from previous iteration,
				 * because that entry could be gone now.
				 */
				prev = list_prev_entry(ipip_entry,
						       ipip_list_node);
			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
			ipip_entry = prev;
		}
	}
	mutex_unlock(&mlxsw_sp->router->lock);

	return err;
}

int mlxsw_sp_router_nve_promote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
				      enum mlxsw_sp_l3proto ul_proto,
				      const union mlxsw_sp_l3addr *ul_sip,
				      u32 tunnel_index)
{
	enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	struct mlxsw_sp_fib_entry *fib_entry;
	int err = 0;

	mutex_lock(&mlxsw_sp->router->lock);

	if (WARN_ON_ONCE(router->nve_decap_config.valid)) {
		err = -EINVAL;
		goto out;
	}

	router->nve_decap_config.ul_tb_id = ul_tb_id;
	router->nve_decap_config.tunnel_index = tunnel_index;
	router->nve_decap_config.ul_proto = ul_proto;
	router->nve_decap_config.ul_sip = *ul_sip;
	router->nve_decap_config.valid = true;

	/* It is valid to create a tunnel with a local IP and only later
	 * assign this IP address to a local interface.
	 */
	fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
							 ul_proto, ul_sip,
							 type);
	if (!fib_entry)
		goto out;

	fib_entry->decap.tunnel_index = tunnel_index;
	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;

	err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
	if (err)
		goto err_fib_entry_update;

	goto out;

err_fib_entry_update:
	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
out:
	mutex_unlock(&mlxsw_sp->router->lock);
	return err;
}

void mlxsw_sp_router_nve_demote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
				      enum mlxsw_sp_l3proto ul_proto,
				      const union mlxsw_sp_l3addr *ul_sip)
{
	enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	struct mlxsw_sp_fib_entry *fib_entry;

	mutex_lock(&mlxsw_sp->router->lock);

	if (WARN_ON_ONCE(!router->nve_decap_config.valid))
		goto out;

	router->nve_decap_config.valid = false;

	fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
							 ul_proto, ul_sip,
							 type);
	if (!fib_entry)
		goto out;

	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
out:
	mutex_unlock(&mlxsw_sp->router->lock);
}

static bool mlxsw_sp_router_nve_is_decap(struct mlxsw_sp *mlxsw_sp,
					 u32 ul_tb_id,
					 enum mlxsw_sp_l3proto ul_proto,
					 const union mlxsw_sp_l3addr *ul_sip)
{
	struct mlxsw_sp_router *router = mlxsw_sp->router;

	return router->nve_decap_config.valid &&
	       router->nve_decap_config.ul_tb_id == ul_tb_id &&
	       router->nve_decap_config.ul_proto == ul_proto &&
	       !memcmp(&router->nve_decap_config.ul_sip, ul_sip,
		       sizeof(*ul_sip));
}

struct mlxsw_sp_neigh_key {
	struct neighbour *n;
};

struct mlxsw_sp_neigh_entry {
	struct list_head rif_list_node;
	struct rhash_head ht_node;
	struct mlxsw_sp_neigh_key key;
	u16 rif;
	bool connected;
	unsigned char ha[ETH_ALEN];
	struct list_head nexthop_list; /* list of nexthops using
					* this neigh entry
					*/
	struct list_head nexthop_neighs_list_node;
	unsigned int counter_index;
	bool counter_valid;
};

static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
	.head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
	.key_len = sizeof(struct mlxsw_sp_neigh_key),
};

struct mlxsw_sp_neigh_entry *
mlxsw_sp_rif_neigh_next(struct mlxsw_sp_rif *rif,
			struct mlxsw_sp_neigh_entry *neigh_entry)
{
	if (!neigh_entry) {
		if (list_empty(&rif->neigh_list))
			return NULL;
		else
			return list_first_entry(&rif->neigh_list,
						typeof(*neigh_entry),
						rif_list_node);
	}
	if (list_is_last(&neigh_entry->rif_list_node, &rif->neigh_list))
		return NULL;
	return list_next_entry(neigh_entry, rif_list_node);
}
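
/* Usage sketch (illustrative only, not called from this file): the helper
 * above is a cursor-style iterator, so a caller such as the dpipe host
 * table dump is expected to walk a RIF's neighbours as
 *
 *	struct mlxsw_sp_neigh_entry *pos = NULL;
 *
 *	while ((pos = mlxsw_sp_rif_neigh_next(rif, pos)))
 *		process_neigh(pos);
 *
 * where process_neigh() stands for whatever per-entry work the caller does.
 */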

int mlxsw_sp_neigh_entry_type(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	return neigh_entry->key.n->tbl->family;
}

unsigned char *
mlxsw_sp_neigh_entry_ha(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	return neigh_entry->ha;
}

u32 mlxsw_sp_neigh4_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	struct neighbour *n;

	n = neigh_entry->key.n;
	return ntohl(*((__be32 *) n->primary_key));
}

struct in6_addr *
mlxsw_sp_neigh6_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	struct neighbour *n;

	n = neigh_entry->key.n;
	return (struct in6_addr *) &n->primary_key;
}

int mlxsw_sp_neigh_counter_get(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_neigh_entry *neigh_entry,
			       u64 *p_counter)
{
	if (!neigh_entry->counter_valid)
		return -EINVAL;

	return mlxsw_sp_flow_counter_get(mlxsw_sp, neigh_entry->counter_index,
					 p_counter, NULL);
}
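
/* Usage sketch (illustrative): a counter consumer such as the dpipe host
 * table dump would read the activity counter as
 *
 *	u64 packets;
 *
 *	if (!mlxsw_sp_neigh_counter_get(mlxsw_sp, neigh_entry, &packets))
 *		report_packets(packets);
 *
 * where report_packets() is a hypothetical consumer; -EINVAL is returned
 * when no counter was allocated for the entry.
 */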

static struct mlxsw_sp_neigh_entry *
mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
			   u16 rif)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;

	neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
	if (!neigh_entry)
		return NULL;

	neigh_entry->key.n = n;
	neigh_entry->rif = rif;
	INIT_LIST_HEAD(&neigh_entry->nexthop_list);

	return neigh_entry;
}

static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	kfree(neigh_entry);
}

static int
mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	return rhashtable_insert_fast(&mlxsw_sp->router->neigh_ht,
				      &neigh_entry->ht_node,
				      mlxsw_sp_neigh_ht_params);
}

static void
mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	rhashtable_remove_fast(&mlxsw_sp->router->neigh_ht,
			       &neigh_entry->ht_node,
			       mlxsw_sp_neigh_ht_params);
}

static bool
mlxsw_sp_neigh_counter_should_alloc(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	struct devlink *devlink;
	const char *table_name;

	switch (mlxsw_sp_neigh_entry_type(neigh_entry)) {
	case AF_INET:
		table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST4;
		break;
	case AF_INET6:
		table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST6;
		break;
	default:
		WARN_ON(1);
		return false;
	}

	devlink = priv_to_devlink(mlxsw_sp->core);
	return devlink_dpipe_table_counter_enabled(devlink, table_name);
}

static void
mlxsw_sp_neigh_counter_alloc(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_neigh_entry *neigh_entry)
{
	if (!mlxsw_sp_neigh_counter_should_alloc(mlxsw_sp, neigh_entry))
		return;

	if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &neigh_entry->counter_index))
		return;

	neigh_entry->counter_valid = true;
}

static void
mlxsw_sp_neigh_counter_free(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	if (!neigh_entry->counter_valid)
		return;
	mlxsw_sp_flow_counter_free(mlxsw_sp,
				   neigh_entry->counter_index);
	neigh_entry->counter_valid = false;
}

static struct mlxsw_sp_neigh_entry *
mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct mlxsw_sp_rif *rif;
	int err;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
	if (!rif)
		return ERR_PTR(-EINVAL);

	neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
	if (!neigh_entry)
		return ERR_PTR(-ENOMEM);

	err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
	if (err)
		goto err_neigh_entry_insert;

	mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
	list_add(&neigh_entry->rif_list_node, &rif->neigh_list);

	return neigh_entry;

err_neigh_entry_insert:
	mlxsw_sp_neigh_entry_free(neigh_entry);
	return ERR_PTR(err);
}

static void
mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_neigh_entry *neigh_entry)
{
	list_del(&neigh_entry->rif_list_node);
	mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
	mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
	mlxsw_sp_neigh_entry_free(neigh_entry);
}

static struct mlxsw_sp_neigh_entry *
mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
{
	struct mlxsw_sp_neigh_key key;

	key.n = n;
	return rhashtable_lookup_fast(&mlxsw_sp->router->neigh_ht,
				      &key, mlxsw_sp_neigh_ht_params);
}

static void
mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
{
	unsigned long interval;

#if IS_ENABLED(CONFIG_IPV6)
	interval = min_t(unsigned long,
			 NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME),
			 NEIGH_VAR(&nd_tbl.parms, DELAY_PROBE_TIME));
#else
	interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
#endif
	mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval);
}

static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
						   char *rauhtd_pl,
						   int ent_index)
{
	struct net_device *dev;
	struct neighbour *n;
	__be32 dipn;
	u32 dip;
	u16 rif;

	mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);

	if (!mlxsw_sp->router->rifs[rif]) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
		return;
	}

	dipn = htonl(dip);
	dev = mlxsw_sp->router->rifs[rif]->dev;
	n = neigh_lookup(&arp_tbl, &dipn, dev);
	if (!n)
		return;

	netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
	neigh_event_send(n, NULL);
	neigh_release(n);
}

#if IS_ENABLED(CONFIG_IPV6)
static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
						   char *rauhtd_pl,
						   int rec_index)
{
	struct net_device *dev;
	struct neighbour *n;
	struct in6_addr dip;
	u16 rif;

	mlxsw_reg_rauhtd_ent_ipv6_unpack(rauhtd_pl, rec_index, &rif,
					 (char *) &dip);

	if (!mlxsw_sp->router->rifs[rif]) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
		return;
	}

	dev = mlxsw_sp->router->rifs[rif]->dev;
	n = neigh_lookup(&nd_tbl, &dip, dev);
	if (!n)
		return;

	netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip);
	neigh_event_send(n, NULL);
	neigh_release(n);
}
#else
static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
						   char *rauhtd_pl,
						   int rec_index)
{
}
#endif

static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
						   char *rauhtd_pl,
						   int rec_index)
{
	u8 num_entries;
	int i;

	num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
								rec_index);
	/* Hardware starts counting at 0, so add 1. */
	num_entries++;

	/* Each record consists of several neighbour entries. */
	for (i = 0; i < num_entries; i++) {
		int ent_index;

		ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
		mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
						       ent_index);
	}
}

static void mlxsw_sp_router_neigh_rec_ipv6_process(struct mlxsw_sp *mlxsw_sp,
						   char *rauhtd_pl,
						   int rec_index)
{
	/* One record contains one entry. */
	mlxsw_sp_router_neigh_ent_ipv6_process(mlxsw_sp, rauhtd_pl,
					       rec_index);
}

static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
					      char *rauhtd_pl, int rec_index)
{
	switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
	case MLXSW_REG_RAUHTD_TYPE_IPV4:
		mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
						       rec_index);
		break;
	case MLXSW_REG_RAUHTD_TYPE_IPV6:
		mlxsw_sp_router_neigh_rec_ipv6_process(mlxsw_sp, rauhtd_pl,
						       rec_index);
		break;
	}
}

static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
{
	u8 num_rec, last_rec_index, num_entries;

	num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
	last_rec_index = num_rec - 1;

	if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
		return false;
	if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
	    MLXSW_REG_RAUHTD_TYPE_IPV6)
		return true;

	num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
								last_rec_index);
	if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
		return true;
	return false;
}
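
/* Illustrative note: the dump is full when the response carries the maximum
 * number of records and the last record is itself full. An IPv6 record
 * holds a single entry, so reaching one at the end means full. An IPv4
 * record is full only when all of its MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC
 * entries are used; since the hardware counts entries from zero, the
 * increment above converts the reported count before the comparison.
 */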

static int
__mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp,
				       char *rauhtd_pl,
				       enum mlxsw_reg_rauhtd_type type)
{
	int i, num_rec;
	int err;

	/* Ensure the RIF we read from the device does not change mid-dump. */
	mutex_lock(&mlxsw_sp->router->lock);
	do {
		mlxsw_reg_rauhtd_pack(rauhtd_pl, type);
		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
				      rauhtd_pl);
		if (err) {
			dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
			break;
		}
		num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
		for (i = 0; i < num_rec; i++)
			mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
							  i);
	} while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
	mutex_unlock(&mlxsw_sp->router->lock);

	return err;
}

static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
{
	enum mlxsw_reg_rauhtd_type type;
	char *rauhtd_pl;
	int err;

	rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
	if (!rauhtd_pl)
		return -ENOMEM;

	type = MLXSW_REG_RAUHTD_TYPE_IPV4;
	err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
	if (err)
		goto out;

	type = MLXSW_REG_RAUHTD_TYPE_IPV6;
	err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
out:
	kfree(rauhtd_pl);
	return err;
}

static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;

	mutex_lock(&mlxsw_sp->router->lock);
	list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list,
			    nexthop_neighs_list_node)
		/* If this neigh has nexthops, make the kernel think it is
		 * active regardless of the traffic.
		 */
		neigh_event_send(neigh_entry->key.n, NULL);
	mutex_unlock(&mlxsw_sp->router->lock);
}

static void
mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
{
	unsigned long interval = mlxsw_sp->router->neighs_update.interval;

	mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw,
			       msecs_to_jiffies(interval));
}

static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
{
	struct mlxsw_sp_router *router;
	int err;

	router = container_of(work, struct mlxsw_sp_router,
			      neighs_update.dw.work);
	err = mlxsw_sp_router_neighs_update_rauhtd(router->mlxsw_sp);
	if (err)
		dev_err(router->mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity");

	mlxsw_sp_router_neighs_update_nh(router->mlxsw_sp);

	mlxsw_sp_router_neighs_update_work_schedule(router->mlxsw_sp);
}

static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct mlxsw_sp_router *router;

	router = container_of(work, struct mlxsw_sp_router,
			      nexthop_probe_dw.work);
	/* Iterate over nexthop neighbours, find the unresolved ones and send
	 * ARP on them. This solves the chicken-and-egg problem where a
	 * nexthop would not get offloaded until its neighbour is resolved,
	 * but the neighbour would never get resolved if traffic keeps
	 * flowing in HW via a different nexthop.
	 */
	mutex_lock(&router->lock);
	list_for_each_entry(neigh_entry, &router->nexthop_neighs_list,
			    nexthop_neighs_list_node)
		if (!neigh_entry->connected)
			neigh_event_send(neigh_entry->key.n, NULL);
	mutex_unlock(&router->lock);

	mlxsw_core_schedule_dw(&router->nexthop_probe_dw,
			       MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
}

static void
mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_neigh_entry *neigh_entry,
			      bool removing, bool dead);

static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
{
	return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
			MLXSW_REG_RAUHT_OP_WRITE_DELETE;
}

static int
mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_neigh_entry *neigh_entry,
				enum mlxsw_reg_rauht_op op)
{
	struct neighbour *n = neigh_entry->key.n;
	u32 dip = ntohl(*((__be32 *) n->primary_key));
	char rauht_pl[MLXSW_REG_RAUHT_LEN];

	mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
			      dip);
	if (neigh_entry->counter_valid)
		mlxsw_reg_rauht_pack_counter(rauht_pl,
					     neigh_entry->counter_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
}

static int
mlxsw_sp_router_neigh_entry_op6(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_neigh_entry *neigh_entry,
				enum mlxsw_reg_rauht_op op)
{
	struct neighbour *n = neigh_entry->key.n;
	char rauht_pl[MLXSW_REG_RAUHT_LEN];
	const char *dip = n->primary_key;

	mlxsw_reg_rauht_pack6(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
			      dip);
	if (neigh_entry->counter_valid)
		mlxsw_reg_rauht_pack_counter(rauht_pl,
					     neigh_entry->counter_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
}

bool mlxsw_sp_neigh_ipv6_ignore(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	struct neighbour *n = neigh_entry->key.n;

	/* Packets with a link-local destination address are trapped
	 * after LPM lookup and never reach the neighbour table, so
	 * there is no need to program such neighbours to the device.
	 */
	if (ipv6_addr_type((struct in6_addr *) &n->primary_key) &
	    IPV6_ADDR_LINKLOCAL)
		return true;
	return false;
}

static void
mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry,
			    bool adding)
{
	enum mlxsw_reg_rauht_op op = mlxsw_sp_rauht_op(adding);
	int err;

	if (!adding && !neigh_entry->connected)
		return;
	neigh_entry->connected = adding;
	if (neigh_entry->key.n->tbl->family == AF_INET) {
		err = mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
						      op);
		if (err)
			return;
	} else if (neigh_entry->key.n->tbl->family == AF_INET6) {
		if (mlxsw_sp_neigh_ipv6_ignore(neigh_entry))
			return;
		err = mlxsw_sp_router_neigh_entry_op6(mlxsw_sp, neigh_entry,
						      op);
		if (err)
			return;
	} else {
		WARN_ON_ONCE(1);
		return;
	}

	if (adding)
		neigh_entry->key.n->flags |= NTF_OFFLOADED;
	else
		neigh_entry->key.n->flags &= ~NTF_OFFLOADED;
}

void
mlxsw_sp_neigh_entry_counter_update(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_neigh_entry *neigh_entry,
				    bool adding)
{
	if (adding)
		mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
	else
		mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
	mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, true);
}

struct mlxsw_sp_netevent_work {
	struct work_struct work;
	struct mlxsw_sp *mlxsw_sp;
	struct neighbour *n;
};

static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
{
	struct mlxsw_sp_netevent_work *net_work =
		container_of(work, struct mlxsw_sp_netevent_work, work);
	struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct neighbour *n = net_work->n;
	unsigned char ha[ETH_ALEN];
	bool entry_connected;
	u8 nud_state, dead;

	/* If these parameters are changed after we release the lock,
	 * then we are guaranteed to receive another event letting us
	 * know about it.
	 */
	read_lock_bh(&n->lock);
	memcpy(ha, n->ha, ETH_ALEN);
	nud_state = n->nud_state;
	dead = n->dead;
	read_unlock_bh(&n->lock);

	mutex_lock(&mlxsw_sp->router->lock);
	mlxsw_sp_span_respin(mlxsw_sp);

	entry_connected = nud_state & NUD_VALID && !dead;
	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
	if (!entry_connected && !neigh_entry)
		goto out;
	if (!neigh_entry) {
		neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
		if (IS_ERR(neigh_entry))
			goto out;
	}

	memcpy(neigh_entry->ha, ha, ETH_ALEN);
	mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
	mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected,
				      dead);

	if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);

out:
	mutex_unlock(&mlxsw_sp->router->lock);
	neigh_release(n);
	kfree(net_work);
}

static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp);

static void mlxsw_sp_router_mp_hash_event_work(struct work_struct *work)
{
	struct mlxsw_sp_netevent_work *net_work =
		container_of(work, struct mlxsw_sp_netevent_work, work);
	struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;

	mlxsw_sp_mp_hash_init(mlxsw_sp);
	kfree(net_work);
}

static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);

static void mlxsw_sp_router_update_priority_work(struct work_struct *work)
{
	struct mlxsw_sp_netevent_work *net_work =
		container_of(work, struct mlxsw_sp_netevent_work, work);
	struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;

	__mlxsw_sp_router_init(mlxsw_sp);
	kfree(net_work);
}

static int mlxsw_sp_router_schedule_work(struct net *net,
					 struct notifier_block *nb,
					 void (*cb)(struct work_struct *))
{
	struct mlxsw_sp_netevent_work *net_work;
	struct mlxsw_sp_router *router;

	router = container_of(nb, struct mlxsw_sp_router, netevent_nb);
	if (!net_eq(net, mlxsw_sp_net(router->mlxsw_sp)))
		return NOTIFY_DONE;

	net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
	if (!net_work)
		return NOTIFY_BAD;

	INIT_WORK(&net_work->work, cb);
	net_work->mlxsw_sp = router->mlxsw_sp;
	mlxsw_core_schedule_work(&net_work->work);
	return NOTIFY_DONE;
}

static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
					  unsigned long event, void *ptr)
{
	struct mlxsw_sp_netevent_work *net_work;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct mlxsw_sp *mlxsw_sp;
	unsigned long interval;
	struct neigh_parms *p;
	struct neighbour *n;

	switch (event) {
	case NETEVENT_DELAY_PROBE_TIME_UPDATE:
		p = ptr;

		/* We don't care about changes in the default table. */
		if (!p->dev || (p->tbl->family != AF_INET &&
				p->tbl->family != AF_INET6))
			return NOTIFY_DONE;

		/* We are in atomic context and can't take RTNL mutex,
		 * so use RCU variant to walk the device chain.
		 */
		mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
		if (!mlxsw_sp_port)
			return NOTIFY_DONE;

		mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
		interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
		mlxsw_sp->router->neighs_update.interval = interval;

		mlxsw_sp_port_dev_put(mlxsw_sp_port);
		break;
	case NETEVENT_NEIGH_UPDATE:
		n = ptr;

		if (n->tbl->family != AF_INET && n->tbl->family != AF_INET6)
			return NOTIFY_DONE;

		mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev);
		if (!mlxsw_sp_port)
			return NOTIFY_DONE;

		net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
		if (!net_work) {
			mlxsw_sp_port_dev_put(mlxsw_sp_port);
			return NOTIFY_BAD;
		}

		INIT_WORK(&net_work->work, mlxsw_sp_router_neigh_event_work);
		net_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
		net_work->n = n;

		/* Take a reference to ensure the neighbour won't be
		 * destructed until we drop the reference in delayed
		 * work.
		 */
		neigh_clone(n);
		mlxsw_core_schedule_work(&net_work->work);
		mlxsw_sp_port_dev_put(mlxsw_sp_port);
		break;
	case NETEVENT_IPV4_MPATH_HASH_UPDATE:
	case NETEVENT_IPV6_MPATH_HASH_UPDATE:
		return mlxsw_sp_router_schedule_work(ptr, nb,
				mlxsw_sp_router_mp_hash_event_work);

	case NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE:
		return mlxsw_sp_router_schedule_work(ptr, nb,
				mlxsw_sp_router_update_priority_work);
	}

	return NOTIFY_DONE;
}

static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = rhashtable_init(&mlxsw_sp->router->neigh_ht,
			      &mlxsw_sp_neigh_ht_params);
	if (err)
		return err;

	/* Initialize the polling interval according to the default
	 * table.
	 */
	mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);

	/* Create the delayed works for neighbour activity update and for
	 * probing of unresolved nexthops.
	 */
	INIT_DELAYED_WORK(&mlxsw_sp->router->neighs_update.dw,
			  mlxsw_sp_router_neighs_update_work);
	INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
			  mlxsw_sp_router_probe_unresolved_nexthops);
	mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
	mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
	return 0;
}

static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
{
	cancel_delayed_work_sync(&mlxsw_sp->router->neighs_update.dw);
	cancel_delayed_work_sync(&mlxsw_sp->router->nexthop_probe_dw);
	rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
}

static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;

	list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
				 rif_list_node) {
		mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false);
		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
	}
}

enum mlxsw_sp_nexthop_type {
	MLXSW_SP_NEXTHOP_TYPE_ETH,
	MLXSW_SP_NEXTHOP_TYPE_IPIP,
};

struct mlxsw_sp_nexthop_key {
	struct fib_nh *fib_nh;
};

struct mlxsw_sp_nexthop {
	struct list_head neigh_list_node; /* member of neigh entry list */
	struct list_head rif_list_node;
	struct list_head router_list_node;
	struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group
						* this belongs to
						*/
	struct rhash_head ht_node;
	struct neigh_table *neigh_tbl;
	struct mlxsw_sp_nexthop_key key;
	unsigned char gw_addr[sizeof(struct in6_addr)];
	int ifindex;
	int nh_weight;
	int norm_nh_weight;
	int num_adj_entries;
	struct mlxsw_sp_rif *rif;
	u8 should_offload:1, /* set indicates this neigh is connected and
			      * should be put to KVD linear area of this group.
			      */
	   offloaded:1, /* set in case the neigh is actually put into
			 * KVD linear area of this group.
			 */
	   update:1; /* set indicates that MAC of this neigh should be
		      * updated in HW
		      */
	enum mlxsw_sp_nexthop_type type;
	union {
		struct mlxsw_sp_neigh_entry *neigh_entry;
		struct mlxsw_sp_ipip_entry *ipip_entry;
	};
	unsigned int counter_index;
	bool counter_valid;
};

enum mlxsw_sp_nexthop_group_type {
	MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4,
	MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6,
};

struct mlxsw_sp_nexthop_group {
	union {
		struct {
			struct fib_info *fi;
		} ipv4;
	};
	struct rhash_head ht_node;
	struct list_head fib_list; /* list of fib entries that use this group */
	enum mlxsw_sp_nexthop_group_type type;
	u8 adj_index_valid:1,
	   gateway:1; /* routes using the group use a gateway */
	u32 adj_index;
	u16 ecmp_size;
	u16 count;
	int sum_norm_weight;
	struct mlxsw_sp_nexthop nexthops[0];
#define nh_rif	nexthops[0].rif
};

void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_nexthop *nh)
{
	struct devlink *devlink;

	devlink = priv_to_devlink(mlxsw_sp->core);
	if (!devlink_dpipe_table_counter_enabled(devlink,
						 MLXSW_SP_DPIPE_TABLE_NAME_ADJ))
		return;

	if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &nh->counter_index))
		return;

	nh->counter_valid = true;
}

void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_nexthop *nh)
{
	if (!nh->counter_valid)
		return;
	mlxsw_sp_flow_counter_free(mlxsw_sp, nh->counter_index);
	nh->counter_valid = false;
}

int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_nexthop *nh, u64 *p_counter)
{
	if (!nh->counter_valid)
		return -EINVAL;

	return mlxsw_sp_flow_counter_get(mlxsw_sp, nh->counter_index,
					 p_counter, NULL);
}

struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router,
					       struct mlxsw_sp_nexthop *nh)
{
	if (!nh) {
		if (list_empty(&router->nexthop_list))
			return NULL;
		else
			return list_first_entry(&router->nexthop_list,
						typeof(*nh), router_list_node);
	}
	if (list_is_last(&nh->router_list_node, &router->nexthop_list))
		return NULL;
	return list_next_entry(nh, router_list_node);
}

bool mlxsw_sp_nexthop_offload(struct mlxsw_sp_nexthop *nh)
{
	return nh->offloaded;
}

unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh)
{
	if (!nh->offloaded)
		return NULL;
	return nh->neigh_entry->ha;
}

int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index,
			     u32 *p_adj_size, u32 *p_adj_hash_index)
{
	struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp;
	u32 adj_hash_index = 0;
	int i;

	if (!nh->offloaded || !nh_grp->adj_index_valid)
		return -EINVAL;

	*p_adj_index = nh_grp->adj_index;
	*p_adj_size = nh_grp->ecmp_size;

	for (i = 0; i < nh_grp->count; i++) {
		struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i];

		if (nh_iter == nh)
			break;
		if (nh_iter->offloaded)
			adj_hash_index += nh_iter->num_adj_entries;
	}

	*p_adj_hash_index = adj_hash_index;
	return 0;
}
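
/* Worked example (illustrative): in a group with adj_index 1000, ecmp_size 4
 * and two offloaded nexthops whose num_adj_entries are 1 and 3, querying the
 * second nexthop yields *p_adj_index = 1000, *p_adj_size = 4 and
 * *p_adj_hash_index = 1, i.e. its entries occupy offsets 1..3 of the group.
 */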

struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh)
{
	return nh->rif;
}

bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh)
{
	struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp;
	int i;

	for (i = 0; i < nh_grp->count; i++) {
		struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i];

		if (nh_iter->type == MLXSW_SP_NEXTHOP_TYPE_IPIP)
			return true;
	}
	return false;
}

struct mlxsw_sp_nexthop_group_cmp_arg {
	enum mlxsw_sp_nexthop_group_type type;
	union {
		struct fib_info *fi;
		struct mlxsw_sp_fib6_entry *fib6_entry;
	};
};

static bool
mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp,
				    const struct in6_addr *gw, int ifindex,
				    int weight)
{
	int i;

	for (i = 0; i < nh_grp->count; i++) {
		const struct mlxsw_sp_nexthop *nh;

		nh = &nh_grp->nexthops[i];
		if (nh->ifindex == ifindex && nh->nh_weight == weight &&
		    ipv6_addr_equal(gw, (struct in6_addr *) nh->gw_addr))
			return true;
	}

	return false;
}

static bool
mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp,
			    const struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;

	if (nh_grp->count != fib6_entry->nrt6)
		return false;

	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
		struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
		struct in6_addr *gw;
		int ifindex, weight;

		ifindex = fib6_nh->fib_nh_dev->ifindex;
		weight = fib6_nh->fib_nh_weight;
		gw = &fib6_nh->fib_nh_gw6;
		if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex,
							 weight))
			return false;
	}

	return true;
}

static int
mlxsw_sp_nexthop_group_cmp(struct rhashtable_compare_arg *arg, const void *ptr)
{
	const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = arg->key;
	const struct mlxsw_sp_nexthop_group *nh_grp = ptr;

	if (nh_grp->type != cmp_arg->type)
		return 1;

	switch (cmp_arg->type) {
	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
		return cmp_arg->fi != nh_grp->ipv4.fi;
	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
		return !mlxsw_sp_nexthop6_group_cmp(nh_grp,
						    cmp_arg->fib6_entry);
	default:
		WARN_ON(1);
		return 1;
	}
}

static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed)
{
	const struct mlxsw_sp_nexthop_group *nh_grp = data;
	const struct mlxsw_sp_nexthop *nh;
	struct fib_info *fi;
	unsigned int val;
	int i;

	switch (nh_grp->type) {
	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
		fi = nh_grp->ipv4.fi;
		return jhash(&fi, sizeof(fi), seed);
	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
		val = nh_grp->count;
		for (i = 0; i < nh_grp->count; i++) {
			nh = &nh_grp->nexthops[i];
			val ^= jhash(&nh->ifindex, sizeof(nh->ifindex), seed);
			val ^= jhash(&nh->gw_addr, sizeof(nh->gw_addr), seed);
		}
		return jhash(&val, sizeof(val), seed);
	default:
		WARN_ON(1);
		return 0;
	}
}

static u32
mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed)
{
	unsigned int val = fib6_entry->nrt6;
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;

	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
		struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
		struct net_device *dev = fib6_nh->fib_nh_dev;
		struct in6_addr *gw = &fib6_nh->fib_nh_gw6;

		val ^= jhash(&dev->ifindex, sizeof(dev->ifindex), seed);
		val ^= jhash(gw, sizeof(*gw), seed);
	}

	return jhash(&val, sizeof(val), seed);
}

static u32
mlxsw_sp_nexthop_group_hash(const void *data, u32 len, u32 seed)
{
	const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = data;


	switch (cmp_arg->type) {
	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
		return jhash(&cmp_arg->fi, sizeof(cmp_arg->fi), seed);
	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
		return mlxsw_sp_nexthop6_group_hash(cmp_arg->fib6_entry, seed);
	default:
		WARN_ON(1);
		return 0;
	}
}

static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
	.head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
	.hashfn	     = mlxsw_sp_nexthop_group_hash,
	.obj_hashfn  = mlxsw_sp_nexthop_group_hash_obj,
	.obj_cmpfn   = mlxsw_sp_nexthop_group_cmp,
};

static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_nexthop_group *nh_grp)
{
	if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6 &&
	    !nh_grp->gateway)
		return 0;

	return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht,
				      &nh_grp->ht_node,
				      mlxsw_sp_nexthop_group_ht_params);
}

static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_nexthop_group *nh_grp)
{
	if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6 &&
	    !nh_grp->gateway)
		return;

	rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht,
			       &nh_grp->ht_node,
			       mlxsw_sp_nexthop_group_ht_params);
}

static struct mlxsw_sp_nexthop_group *
mlxsw_sp_nexthop4_group_lookup(struct mlxsw_sp *mlxsw_sp,
			       struct fib_info *fi)
{
	struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;

	cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4;
	cmp_arg.fi = fi;
	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
				      &cmp_arg,
				      mlxsw_sp_nexthop_group_ht_params);
}

static struct mlxsw_sp_nexthop_group *
mlxsw_sp_nexthop6_group_lookup(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;

	cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6;
	cmp_arg.fib6_entry = fib6_entry;
	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
				      &cmp_arg,
				      mlxsw_sp_nexthop_group_ht_params);
}

static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_nexthop, key),
	.head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
	.key_len = sizeof(struct mlxsw_sp_nexthop_key),
};

static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_nexthop *nh)
{
	return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_ht,
				      &nh->ht_node, mlxsw_sp_nexthop_ht_params);
}

static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_nexthop *nh)
{
	rhashtable_remove_fast(&mlxsw_sp->router->nexthop_ht, &nh->ht_node,
			       mlxsw_sp_nexthop_ht_params);
}

static struct mlxsw_sp_nexthop *
mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
			struct mlxsw_sp_nexthop_key key)
{
	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_ht, &key,
				      mlxsw_sp_nexthop_ht_params);
}

static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
					     const struct mlxsw_sp_fib *fib,
					     u32 adj_index, u16 ecmp_size,
					     u32 new_adj_index,
					     u16 new_ecmp_size)
{
	char raleu_pl[MLXSW_REG_RALEU_LEN];

	mlxsw_reg_raleu_pack(raleu_pl,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto,
			     fib->vr->id, adj_index, ecmp_size, new_adj_index,
			     new_ecmp_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
}

static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_nexthop_group *nh_grp,
					  u32 old_adj_index, u16 old_ecmp_size)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	struct mlxsw_sp_fib *fib = NULL;
	int err;

	list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
		if (fib == fib_entry->fib_node->fib)
			continue;
		fib = fib_entry->fib_node->fib;
		err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, fib,
							old_adj_index,
							old_ecmp_size,
							nh_grp->adj_index,
							nh_grp->ecmp_size);
		if (err)
			return err;
	}
	return 0;
}

static int __mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
				     struct mlxsw_sp_nexthop *nh)
{
	struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
	char ratr_pl[MLXSW_REG_RATR_LEN];

	mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
			    true, MLXSW_REG_RATR_TYPE_ETHERNET,
			    adj_index, neigh_entry->rif);
	mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
	if (nh->counter_valid)
		mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter_index, true);
	else
		mlxsw_reg_ratr_counter_pack(ratr_pl, 0, false);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
}

3269 3270 3271 3272 3273 3274 3275 3276 3277 3278 3279 3280 3281 3282 3283 3284 3285 3286 3287
int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
			    struct mlxsw_sp_nexthop *nh)
{
	int i;

	for (i = 0; i < nh->num_adj_entries; i++) {
		int err;

		err = __mlxsw_sp_nexthop_update(mlxsw_sp, adj_index + i, nh);
		if (err)
			return err;
	}

	return 0;
}

static int __mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
					  u32 adj_index,
					  struct mlxsw_sp_nexthop *nh)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[nh->ipip_entry->ipipt];
	return ipip_ops->nexthop_update(mlxsw_sp, adj_index, nh->ipip_entry);
}

static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
					u32 adj_index,
					struct mlxsw_sp_nexthop *nh)
{
	int i;

	for (i = 0; i < nh->num_adj_entries; i++) {
		int err;

		err = __mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index + i,
						     nh);
		if (err)
			return err;
	}

	return 0;
}

static int
mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_nexthop_group *nh_grp,
			      bool reallocate)
{
	u32 adj_index = nh_grp->adj_index; /* base */
	struct mlxsw_sp_nexthop *nh;
	int i;

	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];

		if (!nh->should_offload) {
			nh->offloaded = 0;
			continue;
		}

		if (nh->update || reallocate) {
			int err = 0;

			switch (nh->type) {
			case MLXSW_SP_NEXTHOP_TYPE_ETH:
				err = mlxsw_sp_nexthop_update
					    (mlxsw_sp, adj_index, nh);
				break;
			case MLXSW_SP_NEXTHOP_TYPE_IPIP:
				err = mlxsw_sp_nexthop_ipip_update
					    (mlxsw_sp, adj_index, nh);
				break;
			}
			if (err)
				return err;
			nh->update = 0;
			nh->offloaded = 1;
		}
		adj_index += nh->num_adj_entries;
	}
	return 0;
}

static int
mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_nexthop_group *nh_grp)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	int err;

	list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
		err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
		if (err)
			return err;
	}
	return 0;
}

static void mlxsw_sp_adj_grp_size_round_up(u16 *p_adj_grp_size)
{
	/* Valid sizes for an adjacency group are:
	 * 1-64, 512, 1024, 2048 and 4096.
	 */
	if (*p_adj_grp_size <= 64)
		return;
	else if (*p_adj_grp_size <= 512)
		*p_adj_grp_size = 512;
	else if (*p_adj_grp_size <= 1024)
		*p_adj_grp_size = 1024;
	else if (*p_adj_grp_size <= 2048)
		*p_adj_grp_size = 2048;
	else
		*p_adj_grp_size = 4096;
}

static void mlxsw_sp_adj_grp_size_round_down(u16 *p_adj_grp_size,
					     unsigned int alloc_size)
{
	if (alloc_size >= 4096)
		*p_adj_grp_size = 4096;
	else if (alloc_size >= 2048)
		*p_adj_grp_size = 2048;
	else if (alloc_size >= 1024)
		*p_adj_grp_size = 1024;
	else if (alloc_size >= 512)
		*p_adj_grp_size = 512;
}

static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp,
				     u16 *p_adj_grp_size)
{
	unsigned int alloc_size;
	int err;

	/* Round up the requested group size to the next size supported
	 * by the device and make sure the request can be satisfied.
	 */
	mlxsw_sp_adj_grp_size_round_up(p_adj_grp_size);
	err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp,
					      MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
					      *p_adj_grp_size, &alloc_size);
	if (err)
		return err;
	/* It is possible the allocation results in more allocated
	 * entries than requested. Try to use as much of them as
	 * possible.
	 */
	mlxsw_sp_adj_grp_size_round_down(p_adj_grp_size, alloc_size);

	return 0;
}
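
/* Worked example (illustrative): a request for 100 entries is first rounded
 * up to 512, the next size the device supports. If the KVD linear allocator
 * reports it would satisfy such a request with, say, 1024 entries, the size
 * is then adjusted to the largest supported size that fits the allocation,
 * here 1024, so none of the allocated entries go to waste.
 */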

static void
mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group *nh_grp)
{
	int i, g = 0, sum_norm_weight = 0;
	struct mlxsw_sp_nexthop *nh;

	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];

		if (!nh->should_offload)
			continue;
		if (g > 0)
			g = gcd(nh->nh_weight, g);
		else
			g = nh->nh_weight;
	}

	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];

		if (!nh->should_offload)
			continue;
		nh->norm_nh_weight = nh->nh_weight / g;
		sum_norm_weight += nh->norm_nh_weight;
	}

	nh_grp->sum_norm_weight = sum_norm_weight;
}

static void
mlxsw_sp_nexthop_group_rebalance(struct mlxsw_sp_nexthop_group *nh_grp)
{
	int total = nh_grp->sum_norm_weight;
	u16 ecmp_size = nh_grp->ecmp_size;
	int i, weight = 0, lower_bound = 0;

	for (i = 0; i < nh_grp->count; i++) {
		struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
		int upper_bound;

		if (!nh->should_offload)
			continue;
		weight += nh->norm_nh_weight;
		upper_bound = DIV_ROUND_CLOSEST(ecmp_size * weight, total);
		nh->num_adj_entries = upper_bound - lower_bound;
		lower_bound = upper_bound;
	}
}
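
/* Worked example (illustrative): with two offloaded nexthops of weights 2
 * and 4, the gcd is 2, so the normalized weights are 1 and 2 and
 * sum_norm_weight is 3. With an ecmp_size of 3 the rebalance loop computes
 * upper bounds of DIV_ROUND_CLOSEST(3 * 1, 3) = 1 and
 * DIV_ROUND_CLOSEST(3 * 3, 3) = 3, i.e. one adjacency entry for the first
 * nexthop and two for the second, preserving the 1:2 ratio.
 */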

static struct mlxsw_sp_nexthop *
mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
		     const struct mlxsw_sp_rt6 *mlxsw_sp_rt6);

static void
mlxsw_sp_nexthop4_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_nexthop_group *nh_grp)
{
	int i;

	for (i = 0; i < nh_grp->count; i++) {
		struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];

		if (nh->offloaded)
			nh->key.fib_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
		else
			nh->key.fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
	}
}

static void
__mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp_nexthop_group *nh_grp,
					  struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;

	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
		struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
		struct mlxsw_sp_nexthop *nh;

		nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
		if (nh && nh->offloaded)
			fib6_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
		else
			fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
	}
}

static void
mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_nexthop_group *nh_grp)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;

	/* Unfortunately, in IPv6 the route and the nexthop are described by
	 * the same struct, so we need to iterate over all the routes using the
	 * nexthop group and set / clear the offload indication for them.
	 */
	list_for_each_entry(fib6_entry, &nh_grp->fib_list,
			    common.nexthop_group_node)
		__mlxsw_sp_nexthop6_group_offload_refresh(nh_grp, fib6_entry);
}

static void
mlxsw_sp_nexthop_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_nexthop_group *nh_grp)
{
	switch (nh_grp->type) {
	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
		mlxsw_sp_nexthop4_group_offload_refresh(mlxsw_sp, nh_grp);
		break;
	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
		mlxsw_sp_nexthop6_group_offload_refresh(mlxsw_sp, nh_grp);
		break;
	}
}

static void
mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_nexthop_group *nh_grp)
{
	u16 ecmp_size, old_ecmp_size;
	struct mlxsw_sp_nexthop *nh;
	bool offload_change = false;
	u32 adj_index;
	bool old_adj_index_valid;
	u32 old_adj_index;
	int i;
	int err;

	if (!nh_grp->gateway) {
		mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
		return;
	}

	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];

		if (nh->should_offload != nh->offloaded) {
			offload_change = true;
			if (nh->should_offload)
				nh->update = 1;
		}
	}
	if (!offload_change) {
		/* Nothing was added or removed, so no need to reallocate. Just
		 * update MAC on existing adjacency indexes.
		 */
3569
		err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, false);
3570 3571 3572 3573 3574 3575
		if (err) {
			dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
			goto set_trap;
		}
		return;
	}
3576 3577
	mlxsw_sp_nexthop_group_normalize(nh_grp);
	if (!nh_grp->sum_norm_weight)
3578 3579 3580 3581 3582
		/* No neigh of this group is connected so we just set
		 * the trap and let everthing flow through kernel.
		 */
		goto set_trap;

3583
	ecmp_size = nh_grp->sum_norm_weight;
3584 3585 3586 3587 3588
	err = mlxsw_sp_fix_adj_grp_size(mlxsw_sp, &ecmp_size);
	if (err)
		/* No valid allocation size available. */
		goto set_trap;

3589 3590
	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
				  ecmp_size, &adj_index);
3591
	if (err) {
3592 3593 3594 3595 3596 3597 3598 3599 3600 3601 3602 3603
		/* We ran out of KVD linear space, just set the
		 * trap and let everything flow through kernel.
		 */
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
		goto set_trap;
	}
	old_adj_index_valid = nh_grp->adj_index_valid;
	old_adj_index = nh_grp->adj_index;
	old_ecmp_size = nh_grp->ecmp_size;
	nh_grp->adj_index_valid = 1;
	nh_grp->adj_index = adj_index;
	nh_grp->ecmp_size = ecmp_size;
3604
	mlxsw_sp_nexthop_group_rebalance(nh_grp);
3605
	err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, true);
3606 3607 3608 3609 3610
	if (err) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
		goto set_trap;
	}

3611 3612
	mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);

3613 3614 3615 3616 3617 3618 3619 3620 3621 3622 3623 3624 3625 3626
	if (!old_adj_index_valid) {
		/* The trap was set for fib entries, so we have to call
		 * fib entry update to unset it and use adjacency index.
		 */
		err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
		if (err) {
			dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
			goto set_trap;
		}
		return;
	}

	err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
					     old_adj_index, old_ecmp_size);
3627
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3628
			   old_ecmp_size, old_adj_index);
3629 3630 3631 3632
	if (err) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
		goto set_trap;
	}
3633

3634 3635 3636 3637 3638 3639 3640 3641 3642 3643 3644 3645
	return;

set_trap:
	old_adj_index_valid = nh_grp->adj_index_valid;
	nh_grp->adj_index_valid = 0;
	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];
		nh->offloaded = 0;
	}
	err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
	if (err)
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
3646
	mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
3647
	if (old_adj_index_valid)
3648
		mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3649
				   nh_grp->ecmp_size, nh_grp->adj_index);
3650 3651 3652 3653 3654
}

static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
					    bool removing)
{
	if (!removing)
		nh->should_offload = 1;
	else
		nh->should_offload = 0;
	nh->update = 1;
}

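/* The neighbour entry used by the nexthops died. Look up (or create) a live
 * neighbour for the same gateway and re-link all dependent nexthops to it,
 * refreshing their offload state according to the new neighbour's validity.
 */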
static int
mlxsw_sp_nexthop_dead_neigh_replace(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	struct neighbour *n, *old_n = neigh_entry->key.n;
	struct mlxsw_sp_nexthop *nh;
	bool entry_connected;
	u8 nud_state, dead;
	int err;

	nh = list_first_entry(&neigh_entry->nexthop_list,
			      struct mlxsw_sp_nexthop, neigh_list_node);

	n = neigh_lookup(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
	if (!n) {
		n = neigh_create(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
		if (IS_ERR(n))
			return PTR_ERR(n);
		neigh_event_send(n, NULL);
	}

	mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
	neigh_entry->key.n = n;
	err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
	if (err)
		goto err_neigh_entry_insert;

	read_lock_bh(&n->lock);
	nud_state = n->nud_state;
	dead = n->dead;
	read_unlock_bh(&n->lock);
	entry_connected = nud_state & NUD_VALID && !dead;

	list_for_each_entry(nh, &neigh_entry->nexthop_list,
			    neigh_list_node) {
		neigh_release(old_n);
		neigh_clone(n);
		__mlxsw_sp_nexthop_neigh_update(nh, !entry_connected);
		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
	}

	neigh_release(n);

	return 0;

err_neigh_entry_insert:
	neigh_entry->key.n = old_n;
	mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
	neigh_release(n);
	return err;
}

static void
mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_neigh_entry *neigh_entry,
			      bool removing, bool dead)
{
	struct mlxsw_sp_nexthop *nh;

	if (list_empty(&neigh_entry->nexthop_list))
		return;

	if (dead) {
		int err;

		err = mlxsw_sp_nexthop_dead_neigh_replace(mlxsw_sp,
							  neigh_entry);
		if (err)
			dev_err(mlxsw_sp->bus_info->dev, "Failed to replace dead neigh\n");
		return;
	}

	list_for_each_entry(nh, &neigh_entry->nexthop_list,
			    neigh_list_node) {
		__mlxsw_sp_nexthop_neigh_update(nh, removing);
		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
	}
}

static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh,
				      struct mlxsw_sp_rif *rif)
{
	if (nh->rif)
		return;

	nh->rif = rif;
	list_add(&nh->rif_list_node, &rif->nexthop_list);
}

static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh)
{
	if (!nh->rif)
		return;

	list_del(&nh->rif_list_node);
	nh->rif = NULL;
}

static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_nexthop *nh)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct neighbour *n;
	u8 nud_state, dead;
	int err;

	if (!nh->nh_grp->gateway || nh->neigh_entry)
		return 0;

	/* Take a reference of neigh here ensuring that neigh would
	 * not be destructed before the nexthop entry is finished.
	 * The reference is taken either in neigh_lookup() or
	 * in neigh_create() in case n is not found.
	 */
	n = neigh_lookup(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
	if (!n) {
		n = neigh_create(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
		if (IS_ERR(n))
			return PTR_ERR(n);
		neigh_event_send(n, NULL);
	}
	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
	if (!neigh_entry) {
		neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
		if (IS_ERR(neigh_entry)) {
			err = -EINVAL;
			goto err_neigh_entry_create;
		}
	}

	/* If that is the first nexthop connected to that neigh, add to
	 * nexthop_neighs_list
	 */
	if (list_empty(&neigh_entry->nexthop_list))
		list_add_tail(&neigh_entry->nexthop_neighs_list_node,
			      &mlxsw_sp->router->nexthop_neighs_list);

	nh->neigh_entry = neigh_entry;
	list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
	read_lock_bh(&n->lock);
	nud_state = n->nud_state;
	dead = n->dead;
	read_unlock_bh(&n->lock);
	__mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));

	return 0;

err_neigh_entry_create:
	neigh_release(n);
	return err;
}

static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_nexthop *nh)
{
	struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
	struct neighbour *n;

	if (!neigh_entry)
		return;
	n = neigh_entry->key.n;

	__mlxsw_sp_nexthop_neigh_update(nh, true);
	list_del(&nh->neigh_list_node);
	nh->neigh_entry = NULL;

	/* If that is the last nexthop connected to that neigh, remove from
	 * nexthop_neighs_list
	 */
	if (list_empty(&neigh_entry->nexthop_list))
		list_del(&neigh_entry->nexthop_neighs_list_node);

	if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);

	neigh_release(n);
}

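/* A tunnel nexthop is only usable while the tunnel's underlay device is up.
 * Tunnels without a bound underlay device are considered up.
 */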
static bool mlxsw_sp_ipip_netdev_ul_up(struct net_device *ol_dev)
{
	struct net_device *ul_dev;
	bool is_up;

	rcu_read_lock();
	ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
	is_up = ul_dev ? (ul_dev->flags & IFF_UP) : true;
	rcu_read_unlock();

	return is_up;
}

static void mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_nexthop *nh,
				       struct mlxsw_sp_ipip_entry *ipip_entry)
{
	bool removing;

	if (!nh->nh_grp->gateway || nh->ipip_entry)
		return;

	nh->ipip_entry = ipip_entry;
	removing = !mlxsw_sp_ipip_netdev_ul_up(ipip_entry->ol_dev);
	__mlxsw_sp_nexthop_neigh_update(nh, removing);
	mlxsw_sp_nexthop_rif_init(nh, &ipip_entry->ol_lb->common);
}

static void mlxsw_sp_nexthop_ipip_fini(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_nexthop *nh)
{
	struct mlxsw_sp_ipip_entry *ipip_entry = nh->ipip_entry;

	if (!ipip_entry)
		return;

	__mlxsw_sp_nexthop_neigh_update(nh, true);
	nh->ipip_entry = NULL;
}

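/* Check whether the nexthop egresses through a netdev recognized as an
 * IP-in-IP tunnel and, if so, report the tunnel type via 'p_ipipt'.
 */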
static bool mlxsw_sp_nexthop4_ipip_type(const struct mlxsw_sp *mlxsw_sp,
					const struct fib_nh *fib_nh,
					enum mlxsw_sp_ipip_type *p_ipipt)
{
	struct net_device *dev = fib_nh->fib_nh_dev;

	return dev &&
	       fib_nh->nh_parent->fib_type == RTN_UNICAST &&
	       mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, p_ipipt);
}

static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_nexthop *nh)
{
	switch (nh->type) {
	case MLXSW_SP_NEXTHOP_TYPE_ETH:
		mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
		mlxsw_sp_nexthop_rif_fini(nh);
		break;
	case MLXSW_SP_NEXTHOP_TYPE_IPIP:
		mlxsw_sp_nexthop_rif_fini(nh);
		mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh);
		break;
	}
}

static int mlxsw_sp_nexthop4_type_init(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_nexthop *nh,
				       struct fib_nh *fib_nh)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct net_device *dev = fib_nh->fib_nh_dev;
	struct mlxsw_sp_ipip_entry *ipip_entry;
	struct mlxsw_sp_rif *rif;
	int err;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
	if (ipip_entry) {
		ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
		if (ipip_ops->can_offload(mlxsw_sp, dev,
					  MLXSW_SP_L3_PROTO_IPV4)) {
			nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
			mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
			return 0;
		}
	}

	nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!rif)
		return 0;

	mlxsw_sp_nexthop_rif_init(nh, rif);
	err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
	if (err)
		goto err_neigh_init;

	return 0;

err_neigh_init:
	mlxsw_sp_nexthop_rif_fini(nh);
	return err;
}

static void mlxsw_sp_nexthop4_type_fini(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_nexthop *nh)
{
	mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
}

static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_nexthop_group *nh_grp,
				  struct mlxsw_sp_nexthop *nh,
				  struct fib_nh *fib_nh)
{
	struct net_device *dev = fib_nh->fib_nh_dev;
	struct in_device *in_dev;
	int err;

	nh->nh_grp = nh_grp;
	nh->key.fib_nh = fib_nh;
#ifdef CONFIG_IP_ROUTE_MULTIPATH
	nh->nh_weight = fib_nh->fib_nh_weight;
#else
	nh->nh_weight = 1;
#endif
	memcpy(&nh->gw_addr, &fib_nh->fib_nh_gw4, sizeof(fib_nh->fib_nh_gw4));
	nh->neigh_tbl = &arp_tbl;
	err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
	if (err)
		return err;

	mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
	list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);

	if (!dev)
		return 0;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(dev);
	if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
	    fib_nh->fib_nh_flags & RTNH_F_LINKDOWN) {
		rcu_read_unlock();
		return 0;
	}
	rcu_read_unlock();

	err = mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
	if (err)
		goto err_nexthop_neigh_init;

	return 0;

err_nexthop_neigh_init:
	mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
	return err;
}

static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_nexthop *nh)
{
	mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh);
	list_del(&nh->router_list_node);
	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
	mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
}

static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp,
				    unsigned long event, struct fib_nh *fib_nh)
{
	struct mlxsw_sp_nexthop_key key;
	struct mlxsw_sp_nexthop *nh;

	if (mlxsw_sp->router->aborted)
		return;

	key.fib_nh = fib_nh;
	nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
	if (!nh)
		return;

	switch (event) {
	case FIB_EVENT_NH_ADD:
		mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
		break;
	case FIB_EVENT_NH_DEL:
		mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh);
		break;
	}

	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
}

static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_nexthop *nh;
	bool removing;

	list_for_each_entry(nh, &rif->nexthop_list, rif_list_node) {
		switch (nh->type) {
		case MLXSW_SP_NEXTHOP_TYPE_ETH:
			removing = false;
			break;
		case MLXSW_SP_NEXTHOP_TYPE_IPIP:
			removing = !mlxsw_sp_ipip_netdev_ul_up(rif->dev);
			break;
		default:
			WARN_ON(1);
			continue;
		}

		__mlxsw_sp_nexthop_neigh_update(nh, removing);
		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
	}
}

static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_rif *old_rif,
					 struct mlxsw_sp_rif *new_rif)
{
	struct mlxsw_sp_nexthop *nh;

	list_splice_init(&old_rif->nexthop_list, &new_rif->nexthop_list);
	list_for_each_entry(nh, &new_rif->nexthop_list, rif_list_node)
		nh->rif = new_rif;
	mlxsw_sp_nexthop_rif_update(mlxsw_sp, new_rif);
}

static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
					   struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_nexthop *nh, *tmp;

	list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) {
		mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
	}
}

static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp,
				   struct fib_info *fi)
{
	const struct fib_nh *nh = fib_info_nh(fi, 0);

	return nh->fib_nh_scope == RT_SCOPE_LINK ||
	       mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, nh, NULL);
}

static struct mlxsw_sp_nexthop_group *
mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
{
	unsigned int nhs = fib_info_num_path(fi);
	struct mlxsw_sp_nexthop_group *nh_grp;
	struct mlxsw_sp_nexthop *nh;
	struct fib_nh *fib_nh;
	int i;
	int err;

	nh_grp = kzalloc(struct_size(nh_grp, nexthops, nhs), GFP_KERNEL);
	if (!nh_grp)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&nh_grp->fib_list);
	nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4;

	nh_grp->gateway = mlxsw_sp_fi_is_gateway(mlxsw_sp, fi);
	nh_grp->count = nhs;
	nh_grp->ipv4.fi = fi;
	fib_info_hold(fi);
	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];
		fib_nh = fib_info_nh(fi, i);
		err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh);
		if (err)
			goto err_nexthop4_init;
	}
	err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
	if (err)
		goto err_nexthop_group_insert;
	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	return nh_grp;

err_nexthop_group_insert:
err_nexthop4_init:
	for (i--; i >= 0; i--) {
		nh = &nh_grp->nexthops[i];
		mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
	}
	fib_info_put(fi);
	kfree(nh_grp);
	return ERR_PTR(err);
}

static void
mlxsw_sp_nexthop4_group_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_nexthop_group *nh_grp)
{
	struct mlxsw_sp_nexthop *nh;
	int i;

	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];
		mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
	}
	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	WARN_ON_ONCE(nh_grp->adj_index_valid);
	fib_info_put(nh_grp->ipv4.fi);
	kfree(nh_grp);
}

static int mlxsw_sp_nexthop4_group_get(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_fib_entry *fib_entry,
				       struct fib_info *fi)
{
	struct mlxsw_sp_nexthop_group *nh_grp;

	nh_grp = mlxsw_sp_nexthop4_group_lookup(mlxsw_sp, fi);
	if (!nh_grp) {
		nh_grp = mlxsw_sp_nexthop4_group_create(mlxsw_sp, fi);
		if (IS_ERR(nh_grp))
			return PTR_ERR(nh_grp);
	}
	list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
	fib_entry->nh_group = nh_grp;
	return 0;
}

static void mlxsw_sp_nexthop4_group_put(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;

	list_del(&fib_entry->nexthop_group_node);
	if (!list_empty(&nh_grp->fib_list))
		return;
	mlxsw_sp_nexthop4_group_destroy(mlxsw_sp, nh_grp);
}

static bool
mlxsw_sp_fib4_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_fib4_entry *fib4_entry;

	fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
				  common);
	return !fib4_entry->tos;
}

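/* Only default-ToS IPv4 routes are ever offloaded. Remote routes further
 * require a valid adjacency group and local routes a RIF, while blackhole
 * and decap entries can always be programmed.
 */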
static bool
mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;

	switch (fib_entry->fib_node->fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		if (!mlxsw_sp_fib4_entry_should_offload(fib_entry))
			return false;
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		break;
	}

	switch (fib_entry->type) {
	case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
		return !!nh_group->adj_index_valid;
	case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
		return !!nh_group->nh_rif;
	case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
	case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
	case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
		return true;
	default:
		return false;
	}
}

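/* Find the member of the nexthop group that backs the given IPv6 route by
 * matching both the egress device and the gateway address.
 */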
static struct mlxsw_sp_nexthop *
mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
		     const struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
{
	int i;

	for (i = 0; i < nh_grp->count; i++) {
		struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
		struct fib6_info *rt = mlxsw_sp_rt6->rt;

		if (nh->rif && nh->rif->dev == rt->fib6_nh->fib_nh_dev &&
		    ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr,
				    &rt->fib6_nh->fib_nh_gw6))
			return nh;
		continue;
	}

	return NULL;
}

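/* Reflect the entry's offload / trap state back into the kernel FIB, so
 * that route dumps show an accurate offload indication for the prefix.
 */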
static void
mlxsw_sp_fib4_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_fib_entry *fib_entry)
{
	u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr;
	int dst_len = fib_entry->fib_node->key.prefix_len;
	struct mlxsw_sp_fib4_entry *fib4_entry;
	struct fib_rt_info fri;
	bool should_offload;

	should_offload = mlxsw_sp_fib_entry_should_offload(fib_entry);
	fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
				  common);
	fri.fi = fib4_entry->fi;
	fri.tb_id = fib4_entry->tb_id;
	fri.dst = cpu_to_be32(*p_dst);
	fri.dst_len = dst_len;
	fri.tos = fib4_entry->tos;
	fri.type = fib4_entry->type;
	fri.offload = should_offload;
	fri.trap = !should_offload;
	fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
}

static void
mlxsw_sp_fib4_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_fib_entry *fib_entry)
{
	u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr;
	int dst_len = fib_entry->fib_node->key.prefix_len;
	struct mlxsw_sp_fib4_entry *fib4_entry;
	struct fib_rt_info fri;

	fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
				  common);
	fri.fi = fib4_entry->fi;
	fri.tb_id = fib4_entry->tb_id;
	fri.dst = cpu_to_be32(*p_dst);
	fri.dst_len = dst_len;
	fri.tos = fib4_entry->tos;
	fri.type = fib4_entry->type;
	fri.offload = false;
	fri.trap = false;
	fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
}

static void
mlxsw_sp_fib6_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
	bool should_offload;

	should_offload = mlxsw_sp_fib_entry_should_offload(fib_entry);

	/* In IPv6 a multipath route is represented using multiple routes, so
	 * we need to set the flags on all of them.
	 */
	fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
				  common);
	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list)
		fib6_info_hw_flags_set(mlxsw_sp_rt6->rt, should_offload,
				       !should_offload);
}

static void
mlxsw_sp_fib6_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;

	fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
				  common);
	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list)
		fib6_info_hw_flags_set(mlxsw_sp_rt6->rt, false, false);
}

static void
mlxsw_sp_fib_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_fib_entry *fib_entry)
{
	switch (fib_entry->fib_node->fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		mlxsw_sp_fib4_entry_hw_flags_set(mlxsw_sp, fib_entry);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		mlxsw_sp_fib6_entry_hw_flags_set(mlxsw_sp, fib_entry);
		break;
	}
}

static void
mlxsw_sp_fib_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fib_entry *fib_entry)
{
	switch (fib_entry->fib_node->fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		mlxsw_sp_fib4_entry_hw_flags_clear(mlxsw_sp, fib_entry);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		mlxsw_sp_fib6_entry_hw_flags_clear(mlxsw_sp, fib_entry);
		break;
	}
}

static void
mlxsw_sp_fib_entry_hw_flags_refresh(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_fib_entry *fib_entry,
				    enum mlxsw_sp_fib_entry_op op)
{
	switch (op) {
	case MLXSW_SP_FIB_ENTRY_OP_WRITE:
	case MLXSW_SP_FIB_ENTRY_OP_UPDATE:
		mlxsw_sp_fib_entry_hw_flags_set(mlxsw_sp, fib_entry);
		break;
	case MLXSW_SP_FIB_ENTRY_OP_DELETE:
		mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, fib_entry);
		break;
	default:
		break;
	}
}

struct mlxsw_sp_fib_entry_op_ctx_basic {
	char ralue_pl[MLXSW_REG_RALUE_LEN];
};

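/* The "basic" low-level ops encode each FIB entry operation into the RALUE
 * register and emit it immediately; nothing is ever postponed for bulking.
 */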
static void
mlxsw_sp_router_ll_basic_fib_entry_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
					enum mlxsw_sp_l3proto proto,
					enum mlxsw_sp_fib_entry_op op,
					u16 virtual_router, u8 prefix_len,
					unsigned char *addr,
					struct mlxsw_sp_fib_entry_priv *priv)
{
	struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
	enum mlxsw_reg_ralxx_protocol ralxx_proto;
	char *ralue_pl = op_ctx_basic->ralue_pl;
	enum mlxsw_reg_ralue_op ralue_op;

	ralxx_proto = (enum mlxsw_reg_ralxx_protocol) proto;

	switch (op) {
	case MLXSW_SP_FIB_ENTRY_OP_WRITE:
	case MLXSW_SP_FIB_ENTRY_OP_UPDATE:
		ralue_op = MLXSW_REG_RALUE_OP_WRITE_WRITE;
		break;
	case MLXSW_SP_FIB_ENTRY_OP_DELETE:
		ralue_op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
		break;
	default:
		WARN_ON_ONCE(1);
		return;
	}

	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		mlxsw_reg_ralue_pack4(ralue_pl, ralxx_proto, ralue_op,
				      virtual_router, prefix_len, (u32 *) addr);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		mlxsw_reg_ralue_pack6(ralue_pl, ralxx_proto, ralue_op,
				      virtual_router, prefix_len, addr);
		break;
	}
}

static void
mlxsw_sp_router_ll_basic_fib_entry_act_remote_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
						   enum mlxsw_reg_ralue_trap_action trap_action,
						   u16 trap_id, u32 adjacency_index, u16 ecmp_size)
{
	struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;

	mlxsw_reg_ralue_act_remote_pack(op_ctx_basic->ralue_pl, trap_action,
					trap_id, adjacency_index, ecmp_size);
}

static void
mlxsw_sp_router_ll_basic_fib_entry_act_local_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
						  enum mlxsw_reg_ralue_trap_action trap_action,
						  u16 trap_id, u16 local_erif)
{
	struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;

	mlxsw_reg_ralue_act_local_pack(op_ctx_basic->ralue_pl, trap_action,
				       trap_id, local_erif);
}

static void
mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx)
{
	struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;

	mlxsw_reg_ralue_act_ip2me_pack(op_ctx_basic->ralue_pl);
}

static void
mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_tun_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
						      u32 tunnel_ptr)
{
	struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;

	mlxsw_reg_ralue_act_ip2me_tun_pack(op_ctx_basic->ralue_pl, tunnel_ptr);
}

static int
mlxsw_sp_router_ll_basic_fib_entry_commit(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
					  bool *postponed_for_bulk)
{
	struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
			       op_ctx_basic->ralue_pl);
}

static bool
mlxsw_sp_router_ll_basic_fib_entry_is_committed(struct mlxsw_sp_fib_entry_priv *priv)
{
	return true;
}

static void mlxsw_sp_fib_entry_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
				    struct mlxsw_sp_fib_entry *fib_entry,
				    enum mlxsw_sp_fib_entry_op op)
{
	struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;

	mlxsw_sp_fib_entry_op_ctx_priv_hold(op_ctx, fib_entry->priv);
	fib->ll_ops->fib_entry_pack(op_ctx, fib->proto, op, fib->vr->id,
				    fib_entry->fib_node->key.prefix_len,
				    fib_entry->fib_node->key.addr,
				    fib_entry->priv);
}

int mlxsw_sp_fib_entry_commit(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
			      const struct mlxsw_sp_router_ll_ops *ll_ops)
{
	bool postponed_for_bulk = false;
	int err;

	err = ll_ops->fib_entry_commit(mlxsw_sp, op_ctx, &postponed_for_bulk);
	if (!postponed_for_bulk)
		mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
	return err;
}

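/* Lazily allocate a single adjacency entry that discards packets while
 * counting them as errors, and bind it to the given RIF. The index is
 * cached in the router and reused on subsequent calls.
 */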
static int mlxsw_sp_adj_discard_write(struct mlxsw_sp *mlxsw_sp, u16 rif_index)
{
	enum mlxsw_reg_ratr_trap_action trap_action;
	char ratr_pl[MLXSW_REG_RATR_LEN];
	int err;

	if (mlxsw_sp->router->adj_discard_index_valid)
		return 0;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
				  &mlxsw_sp->router->adj_discard_index);
	if (err)
		return err;

	trap_action = MLXSW_REG_RATR_TRAP_ACTION_DISCARD_ERRORS;
	mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY, true,
			    MLXSW_REG_RATR_TYPE_ETHERNET,
			    mlxsw_sp->router->adj_discard_index, rif_index);
	mlxsw_reg_ratr_trap_action_set(ratr_pl, trap_action);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
	if (err)
		goto err_ratr_write;

	mlxsw_sp->router->adj_discard_index_valid = true;

	return 0;

err_ratr_write:
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
			   mlxsw_sp->router->adj_discard_index);
	return err;
}

static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
					struct mlxsw_sp_fib_entry *fib_entry,
					enum mlxsw_sp_fib_entry_op op)
{
	const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
	struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
	enum mlxsw_reg_ralue_trap_action trap_action;
	u16 trap_id = 0;
	u32 adjacency_index = 0;
	u16 ecmp_size = 0;
	int err;

	/* In case the nexthop group adjacency index is valid, use it
	 * with provided ECMP size. Otherwise, setup trap and pass
	 * traffic to kernel.
	 */
	if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
		adjacency_index = fib_entry->nh_group->adj_index;
		ecmp_size = fib_entry->nh_group->ecmp_size;
	} else if (!nh_group->adj_index_valid && nh_group->count &&
		   nh_group->nh_rif) {
		err = mlxsw_sp_adj_discard_write(mlxsw_sp,
						 nh_group->nh_rif->rif_index);
		if (err)
			return err;
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
		adjacency_index = mlxsw_sp->router->adj_discard_index;
		ecmp_size = 1;
	} else {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
	}

	mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
	ll_ops->fib_entry_act_remote_pack(op_ctx, trap_action, trap_id,
					  adjacency_index, ecmp_size);
	return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
}

static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
				       struct mlxsw_sp_fib_entry *fib_entry,
				       enum mlxsw_sp_fib_entry_op op)
{
	const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
	struct mlxsw_sp_rif *rif = fib_entry->nh_group->nh_rif;
	enum mlxsw_reg_ralue_trap_action trap_action;
	u16 trap_id = 0;
	u16 rif_index = 0;

	if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
		rif_index = rif->rif_index;
	} else {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
	}

	mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
	ll_ops->fib_entry_act_local_pack(op_ctx, trap_action, trap_id, rif_index);
	return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
}

static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
				      struct mlxsw_sp_fib_entry *fib_entry,
				      enum mlxsw_sp_fib_entry_op op)
{
	const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;

	mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
	ll_ops->fib_entry_act_ip2me_pack(op_ctx);
	return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
}

static int mlxsw_sp_fib_entry_op_blackhole(struct mlxsw_sp *mlxsw_sp,
					   struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
					   struct mlxsw_sp_fib_entry *fib_entry,
					   enum mlxsw_sp_fib_entry_op op)
{
	const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
	enum mlxsw_reg_ralue_trap_action trap_action;

	trap_action = MLXSW_REG_RALUE_TRAP_ACTION_DISCARD_ERROR;
	mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
	ll_ops->fib_entry_act_local_pack(op_ctx, trap_action, 0, 0);
	return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
}

static int
mlxsw_sp_fib_entry_op_unreachable(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
				  struct mlxsw_sp_fib_entry *fib_entry,
				  enum mlxsw_sp_fib_entry_op op)
{
	const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
	enum mlxsw_reg_ralue_trap_action trap_action;
	u16 trap_id;

	trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
	trap_id = MLXSW_TRAP_ID_RTR_INGRESS1;

	mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
	ll_ops->fib_entry_act_local_pack(op_ctx, trap_action, trap_id, 0);
	return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
}

static int
mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
				 struct mlxsw_sp_fib_entry *fib_entry,
				 enum mlxsw_sp_fib_entry_op op)
{
	const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
	struct mlxsw_sp_ipip_entry *ipip_entry = fib_entry->decap.ipip_entry;
	const struct mlxsw_sp_ipip_ops *ipip_ops;

	if (WARN_ON(!ipip_entry))
		return -EINVAL;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
	return ipip_ops->fib_entry_op(mlxsw_sp, ll_ops, op_ctx, ipip_entry, op,
				      fib_entry->decap.tunnel_index, fib_entry->priv);
}

static int mlxsw_sp_fib_entry_op_nve_decap(struct mlxsw_sp *mlxsw_sp,
					   struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
					   struct mlxsw_sp_fib_entry *fib_entry,
					   enum mlxsw_sp_fib_entry_op op)
{
	const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;

	mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
	ll_ops->fib_entry_act_ip2me_tun_pack(op_ctx,
					     fib_entry->decap.tunnel_index);
	return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
}

static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
				   struct mlxsw_sp_fib_entry *fib_entry,
				   enum mlxsw_sp_fib_entry_op op)
{
	switch (fib_entry->type) {
	case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
		return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, op_ctx, fib_entry, op);
	case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
		return mlxsw_sp_fib_entry_op_local(mlxsw_sp, op_ctx, fib_entry, op);
	case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
		return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, op_ctx, fib_entry, op);
	case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
		return mlxsw_sp_fib_entry_op_blackhole(mlxsw_sp, op_ctx, fib_entry, op);
	case MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE:
		return mlxsw_sp_fib_entry_op_unreachable(mlxsw_sp, op_ctx, fib_entry, op);
	case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
		return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp, op_ctx, fib_entry, op);
	case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
		return mlxsw_sp_fib_entry_op_nve_decap(mlxsw_sp, op_ctx, fib_entry, op);
	}
	return -EINVAL;
}

static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
				 struct mlxsw_sp_fib_entry *fib_entry,
				 enum mlxsw_sp_fib_entry_op op)
{
	int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, op_ctx, fib_entry, op);

	if (err)
		return err;

	mlxsw_sp_fib_entry_hw_flags_refresh(mlxsw_sp, fib_entry, op);

	return err;
}

static int __mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
				       struct mlxsw_sp_fib_entry *fib_entry,
				       bool is_new)
{
	return mlxsw_sp_fib_entry_op(mlxsw_sp, op_ctx, fib_entry,
				     is_new ? MLXSW_SP_FIB_ENTRY_OP_WRITE :
					      MLXSW_SP_FIB_ENTRY_OP_UPDATE);
}

static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_fib_entry_op_ctx *op_ctx = mlxsw_sp->router->ll_op_ctx;

	mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
	return __mlxsw_sp_fib_entry_update(mlxsw_sp, op_ctx, fib_entry, false);
}

static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
				  struct mlxsw_sp_fib_entry *fib_entry)
{
	const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;

	if (!ll_ops->fib_entry_is_committed(fib_entry->priv))
		return 0;
	return mlxsw_sp_fib_entry_op(mlxsw_sp, op_ctx, fib_entry,
				     MLXSW_SP_FIB_ENTRY_OP_DELETE);
}

static int
mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
			     const struct fib_entry_notifier_info *fen_info,
			     struct mlxsw_sp_fib_entry *fib_entry)
{
	struct net_device *dev = fib_info_nh(fen_info->fi, 0)->fib_nh_dev;
	union mlxsw_sp_l3addr dip = { .addr4 = htonl(fen_info->dst) };
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	u32 tb_id = mlxsw_sp_fix_tb_id(fen_info->tb_id);
	struct mlxsw_sp_ipip_entry *ipip_entry;
	struct fib_info *fi = fen_info->fi;

	switch (fen_info->type) {
	case RTN_LOCAL:
		ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, dev,
						 MLXSW_SP_L3_PROTO_IPV4, dip);
		if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
			return mlxsw_sp_fib_entry_decap_init(mlxsw_sp,
							     fib_entry,
							     ipip_entry);
		}
		if (mlxsw_sp_router_nve_is_decap(mlxsw_sp, tb_id,
						 MLXSW_SP_L3_PROTO_IPV4,
						 &dip)) {
			u32 tunnel_index;

			tunnel_index = router->nve_decap_config.tunnel_index;
			fib_entry->decap.tunnel_index = tunnel_index;
			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
			return 0;
		}
		fallthrough;
	case RTN_BROADCAST:
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
		return 0;
	case RTN_BLACKHOLE:
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
		return 0;
	case RTN_UNREACHABLE:
	case RTN_PROHIBIT:
		/* Packets hitting these routes need to be trapped, but
		 * can do so with a lower priority than packets directed
		 * at the host, so use action type local instead of trap.
		 */
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
		return 0;
	case RTN_UNICAST:
		if (mlxsw_sp_fi_is_gateway(mlxsw_sp, fi))
			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
		else
			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
		return 0;
	default:
		return -EINVAL;
	}
}

static void
mlxsw_sp_fib4_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_fib_entry *fib_entry)
{
	switch (fib_entry->type) {
	case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
		mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
		break;
	default:
		break;
	}
}

static struct mlxsw_sp_fib4_entry *
mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_fib_node *fib_node,
			   const struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib4_entry *fib4_entry;
	struct mlxsw_sp_fib_entry *fib_entry;
	int err;

	fib4_entry = kzalloc(sizeof(*fib4_entry), GFP_KERNEL);
	if (!fib4_entry)
		return ERR_PTR(-ENOMEM);
	fib_entry = &fib4_entry->common;

	fib_entry->priv = mlxsw_sp_fib_entry_priv_create(fib_node->fib->ll_ops);
	if (IS_ERR(fib_entry->priv)) {
		err = PTR_ERR(fib_entry->priv);
		goto err_fib_entry_priv_create;
	}

	err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
	if (err)
		goto err_fib4_entry_type_set;

	err = mlxsw_sp_nexthop4_group_get(mlxsw_sp, fib_entry, fen_info->fi);
	if (err)
		goto err_nexthop4_group_get;

	fib4_entry->fi = fen_info->fi;
	fib_info_hold(fib4_entry->fi);
	fib4_entry->tb_id = fen_info->tb_id;
	fib4_entry->type = fen_info->type;
	fib4_entry->tos = fen_info->tos;

	fib_entry->fib_node = fib_node;

	return fib4_entry;

err_nexthop4_group_get:
	mlxsw_sp_fib4_entry_type_unset(mlxsw_sp, fib_entry);
err_fib4_entry_type_set:
	mlxsw_sp_fib_entry_priv_put(fib_entry->priv);
err_fib_entry_priv_create:
	kfree(fib4_entry);
	return ERR_PTR(err);
}

static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib4_entry *fib4_entry)
{
	fib_info_put(fib4_entry->fi);
	mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
	mlxsw_sp_fib4_entry_type_unset(mlxsw_sp, &fib4_entry->common);
	mlxsw_sp_fib_entry_priv_put(fib4_entry->common.priv);
	kfree(fib4_entry);
}

static struct mlxsw_sp_fib4_entry *
mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
			   const struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib4_entry *fib4_entry;
	struct mlxsw_sp_fib_node *fib_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_vr *vr;

	vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id);
	if (!vr)
		return NULL;
	fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);

	fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
					    sizeof(fen_info->dst),
					    fen_info->dst_len);
	if (!fib_node)
		return NULL;

	fib4_entry = container_of(fib_node->fib_entry,
				  struct mlxsw_sp_fib4_entry, common);
	if (fib4_entry->tb_id == fen_info->tb_id &&
	    fib4_entry->tos == fen_info->tos &&
	    fib4_entry->type == fen_info->type &&
	    fib4_entry->fi == fen_info->fi)
		return fib4_entry;

	return NULL;
}

static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_fib_node, key),
	.head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
	.key_len = sizeof(struct mlxsw_sp_fib_key),
	.automatic_shrinking = true,
};

static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
				    struct mlxsw_sp_fib_node *fib_node)
{
	return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
				      mlxsw_sp_fib_ht_params);
}

static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
				     struct mlxsw_sp_fib_node *fib_node)
{
	rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
			       mlxsw_sp_fib_ht_params);
}

static struct mlxsw_sp_fib_node *
mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
			 size_t addr_len, unsigned char prefix_len)
{
	struct mlxsw_sp_fib_key key;

	memset(&key, 0, sizeof(key));
	memcpy(key.addr, addr, addr_len);
	key.prefix_len = prefix_len;
	return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
}

static struct mlxsw_sp_fib_node *
mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
			 size_t addr_len, unsigned char prefix_len)
{
	struct mlxsw_sp_fib_node *fib_node;

	fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
	if (!fib_node)
		return NULL;

	list_add(&fib_node->list, &fib->node_list);
	memcpy(fib_node->key.addr, addr, addr_len);
	fib_node->key.prefix_len = prefix_len;

	return fib_node;
}

static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
{
	list_del(&fib_node->list);
	kfree(fib_node);
}

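/* Account the node's prefix length in the LPM tree of the FIB's protocol.
 * The first use of a given prefix length requires building a tree with the
 * extended prefix usage and migrating all virtual routers to it.
 */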
static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_prefix_usage req_prefix_usage;
	struct mlxsw_sp_fib *fib = fib_node->fib;
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int err;

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[fib->proto];
	if (lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
		goto out;

	mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
	mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
					 fib->proto);
	if (IS_ERR(lpm_tree))
		return PTR_ERR(lpm_tree);

	err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
	if (err)
		goto err_lpm_tree_replace;

out:
	lpm_tree->prefix_ref_count[fib_node->key.prefix_len]++;
	return 0;

err_lpm_tree_replace:
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
	return err;
}

static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree;
	struct mlxsw_sp_prefix_usage req_prefix_usage;
	struct mlxsw_sp_fib *fib = fib_node->fib;
	int err;

	if (--lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
		return;
	/* Try to construct a new LPM tree from the current prefix usage
	 * minus the unused one. If we fail, continue using the old one.
	 */
	mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
	mlxsw_sp_prefix_usage_clear(&req_prefix_usage,
				    fib_node->key.prefix_len);
	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
					 fib->proto);
	if (IS_ERR(lpm_tree))
		return;

	err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
	if (err)
		goto err_lpm_tree_replace;

	return;

err_lpm_tree_replace:
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
}

static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fib_node *fib_node,
				  struct mlxsw_sp_fib *fib)
{
	int err;

	err = mlxsw_sp_fib_node_insert(fib, fib_node);
	if (err)
		return err;
	fib_node->fib = fib;

	err = mlxsw_sp_fib_lpm_tree_link(mlxsw_sp, fib_node);
	if (err)
		goto err_fib_lpm_tree_link;

	return 0;

err_fib_lpm_tree_link:
	fib_node->fib = NULL;
	mlxsw_sp_fib_node_remove(fib, fib_node);
	return err;
}

static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_fib *fib = fib_node->fib;

	mlxsw_sp_fib_lpm_tree_unlink(mlxsw_sp, fib_node);
	fib_node->fib = NULL;
	mlxsw_sp_fib_node_remove(fib, fib_node);
}

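/* Look up the FIB node for the given prefix, creating it and binding it to
 * its virtual router and LPM tree if it does not exist yet.
 */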
static struct mlxsw_sp_fib_node *
mlxsw_sp_fib_node_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, const void *addr,
		      size_t addr_len, unsigned char prefix_len,
		      enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_fib_node *fib_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, NULL);
	if (IS_ERR(vr))
		return ERR_CAST(vr);
	fib = mlxsw_sp_vr_fib(vr, proto);

	fib_node = mlxsw_sp_fib_node_lookup(fib, addr, addr_len, prefix_len);
	if (fib_node)
		return fib_node;

	fib_node = mlxsw_sp_fib_node_create(fib, addr, addr_len, prefix_len);
	if (!fib_node) {
		err = -ENOMEM;
		goto err_fib_node_create;
	}

	err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
	if (err)
		goto err_fib_node_init;

	return fib_node;

err_fib_node_init:
	mlxsw_sp_fib_node_destroy(fib_node);
err_fib_node_create:
	mlxsw_sp_vr_put(mlxsw_sp, vr);
	return ERR_PTR(err);
}

static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_vr *vr = fib_node->fib->vr;

	if (fib_node->fib_entry)
		return;
	mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
	mlxsw_sp_fib_node_destroy(fib_node);
	mlxsw_sp_vr_put(mlxsw_sp, vr);
}

static int mlxsw_sp_fib_node_entry_link(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
					struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
	bool is_new = !fib_node->fib_entry;
	int err;

	fib_node->fib_entry = fib_entry;

	err = __mlxsw_sp_fib_entry_update(mlxsw_sp, op_ctx, fib_entry, is_new);
	if (err)
		goto err_fib_entry_update;

	return 0;

err_fib_entry_update:
	fib_node->fib_entry = NULL;
	return err;
}

static int __mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
					    struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
					    struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
	int err;

	err = mlxsw_sp_fib_entry_del(mlxsw_sp, op_ctx, fib_entry);
	fib_node->fib_entry = NULL;
	return err;
}

static void mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
					   struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_fib_entry_op_ctx *op_ctx = mlxsw_sp->router->ll_op_ctx;

	mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
	__mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, op_ctx, fib_entry);
}

static bool mlxsw_sp_fib4_allow_replace(struct mlxsw_sp_fib4_entry *fib4_entry)
{
	struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
	struct mlxsw_sp_fib4_entry *fib4_replaced;

	if (!fib_node->fib_entry)
		return true;

	fib4_replaced = container_of(fib_node->fib_entry,
				     struct mlxsw_sp_fib4_entry, common);
	if (fib4_entry->tb_id == RT_TABLE_MAIN &&
	    fib4_replaced->tb_id == RT_TABLE_LOCAL)
		return false;

	return true;
}

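/* Replace works make-before-break: the new entry is linked to the node and
 * written over the old one in hardware first, and only then is the replaced
 * entry released. On failure the node is rolled back to the replaced entry.
 */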
static int
mlxsw_sp_router_fib4_replace(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
			     const struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib4_entry *fib4_entry, *fib4_replaced;
	struct mlxsw_sp_fib_entry *replaced;
	struct mlxsw_sp_fib_node *fib_node;
	int err;

	if (mlxsw_sp->router->aborted)
		return 0;

	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, fen_info->tb_id,
					 &fen_info->dst, sizeof(fen_info->dst),
					 fen_info->dst_len,
					 MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(fib_node)) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
		return PTR_ERR(fib_node);
	}

	fib4_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
	if (IS_ERR(fib4_entry)) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
		err = PTR_ERR(fib4_entry);
		goto err_fib4_entry_create;
	}

	if (!mlxsw_sp_fib4_allow_replace(fib4_entry)) {
		mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
		return 0;
	}

	replaced = fib_node->fib_entry;
	err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, op_ctx, &fib4_entry->common);
	if (err) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
		goto err_fib_node_entry_link;
	}

	/* Nothing to replace */
	if (!replaced)
		return 0;

	mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, replaced);
	fib4_replaced = container_of(replaced, struct mlxsw_sp_fib4_entry,
				     common);
	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_replaced);

	return 0;

err_fib_node_entry_link:
	fib_node->fib_entry = replaced;
	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
err_fib4_entry_create:
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
	return err;
}

static int mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
				    struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib4_entry *fib4_entry;
	struct mlxsw_sp_fib_node *fib_node;
	int err;

	if (mlxsw_sp->router->aborted)
		return 0;

	fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
	if (!fib4_entry)
		return 0;
	fib_node = fib4_entry->common.fib_node;

	err = __mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, op_ctx, &fib4_entry->common);
	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
	return err;
}

static bool mlxsw_sp_fib6_rt_should_ignore(const struct fib6_info *rt)
{
	/* Multicast routes aren't supported, so ignore them. Neighbour
	 * Discovery packets are specifically trapped.
	 */
	if (ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_MULTICAST)
		return true;

	/* Cloned routes are irrelevant in the forwarding path. */
	if (rt->fib6_flags & RTF_CACHE)
		return true;

	return false;
}

5249
static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct fib6_info *rt)
5250 5251 5252 5253 5254 5255 5256 5257 5258 5259 5260 5261
{
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;

	mlxsw_sp_rt6 = kzalloc(sizeof(*mlxsw_sp_rt6), GFP_KERNEL);
	if (!mlxsw_sp_rt6)
		return ERR_PTR(-ENOMEM);

	/* In case of route replace, replaced route is deleted with
	 * no notification. Take reference to prevent accessing freed
	 * memory.
	 */
	mlxsw_sp_rt6->rt = rt;
5262
	fib6_info_hold(rt);
5263 5264 5265 5266 5267

	return mlxsw_sp_rt6;
}

#if IS_ENABLED(CONFIG_IPV6)
static void mlxsw_sp_rt6_release(struct fib6_info *rt)
{
	fib6_info_release(rt);
}
#else
static void mlxsw_sp_rt6_release(struct fib6_info *rt)
{
}
#endif

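/* Clear the offload indication from the kernel's nexthop before
 * dropping what may be the last reference to the fib6_info.
 */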
static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
{
	struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;

	fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
	mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt);
	kfree(mlxsw_sp_rt6);
}

static struct fib6_info *
mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
{
	return list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
				list)->rt;
}

static struct mlxsw_sp_rt6 *
mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry,
			    const struct fib6_info *rt)
{
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;

	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
		if (mlxsw_sp_rt6->rt == rt)
			return mlxsw_sp_rt6;
	}

	return NULL;
}

static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp,
					const struct fib6_info *rt,
					enum mlxsw_sp_ipip_type *ret)
{
	return rt->fib6_nh->fib_nh_dev &&
	       mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->fib6_nh->fib_nh_dev, ret);
}

static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_nexthop_group *nh_grp,
				       struct mlxsw_sp_nexthop *nh,
				       const struct fib6_info *rt)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_ipip_entry *ipip_entry;
	struct net_device *dev = rt->fib6_nh->fib_nh_dev;
	struct mlxsw_sp_rif *rif;
	int err;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
	if (ipip_entry) {
		ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
		if (ipip_ops->can_offload(mlxsw_sp, dev,
					  MLXSW_SP_L3_PROTO_IPV6)) {
			nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
			mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
			return 0;
		}
	}

	nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!rif)
		return 0;
	mlxsw_sp_nexthop_rif_init(nh, rif);

	err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
	if (err)
		goto err_nexthop_neigh_init;

	return 0;

err_nexthop_neigh_init:
	mlxsw_sp_nexthop_rif_fini(nh);
	return err;
}

static void mlxsw_sp_nexthop6_type_fini(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_nexthop *nh)
{
	mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
}

static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_nexthop_group *nh_grp,
				  struct mlxsw_sp_nexthop *nh,
				  const struct fib6_info *rt)
{
	struct net_device *dev = rt->fib6_nh->fib_nh_dev;

	nh->nh_grp = nh_grp;
	nh->nh_weight = rt->fib6_nh->fib_nh_weight;
	memcpy(&nh->gw_addr, &rt->fib6_nh->fib_nh_gw6, sizeof(nh->gw_addr));
#if IS_ENABLED(CONFIG_IPV6)
	nh->neigh_tbl = &nd_tbl;
#endif
	mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);

	list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);

	if (!dev)
		return 0;
	nh->ifindex = dev->ifindex;

	return mlxsw_sp_nexthop6_type_init(mlxsw_sp, nh_grp, nh, rt);
}

static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_nexthop *nh)
{
	mlxsw_sp_nexthop6_type_fini(mlxsw_sp, nh);
	list_del(&nh->router_list_node);
	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
}

static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
				    const struct fib6_info *rt)
{
	return rt->fib6_nh->fib_nh_gw_family ||
	       mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL);
}

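/* Build a nexthop group whose members mirror the route's sibling list:
 * one nexthop per fib6_info, in list order. The group is inserted into
 * the router's group table so that identical groups can later be
 * looked up and shared.
 */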
static struct mlxsw_sp_nexthop_group *
mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct mlxsw_sp_nexthop_group *nh_grp;
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
	struct mlxsw_sp_nexthop *nh;
	int i = 0;
	int err;

	nh_grp = kzalloc(struct_size(nh_grp, nexthops, fib6_entry->nrt6),
			 GFP_KERNEL);
	if (!nh_grp)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&nh_grp->fib_list);
	nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6;
	mlxsw_sp_rt6 = list_first_entry(&fib6_entry->rt6_list,
					struct mlxsw_sp_rt6, list);
	nh_grp->gateway = mlxsw_sp_rt6_is_gateway(mlxsw_sp, mlxsw_sp_rt6->rt);
	nh_grp->count = fib6_entry->nrt6;
	for (i = 0; i < nh_grp->count; i++) {
		struct fib6_info *rt = mlxsw_sp_rt6->rt;

		nh = &nh_grp->nexthops[i];
		err = mlxsw_sp_nexthop6_init(mlxsw_sp, nh_grp, nh, rt);
		if (err)
			goto err_nexthop6_init;
		mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list);
	}

	err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
	if (err)
		goto err_nexthop_group_insert;

	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	return nh_grp;

err_nexthop_group_insert:
err_nexthop6_init:
	for (i--; i >= 0; i--) {
		nh = &nh_grp->nexthops[i];
		mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
	}
	kfree(nh_grp);
	return ERR_PTR(err);
}

static void
mlxsw_sp_nexthop6_group_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_nexthop_group *nh_grp)
{
	struct mlxsw_sp_nexthop *nh;
	int i = nh_grp->count;

	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
	for (i--; i >= 0; i--) {
		nh = &nh_grp->nexthops[i];
		mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
	}
	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	WARN_ON(nh_grp->adj_index_valid);
	kfree(nh_grp);
}

static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct mlxsw_sp_nexthop_group *nh_grp;

	nh_grp = mlxsw_sp_nexthop6_group_lookup(mlxsw_sp, fib6_entry);
	if (!nh_grp) {
		nh_grp = mlxsw_sp_nexthop6_group_create(mlxsw_sp, fib6_entry);
		if (IS_ERR(nh_grp))
			return PTR_ERR(nh_grp);
	}

	list_add_tail(&fib6_entry->common.nexthop_group_node,
		      &nh_grp->fib_list);
	fib6_entry->common.nh_group = nh_grp;

	/* The route and the nexthop are described by the same struct, so we
	 * need to update the nexthop offload indication for the new route.
	 */
	__mlxsw_sp_nexthop6_group_offload_refresh(nh_grp, fib6_entry);

	return 0;
}

static void mlxsw_sp_nexthop6_group_put(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;

	list_del(&fib_entry->nexthop_group_node);
	if (!list_empty(&nh_grp->fib_list))
		return;
	mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, nh_grp);
}

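/* Replace the entry's nexthop group in a make-before-break fashion:
 * acquire the new group, point the hardware entry at it and only then
 * dispose of the old group (unless other entries still use it). On
 * failure the old group is restored.
 */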
static int mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
					  struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct mlxsw_sp_nexthop_group *old_nh_grp = fib6_entry->common.nh_group;
	int err;

	fib6_entry->common.nh_group = NULL;
	list_del(&fib6_entry->common.nexthop_group_node);

	err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
	if (err)
		goto err_nexthop6_group_get;

	/* In case this entry is offloaded, then the adjacency index
	 * currently associated with it in the device's table is that
	 * of the old group. Start using the new one instead.
	 */
	err = __mlxsw_sp_fib_entry_update(mlxsw_sp, op_ctx,
					  &fib6_entry->common, false);
	if (err)
		goto err_fib_entry_update;

	if (list_empty(&old_nh_grp->fib_list))
		mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, old_nh_grp);

	return 0;

err_fib_entry_update:
	mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
err_nexthop6_group_get:
	list_add_tail(&fib6_entry->common.nexthop_group_node,
		      &old_nh_grp->fib_list);
	fib6_entry->common.nh_group = old_nh_grp;
	return err;
}

static int
mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
				struct mlxsw_sp_fib6_entry *fib6_entry,
				struct fib6_info **rt_arr, unsigned int nrt6)
{
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
	int err, i;

	for (i = 0; i < nrt6; i++) {
		mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
		if (IS_ERR(mlxsw_sp_rt6)) {
			err = PTR_ERR(mlxsw_sp_rt6);
			goto err_rt6_create;
		}

		list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
		fib6_entry->nrt6++;
	}

	err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, op_ctx, fib6_entry);
	if (err)
		goto err_nexthop6_group_update;

	return 0;

err_nexthop6_group_update:
	i = nrt6;
err_rt6_create:
	for (i--; i >= 0; i--) {
		fib6_entry->nrt6--;
		mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
					       struct mlxsw_sp_rt6, list);
		list_del(&mlxsw_sp_rt6->list);
		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
	}
	return err;
}

static void
mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
				struct mlxsw_sp_fib6_entry *fib6_entry,
				struct fib6_info **rt_arr, unsigned int nrt6)
{
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
	int i;

	for (i = 0; i < nrt6; i++) {
		mlxsw_sp_rt6 = mlxsw_sp_fib6_entry_rt_find(fib6_entry,
							   rt_arr[i]);
		if (WARN_ON_ONCE(!mlxsw_sp_rt6))
			continue;

		fib6_entry->nrt6--;
		list_del(&mlxsw_sp_rt6->list);
		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
	}

	mlxsw_sp_nexthop6_group_update(mlxsw_sp, op_ctx, fib6_entry);
}

static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib_entry *fib_entry,
					 const struct fib6_info *rt)
{
	/* Packets hitting RTF_REJECT routes need to be discarded by the
	 * stack. We can rely on their destination device not having a
	 * RIF (it's the loopback device) and can thus use action type
	 * local, which will cause them to be trapped with a lower
	 * priority than packets that need to be locally received.
	 */
	if (rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST))
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
	else if (rt->fib6_type == RTN_BLACKHOLE)
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
	else if (rt->fib6_flags & RTF_REJECT)
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
	else if (mlxsw_sp_rt6_is_gateway(mlxsw_sp, rt))
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
	else
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
}

static void
mlxsw_sp_fib6_entry_rt_destroy_all(struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6, *tmp;

	list_for_each_entry_safe(mlxsw_sp_rt6, tmp, &fib6_entry->rt6_list,
				 list) {
		fib6_entry->nrt6--;
		list_del(&mlxsw_sp_rt6->list);
		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
	}
}

static struct mlxsw_sp_fib6_entry *
mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_fib_node *fib_node,
			   struct fib6_info **rt_arr, unsigned int nrt6)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_fib_entry *fib_entry;
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
	int err, i;

	fib6_entry = kzalloc(sizeof(*fib6_entry), GFP_KERNEL);
	if (!fib6_entry)
		return ERR_PTR(-ENOMEM);
	fib_entry = &fib6_entry->common;

	fib_entry->priv = mlxsw_sp_fib_entry_priv_create(fib_node->fib->ll_ops);
	if (IS_ERR(fib_entry->priv)) {
		err = PTR_ERR(fib_entry->priv);
		goto err_fib_entry_priv_create;
	}

	INIT_LIST_HEAD(&fib6_entry->rt6_list);

	for (i = 0; i < nrt6; i++) {
		mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
		if (IS_ERR(mlxsw_sp_rt6)) {
			err = PTR_ERR(mlxsw_sp_rt6);
			goto err_rt6_create;
		}
		list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
		fib6_entry->nrt6++;
	}

	mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, rt_arr[0]);

	err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
	if (err)
		goto err_nexthop6_group_get;

	fib_entry->fib_node = fib_node;

	return fib6_entry;

err_nexthop6_group_get:
	i = nrt6;
err_rt6_create:
	for (i--; i >= 0; i--) {
		fib6_entry->nrt6--;
		mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
					       struct mlxsw_sp_rt6, list);
		list_del(&mlxsw_sp_rt6->list);
		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
	}
	mlxsw_sp_fib_entry_priv_put(fib_entry->priv);
err_fib_entry_priv_create:
	kfree(fib6_entry);
	return ERR_PTR(err);
}

static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib6_entry *fib6_entry)
{
	mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
	mlxsw_sp_fib6_entry_rt_destroy_all(fib6_entry);
	WARN_ON(fib6_entry->nrt6);
	mlxsw_sp_fib_entry_priv_put(fib6_entry->common.priv);
	kfree(fib6_entry);
}

static struct mlxsw_sp_fib6_entry *
mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
			   const struct fib6_info *rt)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_fib_node *fib_node;
	struct mlxsw_sp_fib *fib;
	struct fib6_info *cmp_rt;
	struct mlxsw_sp_vr *vr;

	vr = mlxsw_sp_vr_find(mlxsw_sp, rt->fib6_table->tb6_id);
	if (!vr)
		return NULL;
	fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV6);

	fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->fib6_dst.addr,
					    sizeof(rt->fib6_dst.addr),
					    rt->fib6_dst.plen);
	if (!fib_node)
		return NULL;

	fib6_entry = container_of(fib_node->fib_entry,
				  struct mlxsw_sp_fib6_entry, common);
	cmp_rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
	if (rt->fib6_table->tb6_id == cmp_rt->fib6_table->tb6_id &&
	    rt->fib6_metric == cmp_rt->fib6_metric &&
	    mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt))
		return fib6_entry;

	return NULL;
}

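/* The kernel's local and main IPv6 tables are squashed into a single
 * virtual router in hardware. Do not let a route from the main table
 * replace an identical prefix installed from the local table, since
 * the local table is presumably consulted first in the kernel.
 */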
static bool mlxsw_sp_fib6_allow_replace(struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
	struct mlxsw_sp_fib6_entry *fib6_replaced;
	struct fib6_info *rt, *rt_replaced;

	if (!fib_node->fib_entry)
		return true;

	fib6_replaced = container_of(fib_node->fib_entry,
				     struct mlxsw_sp_fib6_entry,
				     common);
	rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
	rt_replaced = mlxsw_sp_fib6_entry_rt(fib6_replaced);
	if (rt->fib6_table->tb6_id == RT_TABLE_MAIN &&
	    rt_replaced->fib6_table->tb6_id == RT_TABLE_LOCAL)
		return false;

	return true;
}

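/* Entry point for IPv6 route replace events. As a hypothetical
 * illustration, a user-space command such as
 *   ip -6 route replace 2001:db8::/64 via fe80::1 dev swp1
 * is delivered as FIB_EVENT_ENTRY_REPLACE with the route and its
 * siblings collected in rt_arr[].
 */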
static int mlxsw_sp_router_fib6_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
					struct fib6_info **rt_arr, unsigned int nrt6)
{
	struct mlxsw_sp_fib6_entry *fib6_entry, *fib6_replaced;
	struct mlxsw_sp_fib_entry *replaced;
	struct mlxsw_sp_fib_node *fib_node;
	struct fib6_info *rt = rt_arr[0];
	int err;

	if (mlxsw_sp->router->aborted)
		return 0;

	if (rt->fib6_src.plen)
		return -EINVAL;

	if (mlxsw_sp_fib6_rt_should_ignore(rt))
		return 0;

	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
					 &rt->fib6_dst.addr,
					 sizeof(rt->fib6_dst.addr),
					 rt->fib6_dst.plen,
					 MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(fib_node))
		return PTR_ERR(fib_node);

	fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt_arr,
						nrt6);
	if (IS_ERR(fib6_entry)) {
		err = PTR_ERR(fib6_entry);
		goto err_fib6_entry_create;
	}

	if (!mlxsw_sp_fib6_allow_replace(fib6_entry)) {
		mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
		return 0;
	}

	replaced = fib_node->fib_entry;
	err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, op_ctx, &fib6_entry->common);
	if (err)
		goto err_fib_node_entry_link;

	/* Nothing to replace */
	if (!replaced)
		return 0;

	mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, replaced);
	fib6_replaced = container_of(replaced, struct mlxsw_sp_fib6_entry,
				     common);
	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_replaced);

	return 0;

err_fib_node_entry_link:
	fib_node->fib_entry = replaced;
	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
err_fib6_entry_create:
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
	return err;
}

static int mlxsw_sp_router_fib6_append(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
				       struct fib6_info **rt_arr, unsigned int nrt6)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_fib_node *fib_node;
	struct fib6_info *rt = rt_arr[0];
	int err;

	if (mlxsw_sp->router->aborted)
		return 0;

	if (rt->fib6_src.plen)
		return -EINVAL;

	if (mlxsw_sp_fib6_rt_should_ignore(rt))
		return 0;

	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
					 &rt->fib6_dst.addr,
					 sizeof(rt->fib6_dst.addr),
					 rt->fib6_dst.plen,
					 MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(fib_node))
		return PTR_ERR(fib_node);

	if (WARN_ON_ONCE(!fib_node->fib_entry)) {
		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
		return -EINVAL;
	}

	fib6_entry = container_of(fib_node->fib_entry,
				  struct mlxsw_sp_fib6_entry, common);
	err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, op_ctx, fib6_entry, rt_arr, nrt6);
	if (err)
		goto err_fib6_entry_nexthop_add;

	return 0;

err_fib6_entry_nexthop_add:
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
	return err;
}

static int mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
				    struct fib6_info **rt_arr, unsigned int nrt6)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_fib_node *fib_node;
	struct fib6_info *rt = rt_arr[0];
	int err;

	if (mlxsw_sp->router->aborted)
		return 0;

	if (mlxsw_sp_fib6_rt_should_ignore(rt))
		return 0;

	/* Multipath routes are first added to the FIB trie and only then
	 * notified. If we vetoed the addition, we will get a delete
	 * notification for a route we do not have. Therefore, do not warn if
	 * route was not found.
	 */
	fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt);
	if (!fib6_entry)
		return 0;

	/* If not all the nexthops are deleted, then only reduce the nexthop
	 * group.
	 */
	if (nrt6 != fib6_entry->nrt6) {
		mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, op_ctx, fib6_entry, rt_arr, nrt6);
		return 0;
	}

	fib_node = fib6_entry->common.fib_node;

	err = __mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, op_ctx, &fib6_entry->common);
	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
	return err;
}

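/* Abort support: bind an otherwise empty LPM tree to every virtual
 * router and install a zero-length prefix whose action is ip2me, so
 * that all packets are trapped to the CPU and routed in software.
 */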
static int __mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp,
					    enum mlxsw_sp_l3proto proto,
					    u8 tree_id)
{
	const struct mlxsw_sp_router_ll_ops *ll_ops = mlxsw_sp->router->proto_ll_ops[proto];
	enum mlxsw_reg_ralxx_protocol ralxx_proto =
				(enum mlxsw_reg_ralxx_protocol) proto;
	struct mlxsw_sp_fib_entry_priv *priv;
	char xralta_pl[MLXSW_REG_XRALTA_LEN];
	char xralst_pl[MLXSW_REG_XRALST_LEN];
	int i, err;

	mlxsw_reg_xralta_pack(xralta_pl, true, ralxx_proto, tree_id);
	err = ll_ops->ralta_write(mlxsw_sp, xralta_pl);
	if (err)
		return err;

	mlxsw_reg_xralst_pack(xralst_pl, 0xff, tree_id);
	err = ll_ops->ralst_write(mlxsw_sp, xralst_pl);
	if (err)
		return err;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		struct mlxsw_sp_fib_entry_op_ctx *op_ctx = mlxsw_sp->router->ll_op_ctx;
		struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
		char xraltb_pl[MLXSW_REG_XRALTB_LEN];

		mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
		mlxsw_reg_xraltb_pack(xraltb_pl, vr->id, ralxx_proto, tree_id);
		err = ll_ops->raltb_write(mlxsw_sp, xraltb_pl);
		if (err)
			return err;

		priv = mlxsw_sp_fib_entry_priv_create(ll_ops);
		if (IS_ERR(priv))
			return PTR_ERR(priv);

		ll_ops->fib_entry_pack(op_ctx, proto, MLXSW_SP_FIB_ENTRY_OP_WRITE,
				       vr->id, 0, NULL, priv);
		ll_ops->fib_entry_act_ip2me_pack(op_ctx);
		err = ll_ops->fib_entry_commit(mlxsw_sp, op_ctx, NULL);
		mlxsw_sp_fib_entry_priv_put(priv);
		if (err)
			return err;
	}

	return 0;
}

static struct mlxsw_sp_mr_table *
mlxsw_sp_router_fibmr_family_to_table(struct mlxsw_sp_vr *vr, int family)
{
	if (family == RTNL_FAMILY_IPMR)
		return vr->mr_table[MLXSW_SP_L3_PROTO_IPV4];
	else
		return vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
}

static int mlxsw_sp_router_fibmr_add(struct mlxsw_sp *mlxsw_sp,
				     struct mfc_entry_notifier_info *men_info,
				     bool replace)
{
	struct mlxsw_sp_mr_table *mrt;
	struct mlxsw_sp_vr *vr;

	if (mlxsw_sp->router->aborted)
		return 0;

	vr = mlxsw_sp_vr_get(mlxsw_sp, men_info->tb_id, NULL);
	if (IS_ERR(vr))
		return PTR_ERR(vr);

	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
	return mlxsw_sp_mr_route_add(mrt, men_info->mfc, replace);
}

static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp,
				      struct mfc_entry_notifier_info *men_info)
{
	struct mlxsw_sp_mr_table *mrt;
	struct mlxsw_sp_vr *vr;

	if (mlxsw_sp->router->aborted)
		return;

	vr = mlxsw_sp_vr_find(mlxsw_sp, men_info->tb_id);
	if (WARN_ON(!vr))
		return;

	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
	mlxsw_sp_mr_route_del(mrt, men_info->mfc);
	mlxsw_sp_vr_put(mlxsw_sp, vr);
}

static int
mlxsw_sp_router_fibmr_vif_add(struct mlxsw_sp *mlxsw_sp,
			      struct vif_entry_notifier_info *ven_info)
{
	struct mlxsw_sp_mr_table *mrt;
	struct mlxsw_sp_rif *rif;
	struct mlxsw_sp_vr *vr;

	if (mlxsw_sp->router->aborted)
		return 0;

	vr = mlxsw_sp_vr_get(mlxsw_sp, ven_info->tb_id, NULL);
	if (IS_ERR(vr))
		return PTR_ERR(vr);

	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, ven_info->dev);
	return mlxsw_sp_mr_vif_add(mrt, ven_info->dev,
				   ven_info->vif_index,
				   ven_info->vif_flags, rif);
}

static void
mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
			      struct vif_entry_notifier_info *ven_info)
{
	struct mlxsw_sp_mr_table *mrt;
	struct mlxsw_sp_vr *vr;

	if (mlxsw_sp->router->aborted)
		return;

	vr = mlxsw_sp_vr_find(mlxsw_sp, ven_info->tb_id);
	if (WARN_ON(!vr))
		return;

	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
	mlxsw_sp_mr_vif_del(mrt, ven_info->vif_index);
	mlxsw_sp_vr_put(mlxsw_sp, vr);
}

static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
{
	enum mlxsw_sp_l3proto proto = MLXSW_SP_L3_PROTO_IPV4;
	int err;

	err = __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
					       MLXSW_SP_LPM_TREE_MIN);
	if (err)
		return err;

	/* The multicast router code does not need an abort trap as by default,
	 * packets that don't match any routes are trapped to the CPU.
	 */

	proto = MLXSW_SP_L3_PROTO_IPV6;
	return __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
						MLXSW_SP_LPM_TREE_MIN + 1);
}

static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_fib4_entry *fib4_entry;

	fib4_entry = container_of(fib_node->fib_entry,
				  struct mlxsw_sp_fib4_entry, common);
	mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, fib_node->fib_entry);
	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
}

static void mlxsw_sp_fib6_node_flush(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;

	fib6_entry = container_of(fib_node->fib_entry,
				  struct mlxsw_sp_fib6_entry, common);
	mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, fib_node->fib_entry);
	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
}

static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_fib_node *fib_node)
{
	switch (fib_node->fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		mlxsw_sp_fib6_node_flush(mlxsw_sp, fib_node);
		break;
	}
}

static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_vr *vr,
				  enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
	struct mlxsw_sp_fib_node *fib_node, *tmp;

	list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
		bool do_break = &tmp->list == &fib->node_list;

		mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
		if (do_break)
			break;
	}
}

static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
{
	int i, j;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];

		if (!mlxsw_sp_vr_is_used(vr))
			continue;

		for (j = 0; j < MLXSW_SP_L3_PROTO_MAX; j++)
			mlxsw_sp_mr_table_flush(vr->mr_table[j]);
		mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);

		/* If virtual router was only used for IPv4, then it's no
		 * longer used.
		 */
		if (!mlxsw_sp_vr_is_used(vr))
			continue;
		mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
	}

	/* After flushing all the routes, it is not possible anyone is still
	 * using the adjacency index that is discarding packets, so free it in
	 * case it was allocated.
	 */
	if (!mlxsw_sp->router->adj_discard_index_valid)
		return;
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
			   mlxsw_sp->router->adj_discard_index);
	mlxsw_sp->router->adj_discard_index_valid = false;
}

static void mlxsw_sp_router_fib_abort(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	if (mlxsw_sp->router->aborted)
		return;
	dev_warn(mlxsw_sp->bus_info->dev, "FIB abort triggered. Note that FIB entries are no longer being offloaded to this device.\n");
	mlxsw_sp_router_fib_flush(mlxsw_sp);
	mlxsw_sp->router->aborted = true;
	err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
	if (err)
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
}

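/* FIB notifications arrive in atomic context and are copied into
 * mlxsw_sp_fib_event entries on a queue; the work item below dequeues
 * and processes them in order.
 */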
struct mlxsw_sp_fib6_event {
	struct fib6_info **rt_arr;
	unsigned int nrt6;
};

struct mlxsw_sp_fib_event {
	struct list_head list; /* node in fib queue */
	union {
		struct mlxsw_sp_fib6_event fib6_event;
		struct fib_entry_notifier_info fen_info;
		struct fib_rule_notifier_info fr_info;
		struct fib_nh_notifier_info fnh_info;
		struct mfc_entry_notifier_info men_info;
		struct vif_entry_notifier_info ven_info;
	};
	struct mlxsw_sp *mlxsw_sp;
	unsigned long event;
	int family;
};

static int
mlxsw_sp_router_fib6_event_init(struct mlxsw_sp_fib6_event *fib6_event,
				struct fib6_entry_notifier_info *fen6_info)
{
	struct fib6_info *rt = fen6_info->rt;
	struct fib6_info **rt_arr;
	struct fib6_info *iter;
	unsigned int nrt6;
	int i = 0;

	nrt6 = fen6_info->nsiblings + 1;

	rt_arr = kcalloc(nrt6, sizeof(struct fib6_info *), GFP_ATOMIC);
	if (!rt_arr)
		return -ENOMEM;

	fib6_event->rt_arr = rt_arr;
	fib6_event->nrt6 = nrt6;

	rt_arr[0] = rt;
	fib6_info_hold(rt);

	if (!fen6_info->nsiblings)
		return 0;

	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
		if (i == fen6_info->nsiblings)
			break;

		rt_arr[i + 1] = iter;
		fib6_info_hold(iter);
		i++;
	}
	WARN_ON_ONCE(i != fen6_info->nsiblings);

	return 0;
}

static void
mlxsw_sp_router_fib6_event_fini(struct mlxsw_sp_fib6_event *fib6_event)
{
	int i;

	for (i = 0; i < fib6_event->nrt6; i++)
		mlxsw_sp_rt6_release(fib6_event->rt_arr[i]);
	kfree(fib6_event->rt_arr);
}

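/* Process a single queued IPv4 event in the deferred work. A failed
 * replace triggers the abort mechanism; references taken when the
 * event was queued are released here.
 */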
static void mlxsw_sp_router_fib4_event_process(struct mlxsw_sp *mlxsw_sp,
					       struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
					       struct mlxsw_sp_fib_event *fib_event)
{
	int err;

	mlxsw_sp_span_respin(mlxsw_sp);

	switch (fib_event->event) {
	case FIB_EVENT_ENTRY_REPLACE:
		err = mlxsw_sp_router_fib4_replace(mlxsw_sp, op_ctx, &fib_event->fen_info);
		if (err) {
			mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
			mlxsw_sp_router_fib_abort(mlxsw_sp);
		}
		fib_info_put(fib_event->fen_info.fi);
		break;
	case FIB_EVENT_ENTRY_DEL:
		err = mlxsw_sp_router_fib4_del(mlxsw_sp, op_ctx, &fib_event->fen_info);
		if (err)
			mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
		fib_info_put(fib_event->fen_info.fi);
		break;
	case FIB_EVENT_NH_ADD:
	case FIB_EVENT_NH_DEL:
		mlxsw_sp_nexthop4_event(mlxsw_sp, fib_event->event, fib_event->fnh_info.fib_nh);
		fib_info_put(fib_event->fnh_info.fib_nh->nh_parent);
		break;
	}
}

static void mlxsw_sp_router_fib6_event_process(struct mlxsw_sp *mlxsw_sp,
					       struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
					       struct mlxsw_sp_fib_event *fib_event)
{
	int err;

	mlxsw_sp_span_respin(mlxsw_sp);

	switch (fib_event->event) {
	case FIB_EVENT_ENTRY_REPLACE:
		err = mlxsw_sp_router_fib6_replace(mlxsw_sp, op_ctx, fib_event->fib6_event.rt_arr,
						   fib_event->fib6_event.nrt6);
		if (err) {
			mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
			mlxsw_sp_router_fib_abort(mlxsw_sp);
		}
		mlxsw_sp_router_fib6_event_fini(&fib_event->fib6_event);
		break;
	case FIB_EVENT_ENTRY_APPEND:
		err = mlxsw_sp_router_fib6_append(mlxsw_sp, op_ctx, fib_event->fib6_event.rt_arr,
						  fib_event->fib6_event.nrt6);
		if (err) {
			mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
			mlxsw_sp_router_fib_abort(mlxsw_sp);
		}
		mlxsw_sp_router_fib6_event_fini(&fib_event->fib6_event);
		break;
	case FIB_EVENT_ENTRY_DEL:
		err = mlxsw_sp_router_fib6_del(mlxsw_sp, op_ctx, fib_event->fib6_event.rt_arr,
					       fib_event->fib6_event.nrt6);
		if (err)
			mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
		mlxsw_sp_router_fib6_event_fini(&fib_event->fib6_event);
		break;
	}
}

static void mlxsw_sp_router_fibmr_event_process(struct mlxsw_sp *mlxsw_sp,
						struct mlxsw_sp_fib_event *fib_event)
{
	bool replace;
	int err;

	rtnl_lock();
	mutex_lock(&mlxsw_sp->router->lock);
	switch (fib_event->event) {
	case FIB_EVENT_ENTRY_REPLACE:
	case FIB_EVENT_ENTRY_ADD:
		replace = fib_event->event == FIB_EVENT_ENTRY_REPLACE;

		err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_event->men_info, replace);
		if (err)
			mlxsw_sp_router_fib_abort(mlxsw_sp);
		mr_cache_put(fib_event->men_info.mfc);
		break;
	case FIB_EVENT_ENTRY_DEL:
		mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_event->men_info);
		mr_cache_put(fib_event->men_info.mfc);
		break;
	case FIB_EVENT_VIF_ADD:
		err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp,
						    &fib_event->ven_info);
		if (err)
			mlxsw_sp_router_fib_abort(mlxsw_sp);
		dev_put(fib_event->ven_info.dev);
		break;
	case FIB_EVENT_VIF_DEL:
		mlxsw_sp_router_fibmr_vif_del(mlxsw_sp, &fib_event->ven_info);
		dev_put(fib_event->ven_info.dev);
		break;
	}
	mutex_unlock(&mlxsw_sp->router->lock);
	rtnl_unlock();
}

static void mlxsw_sp_router_fib_event_work(struct work_struct *work)
{
	struct mlxsw_sp_router *router = container_of(work, struct mlxsw_sp_router, fib_event_work);
	struct mlxsw_sp_fib_entry_op_ctx *op_ctx = router->ll_op_ctx;
	struct mlxsw_sp *mlxsw_sp = router->mlxsw_sp;
	struct mlxsw_sp_fib_event *next_fib_event;
	struct mlxsw_sp_fib_event *fib_event;
	int last_family = AF_UNSPEC;
	LIST_HEAD(fib_event_queue);

	spin_lock_bh(&router->fib_event_queue_lock);
	list_splice_init(&router->fib_event_queue, &fib_event_queue);
	spin_unlock_bh(&router->fib_event_queue_lock);

	/* Router lock is held here to make sure per-instance
	 * operation context is not used in between FIB4/6 events
	 * processing.
	 */
	mutex_lock(&router->lock);
	mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
	list_for_each_entry_safe(fib_event, next_fib_event,
				 &fib_event_queue, list) {
		/* Check if the next entry in the queue exists and it is
		 * of the same type (family and event) as the current one.
		 * In that case it is permitted to do the bulking
		 * of multiple FIB entries to a single register write.
		 */
		op_ctx->bulk_ok = !list_is_last(&fib_event->list, &fib_event_queue) &&
				  fib_event->family == next_fib_event->family &&
				  fib_event->event == next_fib_event->event;

		/* In case family of this and the previous entry are different, context
		 * reinitialization is going to be needed now, indicate that.
		 * Note that since last_family is initialized to AF_UNSPEC, this is always
		 * going to happen for the first entry processed in the work.
		 */
		if (fib_event->family != last_family)
			op_ctx->initialized = false;

		switch (fib_event->family) {
		case AF_INET:
			mlxsw_sp_router_fib4_event_process(mlxsw_sp, op_ctx,
							   fib_event);
			break;
		case AF_INET6:
			mlxsw_sp_router_fib6_event_process(mlxsw_sp, op_ctx,
							   fib_event);
			break;
		case RTNL_FAMILY_IP6MR:
		case RTNL_FAMILY_IPMR:
			/* Unlock here as inside FIBMR the lock is taken again
			 * under RTNL. The per-instance operation context
			 * is not used by FIBMR.
			 */
			mutex_unlock(&router->lock);
			mlxsw_sp_router_fibmr_event_process(mlxsw_sp,
							    fib_event);
			mutex_lock(&router->lock);
			break;
		default:
			WARN_ON_ONCE(1);
		}
		last_family = fib_event->family;
		kfree(fib_event);
		cond_resched();
	}
	WARN_ON_ONCE(!list_empty(&router->ll_op_ctx->fib_entry_priv_list));
	mutex_unlock(&router->lock);
}

static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event *fib_event,
				       struct fib_notifier_info *info)
{
	struct fib_entry_notifier_info *fen_info;
	struct fib_nh_notifier_info *fnh_info;

	switch (fib_event->event) {
	case FIB_EVENT_ENTRY_REPLACE:
	case FIB_EVENT_ENTRY_DEL:
		fen_info = container_of(info, struct fib_entry_notifier_info,
					info);
		fib_event->fen_info = *fen_info;
		/* Take reference on fib_info to prevent it from being
		 * freed while event is queued. Release it afterwards.
		 */
		fib_info_hold(fib_event->fen_info.fi);
		break;
	case FIB_EVENT_NH_ADD:
	case FIB_EVENT_NH_DEL:
		fnh_info = container_of(info, struct fib_nh_notifier_info,
					info);
		fib_event->fnh_info = *fnh_info;
		fib_info_hold(fib_event->fnh_info.fib_nh->nh_parent);
		break;
	}
}

static int mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event *fib_event,
				      struct fib_notifier_info *info)
{
	struct fib6_entry_notifier_info *fen6_info;
	int err;

	switch (fib_event->event) {
	case FIB_EVENT_ENTRY_REPLACE:
	case FIB_EVENT_ENTRY_APPEND:
	case FIB_EVENT_ENTRY_DEL:
		fen6_info = container_of(info, struct fib6_entry_notifier_info,
					 info);
		err = mlxsw_sp_router_fib6_event_init(&fib_event->fib6_event,
						      fen6_info);
		if (err)
			return err;
		break;
	}

	return 0;
}

static void
mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event *fib_event,
			    struct fib_notifier_info *info)
{
	switch (fib_event->event) {
	case FIB_EVENT_ENTRY_REPLACE:
	case FIB_EVENT_ENTRY_ADD:
	case FIB_EVENT_ENTRY_DEL:
		memcpy(&fib_event->men_info, info, sizeof(fib_event->men_info));
		mr_cache_hold(fib_event->men_info.mfc);
		break;
	case FIB_EVENT_VIF_ADD:
	case FIB_EVENT_VIF_DEL:
		memcpy(&fib_event->ven_info, info, sizeof(fib_event->ven_info));
		dev_hold(fib_event->ven_info.dev);
		break;
	}
}

static int mlxsw_sp_router_fib_rule_event(unsigned long event,
					  struct fib_notifier_info *info,
					  struct mlxsw_sp *mlxsw_sp)
{
	struct netlink_ext_ack *extack = info->extack;
	struct fib_rule_notifier_info *fr_info;
	struct fib_rule *rule;
	int err = 0;

	/* nothing to do at the moment */
	if (event == FIB_EVENT_RULE_DEL)
		return 0;

	if (mlxsw_sp->router->aborted)
		return 0;

	fr_info = container_of(info, struct fib_rule_notifier_info, info);
	rule = fr_info->rule;

	/* Rule only affects locally generated traffic */
	if (rule->iifindex == mlxsw_sp_net(mlxsw_sp)->loopback_dev->ifindex)
		return 0;

	switch (info->family) {
	case AF_INET:
		if (!fib4_rule_default(rule) && !rule->l3mdev)
			err = -EOPNOTSUPP;
		break;
	case AF_INET6:
		if (!fib6_rule_default(rule) && !rule->l3mdev)
			err = -EOPNOTSUPP;
		break;
	case RTNL_FAMILY_IPMR:
		if (!ipmr_rule_default(rule) && !rule->l3mdev)
			err = -EOPNOTSUPP;
		break;
	case RTNL_FAMILY_IP6MR:
		if (!ip6mr_rule_default(rule) && !rule->l3mdev)
			err = -EOPNOTSUPP;
		break;
	}

	if (err < 0)
		NL_SET_ERR_MSG_MOD(extack, "FIB rules not supported");

	return err;
}

/* Called with rcu_read_lock() */
static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
				     unsigned long event, void *ptr)
{
	struct mlxsw_sp_fib_event *fib_event;
	struct fib_notifier_info *info = ptr;
	struct mlxsw_sp_router *router;
	int err;

	if ((info->family != AF_INET && info->family != AF_INET6 &&
	     info->family != RTNL_FAMILY_IPMR &&
	     info->family != RTNL_FAMILY_IP6MR))
		return NOTIFY_DONE;

	router = container_of(nb, struct mlxsw_sp_router, fib_nb);

	switch (event) {
	case FIB_EVENT_RULE_ADD:
	case FIB_EVENT_RULE_DEL:
		err = mlxsw_sp_router_fib_rule_event(event, info,
						     router->mlxsw_sp);
		return notifier_from_errno(err);
	case FIB_EVENT_ENTRY_ADD:
	case FIB_EVENT_ENTRY_REPLACE:
	case FIB_EVENT_ENTRY_APPEND:
		if (router->aborted) {
			NL_SET_ERR_MSG_MOD(info->extack, "FIB offload was aborted. Not configuring route");
			return notifier_from_errno(-EINVAL);
		}
		if (info->family == AF_INET) {
			struct fib_entry_notifier_info *fen_info = ptr;

			if (fen_info->fi->fib_nh_is_v6) {
				NL_SET_ERR_MSG_MOD(info->extack, "IPv6 gateway with IPv4 route is not supported");
				return notifier_from_errno(-EINVAL);
			}
			if (fen_info->fi->nh) {
				NL_SET_ERR_MSG_MOD(info->extack, "IPv4 route with nexthop objects is not supported");
				return notifier_from_errno(-EINVAL);
			}
		} else if (info->family == AF_INET6) {
			struct fib6_entry_notifier_info *fen6_info;

			fen6_info = container_of(info,
						 struct fib6_entry_notifier_info,
						 info);
			if (fen6_info->rt->nh) {
				NL_SET_ERR_MSG_MOD(info->extack, "IPv6 route with nexthop objects is not supported");
				return notifier_from_errno(-EINVAL);
			}
		}
		break;
	}

	fib_event = kzalloc(sizeof(*fib_event), GFP_ATOMIC);
	if (!fib_event)
		return NOTIFY_BAD;

	fib_event->mlxsw_sp = router->mlxsw_sp;
	fib_event->event = event;
	fib_event->family = info->family;

	switch (info->family) {
	case AF_INET:
		mlxsw_sp_router_fib4_event(fib_event, info);
		break;
	case AF_INET6:
		err = mlxsw_sp_router_fib6_event(fib_event, info);
		if (err)
			goto err_fib_event;
		break;
	case RTNL_FAMILY_IP6MR:
	case RTNL_FAMILY_IPMR:
		mlxsw_sp_router_fibmr_event(fib_event, info);
		break;
	}

	/* Enqueue the event and trigger the work */
	spin_lock_bh(&router->fib_event_queue_lock);
	list_add_tail(&fib_event->list, &router->fib_event_queue);
	spin_unlock_bh(&router->fib_event_queue_lock);
	mlxsw_core_schedule_work(&router->fib_event_work);

	return NOTIFY_DONE;

err_fib_event:
	kfree(fib_event);
	return NOTIFY_BAD;
}

static struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
			 const struct net_device *dev)
{
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
		if (mlxsw_sp->router->rifs[i] &&
		    mlxsw_sp->router->rifs[i]->dev == dev)
			return mlxsw_sp->router->rifs[i];

	return NULL;
}

bool mlxsw_sp_rif_exists(struct mlxsw_sp *mlxsw_sp,
			 const struct net_device *dev)
{
	struct mlxsw_sp_rif *rif;

	mutex_lock(&mlxsw_sp->router->lock);
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	mutex_unlock(&mlxsw_sp->router->lock);

	return rif;
}

u16 mlxsw_sp_rif_vid(struct mlxsw_sp *mlxsw_sp, const struct net_device *dev)
{
	struct mlxsw_sp_rif *rif;
	u16 vid = 0;

	mutex_lock(&mlxsw_sp->router->lock);
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!rif)
		goto out;

	/* We only return the VID for VLAN RIFs. Otherwise we return an
	 * invalid value (0).
	 */
	if (rif->ops->type != MLXSW_SP_RIF_TYPE_VLAN)
		goto out;

	vid = mlxsw_sp_fid_8021q_vid(rif->fid);

out:
	mutex_unlock(&mlxsw_sp->router->lock);
	return vid;
}

static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	int err;

	mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_enable_set(ritr_pl, false);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_rif *rif)
{
	mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
	mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
	mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
}

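/* Decide whether an address event should change RIF configuration:
 * create a RIF on NETDEV_UP if none exists yet, and destroy it on
 * NETDEV_DOWN once the netdev has neither IPv4 nor IPv6 addresses
 * left.
 */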
static bool
mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
			   unsigned long event)
{
	struct inet6_dev *inet6_dev;
	bool addr_list_empty = true;
	struct in_device *idev;

	switch (event) {
	case NETDEV_UP:
		return rif == NULL;
	case NETDEV_DOWN:
		rcu_read_lock();
		idev = __in_dev_get_rcu(dev);
		if (idev && idev->ifa_list)
			addr_list_empty = false;

		inet6_dev = __in6_dev_get(dev);
		if (addr_list_empty && inet6_dev &&
		    !list_empty(&inet6_dev->addr_list))
			addr_list_empty = false;
		rcu_read_unlock();

		/* macvlans do not have a RIF, but rather piggy back on the
		 * RIF of their lower device.
		 */
		if (netif_is_macvlan(dev) && addr_list_empty)
			return true;

		if (rif && addr_list_empty &&
		    !netif_is_l3_slave(rif->dev))
			return true;
		/* It is possible we already removed the RIF ourselves
		 * if it was assigned to a netdev that is now a bridge
		 * or LAG slave.
		 */
		return false;
	}

	return false;
}

static enum mlxsw_sp_rif_type
mlxsw_sp_dev_rif_type(const struct mlxsw_sp *mlxsw_sp,
		      const struct net_device *dev)
{
	enum mlxsw_sp_fid_type type;

	if (mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL))
		return MLXSW_SP_RIF_TYPE_IPIP_LB;

	/* Otherwise RIF type is derived from the type of the underlying FID. */
	if (is_vlan_dev(dev) && netif_is_bridge_master(vlan_dev_real_dev(dev)))
		type = MLXSW_SP_FID_TYPE_8021Q;
	else if (netif_is_bridge_master(dev) && br_vlan_enabled(dev))
		type = MLXSW_SP_FID_TYPE_8021Q;
	else if (netif_is_bridge_master(dev))
		type = MLXSW_SP_FID_TYPE_8021D;
	else
		type = MLXSW_SP_FID_TYPE_RFID;

	return mlxsw_sp_fid_type_rif_type(mlxsw_sp, type);
}

static int mlxsw_sp_rif_index_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_rif_index)
{
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
		if (!mlxsw_sp->router->rifs[i]) {
			*p_rif_index = i;
			return 0;
		}
	}

	return -ENOBUFS;
}

static struct mlxsw_sp_rif *mlxsw_sp_rif_alloc(size_t rif_size, u16 rif_index,
					       u16 vr_id,
					       struct net_device *l3_dev)
{
	struct mlxsw_sp_rif *rif;

	rif = kzalloc(rif_size, GFP_KERNEL);
	if (!rif)
		return NULL;

	INIT_LIST_HEAD(&rif->nexthop_list);
	INIT_LIST_HEAD(&rif->neigh_list);
	if (l3_dev) {
		ether_addr_copy(rif->addr, l3_dev->dev_addr);
		rif->mtu = l3_dev->mtu;
		rif->dev = l3_dev;
	}
	rif->vr_id = vr_id;
	rif->rif_index = rif_index;

	return rif;
}

struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
					   u16 rif_index)
{
	return mlxsw_sp->router->rifs[rif_index];
}

u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
{
	return rif->rif_index;
}

u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
{
	return lb_rif->common.rif_index;
}

u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
{
	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(lb_rif->common.dev);
	struct mlxsw_sp_vr *ul_vr;

	ul_vr = mlxsw_sp_vr_get(lb_rif->common.mlxsw_sp, ul_tb_id, NULL);
	if (WARN_ON(IS_ERR(ul_vr)))
		return 0;

	return ul_vr->id;
}

u16 mlxsw_sp_ipip_lb_ul_rif_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
{
	return lb_rif->ul_rif_id;
}

int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
{
	return rif->dev->ifindex;
}

const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
{
	return rif->dev;
}

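/* Create a router interface: derive the RIF type from the netdev,
 * bind it to the netdev's L3 master table (or the main table), reserve
 * a free RIF index, then let the type-specific ops acquire the FID and
 * program the hardware.
 */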
static struct mlxsw_sp_rif *
mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
		    const struct mlxsw_sp_rif_params *params,
		    struct netlink_ext_ack *extack)
{
	u32 tb_id = l3mdev_fib_table(params->dev);
	const struct mlxsw_sp_rif_ops *ops;
	struct mlxsw_sp_fid *fid = NULL;
	enum mlxsw_sp_rif_type type;
	struct mlxsw_sp_rif *rif;
	struct mlxsw_sp_vr *vr;
	u16 rif_index;
	int i, err;

	type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
	ops = mlxsw_sp->rif_ops_arr[type];

	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN, extack);
	if (IS_ERR(vr))
		return ERR_CAST(vr);
	vr->rif_count++;

	err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
		goto err_rif_index_alloc;
	}

	rif = mlxsw_sp_rif_alloc(ops->rif_size, rif_index, vr->id, params->dev);
	if (!rif) {
		err = -ENOMEM;
		goto err_rif_alloc;
	}
	dev_hold(rif->dev);
	mlxsw_sp->router->rifs[rif_index] = rif;
	rif->mlxsw_sp = mlxsw_sp;
	rif->ops = ops;

	if (ops->fid_get) {
		fid = ops->fid_get(rif, extack);
		if (IS_ERR(fid)) {
			err = PTR_ERR(fid);
			goto err_fid_get;
		}
		rif->fid = fid;
	}

	if (ops->setup)
		ops->setup(rif, params);

	err = ops->configure(rif);
	if (err)
		goto err_configure;

	for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) {
		err = mlxsw_sp_mr_rif_add(vr->mr_table[i], rif);
		if (err)
			goto err_mr_rif_add;
	}

	mlxsw_sp_rif_counters_alloc(rif);

	return rif;

err_mr_rif_add:
	for (i--; i >= 0; i--)
		mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
	ops->deconfigure(rif);
err_configure:
	if (fid)
		mlxsw_sp_fid_put(fid);
err_fid_get:
	mlxsw_sp->router->rifs[rif_index] = NULL;
	dev_put(rif->dev);
	kfree(rif);
err_rif_alloc:
err_rif_index_alloc:
	vr->rif_count--;
	mlxsw_sp_vr_put(mlxsw_sp, vr);
	return ERR_PTR(err);
}

static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
{
	const struct mlxsw_sp_rif_ops *ops = rif->ops;
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_fid *fid = rif->fid;
	struct mlxsw_sp_vr *vr;
	int i;

	mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
	vr = &mlxsw_sp->router->vrs[rif->vr_id];

	mlxsw_sp_rif_counters_free(rif);
	for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
		mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
	ops->deconfigure(rif);
	if (fid)
		/* Loopback RIFs are not associated with a FID. */
		mlxsw_sp_fid_put(fid);
	mlxsw_sp->router->rifs[rif->rif_index] = NULL;
	dev_put(rif->dev);
	kfree(rif);
	vr->rif_count--;
	mlxsw_sp_vr_put(mlxsw_sp, vr);
}

void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
				 struct net_device *dev)
{
	struct mlxsw_sp_rif *rif;

	mutex_lock(&mlxsw_sp->router->lock);
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!rif)
		goto out;
	mlxsw_sp_rif_destroy(rif);
out:
	mutex_unlock(&mlxsw_sp->router->lock);
}

static void
mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
				 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;

	params->vid = mlxsw_sp_port_vlan->vid;
	params->lag = mlxsw_sp_port->lagged;
	if (params->lag)
		params->lag_id = mlxsw_sp_port->lag_id;
	else
		params->system_port = mlxsw_sp_port->local_port;
}

static struct mlxsw_sp_rif_subport *
mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
{
	return container_of(rif, struct mlxsw_sp_rif_subport, common);
}

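/* Sub-port RIFs are reference counted: every {port, VID} that joins
 * the same L3 netdev shares one RIF, created on first use and
 * destroyed when the last user drops its reference.
 */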
static struct mlxsw_sp_rif *
mlxsw_sp_rif_subport_get(struct mlxsw_sp *mlxsw_sp,
			 const struct mlxsw_sp_rif_params *params,
			 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_subport *rif_subport;
	struct mlxsw_sp_rif *rif;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, params->dev);
	if (!rif)
		return mlxsw_sp_rif_create(mlxsw_sp, params, extack);

	rif_subport = mlxsw_sp_rif_subport_rif(rif);
	refcount_inc(&rif_subport->ref_count);
	return rif;
}

static void mlxsw_sp_rif_subport_put(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_rif_subport *rif_subport;

	rif_subport = mlxsw_sp_rif_subport_rif(rif);
	if (!refcount_dec_and_test(&rif_subport->ref_count))
		return;

	mlxsw_sp_rif_destroy(rif);
}

static int
mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
			       struct net_device *l3_dev,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_rif_params params = {
		.dev = l3_dev,
	};
	u16 vid = mlxsw_sp_port_vlan->vid;
	struct mlxsw_sp_rif *rif;
	struct mlxsw_sp_fid *fid;
	int err;

	mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
	rif = mlxsw_sp_rif_subport_get(mlxsw_sp, &params, extack);
	if (IS_ERR(rif))
		return PTR_ERR(rif);

	/* FID was already created, just take a reference */
	fid = rif->ops->fid_get(rif, extack);
	err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
	if (err)
		goto err_fid_port_vid_map;

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
	if (err)
		goto err_port_vid_learning_set;

	err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
					BR_STATE_FORWARDING);
	if (err)
		goto err_port_vid_stp_set;

	mlxsw_sp_port_vlan->fid = fid;

	return 0;

err_port_vid_stp_set:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
err_port_vid_learning_set:
	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
err_fid_port_vid_map:
	mlxsw_sp_fid_put(fid);
	mlxsw_sp_rif_subport_put(rif);
	return err;
}

static void
__mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
	struct mlxsw_sp_rif *rif = mlxsw_sp_fid_rif(fid);
	u16 vid = mlxsw_sp_port_vlan->vid;

	if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_RFID))
		return;

	mlxsw_sp_port_vlan->fid = NULL;
	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_BLOCKING);
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
	mlxsw_sp_fid_put(fid);
	mlxsw_sp_rif_subport_put(rif);
}

void
mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port_vlan->mlxsw_sp_port->mlxsw_sp;

	mutex_lock(&mlxsw_sp->router->lock);
	__mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
	mutex_unlock(&mlxsw_sp->router->lock);
}

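/* Propagate an inetaddr event to the {Port, VID} the L3 device is on:
 * NETDEV_UP joins the port VLAN to the router, NETDEV_DOWN leaves it.
 */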
static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
					     struct net_device *port_dev,
					     unsigned long event, u16 vid,
					     struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_port_vlan))
		return -EINVAL;

	switch (event) {
	case NETDEV_UP:
		return mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
						      l3_dev, extack);
	case NETDEV_DOWN:
		__mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
		break;
	}

	return 0;
}

static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
					unsigned long event,
					struct netlink_ext_ack *extack)
{
	if (netif_is_bridge_port(port_dev) ||
	    netif_is_lag_port(port_dev) ||
	    netif_is_ovs_port(port_dev))
		return 0;

	return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event,
						 MLXSW_SP_DEFAULT_VID, extack);
}

static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
					 struct net_device *lag_dev,
					 unsigned long event, u16 vid,
					 struct netlink_ext_ack *extack)
{
	struct net_device *port_dev;
	struct list_head *iter;
	int err;

	netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
		if (mlxsw_sp_port_dev_check(port_dev)) {
			err = mlxsw_sp_inetaddr_port_vlan_event(l3_dev,
								port_dev,
								event, vid,
								extack);
			if (err)
				return err;
		}
	}

	return 0;
}

static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
				       unsigned long event,
				       struct netlink_ext_ack *extack)
{
	if (netif_is_bridge_port(lag_dev))
		return 0;

	return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event,
					     MLXSW_SP_DEFAULT_VID, extack);
}

static int mlxsw_sp_inetaddr_bridge_event(struct mlxsw_sp *mlxsw_sp,
					  struct net_device *l3_dev,
					  unsigned long event,
					  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_params params = {
		.dev = l3_dev,
	};
	struct mlxsw_sp_rif *rif;

	switch (event) {
	case NETDEV_UP:
		rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
		if (IS_ERR(rif))
			return PTR_ERR(rif);
		break;
	case NETDEV_DOWN:
		rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
		mlxsw_sp_rif_destroy(rif);
		break;
	}

	return 0;
}

static int mlxsw_sp_inetaddr_vlan_event(struct mlxsw_sp *mlxsw_sp,
					struct net_device *vlan_dev,
					unsigned long event,
					struct netlink_ext_ack *extack)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (netif_is_bridge_port(vlan_dev))
		return 0;

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev,
							 event, vid, extack);
	else if (netif_is_lag_master(real_dev))
		return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
						     vid, extack);
	else if (netif_is_bridge_master(real_dev) && br_vlan_enabled(real_dev))
		return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, vlan_dev, event,
						      extack);

	return 0;
}

static bool mlxsw_sp_rif_macvlan_is_vrrp4(const u8 *mac)
{
	u8 vrrp4[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x01, 0x00 };
	u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };

	return ether_addr_equal_masked(mac, vrrp4, mask);
}

static bool mlxsw_sp_rif_macvlan_is_vrrp6(const u8 *mac)
{
	u8 vrrp6[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x02, 0x00 };
	u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };

	return ether_addr_equal_masked(mac, vrrp6, mask);
}

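/* Set or clear the RIF's VRRP identifier, which is taken from the last
 * byte of the well-known VRRP MAC, so that the router also answers on
 * the virtual MAC. MACs that are not VRRP MACs are ignored.
 */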
static int mlxsw_sp_rif_vrrp_op(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
				const u8 *mac, bool adding)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	u8 vrrp_id = adding ? mac[5] : 0;
	int err;

	if (!mlxsw_sp_rif_macvlan_is_vrrp4(mac) &&
	    !mlxsw_sp_rif_macvlan_is_vrrp6(mac))
		return 0;

	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	if (mlxsw_sp_rif_macvlan_is_vrrp4(mac))
		mlxsw_reg_ritr_if_vrrp_id_ipv4_set(ritr_pl, vrrp_id);
	else
		mlxsw_reg_ritr_if_vrrp_id_ipv6_set(ritr_pl, vrrp_id);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

static int mlxsw_sp_rif_macvlan_add(struct mlxsw_sp *mlxsw_sp,
				    const struct net_device *macvlan_dev,
				    struct netlink_ext_ack *extack)
{
	struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
	struct mlxsw_sp_rif *rif;
	int err;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
	if (!rif) {
		NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
		return -EOPNOTSUPP;
	}

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
				  mlxsw_sp_fid_index(rif->fid), true);
	if (err)
		return err;

	err = mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index,
				   macvlan_dev->dev_addr, true);
	if (err)
		goto err_rif_vrrp_add;

	/* Make sure the bridge driver does not have this MAC pointing at
	 * some other port.
	 */
	if (rif->ops->fdb_del)
		rif->ops->fdb_del(rif, macvlan_dev->dev_addr);

	return 0;

err_rif_vrrp_add:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
			    mlxsw_sp_fid_index(rif->fid), false);
	return err;
}

static void __mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
				       const struct net_device *macvlan_dev)
{
	struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
	struct mlxsw_sp_rif *rif;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
	/* If we do not have a RIF, then we already took care of
	 * removing the macvlan's MAC during RIF deletion.
	 */
	if (!rif)
		return;

	mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index, macvlan_dev->dev_addr,
			     false);
	mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
			    mlxsw_sp_fid_index(rif->fid), false);
}

void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
			      const struct net_device *macvlan_dev)
{
	mutex_lock(&mlxsw_sp->router->lock);
	__mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
	mutex_unlock(&mlxsw_sp->router->lock);
}

static int mlxsw_sp_inetaddr_macvlan_event(struct mlxsw_sp *mlxsw_sp,
					   struct net_device *macvlan_dev,
					   unsigned long event,
					   struct netlink_ext_ack *extack)
{
	switch (event) {
	case NETDEV_UP:
		return mlxsw_sp_rif_macvlan_add(mlxsw_sp, macvlan_dev, extack);
	case NETDEV_DOWN:
		__mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
		break;
	}

	return 0;
}

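/* The device requires all router interface MAC addresses to share the
 * same prefix, so compare the candidate address against every existing
 * RIF under the device's MAC mask and veto mismatches.
 */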
static int mlxsw_sp_router_port_check_rif_addr(struct mlxsw_sp *mlxsw_sp,
					       struct net_device *dev,
					       const unsigned char *dev_addr,
					       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif *rif;
	int i;

	/* A RIF is not created for macvlan netdevs. Their MAC is used to
	 * populate the FDB
	 */
	if (netif_is_macvlan(dev) || netif_is_l3_master(dev))
		return 0;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
		rif = mlxsw_sp->router->rifs[i];
		if (rif && rif->ops &&
		    rif->ops->type == MLXSW_SP_RIF_TYPE_IPIP_LB)
			continue;
		if (rif && rif->dev && rif->dev != dev &&
		    !ether_addr_equal_masked(rif->dev->dev_addr, dev_addr,
					     mlxsw_sp->mac_mask)) {
		    !ether_addr_equal_masked(rif->dev->dev_addr, dev_addr,
					     mlxsw_sp->mac_mask)) {
			NL_SET_ERR_MSG_MOD(extack, "All router interface MAC addresses must have the same prefix");
			return -EINVAL;
		}
	}

	return 0;
}

static int __mlxsw_sp_inetaddr_event(struct mlxsw_sp *mlxsw_sp,
				     struct net_device *dev,
				     unsigned long event,
				     struct netlink_ext_ack *extack)
{
	if (mlxsw_sp_port_dev_check(dev))
		return mlxsw_sp_inetaddr_port_event(dev, event, extack);
	else if (netif_is_lag_master(dev))
		return mlxsw_sp_inetaddr_lag_event(dev, event, extack);
	else if (netif_is_bridge_master(dev))
		return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, dev, event,
						      extack);
	else if (is_vlan_dev(dev))
		return mlxsw_sp_inetaddr_vlan_event(mlxsw_sp, dev, event,
						    extack);
	else if (netif_is_macvlan(dev))
		return mlxsw_sp_inetaddr_macvlan_event(mlxsw_sp, dev, event,
						       extack);
	else
		return 0;
}

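/* Handles address deletion only. Address addition goes through the
 * validator notifier below, where the operation can still be vetoed
 * with an extack message.
 */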
static int mlxsw_sp_inetaddr_event(struct notifier_block *nb,
				   unsigned long event, void *ptr)
{
	struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
	struct net_device *dev = ifa->ifa_dev->dev;
	struct mlxsw_sp_router *router;
	struct mlxsw_sp_rif *rif;
	int err = 0;

	/* NETDEV_UP event is handled by mlxsw_sp_inetaddr_valid_event */
	if (event == NETDEV_UP)
		return NOTIFY_DONE;

	router = container_of(nb, struct mlxsw_sp_router, inetaddr_nb);
	mutex_lock(&router->lock);
	rif = mlxsw_sp_rif_find_by_dev(router->mlxsw_sp, dev);
	if (!mlxsw_sp_rif_should_config(rif, dev, event))
		goto out;

	err = __mlxsw_sp_inetaddr_event(router->mlxsw_sp, dev, event, NULL);
out:
	mutex_unlock(&router->lock);
	return notifier_from_errno(err);
}

int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
				  unsigned long event, void *ptr)
{
	struct in_validator_info *ivi = (struct in_validator_info *) ptr;
	struct net_device *dev = ivi->ivi_dev->dev;
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *rif;
	int err = 0;

	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		return NOTIFY_DONE;

	mutex_lock(&mlxsw_sp->router->lock);
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!mlxsw_sp_rif_should_config(rif, dev, event))
		goto out;

	err = mlxsw_sp_router_port_check_rif_addr(mlxsw_sp, dev, dev->dev_addr,
						  ivi->extack);
	if (err)
		goto out;

	err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, ivi->extack);
out:
	mutex_unlock(&mlxsw_sp->router->lock);
	return notifier_from_errno(err);
}

struct mlxsw_sp_inet6addr_event_work {
	struct work_struct work;
	struct mlxsw_sp *mlxsw_sp;
	struct net_device *dev;
	unsigned long event;
};

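/* The inet6addr notifier is atomic, so the event is processed from a
 * work item where RTNL and the router lock can be taken.
 */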
static void mlxsw_sp_inet6addr_event_work(struct work_struct *work)
{
	struct mlxsw_sp_inet6addr_event_work *inet6addr_work =
		container_of(work, struct mlxsw_sp_inet6addr_event_work, work);
	struct mlxsw_sp *mlxsw_sp = inet6addr_work->mlxsw_sp;
	struct net_device *dev = inet6addr_work->dev;
	unsigned long event = inet6addr_work->event;
	struct mlxsw_sp_rif *rif;

	rtnl_lock();
	mutex_lock(&mlxsw_sp->router->lock);

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!mlxsw_sp_rif_should_config(rif, dev, event))
		goto out;

	__mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, NULL);
out:
	mutex_unlock(&mlxsw_sp->router->lock);
	rtnl_unlock();
	dev_put(dev);
	kfree(inet6addr_work);
}

/* Called with rcu_read_lock() */
static int mlxsw_sp_inet6addr_event(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	struct inet6_ifaddr *if6 = (struct inet6_ifaddr *) ptr;
	struct mlxsw_sp_inet6addr_event_work *inet6addr_work;
	struct net_device *dev = if6->idev->dev;
	struct mlxsw_sp_router *router;

	/* NETDEV_UP event is handled by mlxsw_sp_inet6addr_valid_event */
	if (event == NETDEV_UP)
		return NOTIFY_DONE;

	inet6addr_work = kzalloc(sizeof(*inet6addr_work), GFP_ATOMIC);
	if (!inet6addr_work)
		return NOTIFY_BAD;

	router = container_of(nb, struct mlxsw_sp_router, inet6addr_nb);
	INIT_WORK(&inet6addr_work->work, mlxsw_sp_inet6addr_event_work);
	inet6addr_work->mlxsw_sp = router->mlxsw_sp;
	inet6addr_work->dev = dev;
	inet6addr_work->event = event;
	dev_hold(dev);
	mlxsw_core_schedule_work(&inet6addr_work->work);

	return NOTIFY_DONE;
}

int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
				   unsigned long event, void *ptr)
{
	struct in6_validator_info *i6vi = (struct in6_validator_info *) ptr;
	struct net_device *dev = i6vi->i6vi_dev->dev;
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *rif;
	int err = 0;

	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		return NOTIFY_DONE;

	mutex_lock(&mlxsw_sp->router->lock);
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!mlxsw_sp_rif_should_config(rif, dev, event))
		goto out;

	err = mlxsw_sp_router_port_check_rif_addr(mlxsw_sp, dev, dev->dev_addr,
						  i6vi->extack);
	if (err)
		goto out;

	err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, i6vi->extack);
out:
	mutex_unlock(&mlxsw_sp->router->lock);
	return notifier_from_errno(err);
}

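/* Edit the hardware RIF with a new MAC and MTU. RITR is first queried
 * so that all other fields of the register are preserved.
 */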
static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
			     const char *mac, int mtu)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	int err;

	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
	mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
	mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

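/* Handle a change of the netdev's MAC or MTU: re-point the FDB entry
 * at the new MAC, edit the RIF in hardware and propagate a changed MTU
 * to the multicast routing tables.
 */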
static int
mlxsw_sp_router_port_change_event(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_rif *rif)
{
	struct net_device *dev = rif->dev;
	u16 fid_index;
	int err;

	fid_index = mlxsw_sp_fid_index(rif->fid);

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, false);
	if (err)
		return err;

	err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
				dev->mtu);
	if (err)
		goto err_rif_edit;

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, fid_index, true);
	if (err)
		goto err_rif_fdb_op;

	if (rif->mtu != dev->mtu) {
		struct mlxsw_sp_vr *vr;
		int i;

		/* The RIF is relevant only to its mr_table instance, as unlike
		 * unicast routing, in multicast routing a RIF cannot be shared
		 * between several multicast routing tables.
		 */
		vr = &mlxsw_sp->router->vrs[rif->vr_id];
		for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
			mlxsw_sp_mr_rif_mtu_update(vr->mr_table[i],
						   rif, dev->mtu);
	}

	ether_addr_copy(rif->addr, dev->dev_addr);
	rif->mtu = dev->mtu;

	netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);

	return 0;

err_rif_fdb_op:
	mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu);
err_rif_edit:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, true);
	return err;
}

static int mlxsw_sp_router_port_pre_changeaddr_event(struct mlxsw_sp_rif *rif,
			    struct netdev_notifier_pre_changeaddr_info *info)
{
	struct netlink_ext_ack *extack;

	extack = netdev_notifier_info_to_extack(&info->info);
	return mlxsw_sp_router_port_check_rif_addr(rif->mlxsw_sp, rif->dev,
						   info->dev_addr, extack);
}

int mlxsw_sp_netdevice_router_port_event(struct net_device *dev,
					 unsigned long event, void *ptr)
{
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *rif;
	int err = 0;

	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		return 0;

	mutex_lock(&mlxsw_sp->router->lock);
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!rif)
		goto out;

	switch (event) {
	case NETDEV_CHANGEMTU:
	case NETDEV_CHANGEADDR:
		err = mlxsw_sp_router_port_change_event(mlxsw_sp, rif);
		break;
	case NETDEV_PRE_CHANGEADDR:
		err = mlxsw_sp_router_port_pre_changeaddr_event(rif, ptr);
		break;
	}

out:
	mutex_unlock(&mlxsw_sp->router->lock);
	return err;
}

static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *l3_dev,
				  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif *rif;

	/* If netdev is already associated with a RIF, then we need to
	 * destroy it and create a new one with the new virtual router ID.
	 */
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
	if (rif)
		__mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN,
					  extack);

	return __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_UP, extack);
}

static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
				    struct net_device *l3_dev)
{
	struct mlxsw_sp_rif *rif;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
	if (!rif)
		return;
	__mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN, NULL);
}

int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
				 struct netdev_notifier_changeupper_info *info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
	int err = 0;

	/* We do not create a RIF for a macvlan, but only use it to
	 * direct more MAC addresses to the router.
	 */
	if (!mlxsw_sp || netif_is_macvlan(l3_dev))
		return 0;

	mutex_lock(&mlxsw_sp->router->lock);
	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		break;
	case NETDEV_CHANGEUPPER:
		if (info->linking) {
			struct netlink_ext_ack *extack;

			extack = netdev_notifier_info_to_extack(&info->info);
			err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev, extack);
		} else {
			mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
		}
		break;
	}
	mutex_unlock(&mlxsw_sp->router->lock);

	return err;
}

static int __mlxsw_sp_rif_macvlan_flush(struct net_device *dev,
					struct netdev_nested_priv *priv)
{
	struct mlxsw_sp_rif *rif = (struct mlxsw_sp_rif *)priv->data;

	if (!netif_is_macvlan(dev))
		return 0;

	return mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
				   mlxsw_sp_fid_index(rif->fid), false);
}

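/* Remove the MACs of all macvlan uppers from the FDB before their
 * lower's RIF is destroyed.
 */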
static int mlxsw_sp_rif_macvlan_flush(struct mlxsw_sp_rif *rif)
{
	struct netdev_nested_priv priv = {
		.data = (void *)rif,
	};

	if (!netif_is_macvlan_port(rif->dev))
		return 0;

	netdev_warn(rif->dev, "Router interface is deleted. Upper macvlans will not work\n");
	return netdev_walk_all_upper_dev_rcu(rif->dev,
					     __mlxsw_sp_rif_macvlan_flush, &priv);
}

static void mlxsw_sp_rif_subport_setup(struct mlxsw_sp_rif *rif,
				       const struct mlxsw_sp_rif_params *params)
{
	struct mlxsw_sp_rif_subport *rif_subport;

	rif_subport = mlxsw_sp_rif_subport_rif(rif);
	refcount_set(&rif_subport->ref_count, 1);
	rif_subport->vid = params->vid;
	rif_subport->lag = params->lag;
	if (params->lag)
		rif_subport->lag_id = params->lag_id;
	else
		rif_subport->system_port = params->system_port;
}

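/* Write the sub-port RIF to the device via RITR, keyed by
 * {LAG / system port, VID}.
 */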
static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_rif_subport *rif_subport;
	char ritr_pl[MLXSW_REG_RITR_LEN];

	rif_subport = mlxsw_sp_rif_subport_rif(rif);
	mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
			    rif->rif_index, rif->vr_id, rif->dev->mtu);
	mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
	mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
				  rif_subport->lag ? rif_subport->lag_id :
						     rif_subport->system_port,
				  rif_subport->vid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif)
{
	int err;

	err = mlxsw_sp_rif_subport_op(rif, true);
	if (err)
		return err;

	err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
				  mlxsw_sp_fid_index(rif->fid), true);
	if (err)
		goto err_rif_fdb_op;

	mlxsw_sp_fid_rif_set(rif->fid, rif);
	return 0;

err_rif_fdb_op:
	mlxsw_sp_rif_subport_op(rif, false);
	return err;
}

static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_fid *fid = rif->fid;

	mlxsw_sp_fid_rif_set(fid, NULL);
	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
			    mlxsw_sp_fid_index(fid), false);
	mlxsw_sp_rif_macvlan_flush(rif);
	mlxsw_sp_rif_subport_op(rif, false);
}

static struct mlxsw_sp_fid *
mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif,
			     struct netlink_ext_ack *extack)
{
	return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = {
	.type			= MLXSW_SP_RIF_TYPE_SUBPORT,
	.rif_size		= sizeof(struct mlxsw_sp_rif_subport),
	.setup			= mlxsw_sp_rif_subport_setup,
	.configure		= mlxsw_sp_rif_subport_configure,
	.deconfigure		= mlxsw_sp_rif_subport_deconfigure,
	.fid_get		= mlxsw_sp_rif_subport_fid_get,
};

static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
				    enum mlxsw_reg_ritr_if_type type,
				    u16 vid_fid, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	char ritr_pl[MLXSW_REG_RITR_LEN];

	mlxsw_reg_ritr_pack(ritr_pl, enable, type, rif->rif_index, rif->vr_id,
			    rif->dev->mtu);
	mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
	mlxsw_reg_ritr_fid_set(ritr_pl, type, vid_fid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

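/* The router port is a virtual port one past the device's last port.
 * FID flood entries directed at it deliver packets to the router.
 */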
u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
}

static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	u16 fid_index = mlxsw_sp_fid_index(rif->fid);
	int err;

	err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index,
				       true);
	if (err)
		return err;

	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
				     mlxsw_sp_router_port(mlxsw_sp), true);
	if (err)
		goto err_fid_mc_flood_set;

	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
				     mlxsw_sp_router_port(mlxsw_sp), true);
	if (err)
		goto err_fid_bc_flood_set;

	err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
				  mlxsw_sp_fid_index(rif->fid), true);
	if (err)
		goto err_rif_fdb_op;

	mlxsw_sp_fid_rif_set(rif->fid, rif);
	return 0;

err_rif_fdb_op:
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_bc_flood_set:
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_mc_flood_set:
	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
	return err;
}

static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
{
	u16 fid_index = mlxsw_sp_fid_index(rif->fid);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_fid *fid = rif->fid;

	mlxsw_sp_fid_rif_set(fid, NULL);
	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
			    mlxsw_sp_fid_index(fid), false);
	mlxsw_sp_rif_macvlan_flush(rif);
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
}

static struct mlxsw_sp_fid *
mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif,
			 struct netlink_ext_ack *extack)
{
	return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex);
}

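/* Tell the bridge driver to forget an FDB entry for this MAC, so it
 * does not keep pointing at some port after the MAC was migrated to
 * the router.
 */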
static void mlxsw_sp_rif_fid_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
{
	struct switchdev_notifier_fdb_info info;
	struct net_device *dev;

	dev = br_fdb_find_port(rif->dev, mac, 0);
	if (!dev)
		return;

	info.addr = mac;
	info.vid = 0;
	call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info,
				 NULL);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
	.type			= MLXSW_SP_RIF_TYPE_FID,
	.rif_size		= sizeof(struct mlxsw_sp_rif),
	.configure		= mlxsw_sp_rif_fid_configure,
	.deconfigure		= mlxsw_sp_rif_fid_deconfigure,
	.fid_get		= mlxsw_sp_rif_fid_fid_get,
	.fdb_del		= mlxsw_sp_rif_fid_fdb_del,
};

static struct mlxsw_sp_fid *
mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif,
			  struct netlink_ext_ack *extack)
{
	struct net_device *br_dev;
	u16 vid;
	int err;

	if (is_vlan_dev(rif->dev)) {
		vid = vlan_dev_vlan_id(rif->dev);
		br_dev = vlan_dev_real_dev(rif->dev);
		if (WARN_ON(!netif_is_bridge_master(br_dev)))
			return ERR_PTR(-EINVAL);
	} else {
		err = br_vlan_get_pvid(rif->dev, &vid);
		if (err < 0 || !vid) {
			NL_SET_ERR_MSG_MOD(extack, "Couldn't determine bridge PVID");
			return ERR_PTR(-EINVAL);
		}
	}

	return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid);
}

static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
{
	u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
	struct switchdev_notifier_fdb_info info;
	struct net_device *br_dev;
	struct net_device *dev;

	br_dev = is_vlan_dev(rif->dev) ? vlan_dev_real_dev(rif->dev) : rif->dev;
	dev = br_fdb_find_port(br_dev, mac, vid);
	if (!dev)
		return;

	info.addr = mac;
	info.vid = vid;
	call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info,
				 NULL);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_emu_ops = {
	.type			= MLXSW_SP_RIF_TYPE_VLAN,
	.rif_size		= sizeof(struct mlxsw_sp_rif),
	.configure		= mlxsw_sp_rif_fid_configure,
	.deconfigure		= mlxsw_sp_rif_fid_deconfigure,
	.fid_get		= mlxsw_sp_rif_vlan_fid_get,
	.fdb_del		= mlxsw_sp_rif_vlan_fdb_del,
};

static struct mlxsw_sp_rif_ipip_lb *
mlxsw_sp_rif_ipip_lb_rif(struct mlxsw_sp_rif *rif)
{
	return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
}

static void
mlxsw_sp_rif_ipip_lb_setup(struct mlxsw_sp_rif *rif,
			   const struct mlxsw_sp_rif_params *params)
{
	struct mlxsw_sp_rif_params_ipip_lb *params_lb;
	struct mlxsw_sp_rif_ipip_lb *rif_lb;

	params_lb = container_of(params, struct mlxsw_sp_rif_params_ipip_lb,
				 common);
	rif_lb = mlxsw_sp_rif_ipip_lb_rif(rif);
	rif_lb->lb_config = params_lb->lb_config;
}

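/* On Spectrum-1 the underlay lookup of an IP-in-IP loopback is bound to
 * a virtual router, so take a reference on the underlay VR and program
 * the loopback against it.
 */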
static int
mlxsw_sp1_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_vr *ul_vr;
	int err;

	ul_vr = mlxsw_sp_vr_get(mlxsw_sp, ul_tb_id, NULL);
	if (IS_ERR(ul_vr))
		return PTR_ERR(ul_vr);

	err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, true);
	if (err)
		goto err_loopback_op;

	lb_rif->ul_vr_id = ul_vr->id;
	lb_rif->ul_rif_id = 0;
	++ul_vr->rif_count;
	return 0;

err_loopback_op:
	mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
	return err;
}

static void mlxsw_sp1_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_vr *ul_vr;

	ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id];
	mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, false);

	--ul_vr->rif_count;
	mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp1_rif_ipip_lb_ops = {
	.type			= MLXSW_SP_RIF_TYPE_IPIP_LB,
	.rif_size		= sizeof(struct mlxsw_sp_rif_ipip_lb),
	.setup                  = mlxsw_sp_rif_ipip_lb_setup,
	.configure		= mlxsw_sp1_rif_ipip_lb_configure,
	.deconfigure		= mlxsw_sp1_rif_ipip_lb_deconfigure,
};

const struct mlxsw_sp_rif_ops *mlxsw_sp1_rif_ops_arr[] = {
	[MLXSW_SP_RIF_TYPE_SUBPORT]	= &mlxsw_sp_rif_subport_ops,
	[MLXSW_SP_RIF_TYPE_VLAN]	= &mlxsw_sp_rif_vlan_emu_ops,
	[MLXSW_SP_RIF_TYPE_FID]		= &mlxsw_sp_rif_fid_ops,
	[MLXSW_SP_RIF_TYPE_IPIP_LB]	= &mlxsw_sp1_rif_ipip_lb_ops,
};

static int
mlxsw_sp_rif_ipip_lb_ul_rif_op(struct mlxsw_sp_rif *ul_rif, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
	char ritr_pl[MLXSW_REG_RITR_LEN];

	mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
			    ul_rif->rif_index, ul_rif->vr_id, IP_MAX_MTU);
	mlxsw_reg_ritr_loopback_protocol_set(ritr_pl,
					     MLXSW_REG_RITR_LOOPBACK_GENERIC);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

static struct mlxsw_sp_rif *
mlxsw_sp_ul_rif_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
		       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif *ul_rif;
	u16 rif_index;
	int err;

	err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
		return ERR_PTR(err);
	}

	ul_rif = mlxsw_sp_rif_alloc(sizeof(*ul_rif), rif_index, vr->id, NULL);
	if (!ul_rif)
		return ERR_PTR(-ENOMEM);

	mlxsw_sp->router->rifs[rif_index] = ul_rif;
	ul_rif->mlxsw_sp = mlxsw_sp;
	err = mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, true);
	if (err)
		goto ul_rif_op_err;

	return ul_rif;

ul_rif_op_err:
	mlxsw_sp->router->rifs[rif_index] = NULL;
	kfree(ul_rif);
	return ERR_PTR(err);
}

static void mlxsw_sp_ul_rif_destroy(struct mlxsw_sp_rif *ul_rif)
{
	struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;

	mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, false);
	mlxsw_sp->router->rifs[ul_rif->rif_index] = NULL;
	kfree(ul_rif);
}

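/* Get a reference on the underlay RIF of the given table. There is at
 * most one such RIF per virtual router; it is created on first use and
 * destroyed when the last reference is put.
 */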
static struct mlxsw_sp_rif *
mlxsw_sp_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
		    struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, extack);
	if (IS_ERR(vr))
		return ERR_CAST(vr);

	if (refcount_inc_not_zero(&vr->ul_rif_refcnt))
		return vr->ul_rif;

	vr->ul_rif = mlxsw_sp_ul_rif_create(mlxsw_sp, vr, extack);
	if (IS_ERR(vr->ul_rif)) {
		err = PTR_ERR(vr->ul_rif);
		goto err_ul_rif_create;
	}

	vr->rif_count++;
	refcount_set(&vr->ul_rif_refcnt, 1);

	return vr->ul_rif;

err_ul_rif_create:
	mlxsw_sp_vr_put(mlxsw_sp, vr);
	return ERR_PTR(err);
}

static void mlxsw_sp_ul_rif_put(struct mlxsw_sp_rif *ul_rif)
{
	struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
	struct mlxsw_sp_vr *vr;

	vr = &mlxsw_sp->router->vrs[ul_rif->vr_id];

	if (!refcount_dec_and_test(&vr->ul_rif_refcnt))
		return;

	vr->rif_count--;
	mlxsw_sp_ul_rif_destroy(ul_rif);
	mlxsw_sp_vr_put(mlxsw_sp, vr);
}

int mlxsw_sp_router_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
			       u16 *ul_rif_index)
{
	struct mlxsw_sp_rif *ul_rif;
	int err = 0;

	mutex_lock(&mlxsw_sp->router->lock);
	ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL);
	if (IS_ERR(ul_rif)) {
		err = PTR_ERR(ul_rif);
		goto out;
	}
	*ul_rif_index = ul_rif->rif_index;
out:
	mutex_unlock(&mlxsw_sp->router->lock);
	return err;
}

void mlxsw_sp_router_ul_rif_put(struct mlxsw_sp *mlxsw_sp, u16 ul_rif_index)
{
	struct mlxsw_sp_rif *ul_rif;

	mutex_lock(&mlxsw_sp->router->lock);
	ul_rif = mlxsw_sp->router->rifs[ul_rif_index];
	if (WARN_ON(!ul_rif))
		goto out;

	mlxsw_sp_ul_rif_put(ul_rif);
out:
	mutex_unlock(&mlxsw_sp->router->lock);
}

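/* On Spectrum-2 the underlay lookup of an IP-in-IP loopback is bound to
 * an underlay RIF instead of a virtual router, so take a reference on
 * the shared underlay RIF and program the loopback against it.
 */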
static int
mlxsw_sp2_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_rif *ul_rif;
	int err;

	ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL);
	if (IS_ERR(ul_rif))
		return PTR_ERR(ul_rif);

	err = mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, ul_rif->rif_index, true);
	if (err)
		goto err_loopback_op;

	lb_rif->ul_vr_id = 0;
	lb_rif->ul_rif_id = ul_rif->rif_index;

	return 0;

err_loopback_op:
	mlxsw_sp_ul_rif_put(ul_rif);
	return err;
}

static void mlxsw_sp2_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_rif *ul_rif;

	ul_rif = mlxsw_sp_rif_by_index(mlxsw_sp, lb_rif->ul_rif_id);
	mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, lb_rif->ul_rif_id, false);
	mlxsw_sp_ul_rif_put(ul_rif);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp2_rif_ipip_lb_ops = {
	.type			= MLXSW_SP_RIF_TYPE_IPIP_LB,
	.rif_size		= sizeof(struct mlxsw_sp_rif_ipip_lb),
	.setup                  = mlxsw_sp_rif_ipip_lb_setup,
	.configure		= mlxsw_sp2_rif_ipip_lb_configure,
	.deconfigure		= mlxsw_sp2_rif_ipip_lb_deconfigure,
};

const struct mlxsw_sp_rif_ops *mlxsw_sp2_rif_ops_arr[] = {
	[MLXSW_SP_RIF_TYPE_SUBPORT]	= &mlxsw_sp_rif_subport_ops,
	[MLXSW_SP_RIF_TYPE_VLAN]	= &mlxsw_sp_rif_vlan_emu_ops,
	[MLXSW_SP_RIF_TYPE_FID]		= &mlxsw_sp_rif_fid_ops,
	[MLXSW_SP_RIF_TYPE_IPIP_LB]	= &mlxsw_sp2_rif_ipip_lb_ops,
};

static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
{
	u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);

	mlxsw_sp->router->rifs = kcalloc(max_rifs,
					 sizeof(struct mlxsw_sp_rif *),
					 GFP_KERNEL);
	if (!mlxsw_sp->router->rifs)
		return -ENOMEM;

	return 0;
}

static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
		WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);

	kfree(mlxsw_sp->router->rifs);
}

static int
mlxsw_sp_ipip_config_tigcr(struct mlxsw_sp *mlxsw_sp)
{
	char tigcr_pl[MLXSW_REG_TIGCR_LEN];

	mlxsw_reg_tigcr_pack(tigcr_pl, true, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tigcr), tigcr_pl);
}

static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	mlxsw_sp->router->ipip_ops_arr = mlxsw_sp_ipip_ops_arr;
	INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list);

	err = mlxsw_sp_ipip_ecn_encap_init(mlxsw_sp);
	if (err)
		return err;
	err = mlxsw_sp_ipip_ecn_decap_init(mlxsw_sp);
	if (err)
		return err;

	return mlxsw_sp_ipip_config_tigcr(mlxsw_sp);
}

static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp)
{
	WARN_ON(!list_empty(&mlxsw_sp->router->ipip_list));
}

static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
{
	struct mlxsw_sp_router *router;

	/* Flush pending FIB notifications and then flush the device's
	 * table before requesting another dump. The FIB notification
	 * block is unregistered, so no need to take RTNL.
	 */
	mlxsw_core_flush_owq();
	router = container_of(nb, struct mlxsw_sp_router, fib_nb);
	mlxsw_sp_router_fib_flush(router->mlxsw_sp);
}

#ifdef CONFIG_IP_ROUTE_MULTIPATH
static void mlxsw_sp_mp_hash_header_set(char *recr2_pl, int header)
{
	mlxsw_reg_recr2_outer_header_enables_set(recr2_pl, header, true);
}

static void mlxsw_sp_mp_hash_field_set(char *recr2_pl, int field)
{
	mlxsw_reg_recr2_outer_header_fields_enable_set(recr2_pl, field, true);
}

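/* Pack the IPv4 ECMP hash configuration into RECR2. With an L3-only
 * multipath hash policy, hash on SIP / DIP alone; otherwise also mix in
 * the IP protocol and the TCP / UDP ports.
 */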
static void mlxsw_sp_mp4_hash_init(struct mlxsw_sp *mlxsw_sp, char *recr2_pl)
{
	struct net *net = mlxsw_sp_net(mlxsw_sp);
	bool only_l3 = !net->ipv4.sysctl_fib_multipath_hash_policy;

	mlxsw_sp_mp_hash_header_set(recr2_pl,
				    MLXSW_REG_RECR2_IPV4_EN_NOT_TCP_NOT_UDP);
	mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV4_EN_TCP_UDP);
	mlxsw_reg_recr2_ipv4_sip_enable(recr2_pl);
	mlxsw_reg_recr2_ipv4_dip_enable(recr2_pl);
	if (only_l3)
		return;
	mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_EN_IPV4);
	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV4_PROTOCOL);
	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_SPORT);
	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_DPORT);
}

static void mlxsw_sp_mp6_hash_init(struct mlxsw_sp *mlxsw_sp, char *recr2_pl)
{
	bool only_l3 = !ip6_multipath_hash_policy(mlxsw_sp_net(mlxsw_sp));

	mlxsw_sp_mp_hash_header_set(recr2_pl,
				    MLXSW_REG_RECR2_IPV6_EN_NOT_TCP_NOT_UDP);
	mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV6_EN_TCP_UDP);
	mlxsw_reg_recr2_ipv6_sip_enable(recr2_pl);
	mlxsw_reg_recr2_ipv6_dip_enable(recr2_pl);
	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV6_NEXT_HEADER);
	if (only_l3) {
		mlxsw_sp_mp_hash_field_set(recr2_pl,
					   MLXSW_REG_RECR2_IPV6_FLOW_LABEL);
	} else {
		mlxsw_sp_mp_hash_header_set(recr2_pl,
					    MLXSW_REG_RECR2_TCP_UDP_EN_IPV6);
		mlxsw_sp_mp_hash_field_set(recr2_pl,
					   MLXSW_REG_RECR2_TCP_UDP_SPORT);
		mlxsw_sp_mp_hash_field_set(recr2_pl,
					   MLXSW_REG_RECR2_TCP_UDP_DPORT);
	}
}

static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
{
	char recr2_pl[MLXSW_REG_RECR2_LEN];
	u32 seed;

	seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 0);
	mlxsw_reg_recr2_pack(recr2_pl, seed);
	mlxsw_sp_mp4_hash_init(mlxsw_sp, recr2_pl);
	mlxsw_sp_mp6_hash_init(mlxsw_sp, recr2_pl);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(recr2), recr2_pl);
}
#else
static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
{
	return 0;
}
#endif

static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp)
{
	char rdpm_pl[MLXSW_REG_RDPM_LEN];
	unsigned int i;

	MLXSW_REG_ZERO(rdpm, rdpm_pl);

	/* HW is determining switch priority based on DSCP-bits, but the
	 * kernel is still doing that based on the ToS. Since there's a
	 * mismatch in bits we need to make sure to translate the right
	 * value ToS would observe, skipping the 2 least-significant ECN bits.
	 */
	for (i = 0; i < MLXSW_REG_RDPM_DSCP_ENTRY_REC_MAX_COUNT; i++)
		mlxsw_reg_rdpm_pack(rdpm_pl, i, rt_tos2priority(i << 2));

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rdpm), rdpm_pl);
}

static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
	struct net *net = mlxsw_sp_net(mlxsw_sp);
	bool usp = net->ipv4.sysctl_ip_fwd_update_priority;
	char rgcr_pl[MLXSW_REG_RGCR_LEN];
	u64 max_rifs;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
		return -EIO;
	max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);

	mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
	mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
	mlxsw_reg_rgcr_usp_set(rgcr_pl, usp);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
}

static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	char rgcr_pl[MLXSW_REG_RGCR_LEN];

	mlxsw_reg_rgcr_pack(rgcr_pl, false, false);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
}

static const struct mlxsw_sp_router_ll_ops mlxsw_sp_router_ll_basic_ops = {
	.ralta_write = mlxsw_sp_router_ll_basic_ralta_write,
	.ralst_write = mlxsw_sp_router_ll_basic_ralst_write,
	.raltb_write = mlxsw_sp_router_ll_basic_raltb_write,
	.fib_entry_op_ctx_size = sizeof(struct mlxsw_sp_fib_entry_op_ctx_basic),
	.fib_entry_pack = mlxsw_sp_router_ll_basic_fib_entry_pack,
	.fib_entry_act_remote_pack = mlxsw_sp_router_ll_basic_fib_entry_act_remote_pack,
	.fib_entry_act_local_pack = mlxsw_sp_router_ll_basic_fib_entry_act_local_pack,
	.fib_entry_act_ip2me_pack = mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_pack,
	.fib_entry_act_ip2me_tun_pack = mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_tun_pack,
	.fib_entry_commit = mlxsw_sp_router_ll_basic_fib_entry_commit,
	.fib_entry_is_committed = mlxsw_sp_router_ll_basic_fib_entry_is_committed,
};

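/* Allocate a single operation context that is large enough for the
 * biggest fib_entry_op_ctx among the registered low-level ops.
 */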
static int mlxsw_sp_router_ll_op_ctx_init(struct mlxsw_sp_router *router)
{
	size_t max_size = 0;
	int i;

	for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) {
		size_t size = router->proto_ll_ops[i]->fib_entry_op_ctx_size;

		if (size > max_size)
			max_size = size;
	}
	router->ll_op_ctx = kzalloc(sizeof(*router->ll_op_ctx) + max_size,
				    GFP_KERNEL);
	if (!router->ll_op_ctx)
		return -ENOMEM;
	INIT_LIST_HEAD(&router->ll_op_ctx->fib_entry_priv_list);
	return 0;
}

static void mlxsw_sp_router_ll_op_ctx_fini(struct mlxsw_sp_router *router)
{
	WARN_ON(!list_empty(&router->ll_op_ctx->fib_entry_priv_list));
	kfree(router->ll_op_ctx);
}

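/* Initialization order: core structures first, then the various router
 * tables, with the notifiers registered last, once the router is able
 * to service events.
 */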
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
			 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_router *router;
	int err;

	router = kzalloc(sizeof(*mlxsw_sp->router), GFP_KERNEL);
	if (!router)
		return -ENOMEM;
	mutex_init(&router->lock);
	mlxsw_sp->router = router;
	router->mlxsw_sp = mlxsw_sp;

	router->proto_ll_ops[MLXSW_SP_L3_PROTO_IPV4] = &mlxsw_sp_router_ll_basic_ops;
	router->proto_ll_ops[MLXSW_SP_L3_PROTO_IPV6] = &mlxsw_sp_router_ll_basic_ops;

	err = mlxsw_sp_router_ll_op_ctx_init(router);
	if (err)
		goto err_ll_op_ctx_init;

	INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
	err = __mlxsw_sp_router_init(mlxsw_sp);
	if (err)
		goto err_router_init;

	err = mlxsw_sp_rifs_init(mlxsw_sp);
	if (err)
		goto err_rifs_init;

	err = mlxsw_sp_ipips_init(mlxsw_sp);
	if (err)
		goto err_ipips_init;

	err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
			      &mlxsw_sp_nexthop_ht_params);
	if (err)
		goto err_nexthop_ht_init;

	err = rhashtable_init(&mlxsw_sp->router->nexthop_group_ht,
			      &mlxsw_sp_nexthop_group_ht_params);
	if (err)
		goto err_nexthop_group_ht_init;

	INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_list);
	err = mlxsw_sp_lpm_init(mlxsw_sp);
	if (err)
		goto err_lpm_init;

	err = mlxsw_sp_mr_init(mlxsw_sp, &mlxsw_sp_mr_tcam_ops);
	if (err)
		goto err_mr_init;

	err = mlxsw_sp_vrs_init(mlxsw_sp);
	if (err)
		goto err_vrs_init;

	err = mlxsw_sp_neigh_init(mlxsw_sp);
	if (err)
		goto err_neigh_init;

	err = mlxsw_sp_mp_hash_init(mlxsw_sp);
	if (err)
		goto err_mp_hash_init;

	err = mlxsw_sp_dscp_init(mlxsw_sp);
	if (err)
		goto err_dscp_init;

	INIT_WORK(&router->fib_event_work, mlxsw_sp_router_fib_event_work);
	INIT_LIST_HEAD(&router->fib_event_queue);
	spin_lock_init(&router->fib_event_queue_lock);

	router->inetaddr_nb.notifier_call = mlxsw_sp_inetaddr_event;
	err = register_inetaddr_notifier(&router->inetaddr_nb);
	if (err)
		goto err_register_inetaddr_notifier;

	router->inet6addr_nb.notifier_call = mlxsw_sp_inet6addr_event;
	err = register_inet6addr_notifier(&router->inet6addr_nb);
	if (err)
		goto err_register_inet6addr_notifier;

	mlxsw_sp->router->netevent_nb.notifier_call =
		mlxsw_sp_router_netevent_event;
	err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
	if (err)
		goto err_register_netevent_notifier;

	mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
	err = register_fib_notifier(mlxsw_sp_net(mlxsw_sp),
				    &mlxsw_sp->router->fib_nb,
				    mlxsw_sp_router_fib_dump_flush, extack);
	if (err)
		goto err_register_fib_notifier;

	return 0;

err_register_fib_notifier:
	unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
err_register_netevent_notifier:
	unregister_inet6addr_notifier(&router->inet6addr_nb);
err_register_inet6addr_notifier:
	unregister_inetaddr_notifier(&router->inetaddr_nb);
err_register_inetaddr_notifier:
	mlxsw_core_flush_owq();
	WARN_ON(!list_empty(&router->fib_event_queue));
err_dscp_init:
err_mp_hash_init:
	mlxsw_sp_neigh_fini(mlxsw_sp);
err_neigh_init:
	mlxsw_sp_vrs_fini(mlxsw_sp);
err_vrs_init:
	mlxsw_sp_mr_fini(mlxsw_sp);
err_mr_init:
	mlxsw_sp_lpm_fini(mlxsw_sp);
err_lpm_init:
	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
err_nexthop_group_ht_init:
	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
err_nexthop_ht_init:
	mlxsw_sp_ipips_fini(mlxsw_sp);
err_ipips_init:
	mlxsw_sp_rifs_fini(mlxsw_sp);
err_rifs_init:
	__mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	mlxsw_sp_router_ll_op_ctx_fini(router);
err_ll_op_ctx_init:
	mutex_destroy(&mlxsw_sp->router->lock);
	kfree(mlxsw_sp->router);
	return err;
}

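/* Teardown mirrors mlxsw_sp_router_init(): the notifiers are
 * unregistered first and pending work is flushed before the data
 * structures are freed.
 */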
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp),
				&mlxsw_sp->router->fib_nb);
	unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
	unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb);
	unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb);
	mlxsw_core_flush_owq();
	WARN_ON(!list_empty(&mlxsw_sp->router->fib_event_queue));
	mlxsw_sp_neigh_fini(mlxsw_sp);
	mlxsw_sp_vrs_fini(mlxsw_sp);
	mlxsw_sp_mr_fini(mlxsw_sp);
	mlxsw_sp_lpm_fini(mlxsw_sp);
	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
	mlxsw_sp_ipips_fini(mlxsw_sp);
	mlxsw_sp_rifs_fini(mlxsw_sp);
	__mlxsw_sp_router_fini(mlxsw_sp);
	mlxsw_sp_router_ll_op_ctx_fini(mlxsw_sp->router);
	mutex_destroy(&mlxsw_sp->router->lock);
	kfree(mlxsw_sp->router);
}