/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/tc_act/tc_gact.h>
#include <net/pkt_cls.h>
#include <linux/mlx5/fs.h>
#include <net/vxlan.h>
#include "en.h"
#include "en_tc.h"
#include "eswitch.h"
#include "vxlan.h"

struct mlx5e_rq_param {
	u32			rqc[MLX5_ST_SZ_DW(rqc)];
	struct mlx5_wq_param	wq;
	bool			am_enabled;
};

struct mlx5e_sq_param {
	u32                        sqc[MLX5_ST_SZ_DW(sqc)];
	struct mlx5_wq_param       wq;
	u16                        max_inline;
	u8                         min_inline_mode;
	bool                       icosq;
};

struct mlx5e_cq_param {
	u32                        cqc[MLX5_ST_SZ_DW(cqc)];
	struct mlx5_wq_param       wq;
	u16                        eq_ix;
	u8                         cq_period_mode;
};

struct mlx5e_channel_param {
	struct mlx5e_rq_param      rq;
	struct mlx5e_sq_param      sq;
	struct mlx5e_sq_param      icosq;
	struct mlx5e_cq_param      rx_cq;
	struct mlx5e_cq_param      tx_cq;
	struct mlx5e_cq_param      icosq_cq;
};

static void mlx5e_update_carrier(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 port_state;

	port_state = mlx5_query_vport_state(mdev,
		MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0);

	if (port_state == VPORT_STATE_UP) {
		netdev_info(priv->netdev, "Link up\n");
		netif_carrier_on(priv->netdev);
	} else {
		netdev_info(priv->netdev, "Link down\n");
		netif_carrier_off(priv->netdev);
	}
}

static void mlx5e_update_carrier_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       update_carrier_work);

	mutex_lock(&priv->state_lock);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		mlx5e_update_carrier(priv);
	mutex_unlock(&priv->state_lock);
}

static void mlx5e_tx_timeout_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       tx_timeout_work);
	int err;

	rtnl_lock();
	mutex_lock(&priv->state_lock);
	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		goto unlock;
	mlx5e_close_locked(priv->netdev);
	err = mlx5e_open_locked(priv->netdev);
	if (err)
		netdev_err(priv->netdev, "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n",
			   err);
unlock:
	mutex_unlock(&priv->state_lock);
	rtnl_unlock();
}

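/* Note: fold the per-ring RQ/SQ counters into one device-wide software view;
 * counters that the rings do not report directly (tx_csum_partial,
 * rx_csum_unnecessary) are then derived from the accumulated totals.
 */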
static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
{
	struct mlx5e_sw_stats *s = &priv->stats.sw;
	struct mlx5e_rq_stats *rq_stats;
	struct mlx5e_sq_stats *sq_stats;
	u64 tx_offload_none = 0;
	int i, j;

	memset(s, 0, sizeof(*s));
	for (i = 0; i < priv->params.num_channels; i++) {
		rq_stats = &priv->channel[i]->rq.stats;

		s->rx_packets	+= rq_stats->packets;
		s->rx_bytes	+= rq_stats->bytes;
		s->rx_lro_packets += rq_stats->lro_packets;
		s->rx_lro_bytes	+= rq_stats->lro_bytes;
		s->rx_csum_none	+= rq_stats->csum_none;
		s->rx_csum_complete += rq_stats->csum_complete;
		s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
		s->rx_wqe_err   += rq_stats->wqe_err;
		s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
		s->rx_mpwqe_frag   += rq_stats->mpwqe_frag;
		s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
		s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
		s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;

		for (j = 0; j < priv->params.num_tc; j++) {
			sq_stats = &priv->channel[i]->sq[j].stats;

			s->tx_packets		+= sq_stats->packets;
			s->tx_bytes		+= sq_stats->bytes;
			s->tx_tso_packets	+= sq_stats->tso_packets;
			s->tx_tso_bytes		+= sq_stats->tso_bytes;
			s->tx_tso_inner_packets	+= sq_stats->tso_inner_packets;
			s->tx_tso_inner_bytes	+= sq_stats->tso_inner_bytes;
			s->tx_queue_stopped	+= sq_stats->stopped;
			s->tx_queue_wake	+= sq_stats->wake;
			s->tx_queue_dropped	+= sq_stats->dropped;
			s->tx_xmit_more		+= sq_stats->xmit_more;
			s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
			tx_offload_none		+= sq_stats->csum_none;
		}
	}

	/* Update calculated offload counters */
	s->tx_csum_partial = s->tx_packets - tx_offload_none - s->tx_csum_partial_inner;
	s->rx_csum_unnecessary = s->rx_packets - s->rx_csum_none - s->rx_csum_complete;

	s->link_down_events_phy = MLX5_GET(ppcnt_reg,
				priv->stats.pport.phy_counters,
				counter_set.phys_layer_cntrs.link_down_events);
}

static void mlx5e_update_vport_counters(struct mlx5e_priv *priv)
{
	int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
	u32 *out = (u32 *)priv->stats.vport.query_vport_out;
	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
	struct mlx5_core_dev *mdev = priv->mdev;

	MLX5_SET(query_vport_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	MLX5_SET(query_vport_counter_in, in, op_mod, 0);
	MLX5_SET(query_vport_counter_in, in, other_vport, 0);

	memset(out, 0, outlen);
	mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}

static void mlx5e_update_pport_counters(struct mlx5e_priv *priv)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	int prio;
	void *out;
	u32 *in;

	in = mlx5_vzalloc(sz);
	if (!in)
		goto free_out;

	MLX5_SET(ppcnt_reg, in, local_port, 1);

	out = pstats->IEEE_802_3_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

	out = pstats->RFC_2863_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

	out = pstats->RFC_2819_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

	out = pstats->phy_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		out = pstats->per_prio_counters[prio];
		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
		mlx5_core_access_reg(mdev, in, sz, out, sz,
				     MLX5_REG_PPCNT, 0, 0);
	}

free_out:
	kvfree(in);
}

static void mlx5e_update_q_counter(struct mlx5e_priv *priv)
{
	struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;

	if (!priv->q_counter)
		return;

	mlx5_core_query_out_of_buffer(priv->mdev, priv->q_counter,
				      &qcnt->rx_out_of_buffer);
}

void mlx5e_update_stats(struct mlx5e_priv *priv)
{
	mlx5e_update_q_counter(priv);
	mlx5e_update_vport_counters(priv);
	mlx5e_update_pport_counters(priv);
	mlx5e_update_sw_counters(priv);
}

void mlx5e_update_stats_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv,
					       update_stats_work);
	mutex_lock(&priv->state_lock);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
		priv->profile->update_stats(priv);
		queue_delayed_work(priv->wq, dwork,
				   msecs_to_jiffies(MLX5E_UPDATE_STATS_INTERVAL));
	}
	mutex_unlock(&priv->state_lock);
}

static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
			      enum mlx5_dev_event event, unsigned long param)
{
	struct mlx5e_priv *priv = vpriv;

	if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state))
		return;

	switch (event) {
	case MLX5_DEV_EVENT_PORT_UP:
	case MLX5_DEV_EVENT_PORT_DOWN:
		queue_work(priv->wq, &priv->update_carrier_work);
		break;

	default:
		break;
	}
}

static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
{
	set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
}

static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
{
	clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
	synchronize_irq(mlx5_get_msix_vec(priv->mdev, MLX5_EQ_VEC_ASYNC));
}

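/* The hardware MTU counts the Ethernet header, a VLAN tag and the FCS,
 * while the netdev MTU does not; these helpers convert between the two.
 */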
#define MLX5E_HW2SW_MTU(hwmtu) (hwmtu - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
#define MLX5E_SW2HW_MTU(swmtu) (swmtu + (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))

static int mlx5e_create_rq(struct mlx5e_channel *c,
			   struct mlx5e_rq_param *param,
			   struct mlx5e_rq *rq)
{
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqc = param->rqc;
	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
	u32 byte_count;
	int wq_sz;
	int err;
	int i;

	param->wq.db_numa_node = cpu_to_node(c->cpu);

	err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
				&rq->wq_ctrl);
	if (err)
		return err;

	rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];

	wq_sz = mlx5_wq_ll_get_size(&rq->wq);

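	/* Two RX WQ flavors: a striding (MPWQE) RQ posts one multi-packet WQE
	 * that covers many equal-sized strides, while the linked-list RQ
	 * posts one WQE (and one SKB) per packet.
	 */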
	switch (priv->params.rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		rq->wqe_info = kzalloc_node(wq_sz * sizeof(*rq->wqe_info),
					    GFP_KERNEL, cpu_to_node(c->cpu));
		if (!rq->wqe_info) {
			err = -ENOMEM;
			goto err_rq_wq_destroy;
		}
		rq->handle_rx_cqe = mlx5e_handle_rx_cqe_mpwrq;
		rq->alloc_wqe = mlx5e_alloc_rx_mpwqe;
		rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;

		rq->mpwqe_mtt_offset = c->ix *
			MLX5E_REQUIRED_MTTS(1, BIT(priv->params.log_rq_size));

		rq->mpwqe_stride_sz = BIT(priv->params.mpwqe_log_stride_sz);
		rq->mpwqe_num_strides = BIT(priv->params.mpwqe_log_num_strides);
		rq->wqe_sz = rq->mpwqe_stride_sz * rq->mpwqe_num_strides;
		byte_count = rq->wqe_sz;
		break;
	default: /* MLX5_WQ_TYPE_LINKED_LIST */
		rq->skb = kzalloc_node(wq_sz * sizeof(*rq->skb), GFP_KERNEL,
				       cpu_to_node(c->cpu));
		if (!rq->skb) {
			err = -ENOMEM;
			goto err_rq_wq_destroy;
		}
		rq->handle_rx_cqe = mlx5e_handle_rx_cqe;
		rq->alloc_wqe = mlx5e_alloc_rx_wqe;
		rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;

		rq->wqe_sz = (priv->params.lro_en) ?
				priv->params.lro_wqe_sz :
				MLX5E_SW2HW_MTU(priv->netdev->mtu);
		rq->wqe_sz = SKB_DATA_ALIGN(rq->wqe_sz);
		byte_count = rq->wqe_sz;
		byte_count |= MLX5_HW_START_PADDING;
	}

	for (i = 0; i < wq_sz; i++) {
		struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);

		wqe->data.byte_count = cpu_to_be32(byte_count);
	}

	INIT_WORK(&rq->am.work, mlx5e_rx_am_work);
	rq->am.mode = priv->params.rx_cq_period_mode;

	rq->wq_type = priv->params.rq_wq_type;
	rq->pdev    = c->pdev;
	rq->netdev  = c->netdev;
	rq->tstamp  = &priv->tstamp;
	rq->channel = c;
	rq->ix      = c->ix;
	rq->priv    = c->priv;
	rq->mkey_be = c->mkey_be;
	rq->umr_mkey_be = cpu_to_be32(c->priv->umr_mkey.key);

	return 0;

err_rq_wq_destroy:
	mlx5_wq_destroy(&rq->wq_ctrl);

	return err;
}

static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
{
	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		kfree(rq->wqe_info);
		break;
	default: /* MLX5_WQ_TYPE_LINKED_LIST */
		kfree(rq->skb);
	}

	mlx5_wq_destroy(&rq->wq_ctrl);
}

static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
{
	struct mlx5e_priv *priv = rq->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *rqc;
	void *wq;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
		sizeof(u64) * rq->wq_ctrl.buf.npages;
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
	wq  = MLX5_ADDR_OF(rqc, rqc, wq);

	memcpy(rqc, param->rqc, sizeof(param->rqc));

	MLX5_SET(rqc,  rqc, cqn,		rq->cq.mcq.cqn);
	MLX5_SET(rqc,  rqc, state,		MLX5_RQC_STATE_RST);
	MLX5_SET(rqc,  rqc, vsd, priv->params.vlan_strip_disable);
	MLX5_SET(wq,   wq,  log_wq_pg_sz,	rq->wq_ctrl.buf.page_shift -
						MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(wq, wq,  dbr_addr,		rq->wq_ctrl.db.dma);

	mlx5_fill_page_array(&rq->wq_ctrl.buf,
			     (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);

	kvfree(in);

	return err;
}

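/* Firmware RQ objects follow a RST -> RDY state machine; this helper issues
 * MODIFY_RQ to move between states (see mlx5e_open_rq()).
 */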
static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state,
				 int next_state)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	MLX5_SET(modify_rq_in, in, rq_state, curr_state);
	MLX5_SET(rqc, rqc, state, next_state);

	err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

	kvfree(in);

	return err;
}

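/* vsd = VLAN stripping disable, toggled at runtime through the RQ context
 * while the RQ remains in the RDY state.
 */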
static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
	MLX5_SET64(modify_rq_in, in, modify_bitmask,
		   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD);
	MLX5_SET(rqc, rqc, vsd, vsd);
	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);

	err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

	kvfree(in);

	return err;
}

static void mlx5e_disable_rq(struct mlx5e_rq *rq)
{
	mlx5_core_destroy_rq(rq->priv->mdev, rq->rqn);
}

static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
{
	unsigned long exp_time = jiffies + msecs_to_jiffies(20000);
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_wq_ll *wq = &rq->wq;

	while (time_before(jiffies, exp_time)) {
		if (wq->cur_sz >= priv->params.min_rx_wqes)
			return 0;

		msleep(20);
	}

	return -ETIMEDOUT;
}

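/* Reclaim RX WQEs that were posted but never completed; called once the RQ
 * has been destroyed in hardware, so no further completions can arrive.
 */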
static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
{
	struct mlx5_wq_ll *wq = &rq->wq;
	struct mlx5e_rx_wqe *wqe;
	__be16 wqe_ix_be;
	u16 wqe_ix;

	/* UMR WQE (if in progress) is always at wq->head */
	if (test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
		mlx5e_free_rx_fragmented_mpwqe(rq, &rq->wqe_info[wq->head]);

	while (!mlx5_wq_ll_is_empty(wq)) {
		wqe_ix_be = *wq->tail_next;
		wqe_ix    = be16_to_cpu(wqe_ix_be);
		wqe       = mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix);
		rq->dealloc_wqe(rq, wqe_ix);
		mlx5_wq_ll_pop(&rq->wq, wqe_ix_be,
			       &wqe->next.next_wqe_index);
	}
}

static int mlx5e_open_rq(struct mlx5e_channel *c,
			 struct mlx5e_rq_param *param,
			 struct mlx5e_rq *rq)
{
	struct mlx5e_sq *sq = &c->icosq;
	u16 pi = sq->pc & sq->wq.sz_m1;
	int err;

	err = mlx5e_create_rq(c, param, rq);
	if (err)
		return err;

	err = mlx5e_enable_rq(rq, param);
	if (err)
		goto err_destroy_rq;

	err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
	if (err)
		goto err_disable_rq;

	if (param->am_enabled)
		set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);

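	/* Post a NOP on the internal control (ICO) SQ; its completion kicks
	 * off the first round of RX buffer posting for this RQ.
	 */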
	sq->ico_wqe_info[pi].opcode     = MLX5_OPCODE_NOP;
	sq->ico_wqe_info[pi].num_wqebbs = 1;
	mlx5e_send_nop(sq, true); /* trigger mlx5e_post_rx_wqes() */

	return 0;

err_disable_rq:
	mlx5e_disable_rq(rq);
err_destroy_rq:
	mlx5e_destroy_rq(rq);

	return err;
}

static void mlx5e_close_rq(struct mlx5e_rq *rq)
{
	set_bit(MLX5E_RQ_STATE_FLUSH, &rq->state);
	napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
	cancel_work_sync(&rq->am.work);

	mlx5e_disable_rq(rq);
	mlx5e_free_rx_descs(rq);
	mlx5e_destroy_rq(rq);
}

static void mlx5e_free_sq_db(struct mlx5e_sq *sq)
{
	kfree(sq->wqe_info);
	kfree(sq->dma_fifo);
	kfree(sq->skb);
}

static int mlx5e_alloc_sq_db(struct mlx5e_sq *sq, int numa)
{
	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
	int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;

	sq->skb = kzalloc_node(wq_sz * sizeof(*sq->skb), GFP_KERNEL, numa);
	sq->dma_fifo = kzalloc_node(df_sz * sizeof(*sq->dma_fifo), GFP_KERNEL,
				    numa);
	sq->wqe_info = kzalloc_node(wq_sz * sizeof(*sq->wqe_info), GFP_KERNEL,
				    numa);

	if (!sq->skb || !sq->dma_fifo || !sq->wqe_info) {
		mlx5e_free_sq_db(sq);
		return -ENOMEM;
	}

	sq->dma_fifo_mask = df_sz - 1;

	return 0;
}

static int mlx5e_create_sq(struct mlx5e_channel *c,
			   int tc,
			   struct mlx5e_sq_param *param,
			   struct mlx5e_sq *sq)
{
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *sqc = param->sqc;
	void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
	int err;

	err = mlx5_alloc_map_uar(mdev, &sq->uar, !!MLX5_CAP_GEN(mdev, bf));
	if (err)
		return err;

	param->wq.db_numa_node = cpu_to_node(c->cpu);

	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq,
				 &sq->wq_ctrl);
	if (err)
		goto err_unmap_free_uar;

	sq->wq.db       = &sq->wq.db[MLX5_SND_DBR];
	if (sq->uar.bf_map) {
		set_bit(MLX5E_SQ_STATE_BF_ENABLE, &sq->state);
		sq->uar_map = sq->uar.bf_map;
	} else {
		sq->uar_map = sq->uar.map;
	}
	sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
	sq->max_inline  = param->max_inline;
	sq->min_inline_mode =
		MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5E_INLINE_MODE_VPORT_CONTEXT ?
		param->min_inline_mode : 0;

	err = mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu));
	if (err)
		goto err_sq_wq_destroy;

	if (param->icosq) {
		u8 wq_sz = mlx5_wq_cyc_get_size(&sq->wq);

		sq->ico_wqe_info = kzalloc_node(sizeof(*sq->ico_wqe_info) *
						wq_sz,
						GFP_KERNEL,
						cpu_to_node(c->cpu));
		if (!sq->ico_wqe_info) {
			err = -ENOMEM;
			goto err_free_sq_db;
		}
	} else {
		int txq_ix;

		txq_ix = c->ix + tc * priv->params.num_channels;
		sq->txq = netdev_get_tx_queue(priv->netdev, txq_ix);
		priv->txq_to_sq_map[txq_ix] = sq;
	}

	sq->pdev      = c->pdev;
	sq->tstamp    = &priv->tstamp;
	sq->mkey_be   = c->mkey_be;
	sq->channel   = c;
	sq->tc        = tc;
	sq->edge      = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
	sq->bf_budget = MLX5E_SQ_BF_BUDGET;

	return 0;

err_free_sq_db:
	mlx5e_free_sq_db(sq);

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

err_unmap_free_uar:
	mlx5_unmap_free_uar(mdev, &sq->uar);

	return err;
}

static void mlx5e_destroy_sq(struct mlx5e_sq *sq)
{
	struct mlx5e_channel *c = sq->channel;
	struct mlx5e_priv *priv = c->priv;

	kfree(sq->ico_wqe_info);
	mlx5e_free_sq_db(sq);
	mlx5_wq_destroy(&sq->wq_ctrl);
	mlx5_unmap_free_uar(priv->mdev, &sq->uar);
}

static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
{
	struct mlx5e_channel *c = sq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *sqc;
	void *wq;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
		sizeof(u64) * sq->wq_ctrl.buf.npages;
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
	wq = MLX5_ADDR_OF(sqc, sqc, wq);

	memcpy(sqc, param->sqc, sizeof(param->sqc));

	MLX5_SET(sqc,  sqc, tis_num_0, param->icosq ? 0 : priv->tisn[sq->tc]);
	MLX5_SET(sqc,  sqc, cqn,		sq->cq.mcq.cqn);
	MLX5_SET(sqc,  sqc, min_wqe_inline_mode, sq->min_inline_mode);
	MLX5_SET(sqc,  sqc, state,		MLX5_SQC_STATE_RST);
	MLX5_SET(sqc,  sqc, tis_lst_sz,		param->icosq ? 0 : 1);
	MLX5_SET(sqc,  sqc, flush_in_error_en,	1);

	MLX5_SET(wq,   wq, wq_type,       MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq,   wq, uar_page,      sq->uar.index);
	MLX5_SET(wq,   wq, log_wq_pg_sz,  sq->wq_ctrl.buf.page_shift -
					  MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(wq, wq, dbr_addr,      sq->wq_ctrl.db.dma);

	mlx5_fill_page_array(&sq->wq_ctrl.buf,
			     (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn);

	kvfree(in);

	return err;
}

static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state,
			   int next_state, bool update_rl, int rl_index)
{
	struct mlx5e_channel *c = sq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *sqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);

	MLX5_SET(modify_sq_in, in, sq_state, curr_state);
	MLX5_SET(sqc, sqc, state, next_state);
	if (update_rl && next_state == MLX5_SQC_STATE_RDY) {
		MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
		MLX5_SET(sqc,  sqc, packet_pacing_rate_limit_index, rl_index);
	}

	err = mlx5_core_modify_sq(mdev, sq->sqn, in, inlen);

	kvfree(in);

	return err;
}

static void mlx5e_disable_sq(struct mlx5e_sq *sq)
{
	struct mlx5e_channel *c = sq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	mlx5_core_destroy_sq(mdev, sq->sqn);
	if (sq->rate_limit)
		mlx5_rl_remove_rate(mdev, sq->rate_limit);
}

static int mlx5e_open_sq(struct mlx5e_channel *c,
			 int tc,
			 struct mlx5e_sq_param *param,
			 struct mlx5e_sq *sq)
{
	int err;

	err = mlx5e_create_sq(c, tc, param, sq);
	if (err)
		return err;

	err = mlx5e_enable_sq(sq, param);
	if (err)
		goto err_destroy_sq;

	err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY,
			      false, 0);
	if (err)
		goto err_disable_sq;

	if (sq->txq) {
		netdev_tx_reset_queue(sq->txq);
		netif_tx_start_queue(sq->txq);
	}

	return 0;

err_disable_sq:
	mlx5e_disable_sq(sq);
err_destroy_sq:
	mlx5e_destroy_sq(sq);

	return err;
}

static inline void netif_tx_disable_queue(struct netdev_queue *txq)
{
	__netif_tx_lock_bh(txq);
	netif_tx_stop_queue(txq);
	__netif_tx_unlock_bh(txq);
}

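/* Tear-down ordering: mark the SQ as flushing and synchronize NAPI so nothing
 * re-wakes the txq, then let one final NOP doorbell push out any pending
 * WQEs before the SQ is destroyed and its descriptors are freed.
 */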
static void mlx5e_close_sq(struct mlx5e_sq *sq)
{
	set_bit(MLX5E_SQ_STATE_FLUSH, &sq->state);
	/* prevent netif_tx_wake_queue */
	napi_synchronize(&sq->channel->napi);

	if (sq->txq) {
		netif_tx_disable_queue(sq->txq);

		/* last doorbell out, godspeed .. */
		if (mlx5e_sq_has_room_for(sq, 1))
			mlx5e_send_nop(sq, true);
	}

	mlx5e_disable_sq(sq);
	mlx5e_free_tx_descs(sq);
	mlx5e_destroy_sq(sq);
}

static int mlx5e_create_cq(struct mlx5e_channel *c,
			   struct mlx5e_cq_param *param,
			   struct mlx5e_cq *cq)
{
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_core_cq *mcq = &cq->mcq;
	int eqn_not_used;
	unsigned int irqn;
	int err;
	u32 i;

	param->wq.buf_numa_node = cpu_to_node(c->cpu);
	param->wq.db_numa_node  = cpu_to_node(c->cpu);
	param->eq_ix   = c->ix;

	err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
			       &cq->wq_ctrl);
	if (err)
		return err;

	mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);

	cq->napi        = &c->napi;

	mcq->cqe_sz     = 64;
	mcq->set_ci_db  = cq->wq_ctrl.db.db;
	mcq->arm_db     = cq->wq_ctrl.db.db + 1;
	*mcq->set_ci_db = 0;
	*mcq->arm_db    = 0;
	mcq->vector     = param->eq_ix;
	mcq->comp       = mlx5e_completion_event;
	mcq->event      = mlx5e_cq_error_event;
	mcq->irqn       = irqn;
	mcq->uar        = &mdev->mlx5e_res.cq_uar;

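	/* Presumably sets the ownership/opcode byte so that every CQE starts
	 * out hardware-owned and is not mistaken for a valid completion.
	 */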
	for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);

		cqe->op_own = 0xf1;
	}

	cq->channel = c;
	cq->priv = priv;

	return 0;
}

static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
{
	mlx5_wq_destroy(&cq->wq_ctrl);
}

static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
{
	struct mlx5e_priv *priv = cq->priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_core_cq *mcq = &cq->mcq;

	void *in;
	void *cqc;
	int inlen;
	unsigned int irqn_not_used;
	int eqn;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		sizeof(u64) * cq->wq_ctrl.buf.npages;
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);

	memcpy(cqc, param->cqc, sizeof(param->cqc));

	mlx5_fill_page_array(&cq->wq_ctrl.buf,
			     (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));

	mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);

	MLX5_SET(cqc,   cqc, cq_period_mode, param->cq_period_mode);
	MLX5_SET(cqc,   cqc, c_eqn,         eqn);
	MLX5_SET(cqc,   cqc, uar_page,      mcq->uar->index);
	MLX5_SET(cqc,   cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
					    MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(cqc, cqc, dbr_addr,      cq->wq_ctrl.db.dma);

	err = mlx5_core_create_cq(mdev, mcq, in, inlen);

	kvfree(in);

	if (err)
		return err;

	mlx5e_cq_arm(cq);

	return 0;
}

static void mlx5e_disable_cq(struct mlx5e_cq *cq)
{
	struct mlx5e_priv *priv = cq->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	mlx5_core_destroy_cq(mdev, &cq->mcq);
}

static int mlx5e_open_cq(struct mlx5e_channel *c,
			 struct mlx5e_cq_param *param,
			 struct mlx5e_cq *cq,
			 struct mlx5e_cq_moder moderation)
{
	int err;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	err = mlx5e_create_cq(c, param, cq);
	if (err)
		return err;

	err = mlx5e_enable_cq(cq, param);
	if (err)
		goto err_destroy_cq;

	if (MLX5_CAP_GEN(mdev, cq_moderation))
		mlx5_core_modify_cq_moderation(mdev, &cq->mcq,
					       moderation.usec,
					       moderation.pkts);
	return 0;

err_destroy_cq:
	mlx5e_destroy_cq(cq);

	return err;
}

static void mlx5e_close_cq(struct mlx5e_cq *cq)
{
	mlx5e_disable_cq(cq);
	mlx5e_destroy_cq(cq);
}

static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
{
	return cpumask_first(priv->mdev->priv.irq_info[ix].mask);
}

static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
			     struct mlx5e_channel_param *cparam)
{
	struct mlx5e_priv *priv = c->priv;
	int err;
	int tc;

	for (tc = 0; tc < c->num_tc; tc++) {
		err = mlx5e_open_cq(c, &cparam->tx_cq, &c->sq[tc].cq,
				    priv->params.tx_cq_moderation);
		if (err)
			goto err_close_tx_cqs;
	}

	return 0;

err_close_tx_cqs:
	for (tc--; tc >= 0; tc--)
		mlx5e_close_cq(&c->sq[tc].cq);

	return err;
}

static void mlx5e_close_tx_cqs(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_cq(&c->sq[tc].cq);
}

static int mlx5e_open_sqs(struct mlx5e_channel *c,
			  struct mlx5e_channel_param *cparam)
{
	int err;
	int tc;

	for (tc = 0; tc < c->num_tc; tc++) {
		err = mlx5e_open_sq(c, tc, &cparam->sq, &c->sq[tc]);
		if (err)
			goto err_close_sqs;
	}

	return 0;

err_close_sqs:
	for (tc--; tc >= 0; tc--)
		mlx5e_close_sq(&c->sq[tc]);

	return err;
}

static void mlx5e_close_sqs(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_sq(&c->sq[tc]);
}

static void mlx5e_build_channeltc_to_txq_map(struct mlx5e_priv *priv, int ix)
{
	int i;

	for (i = 0; i < priv->profile->max_tc; i++)
		priv->channeltc_to_txq_map[ix][i] =
			ix + i * priv->params.num_channels;
}

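/* Rate-limit table entries are a shared per-device resource: the SQ's old
 * rate is released before a new index is taken, and index 0 reads as
 * "unlimited".
 */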
static int mlx5e_set_sq_maxrate(struct net_device *dev,
				struct mlx5e_sq *sq, u32 rate)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	u16 rl_index = 0;
	int err;

	if (rate == sq->rate_limit)
		/* nothing to do */
		return 0;

	if (sq->rate_limit)
		/* remove current rl index to free space to next ones */
		mlx5_rl_remove_rate(mdev, sq->rate_limit);

	sq->rate_limit = 0;

	if (rate) {
		err = mlx5_rl_add_rate(mdev, rate, &rl_index);
		if (err) {
			netdev_err(dev, "Failed configuring rate %u: %d\n",
				   rate, err);
			return err;
		}
	}

	err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY,
			      MLX5_SQC_STATE_RDY, true, rl_index);
	if (err) {
		netdev_err(dev, "Failed configuring rate %u: %d\n",
			   rate, err);
		/* remove the rate from the table */
		if (rate)
			mlx5_rl_remove_rate(mdev, rate);
		return err;
	}

	sq->rate_limit = rate;
	return 0;
}

static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_sq *sq = priv->txq_to_sq_map[index];
	int err = 0;

	if (!mlx5_rl_is_supported(mdev)) {
		netdev_err(dev, "Rate limiting is not supported on this device\n");
		return -EINVAL;
	}

	/* rate is given in Mb/sec, HW config is in Kb/sec */
	rate = rate << 10;

	/* Check whether rate in valid range, 0 is always valid */
	if (rate && !mlx5_rl_is_in_range(mdev, rate)) {
		netdev_err(dev, "TX rate %u, is not in range\n", rate);
		return -ERANGE;
	}

	mutex_lock(&priv->state_lock);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		err = mlx5e_set_sq_maxrate(dev, sq, rate);
	if (!err)
		priv->tx_rates[index] = rate;
	mutex_unlock(&priv->state_lock);

	return err;
}

static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
			      struct mlx5e_channel_param *cparam,
			      struct mlx5e_channel **cp)
{
	struct mlx5e_cq_moder icosq_cq_moder = {0, 0};
	struct net_device *netdev = priv->netdev;
	struct mlx5e_cq_moder rx_cq_profile;
	int cpu = mlx5e_get_cpu(priv, ix);
	struct mlx5e_channel *c;
	struct mlx5e_sq *sq;
	int err;
	int i;

	c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
	if (!c)
		return -ENOMEM;

	c->priv     = priv;
	c->ix       = ix;
	c->cpu      = cpu;
	c->pdev     = &priv->mdev->pdev->dev;
	c->netdev   = priv->netdev;
	c->mkey_be  = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
	c->num_tc   = priv->params.num_tc;

	if (priv->params.rx_am_enabled)
		rx_cq_profile = mlx5e_am_get_def_profile(priv->params.rx_cq_period_mode);
	else
		rx_cq_profile = priv->params.rx_cq_moderation;

	mlx5e_build_channeltc_to_txq_map(priv, ix);

	netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);

	err = mlx5e_open_cq(c, &cparam->icosq_cq, &c->icosq.cq, icosq_cq_moder);
	if (err)
		goto err_napi_del;

	err = mlx5e_open_tx_cqs(c, cparam);
	if (err)
		goto err_close_icosq_cq;

	err = mlx5e_open_cq(c, &cparam->rx_cq, &c->rq.cq,
			    rx_cq_profile);
	if (err)
		goto err_close_tx_cqs;

	napi_enable(&c->napi);

	err = mlx5e_open_sq(c, 0, &cparam->icosq, &c->icosq);
	if (err)
		goto err_disable_napi;

	err = mlx5e_open_sqs(c, cparam);
	if (err)
		goto err_close_icosq;

	for (i = 0; i < priv->params.num_tc; i++) {
		u32 txq_ix = priv->channeltc_to_txq_map[ix][i];

		if (priv->tx_rates[txq_ix]) {
			sq = priv->txq_to_sq_map[txq_ix];
			mlx5e_set_sq_maxrate(priv->netdev, sq,
					     priv->tx_rates[txq_ix]);
		}
	}

	err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
	if (err)
		goto err_close_sqs;

	netif_set_xps_queue(netdev, get_cpu_mask(c->cpu), ix);
	*cp = c;

	return 0;

err_close_sqs:
	mlx5e_close_sqs(c);

err_close_icosq:
	mlx5e_close_sq(&c->icosq);

err_disable_napi:
	napi_disable(&c->napi);
	mlx5e_close_cq(&c->rq.cq);

err_close_tx_cqs:
	mlx5e_close_tx_cqs(c);

err_close_icosq_cq:
	mlx5e_close_cq(&c->icosq.cq);

err_napi_del:
	netif_napi_del(&c->napi);
	napi_hash_del(&c->napi);
	kfree(c);

	return err;
}

static void mlx5e_close_channel(struct mlx5e_channel *c)
{
	mlx5e_close_rq(&c->rq);
	mlx5e_close_sqs(c);
	mlx5e_close_sq(&c->icosq);
	napi_disable(&c->napi);
	mlx5e_close_cq(&c->rq.cq);
	mlx5e_close_tx_cqs(c);
	mlx5e_close_cq(&c->icosq.cq);
	netif_napi_del(&c->napi);

	napi_hash_del(&c->napi);
	synchronize_rcu();

	kfree(c);
}

static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
				 struct mlx5e_rq_param *param)
{
	void *rqc = param->rqc;
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

	switch (priv->params.rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		MLX5_SET(wq, wq, log_wqe_num_of_strides,
			 priv->params.mpwqe_log_num_strides - 9);
		MLX5_SET(wq, wq, log_wqe_stride_size,
			 priv->params.mpwqe_log_stride_sz - 6);
		MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);
		break;
	default: /* MLX5_WQ_TYPE_LINKED_LIST */
		MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
	}

	MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
	MLX5_SET(wq, wq, log_wq_stride,    ilog2(sizeof(struct mlx5e_rx_wqe)));
	MLX5_SET(wq, wq, log_wq_sz,        priv->params.log_rq_size);
	MLX5_SET(wq, wq, pd,               priv->mdev->mlx5e_res.pdn);
	MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter);

	param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
	param->wq.linear = 1;

	param->am_enabled = priv->params.rx_am_enabled;
}

static void mlx5e_build_drop_rq_param(struct mlx5e_rq_param *param)
{
	void *rqc = param->rqc;
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
	MLX5_SET(wq, wq, log_wq_stride,    ilog2(sizeof(struct mlx5e_rx_wqe)));
}

static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
					struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
	MLX5_SET(wq, wq, pd,            priv->mdev->mlx5e_res.pdn);

	param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
}

static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
				 struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(priv, param);
	MLX5_SET(wq, wq, log_wq_sz,     priv->params.log_sq_size);

	param->max_inline = priv->params.tx_max_inline;
	param->min_inline_mode = priv->params.tx_min_inline_mode;
}

static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
					struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, uar_page, priv->mdev->mlx5e_res.cq_uar.index);
}

static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
				    struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;
	u8 log_cq_size;

	switch (priv->params.rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		log_cq_size = priv->params.log_rq_size +
			priv->params.mpwqe_log_num_strides;
		break;
	default: /* MLX5_WQ_TYPE_LINKED_LIST */
		log_cq_size = priv->params.log_rq_size;
	}

	MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
	if (priv->params.rx_cqe_compress) {
		MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_CSUM);
		MLX5_SET(cqc, cqc, cqe_comp_en, 1);
	}

	mlx5e_build_common_cq_param(priv, param);

	param->cq_period_mode = priv->params.rx_cq_period_mode;
}

static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
				    struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size);

	mlx5e_build_common_cq_param(priv, param);

	param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
}

static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
				     struct mlx5e_cq_param *param,
				     u8 log_wq_size)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, log_cq_size, log_wq_size);

	mlx5e_build_common_cq_param(priv, param);

	param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
}

static void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
				    struct mlx5e_sq_param *param,
				    u8 log_wq_size)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(priv, param);

	MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
	MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq));

	param->icosq = true;
}

static void mlx5e_build_channel_param(struct mlx5e_priv *priv, struct mlx5e_channel_param *cparam)
{
	u8 icosq_log_wq_sz = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;

	mlx5e_build_rq_param(priv, &cparam->rq);
	mlx5e_build_sq_param(priv, &cparam->sq);
	mlx5e_build_icosq_param(priv, &cparam->icosq, icosq_log_wq_sz);
	mlx5e_build_rx_cq_param(priv, &cparam->rx_cq);
	mlx5e_build_tx_cq_param(priv, &cparam->tx_cq);
	mlx5e_build_ico_cq_param(priv, &cparam->icosq_cq, icosq_log_wq_sz);
}

static int mlx5e_open_channels(struct mlx5e_priv *priv)
{
	struct mlx5e_channel_param *cparam;
	int nch = priv->params.num_channels;
	int err = -ENOMEM;
	int i;
	int j;

	priv->channel = kcalloc(nch, sizeof(struct mlx5e_channel *),
				GFP_KERNEL);

	priv->txq_to_sq_map = kcalloc(nch * priv->params.num_tc,
				      sizeof(struct mlx5e_sq *), GFP_KERNEL);

	cparam = kzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL);

	if (!priv->channel || !priv->txq_to_sq_map || !cparam)
		goto err_free_txq_to_sq_map;

	mlx5e_build_channel_param(priv, cparam);

	for (i = 0; i < nch; i++) {
		err = mlx5e_open_channel(priv, i, cparam, &priv->channel[i]);
		if (err)
			goto err_close_channels;
	}

	for (j = 0; j < nch; j++) {
		err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j]->rq);
		if (err)
			goto err_close_channels;
	}

	/* FIXME: This is a W/A for tx timeout watch dog false alarm when
	 * polling for inactive tx queues.
	 */
	netif_tx_start_all_queues(priv->netdev);

	kfree(cparam);
	return 0;

err_close_channels:
	for (i--; i >= 0; i--)
		mlx5e_close_channel(priv->channel[i]);

err_free_txq_to_sq_map:
	kfree(priv->txq_to_sq_map);
	kfree(priv->channel);
	kfree(cparam);

	return err;
}

static void mlx5e_close_channels(struct mlx5e_priv *priv)
{
	int i;

	/* FIXME: This is a W/A only for tx timeout watch dog false alarm when
	 * polling for inactive tx queues.
	 */
	netif_tx_stop_all_queues(priv->netdev);
	netif_tx_disable(priv->netdev);

	for (i = 0; i < priv->params.num_channels; i++)
		mlx5e_close_channel(priv->channel[i]);

	kfree(priv->txq_to_sq_map);
	kfree(priv->channel);
}

static int mlx5e_rx_hash_fn(int hfunc)
{
	return (hfunc == ETH_RSS_HASH_TOP) ?
	       MLX5_RX_HASH_FN_TOEPLITZ :
	       MLX5_RX_HASH_FN_INVERTED_XOR8;
}

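/* Reverse the low 'size' bits of 'a'; used to walk the RSS indirection table
 * in bit-reversed order when the XOR8 hash function is selected.
 */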
static int mlx5e_bits_invert(unsigned long a, int size)
{
	int inv = 0;
	int i;

	for (i = 0; i < size; i++)
		inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;

	return inv;
}

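/* Fill an RQT with RQ numbers. While the interface is down the channel RQs do
 * not exist, so the entries point at the drop RQ instead.
 */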
static void mlx5e_fill_indir_rqt_rqns(struct mlx5e_priv *priv, void *rqtc)
{
	int i;

	for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) {
		int ix = i;
		u32 rqn;

		if (priv->params.rss_hfunc == ETH_RSS_HASH_XOR)
			ix = mlx5e_bits_invert(i, MLX5E_LOG_INDIR_RQT_SIZE);

		ix = priv->params.indirection_rqt[ix];
		rqn = test_bit(MLX5E_STATE_OPENED, &priv->state) ?
				priv->channel[ix]->rq.rqn :
				priv->drop_rq.rqn;
		MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
	}
}

static void mlx5e_fill_direct_rqt_rqn(struct mlx5e_priv *priv, void *rqtc,
				      int ix)
{
	u32 rqn = test_bit(MLX5E_STATE_OPENED, &priv->state) ?
			priv->channel[ix]->rq.rqn :
			priv->drop_rq.rqn;

	MLX5_SET(rqtc, rqtc, rq_num[0], rqn);
}

static int mlx5e_create_rqt(struct mlx5e_priv *priv, int sz,
			    int ix, struct mlx5e_rqt *rqt)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqtc;
	int inlen;
	int err;
	u32 *in;

	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);

	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);

	if (sz > 1) /* RSS */
		mlx5e_fill_indir_rqt_rqns(priv, rqtc);
	else
		mlx5e_fill_direct_rqt_rqn(priv, rqtc, ix);

	err = mlx5_core_create_rqt(mdev, in, inlen, &rqt->rqtn);
	if (!err)
		rqt->enabled = true;

	kvfree(in);
	return err;
}

void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt)
{
	rqt->enabled = false;
	mlx5_core_destroy_rqt(priv->mdev, rqt->rqtn);
}

static int mlx5e_create_indirect_rqts(struct mlx5e_priv *priv)
{
	struct mlx5e_rqt *rqt = &priv->indir_rqt;

	return mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, 0, rqt);
}

int mlx5e_create_direct_rqts(struct mlx5e_priv *priv)
{
	struct mlx5e_rqt *rqt;
	int err;
	int ix;

	for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
		rqt = &priv->direct_tir[ix].rqt;
		err = mlx5e_create_rqt(priv, 1 /*size */, ix, rqt);
		if (err)
			goto err_destroy_rqts;
	}

	return 0;

err_destroy_rqts:
	for (ix--; ix >= 0; ix--)
		mlx5e_destroy_rqt(priv, &priv->direct_tir[ix].rqt);

	return err;
}

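/* Re-point an existing RQT, either the RSS indirection table (sz > 1) or a
 * single-entry direct table, at the current set of RQs.
 */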
int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqtc;
	int inlen;
	u32 *in;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz;
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);

	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
	if (sz > 1) /* RSS */
		mlx5e_fill_indir_rqt_rqns(priv, rqtc);
	else
		mlx5e_fill_direct_rqt_rqn(priv, rqtc, ix);

	MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);

	err = mlx5_core_modify_rqt(mdev, rqtn, in, inlen);

	kvfree(in);

	return err;
}

static void mlx5e_redirect_rqts(struct mlx5e_priv *priv)
{
	u32 rqtn;
	int ix;

	if (priv->indir_rqt.enabled) {
		rqtn = priv->indir_rqt.rqtn;
		mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0);
	}

	for (ix = 0; ix < priv->params.num_channels; ix++) {
		if (!priv->direct_tir[ix].rqt.enabled)
			continue;
		rqtn = priv->direct_tir[ix].rqt.rqtn;
		mlx5e_redirect_rqt(priv, rqtn, 1, ix);
	}
}

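/* lro_max_ip_payload_size is programmed in units of 256 bytes (hence the
 * >> 8 below), after reserving a rough upper bound for L2/L3 headers.
 */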
static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
{
	if (!priv->params.lro_en)
		return;

#define ROUGH_MAX_L2_L3_HDR_SZ 256

	MLX5_SET(tirc, tirc, lro_enable_mask,
		 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
		 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
	MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
		 (priv->params.lro_wqe_sz -
		  ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
	MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
		 MLX5_CAP_ETH(priv->mdev,
			      lro_timer_supported_periods[2]));
}

void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv)
{
	MLX5_SET(tirc, tirc, rx_hash_fn,
		 mlx5e_rx_hash_fn(priv->params.rss_hfunc));
	if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
		void *rss_key = MLX5_ADDR_OF(tirc, tirc,
					     rx_hash_toeplitz_key);
		size_t len = MLX5_FLD_SZ_BYTES(tirc,
					       rx_hash_toeplitz_key);

		MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
		memcpy(rss_key, priv->params.toeplitz_hash_key, len);
	}
}

static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *tirc;
	int inlen;
	int err;
T
T
1694 1695 1696 1697 1698 1699 1700 1701 1702 1703 1704

	inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_tir_in, in, bitmask.lro, 1);
	tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);

	mlx5e_build_tir_ctx_lro(tirc, priv);

T
1706
		err = mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in,
T
T
T
T
1711

1712
	for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
T
					   in, inlen);
		if (err)
			goto free_in;
	}

free_in:
	kvfree(in);

	return err;
}

static int mlx5e_set_mtu(struct mlx5e_priv *priv, u16 mtu)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u16 hw_mtu = MLX5E_SW2HW_MTU(mtu);
	int err;

	err = mlx5_set_port_mtu(mdev, hw_mtu, 1);
	if (err)
		return err;

	/* Update vport context MTU */
	mlx5_modify_nic_vport_mtu(mdev, hw_mtu);
	return 0;
}

static void mlx5e_query_mtu(struct mlx5e_priv *priv, u16 *mtu)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u16 hw_mtu = 0;
	int err;

	err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu);
	if (err || !hw_mtu) /* fallback to port oper mtu */
		mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);

	*mtu = MLX5E_HW2SW_MTU(hw_mtu);
}

static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	u16 mtu;
	int err;

	err = mlx5e_set_mtu(priv, netdev->mtu);
	if (err)
		return err;

	mlx5e_query_mtu(priv, &mtu);
	if (mtu != netdev->mtu)
		netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n",
			    __func__, mtu, netdev->mtu);

	netdev->mtu = mtu;
	return 0;
}

static void mlx5e_netdev_set_tcs(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int nch = priv->params.num_channels;
	int ntc = priv->params.num_tc;
	int tc;

	netdev_reset_tc(netdev);

	if (ntc == 1)
		return;

	netdev_set_num_tc(netdev, ntc);

	/* Map netdev TCs to offset 0
	 * We have our own UP to TXQ mapping for QoS
	 */
	for (tc = 0; tc < ntc; tc++)
		netdev_set_tc_queue(netdev, tc, nch, 0);
}

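/* Bring-up order: fix the real TX/RX queue counts, open the channels, refresh
 * the TIRs, then re-point the RQTs at the now-live RQs before reporting
 * carrier and kicking off stats updates.
 */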
int mlx5e_open_locked(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	int num_txqs;
	int err;

	set_bit(MLX5E_STATE_OPENED, &priv->state);

	mlx5e_netdev_set_tcs(netdev);

	num_txqs = priv->params.num_channels * priv->params.num_tc;
	netif_set_real_num_tx_queues(netdev, num_txqs);
	netif_set_real_num_rx_queues(netdev, priv->params.num_channels);

	err = mlx5e_open_channels(priv);
	if (err) {
		netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n",
			   __func__, err);
		goto err_clear_state_opened_flag;
	}

	err = mlx5e_refresh_tirs_self_loopback_enable(priv->mdev);
	if (err) {
		netdev_err(netdev, "%s: mlx5e_refresh_tirs_self_loopback_enable failed, %d\n",
			   __func__, err);
		goto err_close_channels;
	}

	mlx5e_redirect_rqts(priv);
	mlx5e_update_carrier(priv);
	mlx5e_timestamp_init(priv);
#ifdef CONFIG_RFS_ACCEL
	priv->netdev->rx_cpu_rmap = priv->mdev->rmap;
#endif
	if (priv->profile->update_stats)
		queue_delayed_work(priv->wq, &priv->update_stats_work, 0);

	if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
		err = mlx5e_add_sqs_fwd_rules(priv);
		if (err)
			goto err_close_channels;
	}
	return 0;

err_close_channels:
	mlx5e_close_channels(priv);
err_clear_state_opened_flag:
	clear_bit(MLX5E_STATE_OPENED, &priv->state);
	return err;
}

int mlx5e_open(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	mutex_lock(&priv->state_lock);
	err = mlx5e_open_locked(netdev);
	mutex_unlock(&priv->state_lock);

	return err;
}

int mlx5e_close_locked(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;

	/* May already be CLOSED in case a previous configuration operation
	 * (e.g RX/TX queue size change) that involves close&open failed.
	 */
	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		return 0;

	clear_bit(MLX5E_STATE_OPENED, &priv->state);

	if (MLX5_CAP_GEN(mdev, vport_group_manager))
		mlx5e_remove_sqs_fwd_rules(priv);

	mlx5e_timestamp_cleanup(priv);
	netif_carrier_off(priv->netdev);
	mlx5e_redirect_rqts(priv);
	mlx5e_close_channels(priv);

	return 0;
}

int mlx5e_close(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	if (!netif_device_present(netdev))
		return -ENODEV;

	mutex_lock(&priv->state_lock);
	err = mlx5e_close_locked(netdev);
	mutex_unlock(&priv->state_lock);

	return err;
}

static int mlx5e_create_drop_rq(struct mlx5e_priv *priv,
				struct mlx5e_rq *rq,
				struct mlx5e_rq_param *param)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqc = param->rqc;
	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
	int err;

	param->wq.db_numa_node = param->wq.buf_numa_node;

	err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
				&rq->wq_ctrl);
	if (err)
		return err;

	rq->priv = priv;

	return 0;
}

static int mlx5e_create_drop_cq(struct mlx5e_priv *priv,
				struct mlx5e_cq *cq,
				struct mlx5e_cq_param *param)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_core_cq *mcq = &cq->mcq;
	int eqn_not_used;
	unsigned int irqn;
	int err;

	err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
			       &cq->wq_ctrl);
	if (err)
		return err;

	mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);

	mcq->cqe_sz     = 64;
	mcq->set_ci_db  = cq->wq_ctrl.db.db;
	mcq->arm_db     = cq->wq_ctrl.db.db + 1;
	*mcq->set_ci_db = 0;
	*mcq->arm_db    = 0;
	mcq->vector     = param->eq_ix;
	mcq->comp       = mlx5e_completion_event;
	mcq->event      = mlx5e_cq_error_event;
	mcq->irqn       = irqn;
	mcq->uar        = &mdev->mlx5e_res.cq_uar;

	cq->priv = priv;

	return 0;
}

static int mlx5e_open_drop_rq(struct mlx5e_priv *priv)
{
	struct mlx5e_cq_param cq_param;
	struct mlx5e_rq_param rq_param;
	struct mlx5e_rq *rq = &priv->drop_rq;
	struct mlx5e_cq *cq = &priv->drop_rq.cq;
	int err;

	memset(&cq_param, 0, sizeof(cq_param));
	memset(&rq_param, 0, sizeof(rq_param));
	mlx5e_build_drop_rq_param(&rq_param);

	err = mlx5e_create_drop_cq(priv, cq, &cq_param);
	if (err)
		return err;

	err = mlx5e_enable_cq(cq, &cq_param);
	if (err)
		goto err_destroy_cq;

	err = mlx5e_create_drop_rq(priv, rq, &rq_param);
	if (err)
		goto err_disable_cq;

	err = mlx5e_enable_rq(rq, &rq_param);
	if (err)
		goto err_destroy_rq;

	return 0;

err_destroy_rq:
	mlx5e_destroy_rq(&priv->drop_rq);

err_disable_cq:
	mlx5e_disable_cq(&priv->drop_rq.cq);

err_destroy_cq:
	mlx5e_destroy_cq(&priv->drop_rq.cq);

	return err;
}

static void mlx5e_close_drop_rq(struct mlx5e_priv *priv)
{
	mlx5e_disable_rq(&priv->drop_rq);
	mlx5e_destroy_rq(&priv->drop_rq);
	mlx5e_disable_cq(&priv->drop_rq.cq);
	mlx5e_destroy_cq(&priv->drop_rq.cq);
}
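
/* Note (added commentary): the drop RQ is never posted with receive buffers;
 * it presumably exists so that TIRs always have a valid destination RQN even
 * while the real channel RQs are closed or being reconfigured.
 */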

static int mlx5e_create_tis(struct mlx5e_priv *priv, int tc)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
	void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);

	MLX5_SET(tisc, tisc, prio, tc << 1);
	MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.td.tdn);

	if (mlx5_lag_is_lacp_owner(mdev))
		MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1);

	return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
}

static void mlx5e_destroy_tis(struct mlx5e_priv *priv, int tc)
{
	mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]);
}

int mlx5e_create_tises(struct mlx5e_priv *priv)
{
	int err;
	int tc;

	for (tc = 0; tc < priv->profile->max_tc; tc++) {
		err = mlx5e_create_tis(priv, tc);
		if (err)
			goto err_close_tises;
	}

	return 0;

err_close_tises:
	for (tc--; tc >= 0; tc--)
		mlx5e_destroy_tis(priv, tc);

	return err;
}

void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
{
	int tc;

	for (tc = 0; tc < priv->profile->max_tc; tc++)
		mlx5e_destroy_tis(priv, tc);
}

static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
				      enum mlx5e_traffic_types tt)
{
	void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);

	MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);

#define MLX5_HASH_IP            (MLX5_HASH_FIELD_SEL_SRC_IP   |\
				 MLX5_HASH_FIELD_SEL_DST_IP)

#define MLX5_HASH_IP_L4PORTS    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
				 MLX5_HASH_FIELD_SEL_DST_IP   |\
				 MLX5_HASH_FIELD_SEL_L4_SPORT |\
				 MLX5_HASH_FIELD_SEL_L4_DPORT)

#define MLX5_HASH_IP_IPSEC_SPI  (MLX5_HASH_FIELD_SEL_SRC_IP   |\
				 MLX5_HASH_FIELD_SEL_DST_IP   |\
				 MLX5_HASH_FIELD_SEL_IPSEC_SPI)

	mlx5e_build_tir_ctx_lro(tirc, priv);

	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
	MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
	mlx5e_build_tir_ctx_hash(tirc, priv);

	switch (tt) {
	case MLX5E_TT_IPV4_TCP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_TCP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV6_TCP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_TCP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV4_UDP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_UDP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV6_UDP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_UDP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV4_IPSEC_AH:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV6_IPSEC_AH:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV4_IPSEC_ESP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV6_IPSEC_ESP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV4:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP);
		break;

	case MLX5E_TT_IPV6:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP);
		break;
	default:
		WARN_ONCE(true,
			  "mlx5e_build_indir_tir_ctx: bad traffic type!\n");
	}
}

static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
				       u32 rqtn)
{
	MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);

	mlx5e_build_tir_ctx_lro(tirc, priv);

	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
	MLX5_SET(tirc, tirc, indirect_table, rqtn);
	MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
}
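
/* Design note (added): a direct TIR points at a single-RQ RQT, so there is
 * no traffic spreading to do; the cheap inverted-XOR8 hash suffices here,
 * while the indirect TIRs above use the configured RSS hash function.
 */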

static int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv)
{
	struct mlx5e_tir *tir;
	void *tirc;
	int inlen;
	int err;
	u32 *in;
	int tt;

	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		memset(in, 0, inlen);
		tir = &priv->indir_tir[tt];
		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
		mlx5e_build_indir_tir_ctx(priv, tirc, tt);
		err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
		if (err)
			goto err_destroy_tirs;
	}

	kvfree(in);

	return 0;

err_destroy_tirs:
	for (tt--; tt >= 0; tt--)
		mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[tt]);

	kvfree(in);

	return err;
}

int mlx5e_create_direct_tirs(struct mlx5e_priv *priv)
{
	int nch = priv->profile->max_nch(priv->mdev);
	struct mlx5e_tir *tir;
	void *tirc;
	int inlen;
	int err;
	u32 *in;
	int ix;

	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	for (ix = 0; ix < nch; ix++) {
		memset(in, 0, inlen);
		tir = &priv->direct_tir[ix];
		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
		mlx5e_build_direct_tir_ctx(priv, tirc,
					   priv->direct_tir[ix].rqt.rqtn);
		err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
		if (err)
			goto err_destroy_ch_tirs;
	}

	kvfree(in);

	return 0;

err_destroy_ch_tirs:
	for (ix--; ix >= 0; ix--)
		mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[ix]);

	kvfree(in);

	return err;
}

static void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv)
{
	int i;

	for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
		mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[i]);
}

void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv)
{
	int nch = priv->profile->max_nch(priv->mdev);
	int i;

	for (i = 0; i < nch; i++)
		mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[i]);
}

int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd)
{
	int err = 0;
	int i;

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		return 0;

	for (i = 0; i < priv->params.num_channels; i++) {
		err = mlx5e_modify_rq_vsd(&priv->channel[i]->rq, vsd);
		if (err)
			return err;
	}

	return 0;
}

static int mlx5e_setup_tc(struct net_device *netdev, u8 tc)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	bool was_opened;
	int err = 0;

	if (tc && tc != MLX5E_MAX_NUM_TC)
		return -EINVAL;

	mutex_lock(&priv->state_lock);

	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
	if (was_opened)
		mlx5e_close_locked(priv->netdev);

	priv->params.num_tc = tc ? tc : 1;

	if (was_opened)
		err = mlx5e_open_locked(priv->netdev);

	mutex_unlock(&priv->state_lock);

	return err;
}
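
/* Example usage (illustrative; command line not taken from this file): an
 * iproute2 mqprio setup such as
 *
 *	tc qdisc add dev eth0 root mqprio num_tc 8 hw 1
 *
 * lands in mlx5e_ndo_setup_tc() below with tc->type == TC_SETUP_MQPRIO and
 * tc->tc == 8, which ends up here and restarts the channels with 8 TCs.
 */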

static int mlx5e_ndo_setup_tc(struct net_device *dev, u32 handle,
			      __be16 proto, struct tc_to_netdev *tc)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS))
		goto mqprio;

	switch (tc->type) {
	case TC_SETUP_CLSFLOWER:
		switch (tc->cls_flower->command) {
		case TC_CLSFLOWER_REPLACE:
			return mlx5e_configure_flower(priv, proto, tc->cls_flower);
		case TC_CLSFLOWER_DESTROY:
			return mlx5e_delete_flower(priv, tc->cls_flower);
		case TC_CLSFLOWER_STATS:
			return mlx5e_stats_flower(priv, tc->cls_flower);
		}
	default:
		return -EOPNOTSUPP;
	}

mqprio:
	if (tc->type != TC_SETUP_MQPRIO)
		return -EINVAL;

	return mlx5e_setup_tc(dev, tc->tc);
}

struct rtnl_link_stats64 *
mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_sw_stats *sstats = &priv->stats.sw;
	struct mlx5e_vport_stats *vstats = &priv->stats.vport;
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;

	stats->rx_packets = sstats->rx_packets;
	stats->rx_bytes   = sstats->rx_bytes;
	stats->tx_packets = sstats->tx_packets;
	stats->tx_bytes   = sstats->tx_bytes;

	stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer;
	stats->tx_dropped = sstats->tx_queue_dropped;

	stats->rx_length_errors =
		PPORT_802_3_GET(pstats, a_in_range_length_errors) +
		PPORT_802_3_GET(pstats, a_out_of_range_length_field) +
		PPORT_802_3_GET(pstats, a_frame_too_long_errors);
	stats->rx_crc_errors =
		PPORT_802_3_GET(pstats, a_frame_check_sequence_errors);
	stats->rx_frame_errors = PPORT_802_3_GET(pstats, a_alignment_errors);
	stats->tx_aborted_errors = PPORT_2863_GET(pstats, if_out_discards);
	stats->tx_carrier_errors =
		PPORT_802_3_GET(pstats, a_symbol_error_during_carrier);
	stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
			   stats->rx_frame_errors;
	stats->tx_errors = stats->tx_aborted_errors + stats->tx_carrier_errors;

	/* vport multicast also counts packets that are dropped due to steering
	 * or rx out of buffer
	 */
	stats->multicast =
		VPORT_COUNTER_GET(vstats, received_eth_multicast.packets);

	return stats;
}

static void mlx5e_set_rx_mode(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	queue_work(priv->wq, &priv->set_rx_mode_work);
}

static int mlx5e_set_mac(struct net_device *netdev, void *addr)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct sockaddr *saddr = addr;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	netif_addr_lock_bh(netdev);
	ether_addr_copy(netdev->dev_addr, saddr->sa_data);
	netif_addr_unlock_bh(netdev);

	queue_work(priv->wq, &priv->set_rx_mode_work);

	return 0;
}

#define MLX5E_SET_FEATURE(netdev, feature, enable)	\
	do {						\
		if (enable)				\
			netdev->features |= feature;	\
		else					\
			netdev->features &= ~feature;	\
	} while (0)

typedef int (*mlx5e_feature_handler)(struct net_device *netdev, bool enable);

static int set_feature_lro(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	bool was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
	int err;

	mutex_lock(&priv->state_lock);

	if (was_opened && (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST))
		mlx5e_close_locked(priv->netdev);

	priv->params.lro_en = enable;
	err = mlx5e_modify_tirs_lro(priv);
	if (err) {
		netdev_err(netdev, "lro modify failed, %d\n", err);
		priv->params.lro_en = !enable;
	}

	if (was_opened && (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST))
		mlx5e_open_locked(priv->netdev);

	mutex_unlock(&priv->state_lock);

	return err;
}

static int set_feature_vlan_filter(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (enable)
		mlx5e_enable_vlan_filter(priv);
	else
		mlx5e_disable_vlan_filter(priv);

	return 0;
}

static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (!enable && mlx5e_tc_num_filters(priv)) {
		netdev_err(netdev,
			   "Active offloaded tc filters, can't turn hw_tc_offload off\n");
		return -EINVAL;
	}

	return 0;
}

static int set_feature_rx_all(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_set_port_fcs(mdev, !enable);
}

static int set_feature_rx_vlan(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	mutex_lock(&priv->state_lock);

	priv->params.vlan_strip_disable = !enable;
	err = mlx5e_modify_rqs_vsd(priv, !enable);
	if (err)
		priv->params.vlan_strip_disable = enable;

	mutex_unlock(&priv->state_lock);

	return err;
}

#ifdef CONFIG_RFS_ACCEL
static int set_feature_arfs(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	if (enable)
		err = mlx5e_arfs_enable(priv);
	else
		err = mlx5e_arfs_disable(priv);

	return err;
}
#endif

static int mlx5e_handle_feature(struct net_device *netdev,
				netdev_features_t wanted_features,
				netdev_features_t feature,
				mlx5e_feature_handler feature_handler)
{
	netdev_features_t changes = wanted_features ^ netdev->features;
	bool enable = !!(wanted_features & feature);
	int err;

	if (!(changes & feature))
		return 0;

	err = feature_handler(netdev, enable);
	if (err) {
		netdev_err(netdev, "%s feature 0x%llx failed err %d\n",
			   enable ? "Enable" : "Disable", feature, err);
		return err;
	}

	MLX5E_SET_FEATURE(netdev, feature, enable);
	return 0;
}
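
/* Worked example (added): if NETIF_F_LRO is currently off and
 * wanted_features has it on, (wanted_features ^ netdev->features) has the
 * LRO bit set, so set_feature_lro() runs with enable == true; only on
 * success is the bit committed via MLX5E_SET_FEATURE(), so a failing
 * handler leaves netdev->features unchanged.
 */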

static int mlx5e_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	int err;

	err  = mlx5e_handle_feature(netdev, features, NETIF_F_LRO,
				    set_feature_lro);
	err |= mlx5e_handle_feature(netdev, features,
				    NETIF_F_HW_VLAN_CTAG_FILTER,
				    set_feature_vlan_filter);
	err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_TC,
				    set_feature_tc_num_filters);
	err |= mlx5e_handle_feature(netdev, features, NETIF_F_RXALL,
				    set_feature_rx_all);
	err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_VLAN_CTAG_RX,
				    set_feature_rx_vlan);
#ifdef CONFIG_RFS_ACCEL
	err |= mlx5e_handle_feature(netdev, features, NETIF_F_NTUPLE,
				    set_feature_arfs);
#endif

	return err ? -EINVAL : 0;
}

#define MLX5_HW_MIN_MTU 64
#define MLX5E_MIN_MTU (MLX5_HW_MIN_MTU + ETH_FCS_LEN)

static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	bool was_opened;
	u16 max_mtu;
	u16 min_mtu;
	int err = 0;
	bool reset;

	mlx5_query_port_max_mtu(mdev, &max_mtu, 1);

	max_mtu = MLX5E_HW2SW_MTU(max_mtu);
	min_mtu = MLX5E_HW2SW_MTU(MLX5E_MIN_MTU);

	if (new_mtu > max_mtu || new_mtu < min_mtu) {
		netdev_err(netdev,
			   "%s: Bad MTU (%d), valid range is: [%d..%d]\n",
			   __func__, new_mtu, min_mtu, max_mtu);
		return -EINVAL;
	}

	mutex_lock(&priv->state_lock);

	reset = !priv->params.lro_en &&
		(priv->params.rq_wq_type !=
		 MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);

	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
	if (was_opened && reset)
		mlx5e_close_locked(netdev);

	netdev->mtu = new_mtu;
	mlx5e_set_dev_port_mtu(netdev);

	if (was_opened && reset)
		err = mlx5e_open_locked(netdev);

	mutex_unlock(&priv->state_lock);

	return err;
}

static int mlx5e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCSHWTSTAMP:
		return mlx5e_hwstamp_set(dev, ifr);
	case SIOCGHWTSTAMP:
		return mlx5e_hwstamp_get(dev, ifr);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_mac(mdev->priv.eswitch, vf + 1, mac);
}

static int mlx5e_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_vlan(mdev->priv.eswitch, vf + 1,
					   vlan, qos);
}

static int mlx5e_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_spoofchk(mdev->priv.eswitch, vf + 1, setting);
}

static int mlx5e_set_vf_trust(struct net_device *dev, int vf, bool setting)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_trust(mdev->priv.eswitch, vf + 1, setting);
}
static int mlx5_vport_link2ifla(u8 esw_link)
{
	switch (esw_link) {
	case MLX5_ESW_VPORT_ADMIN_STATE_DOWN:
		return IFLA_VF_LINK_STATE_DISABLE;
	case MLX5_ESW_VPORT_ADMIN_STATE_UP:
		return IFLA_VF_LINK_STATE_ENABLE;
	}
	return IFLA_VF_LINK_STATE_AUTO;
}

static int mlx5_ifla_link2vport(u8 ifla_link)
{
	switch (ifla_link) {
	case IFLA_VF_LINK_STATE_DISABLE:
		return MLX5_ESW_VPORT_ADMIN_STATE_DOWN;
	case IFLA_VF_LINK_STATE_ENABLE:
		return MLX5_ESW_VPORT_ADMIN_STATE_UP;
	}
	return MLX5_ESW_VPORT_ADMIN_STATE_AUTO;
}

static int mlx5e_set_vf_link_state(struct net_device *dev, int vf,
				   int link_state)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_state(mdev->priv.eswitch, vf + 1,
					    mlx5_ifla_link2vport(link_state));
}

static int mlx5e_get_vf_config(struct net_device *dev,
			       int vf, struct ifla_vf_info *ivi)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	err = mlx5_eswitch_get_vport_config(mdev->priv.eswitch, vf + 1, ivi);
	if (err)
		return err;
	ivi->linkstate = mlx5_vport_link2ifla(ivi->linkstate);
	return 0;
}

static int mlx5e_get_vf_stats(struct net_device *dev,
			      int vf, struct ifla_vf_stats *vf_stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_get_vport_stats(mdev->priv.eswitch, vf + 1,
					    vf_stats);
}

static void mlx5e_add_vxlan_port(struct net_device *netdev,
				 struct udp_tunnel_info *ti)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	if (!mlx5e_vxlan_allowed(priv->mdev))
		return;

	mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 1);
}

static void mlx5e_del_vxlan_port(struct net_device *netdev,
				 struct udp_tunnel_info *ti)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	if (!mlx5e_vxlan_allowed(priv->mdev))
		return;

	mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 0);
}

static netdev_features_t mlx5e_vxlan_features_check(struct mlx5e_priv *priv,
						    struct sk_buff *skb,
						    netdev_features_t features)
{
	struct udphdr *udph;
	u16 proto;
	u16 port = 0;

	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		proto = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		proto = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		goto out;
	}

	if (proto == IPPROTO_UDP) {
		udph = udp_hdr(skb);
		port = be16_to_cpu(udph->dest);
	}

	/* Verify if UDP port is being offloaded by HW */
	if (port && mlx5e_vxlan_lookup_port(priv, port))
		return features;

out:
	/* Disable CSUM and GSO if the UDP dport is not offloaded by HW */
	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}

static netdev_features_t mlx5e_features_check(struct sk_buff *skb,
					      struct net_device *netdev,
					      netdev_features_t features)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	features = vlan_features_check(skb, features);
	features = vxlan_features_check(skb, features);

	/* Validate if the tunneled packet is being offloaded by HW */
	if (skb->encapsulation &&
	    (features & NETIF_F_CSUM_MASK || features & NETIF_F_GSO_MASK))
		return mlx5e_vxlan_features_check(priv, skb, features);

	return features;
}
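
/* Note (added): this hook runs per-skb on transmit; for encapsulated
 * packets, checksum and GSO offloads survive only if the outer UDP
 * destination port was registered earlier through the VXLAN port
 * add/del callbacks above.
 */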

static void mlx5e_tx_timeout(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	bool sched_work = false;
	int i;

	netdev_err(dev, "TX timeout detected\n");

	for (i = 0; i < priv->params.num_channels * priv->params.num_tc; i++) {
		struct mlx5e_sq *sq = priv->txq_to_sq_map[i];

		if (!netif_xmit_stopped(netdev_get_tx_queue(dev, i)))
			continue;
		sched_work = true;
		set_bit(MLX5E_SQ_STATE_FLUSH, &sq->state);
		netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x\n",
			   i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc);
	}

	if (sched_work && test_bit(MLX5E_STATE_OPENED, &priv->state))
		schedule_work(&priv->tx_timeout_work);
}
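
/* Note (added): this handler only marks the hung SQs with
 * MLX5E_SQ_STATE_FLUSH; the actual recovery is deferred to
 * priv->tx_timeout_work, which presumably restarts the channels from
 * process context where priv->state_lock can be taken.
 */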

static const struct net_device_ops mlx5e_netdev_ops_basic = {
	.ndo_open                = mlx5e_open,
	.ndo_stop                = mlx5e_close,
	.ndo_start_xmit          = mlx5e_xmit,
	.ndo_setup_tc            = mlx5e_ndo_setup_tc,
	.ndo_select_queue        = mlx5e_select_queue,
	.ndo_get_stats64         = mlx5e_get_stats,
	.ndo_set_rx_mode         = mlx5e_set_rx_mode,
	.ndo_set_mac_address     = mlx5e_set_mac,
	.ndo_vlan_rx_add_vid     = mlx5e_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid    = mlx5e_vlan_rx_kill_vid,
	.ndo_set_features        = mlx5e_set_features,
	.ndo_change_mtu          = mlx5e_change_mtu,
	.ndo_do_ioctl            = mlx5e_ioctl,
	.ndo_set_tx_maxrate      = mlx5e_set_tx_maxrate,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	 = mlx5e_rx_flow_steer,
#endif
	.ndo_tx_timeout          = mlx5e_tx_timeout,
};

static const struct net_device_ops mlx5e_netdev_ops_sriov = {
	.ndo_open                = mlx5e_open,
	.ndo_stop                = mlx5e_close,
	.ndo_start_xmit          = mlx5e_xmit,
	.ndo_setup_tc            = mlx5e_ndo_setup_tc,
	.ndo_select_queue        = mlx5e_select_queue,
	.ndo_get_stats64         = mlx5e_get_stats,
	.ndo_set_rx_mode         = mlx5e_set_rx_mode,
	.ndo_set_mac_address     = mlx5e_set_mac,
	.ndo_vlan_rx_add_vid     = mlx5e_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid    = mlx5e_vlan_rx_kill_vid,
	.ndo_set_features        = mlx5e_set_features,
	.ndo_change_mtu          = mlx5e_change_mtu,
	.ndo_do_ioctl            = mlx5e_ioctl,
	.ndo_udp_tunnel_add	 = mlx5e_add_vxlan_port,
	.ndo_udp_tunnel_del	 = mlx5e_del_vxlan_port,
	.ndo_set_tx_maxrate      = mlx5e_set_tx_maxrate,
	.ndo_features_check      = mlx5e_features_check,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	 = mlx5e_rx_flow_steer,
#endif
	.ndo_set_vf_mac          = mlx5e_set_vf_mac,
	.ndo_set_vf_vlan         = mlx5e_set_vf_vlan,
	.ndo_set_vf_spoofchk     = mlx5e_set_vf_spoofchk,
	.ndo_set_vf_trust        = mlx5e_set_vf_trust,
	.ndo_get_vf_config       = mlx5e_get_vf_config,
	.ndo_set_vf_link_state   = mlx5e_set_vf_link_state,
	.ndo_get_vf_stats        = mlx5e_get_vf_stats,
	.ndo_tx_timeout          = mlx5e_tx_timeout,
};

static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
{
	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return -ENOTSUPP;
	if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
	    !MLX5_CAP_GEN(mdev, nic_flow_table) ||
	    !MLX5_CAP_ETH(mdev, csum_cap) ||
	    !MLX5_CAP_ETH(mdev, max_lso_cap) ||
	    !MLX5_CAP_ETH(mdev, vlan_cap) ||
	    !MLX5_CAP_ETH(mdev, rss_ind_tbl_cap) ||
	    MLX5_CAP_FLOWTABLE(mdev,
			       flow_table_properties_nic_receive.max_ft_level)
			       < 3) {
		mlx5_core_warn(mdev,
			       "Not creating net device, some required device capabilities are missing\n");
		return -ENOTSUPP;
	}
	if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
		mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
	if (!MLX5_CAP_GEN(mdev, cq_moderation))
		mlx5_core_warn(mdev, "CQ moderation is not supported\n");

	return 0;
}

u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
{
	int bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;

	return bf_buf_size -
	       sizeof(struct mlx5e_tx_wqe) +
	       2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/;
}
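
/* Worked example (illustrative numbers): with log_bf_reg_size == 9 the
 * BlueFlame register is 512 bytes and bf_buf_size == 256; the returned
 * inline budget is then 256 - sizeof(struct mlx5e_tx_wqe) + 2, the "+ 2"
 * adding back the inline_hdr_start bytes already counted inside the WQE
 * struct.
 */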

#ifdef CONFIG_MLX5_CORE_EN_DCB
static void mlx5e_ets_init(struct mlx5e_priv *priv)
{
	int i;

	priv->params.ets.ets_cap = mlx5_max_tc(priv->mdev) + 1;
	for (i = 0; i < priv->params.ets.ets_cap; i++) {
		priv->params.ets.tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
		priv->params.ets.tc_tsa[i] = IEEE_8021QAZ_TSA_VENDOR;
		priv->params.ets.prio_tc[i] = i;
	}

	/* tclass[prio=0]=1, tclass[prio=1]=0, tclass[prio=i]=i (for i>1) */
	priv->params.ets.prio_tc[0] = 1;
	priv->params.ets.prio_tc[1] = 0;
}
#endif

void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev,
				   u32 *indirection_rqt, int len,
				   int num_channels)
{
	int node = mdev->priv.numa_node;
	int node_num_of_cores;
	int i;

	if (node == -1)
		node = first_online_node;

	node_num_of_cores = cpumask_weight(cpumask_of_node(node));

	if (node_num_of_cores)
		num_channels = min_t(int, num_channels, node_num_of_cores);

	for (i = 0; i < len; i++)
		indirection_rqt[i] = i % num_channels;
}
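
/* Worked example (illustrative): with len == 8, num_channels == 8 and a
 * NUMA node exposing 6 local cores, num_channels is clamped to 6 and the
 * table becomes { 0, 1, 2, 3, 4, 5, 0, 1 }, spreading RSS buckets
 * round-robin across the NUMA-local channels only.
 */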

static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
{
	return MLX5_CAP_GEN(mdev, striding_rq) &&
		MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
		MLX5_CAP_ETH(mdev, reg_umr_sq);
}

static int mlx5e_get_pci_bw(struct mlx5_core_dev *mdev, u32 *pci_bw)
{
	enum pcie_link_width width;
	enum pci_bus_speed speed;
	int err = 0;

	err = pcie_get_minimum_link(mdev->pdev, &speed, &width);
	if (err)
		return err;

	if (speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN)
		return -EINVAL;

	switch (speed) {
	case PCIE_SPEED_2_5GT:
		*pci_bw = 2500 * width;
		break;
	case PCIE_SPEED_5_0GT:
		*pci_bw = 5000 * width;
		break;
	case PCIE_SPEED_8_0GT:
		*pci_bw = 8000 * width;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static bool cqe_compress_heuristic(u32 link_speed, u32 pci_bw)
{
	return (link_speed && pci_bw &&
		(pci_bw < 40000) && (pci_bw < link_speed));
}
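
/* Worked example (illustrative): mlx5e_get_pci_bw() returns the per-lane
 * raw rate in Mb/s times the link width, so PCIe Gen3 x4 yields
 * 8000 * 4 == 32000. With a 40000 Mb/s (40GbE) link, 32000 < 40000 holds
 * for both tests and CQE compression defaults to on; on Gen3 x8 (64000)
 * the heuristic is false and compression stays off.
 */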

void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
{
	params->rx_cq_period_mode = cq_period_mode;

	params->rx_cq_moderation.pkts =
		MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
	params->rx_cq_moderation.usec =
			MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;

	if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
		params->rx_cq_moderation.usec =
			MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;
}
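
/* Note (added commentary, paraphrasing the two cq_period_mode flavors): in
 * CQE-based moderation the device restarts the timer on every completion,
 * which is presumably why the shorter _FROM_CQE usec default is used;
 * EQE-based mode counts from the event instead.
 */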

static void mlx5e_query_min_inline(struct mlx5_core_dev *mdev,
				   u8 *min_inline_mode)
{
	switch (MLX5_CAP_ETH(mdev, wqe_inline_mode)) {
	case MLX5E_INLINE_MODE_L2:
		*min_inline_mode = MLX5_INLINE_MODE_L2;
		break;
	case MLX5E_INLINE_MODE_VPORT_CONTEXT:
		mlx5_query_nic_vport_min_inline(mdev,
						min_inline_mode);
		break;
	case MLX5_INLINE_MODE_NOT_REQUIRED:
		*min_inline_mode = MLX5_INLINE_MODE_NONE;
		break;
	}
}

static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
					struct net_device *netdev,
					const struct mlx5e_profile *profile,
					void *ppriv)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	u32 link_speed = 0;
	u32 pci_bw = 0;
	u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
					 MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
					 MLX5_CQ_PERIOD_MODE_START_FROM_EQE;

	priv->params.log_sq_size           =
		MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
	priv->params.rq_wq_type = mlx5e_check_fragmented_striding_rq_cap(mdev) ?
		MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
		MLX5_WQ_TYPE_LINKED_LIST;

	/* set CQE compression */
	priv->params.rx_cqe_compress_admin = false;
	if (MLX5_CAP_GEN(mdev, cqe_compression) &&
	    MLX5_CAP_GEN(mdev, vport_group_manager)) {
		mlx5e_get_max_linkspeed(mdev, &link_speed);
		mlx5e_get_pci_bw(mdev, &pci_bw);
		mlx5_core_dbg(mdev, "Max link speed = %d, PCI BW = %d\n",
			      link_speed, pci_bw);
		priv->params.rx_cqe_compress_admin =
			cqe_compress_heuristic(link_speed, pci_bw);
	}

	priv->params.rx_cqe_compress = priv->params.rx_cqe_compress_admin;

	switch (priv->params.rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
		priv->params.mpwqe_log_stride_sz =
			priv->params.rx_cqe_compress ?
			MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS :
			MLX5_MPWRQ_LOG_STRIDE_SIZE;
		priv->params.mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ -
			priv->params.mpwqe_log_stride_sz;
		priv->params.lro_en = true;
		break;
	default: /* MLX5_WQ_TYPE_LINKED_LIST */
		priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
	}

	mlx5_core_info(mdev,
		       "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
		       priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
		       BIT(priv->params.log_rq_size),
		       BIT(priv->params.mpwqe_log_stride_sz),
		       priv->params.rx_cqe_compress_admin);

	priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type,
					    BIT(priv->params.log_rq_size));

	priv->params.rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
	mlx5e_set_rx_cq_mode_params(&priv->params, cq_period_mode);

	priv->params.tx_cq_moderation.usec =
		MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
	priv->params.tx_cq_moderation.pkts =
		MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
	priv->params.tx_max_inline         = mlx5e_get_max_inline_cap(mdev);
	mlx5e_query_min_inline(mdev, &priv->params.tx_min_inline_mode);
	priv->params.num_tc                = 1;
	priv->params.rss_hfunc             = ETH_RSS_HASH_XOR;

	netdev_rss_key_fill(priv->params.toeplitz_hash_key,
			    sizeof(priv->params.toeplitz_hash_key));

	mlx5e_build_default_indir_rqt(mdev, priv->params.indirection_rqt,
				      MLX5E_INDIR_RQT_SIZE, profile->max_nch(mdev));

	priv->params.lro_wqe_sz            =
		MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;

	/* Initialize pflags */
	MLX5E_SET_PRIV_FLAG(priv, MLX5E_PFLAG_RX_CQE_BASED_MODER,
			    priv->params.rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE);

	priv->mdev                         = mdev;
	priv->netdev                       = netdev;
	priv->params.num_channels          = profile->max_nch(mdev);
	priv->profile                      = profile;
	priv->ppriv                        = ppriv;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	mlx5e_ets_init(priv);
#endif

	mutex_init(&priv->state_lock);

	INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
	INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
	INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
	INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
}

static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	mlx5_query_nic_vport_mac_address(priv->mdev, 0, netdev->dev_addr);
	if (is_zero_ether_addr(netdev->dev_addr) &&
	    !MLX5_CAP_GEN(priv->mdev, vport_group_manager)) {
		eth_hw_addr_random(netdev);
		mlx5_core_info(priv->mdev, "Assigned random MAC address %pM\n", netdev->dev_addr);
	}
}

static const struct switchdev_ops mlx5e_switchdev_ops = {
	.switchdev_port_attr_get	= mlx5e_attr_get,
};

static void mlx5e_build_nic_netdev(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	bool fcs_supported;
	bool fcs_enabled;

	SET_NETDEV_DEV(netdev, &mdev->pdev->dev);

	if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
		netdev->netdev_ops = &mlx5e_netdev_ops_sriov;
#ifdef CONFIG_MLX5_CORE_EN_DCB
		netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
#endif
	} else {
		netdev->netdev_ops = &mlx5e_netdev_ops_basic;
	}

	netdev->watchdog_timeo    = 15 * HZ;

	netdev->ethtool_ops	  = &mlx5e_ethtool_ops;

	netdev->vlan_features    |= NETIF_F_SG;
	netdev->vlan_features    |= NETIF_F_IP_CSUM;
	netdev->vlan_features    |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features    |= NETIF_F_GRO;
	netdev->vlan_features    |= NETIF_F_TSO;
	netdev->vlan_features    |= NETIF_F_TSO6;
	netdev->vlan_features    |= NETIF_F_RXCSUM;
	netdev->vlan_features    |= NETIF_F_RXHASH;

	if (!!MLX5_CAP_ETH(mdev, lro_cap))
		netdev->vlan_features    |= NETIF_F_LRO;

	netdev->hw_features       = netdev->vlan_features;
	netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_TX;
	netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_RX;
	netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_FILTER;

	if (mlx5e_vxlan_allowed(mdev)) {
		netdev->hw_features     |= NETIF_F_GSO_UDP_TUNNEL |
					   NETIF_F_GSO_UDP_TUNNEL_CSUM |
					   NETIF_F_GSO_PARTIAL;
		netdev->hw_enc_features |= NETIF_F_IP_CSUM;
		netdev->hw_enc_features |= NETIF_F_IPV6_CSUM;
		netdev->hw_enc_features |= NETIF_F_TSO;
		netdev->hw_enc_features |= NETIF_F_TSO6;
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
					   NETIF_F_GSO_PARTIAL;
		netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
	}

	mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled);

	if (fcs_supported)
		netdev->hw_features |= NETIF_F_RXALL;

	netdev->features          = netdev->hw_features;
	if (!priv->params.lro_en)
		netdev->features  &= ~NETIF_F_LRO;

	if (fcs_enabled)
		netdev->features  &= ~NETIF_F_RXALL;

#define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f)
	if (FT_CAP(flow_modify_en) &&
	    FT_CAP(modify_root) &&
	    FT_CAP(identified_miss_table_mode) &&
	    FT_CAP(flow_table_modify)) {
		netdev->hw_features      |= NETIF_F_HW_TC;
#ifdef CONFIG_RFS_ACCEL
		netdev->hw_features	 |= NETIF_F_NTUPLE;
#endif
	}

	netdev->features         |= NETIF_F_HIGHDMA;

	netdev->priv_flags       |= IFF_UNICAST_FLT;

	mlx5e_set_netdev_dev_addr(netdev);

#ifdef CONFIG_NET_SWITCHDEV
	if (MLX5_CAP_GEN(mdev, vport_group_manager))
		netdev->switchdev_ops = &mlx5e_switchdev_ops;
#endif
}

static void mlx5e_create_q_counter(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	err = mlx5_core_alloc_q_counter(mdev, &priv->q_counter);
	if (err) {
		mlx5_core_warn(mdev, "alloc queue counter failed, %d\n", err);
		priv->q_counter = 0;
	}
}

static void mlx5e_destroy_q_counter(struct mlx5e_priv *priv)
{
	if (!priv->q_counter)
		return;

	mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter);
}

static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u64 npages = MLX5E_REQUIRED_MTTS(priv->profile->max_nch(mdev),
					 BIT(MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW));
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	void *mkc;
	u32 *in;
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	npages = min_t(u32, ALIGN(U16_MAX, 4) * 2, npages);

	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, umr_en, 1);
	MLX5_SET(mkc, mkc, lw, 1);
	MLX5_SET(mkc, mkc, lr, 1);
	MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT);

	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.pdn);
	MLX5_SET64(mkc, mkc, len, npages << PAGE_SHIFT);
	MLX5_SET(mkc, mkc, translations_octword_size,
		 MLX5_MTT_OCTW(npages));
	MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);

	err = mlx5_core_create_mkey(mdev, &priv->umr_mkey, in, inlen);

	kvfree(in);
	return err;
}
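
/* Note (added): npages is clamped to ALIGN(U16_MAX, 4) * 2 above, i.e. the
 * key never covers more than ~128K pages regardless of how many channels
 * the profile allows; the exact bound presumably tracks a UMR/MTT size
 * limit (assumption, not stated in this file).
 */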

static void mlx5e_nic_init(struct mlx5_core_dev *mdev,
			   struct net_device *netdev,
			   const struct mlx5e_profile *profile,
			   void *ppriv)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	mlx5e_build_nic_netdev_priv(mdev, netdev, profile, ppriv);
	mlx5e_build_nic_netdev(netdev);
	mlx5e_vxlan_init(priv);
}

static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_eswitch *esw = mdev->priv.eswitch;

	mlx5e_vxlan_cleanup(priv);

	if (MLX5_CAP_GEN(mdev, vport_group_manager))
		mlx5_eswitch_unregister_vport_rep(esw, 0);
}

static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;
	int i;

	err = mlx5e_create_indirect_rqts(priv);
	if (err) {
		mlx5_core_warn(mdev, "create indirect rqts failed, %d\n", err);
		return err;
	}

	err = mlx5e_create_direct_rqts(priv);
	if (err) {
		mlx5_core_warn(mdev, "create direct rqts failed, %d\n", err);
		goto err_destroy_indirect_rqts;
	}

	err = mlx5e_create_indirect_tirs(priv);
	if (err) {
		mlx5_core_warn(mdev, "create indirect tirs failed, %d\n", err);
		goto err_destroy_direct_rqts;
	}

	err = mlx5e_create_direct_tirs(priv);
	if (err) {
		mlx5_core_warn(mdev, "create direct tirs failed, %d\n", err);
		goto err_destroy_indirect_tirs;
	}

	err = mlx5e_create_flow_steering(priv);
	if (err) {
		mlx5_core_warn(mdev, "create flow steering failed, %d\n", err);
		goto err_destroy_direct_tirs;
	}

	err = mlx5e_tc_init(priv);
	if (err)
		goto err_destroy_flow_steering;

	return 0;

err_destroy_flow_steering:
	mlx5e_destroy_flow_steering(priv);
err_destroy_direct_tirs:
	mlx5e_destroy_direct_tirs(priv);
err_destroy_indirect_tirs:
	mlx5e_destroy_indirect_tirs(priv);
err_destroy_direct_rqts:
	for (i = 0; i < priv->profile->max_nch(mdev); i++)
		mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
err_destroy_indirect_rqts:
	mlx5e_destroy_rqt(priv, &priv->indir_rqt);
	return err;
}

static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
{
	int i;

	mlx5e_tc_cleanup(priv);
	mlx5e_destroy_flow_steering(priv);
	mlx5e_destroy_direct_tirs(priv);
	mlx5e_destroy_indirect_tirs(priv);
	for (i = 0; i < priv->profile->max_nch(priv->mdev); i++)
		mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
	mlx5e_destroy_rqt(priv, &priv->indir_rqt);
}

static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
{
	int err;

	err = mlx5e_create_tises(priv);
	if (err) {
		mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
		return err;
	}

#ifdef CONFIG_MLX5_CORE_EN_DCB
	mlx5e_dcbnl_ieee_setets_core(priv, &priv->params.ets);
#endif
	return 0;
}

static void mlx5e_nic_enable(struct mlx5e_priv *priv)
{
	struct net_device *netdev = priv->netdev;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_eswitch *esw = mdev->priv.eswitch;
	struct mlx5_eswitch_rep rep;

	mlx5_lag_add(mdev, netdev);

	if (mlx5e_vxlan_allowed(mdev)) {
		rtnl_lock();
		udp_tunnel_get_rx_info(netdev);
		rtnl_unlock();
	}

	mlx5e_enable_async_events(priv);
	queue_work(priv->wq, &priv->set_rx_mode_work);

	if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
		mlx5_query_nic_vport_mac_address(mdev, 0, rep.hw_id);
		rep.load = mlx5e_nic_rep_load;
		rep.unload = mlx5e_nic_rep_unload;
		rep.vport = 0;
		rep.priv_data = priv;
		mlx5_eswitch_register_vport_rep(esw, &rep);
	}
}

static void mlx5e_nic_disable(struct mlx5e_priv *priv)
{
	queue_work(priv->wq, &priv->set_rx_mode_work);
	mlx5e_disable_async_events(priv);
	mlx5_lag_remove(priv->mdev);
}

static const struct mlx5e_profile mlx5e_nic_profile = {
	.init		   = mlx5e_nic_init,
	.cleanup	   = mlx5e_nic_cleanup,
	.init_rx	   = mlx5e_init_nic_rx,
	.cleanup_rx	   = mlx5e_cleanup_nic_rx,
	.init_tx	   = mlx5e_init_nic_tx,
	.cleanup_tx	   = mlx5e_cleanup_nic_tx,
	.enable		   = mlx5e_nic_enable,
	.disable	   = mlx5e_nic_disable,
	.update_stats	   = mlx5e_update_stats,
	.max_nch	   = mlx5e_get_max_num_channels,
	.max_tc		   = MLX5E_MAX_NUM_TC,
};

struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
				       const struct mlx5e_profile *profile,
				       void *ppriv)
{
	int nch = profile->max_nch(mdev);
	struct net_device *netdev;
	struct mlx5e_priv *priv;

	netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
				    nch * profile->max_tc,
				    nch);
	if (!netdev) {
		mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
		return NULL;
	}

	profile->init(mdev, netdev, profile, ppriv);

	netif_carrier_off(netdev);

	priv = netdev_priv(netdev);

	priv->wq = create_singlethread_workqueue("mlx5e");
	if (!priv->wq)
		goto err_cleanup_nic;

	return netdev;

err_cleanup_nic:
	profile->cleanup(priv);
	free_netdev(netdev);

	return NULL;
}

int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev)
{
	const struct mlx5e_profile *profile;
	struct mlx5e_priv *priv;
	int err;

	priv = netdev_priv(netdev);
	profile = priv->profile;
	clear_bit(MLX5E_STATE_DESTROYING, &priv->state);

	err = mlx5e_create_umr_mkey(priv);
	if (err) {
		mlx5_core_err(mdev, "create umr mkey failed, %d\n", err);
		goto out;
	}

	err = profile->init_tx(priv);
	if (err)
		goto err_destroy_umr_mkey;

	err = mlx5e_open_drop_rq(priv);
	if (err) {
		mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
		goto err_cleanup_tx;
	}

	err = profile->init_rx(priv);
	if (err)
		goto err_close_drop_rq;

	mlx5e_create_q_counter(priv);

	mlx5e_init_l2_addr(priv);

	mlx5e_set_dev_port_mtu(netdev);

	if (profile->enable)
		profile->enable(priv);

	rtnl_lock();
	if (netif_running(netdev))
		mlx5e_open(netdev);
	netif_device_attach(netdev);
	rtnl_unlock();

	return 0;

err_close_drop_rq:
	mlx5e_close_drop_rq(priv);

err_cleanup_tx:
	profile->cleanup_tx(priv);

err_destroy_umr_mkey:
	mlx5_core_destroy_mkey(mdev, &priv->umr_mkey);

out:
	return err;
}

static void mlx5e_register_vport_rep(struct mlx5_core_dev *mdev)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;
	int total_vfs = MLX5_TOTAL_VPORTS(mdev);
	int vport;
	u8 mac[ETH_ALEN];

	if (!MLX5_CAP_GEN(mdev, vport_group_manager))
		return;

	mlx5_query_nic_vport_mac_address(mdev, 0, mac);

	for (vport = 1; vport < total_vfs; vport++) {
		struct mlx5_eswitch_rep rep;

		rep.load = mlx5e_vport_rep_load;
		rep.unload = mlx5e_vport_rep_unload;
		rep.vport = vport;
		ether_addr_copy(rep.hw_id, mac);
		mlx5_eswitch_register_vport_rep(esw, &rep);
	}
}

void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	const struct mlx5e_profile *profile = priv->profile;

	set_bit(MLX5E_STATE_DESTROYING, &priv->state);
	if (profile->disable)
		profile->disable(priv);

	flush_workqueue(priv->wq);

	rtnl_lock();
	if (netif_running(netdev))
		mlx5e_close(netdev);
	netif_device_detach(netdev);
	rtnl_unlock();

	mlx5e_destroy_q_counter(priv);
	profile->cleanup_rx(priv);
	mlx5e_close_drop_rq(priv);
	profile->cleanup_tx(priv);
	mlx5_core_destroy_mkey(priv->mdev, &priv->umr_mkey);
	cancel_delayed_work_sync(&priv->update_stats_work);
}

/* The scope of mlx5e_attach and mlx5e_detach is limited to creating and
 * destroying the hardware contexts and connecting them to the current netdev.
 */
static int mlx5e_attach(struct mlx5_core_dev *mdev, void *vpriv)
{
	struct mlx5e_priv *priv = vpriv;
	struct net_device *netdev = priv->netdev;
	int err;

	if (netif_device_present(netdev))
		return 0;

	err = mlx5e_create_mdev_resources(mdev);
	if (err)
		return err;

	err = mlx5e_attach_netdev(mdev, netdev);
	if (err) {
		mlx5e_destroy_mdev_resources(mdev);
		return err;
	}

	return 0;
}

static void mlx5e_detach(struct mlx5_core_dev *mdev, void *vpriv)
{
	struct mlx5e_priv *priv = vpriv;
	struct net_device *netdev = priv->netdev;

	if (!netif_device_present(netdev))
		return;

	mlx5e_detach_netdev(mdev, netdev);
	mlx5e_destroy_mdev_resources(mdev);
}

static void *mlx5e_add(struct mlx5_core_dev *mdev)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;
	int total_vfs = MLX5_TOTAL_VPORTS(mdev);
	void *ppriv = NULL;
	void *priv;
	int vport;
	int err;
	struct net_device *netdev;

	err = mlx5e_check_required_hca_cap(mdev);
	if (err)
		return NULL;

	mlx5e_register_vport_rep(mdev);

	if (MLX5_CAP_GEN(mdev, vport_group_manager))
		ppriv = &esw->offloads.vport_reps[0];

	netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, ppriv);
	if (!netdev) {
		mlx5_core_err(mdev, "mlx5e_create_netdev failed\n");
		goto err_unregister_reps;
	}

	priv = netdev_priv(netdev);

	err = mlx5e_attach(mdev, priv);
	if (err) {
		mlx5_core_err(mdev, "mlx5e_attach failed, %d\n", err);
		goto err_destroy_netdev;
	}

	err = register_netdev(netdev);
	if (err) {
		mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
		goto err_detach;
	}

	return priv;

err_detach:
	mlx5e_detach(mdev, priv);

err_destroy_netdev:
	mlx5e_destroy_netdev(mdev, priv);

err_unregister_reps:
	for (vport = 1; vport < total_vfs; vport++)
		mlx5_eswitch_unregister_vport_rep(esw, vport);

	return NULL;
}

void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv)
{
	const struct mlx5e_profile *profile = priv->profile;
	struct net_device *netdev = priv->netdev;

	unregister_netdev(netdev);
	destroy_workqueue(priv->wq);
	if (profile->cleanup)
		profile->cleanup(priv);
	free_netdev(netdev);
}

static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;
	int total_vfs = MLX5_TOTAL_VPORTS(mdev);
	struct mlx5e_priv *priv = vpriv;
	int vport;

	for (vport = 1; vport < total_vfs; vport++)
		mlx5_eswitch_unregister_vport_rep(esw, vport);

	mlx5e_detach(mdev, vpriv);
	mlx5e_destroy_netdev(mdev, priv);
}

static void *mlx5e_get_netdev(void *vpriv)
{
	struct mlx5e_priv *priv = vpriv;

	return priv->netdev;
}

static struct mlx5_interface mlx5e_interface = {
	.add       = mlx5e_add,
	.remove    = mlx5e_remove,
	.attach    = mlx5e_attach,
	.detach    = mlx5e_detach,
	.event     = mlx5e_async_event,
	.protocol  = MLX5_INTERFACE_PROTOCOL_ETH,
	.get_dev   = mlx5e_get_netdev,
};

void mlx5e_init(void)
{
	mlx5e_build_ptys2ethtool_map();
	mlx5_register_interface(&mlx5e_interface);
}

void mlx5e_cleanup(void)
{
	mlx5_unregister_interface(&mlx5e_interface);
}