/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sockios.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/addrconf.h>
#include <net/bonding.h>
#include <linux/uaccess.h>
#include <linux/crash_dump.h>
#include <net/udp_tunnel.h>
#include <net/xfrm.h>
#if defined(CONFIG_CHELSIO_TLS_DEVICE)
#include <net/tls.h>
#endif

#include "cxgb4.h"
#include "cxgb4_filter.h"
#include "t4_regs.h"
#include "t4_values.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "t4fw_version.h"
#include "cxgb4_dcb.h"
#include "srq.h"
#include "cxgb4_debugfs.h"
#include "clip_tbl.h"
#include "l2t.h"
#include "smt.h"
#include "sched.h"
#include "cxgb4_tc_u32.h"
#include "cxgb4_tc_flower.h"
#include "cxgb4_tc_mqprio.h"
#include "cxgb4_tc_matchall.h"
#include "cxgb4_ptp.h"
#include "cxgb4_cudbg.h"

char cxgb4_driver_name[] = KBUILD_MODNAME;

#define DRV_DESC "Chelsio T4/T5/T6 Network Driver"

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

/* Macros needed to support the PCI Device ID Table ...
 */
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
	static const struct pci_device_id cxgb4_pci_tbl[] = {
#define CXGB4_UNIFIED_PF 0x4

#define CH_PCI_DEVICE_ID_FUNCTION CXGB4_UNIFIED_PF

/* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine is
 * called for both.
 */
#define CH_PCI_DEVICE_ID_FUNCTION2 0x0

#define CH_PCI_ID_TABLE_ENTRY(devid) \
		{PCI_VDEVICE(CHELSIO, (devid)), CXGB4_UNIFIED_PF}

#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
		{ 0, } \
	}

#include "t4_pci_id_tbl.h"

#define FW4_FNAME "cxgb4/t4fw.bin"
#define FW5_FNAME "cxgb4/t5fw.bin"
#define FW6_FNAME "cxgb4/t6fw.bin"
#define FW4_CFNAME "cxgb4/t4-config.txt"
#define FW5_CFNAME "cxgb4/t5-config.txt"
#define FW6_CFNAME "cxgb4/t6-config.txt"
#define PHY_AQ1202_FIRMWARE "cxgb4/aq1202_fw.cld"
#define PHY_BCM84834_FIRMWARE "cxgb4/bcm8483.bin"
#define PHY_AQ1202_DEVICEID 0x4409
#define PHY_BCM84834_DEVICEID 0x4486

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW4_FNAME);
MODULE_FIRMWARE(FW5_FNAME);
MODULE_FIRMWARE(FW6_FNAME);

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and INTx interrupts
 * msi = 0: force INTx interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
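/* For example, loading the module with "modprobe cxgb4 msi=1" restricts the
 * driver to MSI/INTx, while the default (msi=2) lets it try MSI-X first.
 */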

/*
 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
 * offset by 2 bytes in order to have the IP headers line up on 4-byte
 * boundaries.  This is a requirement for many architectures which will throw
 * a machine check fault if an attempt is made to access one of the 4-byte IP
 * header fields on a non-4-byte boundary.  And it's a major performance issue
 * even on some architectures which allow it like some implementations of the
 * x86 ISA.  However, some architectures don't mind this and for some very
 * edge-case performance sensitive applications (like forwarding large volumes
 * of small packets), setting this DMA offset to 0 will decrease the number of
 * PCI-E Bus transfers enough to measurably affect performance.
 */
static int rx_dma_offset = 2;

/* TX Queue select used to determine what algorithm to use for selecting TX
 * queue. Select between the kernel provided function (select_queue=0) or user
 * cxgb_select_queue function (select_queue=1)
 *
 * Default: select_queue=0
 */
static int select_queue;
module_param(select_queue, int, 0644);
MODULE_PARM_DESC(select_queue,
		 "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");

static struct dentry *cxgb4_debugfs_root;

LIST_HEAD(adapter_list);
DEFINE_MUTEX(uld_mutex);
LIST_HEAD(uld_list);

static int cfg_queues(struct adapter *adap);

static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		netdev_info(dev, "link down\n");
	else {
		static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };

		const char *s;
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_cfg.speed) {
		case 100:
			s = "100Mbps";
			break;
		case 1000:
			s = "1Gbps";
			break;
		case 10000:
			s = "10Gbps";
			break;
		case 25000:
			s = "25Gbps";
			break;
		case 40000:
			s = "40Gbps";
			break;
		case 50000:
			s = "50Gbps";
			break;
		case 100000:
			s = "100Gbps";
			break;
		default:
			pr_info("%s: unsupported speed: %d\n",
				dev->name, p->link_cfg.speed);
			return;
		}

		netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
			    fc[p->link_cfg.fc]);
	}
}

#ifdef CONFIG_CHELSIO_T4_DCB
/* Set up/tear down Data Center Bridging Priority mapping for a net device. */
static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
	int i;

	/* We use a simple mapping of Port TX Queue Index to DCB
	 * Priority when we're enabling DCB.
	 */
	for (i = 0; i < pi->nqsets; i++, txq++) {
		u32 name, value;
		int err;

		name = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			FW_PARAMS_PARAM_X_V(
				FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
			FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id));
		value = enable ? i : 0xffffffff;

		/* Since we can be called while atomic (from "interrupt
		 * level") we need to issue the Set Parameters Command
		 * without sleeping (timeout < 0).
		 */
		err = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
					    &name, &value,
					    -FW_CMD_MAX_TIMEOUT);

		if (err)
			dev_err(adap->pdev_dev,
				"Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
				enable ? "set" : "unset", pi->port_id, i, -err);
		else
			txq->dcb_prio = enable ? value : 0;
	}
}

int cxgb4_dcb_enabled(const struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);

	if (!pi->dcb.enabled)
		return 0;

	return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
		(pi->dcb.state == CXGB4_DCB_STATE_HOST));
}
#endif /* CONFIG_CHELSIO_T4_DCB */

void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
	struct net_device *dev = adapter->port[port_id];

	/* Skip changes from disabled ports. */
	if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
		if (link_stat)
			netif_carrier_on(dev);
		else {
#ifdef CONFIG_CHELSIO_T4_DCB
			if (cxgb4_dcb_enabled(dev)) {
				cxgb4_dcb_reset(dev);
				dcb_tx_queue_prio_enable(dev, false);
			}
#endif /* CONFIG_CHELSIO_T4_DCB */
			netif_carrier_off(dev);
		}

		link_report(dev);
	}
}

void t4_os_portmod_changed(struct adapter *adap, int port_id)
{
	static const char *mod_str[] = {
		NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
	};

	struct net_device *dev = adap->port[port_id];
	struct port_info *pi = netdev_priv(dev);

	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
		netdev_info(dev, "port module unplugged\n");
	else if (pi->mod_type < ARRAY_SIZE(mod_str))
		netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
		netdev_info(dev, "%s: unsupported port module inserted\n",
			    dev->name);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
		netdev_info(dev, "%s: unknown port module inserted\n",
			    dev->name);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
		netdev_info(dev, "%s: transceiver module error\n", dev->name);
	else
		netdev_info(dev, "%s: unknown module type %d inserted\n",
			    dev->name, pi->mod_type);

	/* If the interface is running, then we'll need any "sticky" Link
	 * Parameters redone with a new Transceiver Module.
	 */
	pi->link_cfg.redo_l1cfg = netif_running(dev);
}

int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
module_param(dbfifo_int_thresh, int, 0644);
MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");

/*
 * usecs to sleep while draining the dbfifo
 */
static int dbfifo_drain_delay = 1000;
module_param(dbfifo_drain_delay, int, 0644);
MODULE_PARM_DESC(dbfifo_drain_delay,
		 "usecs to sleep while draining the dbfifo");

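/* Recompute the 64-bit hash-filter vector from the adapter's MAC hash list
 * and program it for this port's VI.
 */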
static inline int cxgb4_set_addr_hash(struct port_info *pi)
{
	struct adapter *adap = pi->adapter;
	u64 vec = 0;
	bool ucast = false;
	struct hash_mac_addr *entry;

	/* Calculate the hash vector for the updated list and program it */
	list_for_each_entry(entry, &adap->mac_hlist, list) {
		ucast |= is_unicast_ether_addr(entry->addr);
		vec |= (1ULL << hash_mac_addr(entry->addr));
	}
	return t4_set_addr_hash(adap, adap->mbox, pi->viid, ucast,
				vec, false);
}

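/* Address sync callback used by __dev_uc_sync()/__dev_mc_sync(): allocate an
 * exact-match MAC filter for @mac_addr and, if the allocation spills into the
 * hash region, remember the address in the adapter's hash list and reprogram
 * the hash vector.
 */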
static int cxgb4_mac_sync(struct net_device *netdev, const u8 *mac_addr)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adap = pi->adapter;
	int ret;
	u64 mhash = 0;
	u64 uhash = 0;
	/* idx stores the index of the allocated filter; its size should be
	 * increased if we ever allocate filters for more than one MAC
	 * address at a time.
	 */

	u16 idx[1] = {};
	bool free = false;
	bool ucast = is_unicast_ether_addr(mac_addr);
	const u8 *maclist[1] = {mac_addr};
	struct hash_mac_addr *new_entry;

	ret = cxgb4_alloc_mac_filt(adap, pi->viid, free, 1, maclist,
				   idx, ucast ? &uhash : &mhash, false);
	if (ret < 0)
		goto out;
	/* if hash != 0, then add the addr to hash addr list
	 * so that at the end we can calculate the hash for the
	 * list and program it
	 */
	if (uhash || mhash) {
		new_entry = kzalloc(sizeof(*new_entry), GFP_ATOMIC);
		if (!new_entry)
			return -ENOMEM;
		ether_addr_copy(new_entry->addr, mac_addr);
		list_add_tail(&new_entry->list, &adap->mac_hlist);
		ret = cxgb4_set_addr_hash(pi);
	}
out:
	return ret < 0 ? ret : 0;
}

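/* Address unsync callback: if @mac_addr is tracked in the hash list, drop it
 * and reprogram the hash vector; otherwise free its exact-match MAC filter.
 */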
static int cxgb4_mac_unsync(struct net_device *netdev, const u8 *mac_addr)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adap = pi->adapter;
	int ret;
	const u8 *maclist[1] = {mac_addr};
	struct hash_mac_addr *entry, *tmp;

	/* If the MAC address to be removed is in the hash addr
	 * list, delete it from the list and update hash vector
	 */
	list_for_each_entry_safe(entry, tmp, &adap->mac_hlist, list) {
		if (ether_addr_equal(entry->addr, mac_addr)) {
			list_del(&entry->list);
			kfree(entry);
			return cxgb4_set_addr_hash(pi);
		}
	}

	ret = cxgb4_free_mac_filt(adap, pi->viid, 1, maclist, false);
	return ret < 0 ? -EINVAL : 0;
}

/*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	__dev_uc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);
	__dev_mc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);

	return t4_set_rxmode(adapter, adapter->mbox, pi->viid, pi->viid_mirror,
			     mtu, (dev->flags & IFF_PROMISC) ? 1 : 0,
			     (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
			     sleep_ok);
}

/**
 *	cxgb4_change_mac - Update match filter for a MAC address.
 *	@pi: the port_info
 *	@viid: the VI id
 *	@tcam_idx: TCAM index of existing filter for old value of MAC address,
 *		   or -1
 *	@addr: the new MAC address value
 *	@persist: whether a new MAC allocation should be persistent
 *	@smt_idx: the destination to store the new SMT index.
 *
 *	Modifies an MPS filter and sets it to the new MAC address if
 *	@tcam_idx >= 0, or adds the MAC address to a new filter if
 *	@tcam_idx < 0. In the latter case the address is added persistently
 *	if @persist is %true.
 *	Addresses are programmed to the hash region if the TCAM runs out of entries.
 *
 */
int cxgb4_change_mac(struct port_info *pi, unsigned int viid,
		     int *tcam_idx, const u8 *addr, bool persist,
		     u8 *smt_idx)
{
	struct adapter *adapter = pi->adapter;
	struct hash_mac_addr *entry, *new_entry;
	int ret;

	ret = t4_change_mac(adapter, adapter->mbox, viid,
			    *tcam_idx, addr, persist, smt_idx);
	/* We ran out of TCAM entries. try programming hash region. */
	if (ret == -ENOMEM) {
		/* If the MAC address to be updated is in the hash addr
		 * list, update it from the list
		 */
		list_for_each_entry(entry, &adapter->mac_hlist, list) {
			if (entry->iface_mac) {
				ether_addr_copy(entry->addr, addr);
				goto set_hash;
			}
		}
		new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
		if (!new_entry)
			return -ENOMEM;
		ether_addr_copy(new_entry->addr, addr);
		new_entry->iface_mac = true;
		list_add_tail(&new_entry->list, &adapter->mac_hlist);
set_hash:
		ret = cxgb4_set_addr_hash(pi);
	} else if (ret >= 0) {
		*tcam_idx = ret;
		ret = 0;
	}

	return ret;
}

/*
 *	link_start - enable a port
 *	@dev: the port to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static int link_start(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	unsigned int mb = pi->adapter->mbox;
	int ret;

	/*
	 * We do not set address filters and promiscuity here, the stack does
	 * that step explicitly.
	 */
	ret = t4_set_rxmode(pi->adapter, mb, pi->viid, pi->viid_mirror,
			    dev->mtu, -1, -1, -1,
			    !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (ret == 0)
		ret = cxgb4_update_mac_filt(pi, pi->viid, &pi->xact_addr_filt,
					    dev->dev_addr, true, &pi->smt_idx);
	if (ret == 0)
		ret = t4_link_l1cfg(pi->adapter, mb, pi->tx_chan,
				    &pi->link_cfg);
	if (ret == 0) {
		local_bh_disable();
		ret = t4_enable_pi_params(pi->adapter, mb, pi, true,
					  true, CXGB4_DCB_ENABLED);
		local_bh_enable();
	}

	return ret;
}

#ifdef CONFIG_CHELSIO_T4_DCB
/* Handle a Data Center Bridging update message from the firmware. */
static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
{
	int port = FW_PORT_CMD_PORTID_G(ntohl(pcmd->op_to_portid));
	struct net_device *dev = adap->port[adap->chan_map[port]];
	int old_dcb_enabled = cxgb4_dcb_enabled(dev);
	int new_dcb_enabled;

	cxgb4_dcb_handle_fw_update(adap, pcmd);
	new_dcb_enabled = cxgb4_dcb_enabled(dev);

	/* If the DCB has become enabled or disabled on the port then we're
	 * going to need to set up/tear down DCB Priority parameters for the
	 * TX Queues associated with the port.
	 */
	if (new_dcb_enabled != old_dcb_enabled)
		dcb_tx_queue_prio_enable(dev, new_dcb_enabled);
}
#endif /* CONFIG_CHELSIO_T4_DCB */

/* Response queue handler for the FW event queue.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
			  const struct pkt_gl *gl)
{
	u8 opcode = ((const struct rss_header *)rsp)->opcode;

	rsp++;                                          /* skip RSS header */

	/* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
	 */
	if (unlikely(opcode == CPL_FW4_MSG &&
	   ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
		rsp++;
		opcode = ((const struct rss_header *)rsp)->opcode;
		rsp++;
		if (opcode != CPL_SGE_EGR_UPDATE) {
			dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n"
				, opcode);
			goto out;
		}
	}

	if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
		const struct cpl_sge_egr_update *p = (void *)rsp;
		unsigned int qid = EGR_QID_G(ntohl(p->opcode_qid));
		struct sge_txq *txq;

		txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
		txq->restarts++;
		if (txq->q_type == CXGB4_TXQ_ETH) {
			struct sge_eth_txq *eq;

			eq = container_of(txq, struct sge_eth_txq, q);
			t4_sge_eth_txq_egress_update(q->adap, eq, -1);
		} else {
			struct sge_uld_txq *oq;

			oq = container_of(txq, struct sge_uld_txq, q);
			tasklet_schedule(&oq->qresume_tsk);
		}
	} else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
		const struct cpl_fw6_msg *p = (void *)rsp;

#ifdef CONFIG_CHELSIO_T4_DCB
		const struct fw_port_cmd *pcmd = (const void *)p->data;
		unsigned int cmd = FW_CMD_OP_G(ntohl(pcmd->op_to_portid));
		unsigned int action =
			FW_PORT_CMD_ACTION_G(ntohl(pcmd->action_to_len16));

		if (cmd == FW_PORT_CMD &&
		    (action == FW_PORT_ACTION_GET_PORT_INFO ||
		     action == FW_PORT_ACTION_GET_PORT_INFO32)) {
			int port = FW_PORT_CMD_PORTID_G(
					be32_to_cpu(pcmd->op_to_portid));
			struct net_device *dev;
			int dcbxdis, state_input;

			dev = q->adap->port[q->adap->chan_map[port]];
			dcbxdis = (action == FW_PORT_ACTION_GET_PORT_INFO
			  ? !!(pcmd->u.info.dcbxdis_pkd & FW_PORT_CMD_DCBXDIS_F)
			  : !!(be32_to_cpu(pcmd->u.info32.lstatus32_to_cbllen32)
			       & FW_PORT_CMD_DCBXDIS32_F));
			state_input = (dcbxdis
				       ? CXGB4_DCB_INPUT_FW_DISABLED
				       : CXGB4_DCB_INPUT_FW_ENABLED);

			cxgb4_dcb_state_fsm(dev, state_input);
		}

		if (cmd == FW_PORT_CMD &&
		    action == FW_PORT_ACTION_L2_DCB_CFG)
			dcb_rpl(q->adap, pcmd);
		else
#endif
			if (p->type == 0)
				t4_handle_fw_rpl(q->adap, p->data);
	} else if (opcode == CPL_L2T_WRITE_RPL) {
		const struct cpl_l2t_write_rpl *p = (void *)rsp;

		do_l2t_write_rpl(q->adap, p);
	} else if (opcode == CPL_SMT_WRITE_RPL) {
		const struct cpl_smt_write_rpl *p = (void *)rsp;

		do_smt_write_rpl(q->adap, p);
	} else if (opcode == CPL_SET_TCB_RPL) {
		const struct cpl_set_tcb_rpl *p = (void *)rsp;

		filter_rpl(q->adap, p);
	} else if (opcode == CPL_ACT_OPEN_RPL) {
		const struct cpl_act_open_rpl *p = (void *)rsp;

		hash_filter_rpl(q->adap, p);
	} else if (opcode == CPL_ABORT_RPL_RSS) {
		const struct cpl_abort_rpl_rss *p = (void *)rsp;

		hash_del_filter_rpl(q->adap, p);
	} else if (opcode == CPL_SRQ_TABLE_RPL) {
		const struct cpl_srq_table_rpl *p = (void *)rsp;

		do_srq_table_rpl(q->adap, p);
	} else
		dev_err(q->adap->pdev_dev,
			"unexpected CPL %#x on FW event queue\n", opcode);
out:
	return 0;
}

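/* Disable whichever of MSI-X or MSI is currently in use and clear the
 * corresponding adapter flag.
 */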
static void disable_msi(struct adapter *adapter)
{
	if (adapter->flags & CXGB4_USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~CXGB4_USING_MSIX;
	} else if (adapter->flags & CXGB4_USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~CXGB4_USING_MSI;
	}
}

/*
 * Interrupt handler for non-data events used with MSI-X.
 */
static irqreturn_t t4_nondata_intr(int irq, void *cookie)
{
	struct adapter *adap = cookie;
	u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A));

	if (v & PFSW_F) {
		adap->swintr = 1;
		t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A), v);
	}
	if (adap->flags & CXGB4_MASTER_PF)
		t4_slow_intr_handler(adap);
	return IRQ_HANDLED;
}

int cxgb4_set_msix_aff(struct adapter *adap, unsigned short vec,
		       cpumask_var_t *aff_mask, int idx)
{
	int rv;

	if (!zalloc_cpumask_var(aff_mask, GFP_KERNEL)) {
		dev_err(adap->pdev_dev, "alloc_cpumask_var failed\n");
		return -ENOMEM;
	}

	cpumask_set_cpu(cpumask_local_spread(idx, dev_to_node(adap->pdev_dev)),
			*aff_mask);

	rv = irq_set_affinity_hint(vec, *aff_mask);
	if (rv)
		dev_warn(adap->pdev_dev,
			 "irq_set_affinity_hint %u failed %d\n",
			 vec, rv);

	return 0;
}

void cxgb4_clear_msix_aff(unsigned short vec, cpumask_var_t aff_mask)
{
	irq_set_affinity_hint(vec, NULL);
	free_cpumask_var(aff_mask);
}

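/* Request the MSI-X vectors for the firmware event queue and every Ethernet
 * Rx queue, setting a CPU affinity hint for each queue; on failure, unwind
 * any IRQs already requested.
 */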
static int request_msix_queue_irqs(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	struct msix_info *minfo;
	int err, ethqidx;

	if (s->fwevtq_msix_idx < 0)
		return -ENOMEM;

	err = request_irq(adap->msix_info[s->fwevtq_msix_idx].vec,
			  t4_sge_intr_msix, 0,
			  adap->msix_info[s->fwevtq_msix_idx].desc,
			  &s->fw_evtq);
	if (err)
		return err;

	for_each_ethrxq(s, ethqidx) {
		minfo = s->ethrxq[ethqidx].msix;
		err = request_irq(minfo->vec,
				  t4_sge_intr_msix, 0,
				  minfo->desc,
				  &s->ethrxq[ethqidx].rspq);
		if (err)
			goto unwind;

		cxgb4_set_msix_aff(adap, minfo->vec,
				   &minfo->aff_mask, ethqidx);
	}
	return 0;

unwind:
	while (--ethqidx >= 0) {
		minfo = s->ethrxq[ethqidx].msix;
		cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
		free_irq(minfo->vec, &s->ethrxq[ethqidx].rspq);
	}
	free_irq(adap->msix_info[s->fwevtq_msix_idx].vec, &s->fw_evtq);
	return err;
}

static void free_msix_queue_irqs(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	struct msix_info *minfo;
	int i;

	free_irq(adap->msix_info[s->fwevtq_msix_idx].vec, &s->fw_evtq);
	for_each_ethrxq(s, i) {
		minfo = s->ethrxq[i].msix;
		cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
		free_irq(minfo->vec, &s->ethrxq[i].rspq);
	}
}

static int setup_ppod_edram(struct adapter *adap)
{
	unsigned int param, val;
	int ret;

	/* Driver sends FW_PARAMS_PARAM_DEV_PPOD_EDRAM read command to check
	 * if firmware supports ppod edram feature or not. If firmware
	 * returns 1, then driver can enable this feature by sending
	 * FW_PARAMS_PARAM_DEV_PPOD_EDRAM write command with value 1 to
	 * enable ppod edram feature.
	 */
	param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PPOD_EDRAM));

	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
	if (ret < 0) {
		dev_warn(adap->pdev_dev,
			 "querying PPOD_EDRAM support failed: %d\n",
			 ret);
		return -1;
	}

	if (val != 1)
		return -1;

	ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
	if (ret < 0) {
		dev_err(adap->pdev_dev,
			"setting PPOD_EDRAM failed: %d\n", ret);
		return -1;
	}
	return 0;
}

static void adap_config_hpfilter(struct adapter *adapter)
{
	u32 param, val = 0;
	int ret;

	/* Enable HP filter region. Older fw will fail this request and
	 * it is fine.
	 */
	param = FW_PARAM_DEV(HPFILTER_REGION_SUPPORT);
	ret = t4_set_params(adapter, adapter->mbox, adapter->pf, 0,
			    1, &param, &val);

	/* An error means FW doesn't know about HP filter support,
	 * it's not a problem, don't return an error.
	 */
	if (ret < 0)
		dev_err(adapter->pdev_dev,
			"HP filter region isn't supported by FW\n");
}

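/* Write the RSS indirection table for a VI and set its default RSS behaviour
 * (2-/4-tuple hashing with UDP enabled), using rss[0] as the default Ingress
 * Queue for unhashed packets.
 */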
static int cxgb4_config_rss(const struct port_info *pi, u16 *rss,
			    u16 rss_size, u16 viid)
{
	struct adapter *adap = pi->adapter;
	int ret;

	ret = t4_config_rss_range(adap, adap->mbox, viid, 0, rss_size, rss,
				  rss_size);
	if (ret)
		return ret;

	/* If Tunnel All Lookup isn't specified in the global RSS
	 * Configuration, then we need to specify a default Ingress
	 * Queue for any ingress packets which aren't hashed.  We'll
	 * use our first ingress queue ...
	 */
	return t4_config_vi_rss(adap, adap->mbox, viid,
				FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F |
				FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F |
				FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F |
				FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F |
				FW_RSS_VI_CONFIG_CMD_UDPEN_F,
				rss[0]);
}

/**
 *	cxgb4_write_rss - write the RSS table for a given port
 *	@pi: the port
 *	@queues: array of queue indices for RSS
 *
 *	Sets up the portion of the HW RSS table for the port's VI to distribute
 *	packets to the Rx queues in @queues.
 *	Should never be called before the SGE Ethernet Rx queues are set up.
 */
int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
{
	struct adapter *adapter = pi->adapter;
	const struct sge_eth_rxq *rxq;
	int i, err;
	u16 *rss;

	rxq = &adapter->sge.ethrxq[pi->first_qset];
	rss = kmalloc_array(pi->rss_size, sizeof(u16), GFP_KERNEL);
	if (!rss)
		return -ENOMEM;

	/* map the queue indices to queue ids */
	for (i = 0; i < pi->rss_size; i++, queues++)
		rss[i] = rxq[*queues].rspq.abs_id;

	err = cxgb4_config_rss(pi, rss, pi->rss_size, pi->viid);
	kfree(rss);
	return err;
}

/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS for each port.
 */
static int setup_rss(struct adapter *adap)
{
	int i, j, err;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		/* Fill default values with equal distribution */
		for (j = 0; j < pi->rss_size; j++)
			pi->rss[j] = j % pi->nqsets;

		err = cxgb4_write_rss(pi, pi->rss);
		if (err)
			return err;
	}
	return 0;
}

/*
 * Return the channel of the ingress queue with the given qid.
 */
static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
{
	qid -= p->ingr_start;
	return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
}

void cxgb4_quiesce_rx(struct sge_rspq *q)
{
	if (q->handler)
		napi_disable(&q->napi);
}

/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < adap->sge.ingr_sz; i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (!q)
			continue;

		cxgb4_quiesce_rx(q);
	}
}

/* Disable interrupt and napi handler */
static void disable_interrupts(struct adapter *adap)
{
	struct sge *s = &adap->sge;

	if (adap->flags & CXGB4_FULL_INIT_DONE) {
		t4_intr_disable(adap);
		if (adap->flags & CXGB4_USING_MSIX) {
			free_msix_queue_irqs(adap);
			free_irq(adap->msix_info[s->nd_msix_idx].vec,
				 adap);
		} else {
			free_irq(adap->pdev->irq, adap);
		}
		quiesce_rx(adap);
	}
}

void cxgb4_enable_rx(struct adapter *adap, struct sge_rspq *q)
{
	if (q->handler)
		napi_enable(&q->napi);

	/* 0-increment GTS to start the timer and enable interrupts */
	t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
		     SEINTARM_V(q->intr_params) |
		     INGRESSQID_V(q->cntxt_id));
}

/*
 * Enable NAPI scheduling and interrupt generation for all Rx queues.
 */
static void enable_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < adap->sge.ingr_sz; i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (!q)
			continue;

		cxgb4_enable_rx(adap, q);
	}
}

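/* Reserve an MSI-X vector for the non-data (firmware/error) interrupt when
 * MSI-X is in use; otherwise leave the index at -1.
 */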
static int setup_non_data_intr(struct adapter *adap)
{
	int msix;

	adap->sge.nd_msix_idx = -1;
	if (!(adap->flags & CXGB4_USING_MSIX))
		return 0;

	/* Request MSI-X vector for non-data interrupt */
	msix = cxgb4_get_msix_idx_from_bmap(adap);
	if (msix < 0)
		return -ENOMEM;

	snprintf(adap->msix_info[msix].desc,
		 sizeof(adap->msix_info[msix].desc),
		 "%s", adap->port[0]->name);

	adap->sge.nd_msix_idx = msix;
	return 0;
}

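/* Allocate the firmware event queue, binding it to a dedicated MSI-X vector
 * when available or to the forwarded-interrupt queue otherwise.
 */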
static int setup_fw_sge_queues(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int msix, err = 0;

	bitmap_zero(s->starving_fl, s->egr_sz);
	bitmap_zero(s->txq_maperr, s->egr_sz);

	if (adap->flags & CXGB4_USING_MSIX) {
		s->fwevtq_msix_idx = -1;
		msix = cxgb4_get_msix_idx_from_bmap(adap);
		if (msix < 0)
			return -ENOMEM;

		snprintf(adap->msix_info[msix].desc,
			 sizeof(adap->msix_info[msix].desc),
			 "%s-FWeventq", adap->port[0]->name);
	} else {
		err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
				       NULL, NULL, NULL, -1);
		if (err)
			return err;
		msix = -((int)s->intrq.abs_id + 1);
	}

	err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
			       msix, NULL, fwevtq_handler, NULL, -1);
	if (err && msix >= 0)
		cxgb4_free_msix_idx_in_bmap(adap, msix);

	s->fwevtq_msix_idx = msix;
	return err;
}

/**
 *	setup_sge_queues - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adap)
{
	struct sge_uld_rxq_info *rxq_info = NULL;
	struct sge *s = &adap->sge;
	unsigned int cmplqid = 0;
	int err, i, j, msix = 0;

	if (is_uld(adap))
		rxq_info = s->uld_rxq_info[CXGB4_ULD_RDMA];

	if (!(adap->flags & CXGB4_USING_MSIX))
		msix = -((int)s->intrq.abs_id + 1);

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);
		struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
		struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];

		for (j = 0; j < pi->nqsets; j++, q++) {
			if (msix >= 0) {
				msix = cxgb4_get_msix_idx_from_bmap(adap);
				if (msix < 0) {
					err = msix;
					goto freeout;
				}

				snprintf(adap->msix_info[msix].desc,
					 sizeof(adap->msix_info[msix].desc),
					 "%s-Rx%d", dev->name, j);
				q->msix = &adap->msix_info[msix];
			}

			err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
					       msix, &q->fl,
					       t4_ethrx_handler,
					       NULL,
					       t4_get_tp_ch_map(adap,
								pi->tx_chan));
			if (err)
				goto freeout;
			q->rspq.idx = j;
			memset(&q->stats, 0, sizeof(q->stats));
		}

		q = &s->ethrxq[pi->first_qset];
		for (j = 0; j < pi->nqsets; j++, t++, q++) {
			err = t4_sge_alloc_eth_txq(adap, t, dev,
					netdev_get_tx_queue(dev, j),
					q->rspq.cntxt_id,
					!!(adap->flags & CXGB4_SGE_DBQ_TIMER));
			if (err)
				goto freeout;
		}
	}

	for_each_port(adap, i) {
		/* Note that cmplqid below is 0 if we don't
		 * have RDMA queues, and that's the right value.
		 */
		if (rxq_info)
			cmplqid	= rxq_info->uldrxq[i].rspq.cntxt_id;

		err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
					    s->fw_evtq.cntxt_id, cmplqid);
		if (err)
			goto freeout;
	}

	if (!is_t4(adap->params.chip)) {
		err = t4_sge_alloc_eth_txq(adap, &s->ptptxq, adap->port[0],
					   netdev_get_tx_queue(adap->port[0], 0)
					   , s->fw_evtq.cntxt_id, false);
		if (err)
			goto freeout;
	}

	t4_write_reg(adap, is_t4(adap->params.chip) ?
				MPS_TRC_RSS_CONTROL_A :
				MPS_T5_TRC_RSS_CONTROL_A,
		     RSSCONTROL_V(netdev2pinfo(adap->port[0])->tx_chan) |
		     QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id));
	return 0;
freeout:
	dev_err(adap->pdev_dev, "Can't allocate queues, err=%d\n", -err);
	t4_free_sge_resources(adap);
	return err;
}

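/* Pick a Tx queue for @skb: on a DCB link the VLAN Priority Code Point (or
 * FCoE priority) selects the queue; with TC mqprio configured, unsupported
 * traffic patterns are steered to the regular NIC queues; with the
 * select_queue module parameter set, the recorded Rx queue (or current CPU)
 * is reused; otherwise fall back to netdev_pick_tx().
 */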
static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
			     struct net_device *sb_dev)
{
	int txq;

#ifdef CONFIG_CHELSIO_T4_DCB
	/* If a Data Center Bridging has been successfully negotiated on this
	 * link then we'll use the skb's priority to map it to a TX Queue.
	 * The skb's priority is determined via the VLAN Tag Priority Code
	 * Point field.
	 */
	if (cxgb4_dcb_enabled(dev) && !is_kdump_kernel()) {
		u16 vlan_tci;
		int err;

		err = vlan_get_tag(skb, &vlan_tci);
		if (unlikely(err)) {
			if (net_ratelimit())
				netdev_warn(dev,
					    "TX Packet without VLAN Tag on DCB Link\n");
			txq = 0;
		} else {
			txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
#ifdef CONFIG_CHELSIO_T4_FCOE
			if (skb->protocol == htons(ETH_P_FCOE))
				txq = skb->priority & 0x7;
#endif /* CONFIG_CHELSIO_T4_FCOE */
		}
		return txq;
	}
#endif /* CONFIG_CHELSIO_T4_DCB */

	if (dev->num_tc) {
		struct port_info *pi = netdev2pinfo(dev);
		u8 ver, proto;

		ver = ip_hdr(skb)->version;
		proto = (ver == 6) ? ipv6_hdr(skb)->nexthdr :
				     ip_hdr(skb)->protocol;

		/* Send unsupported traffic pattern to normal NIC queues. */
		txq = netdev_pick_tx(dev, skb, sb_dev);
		if (xfrm_offload(skb) || is_ptp_enabled(skb, dev) ||
		    skb->encapsulation ||
		    (proto != IPPROTO_TCP && proto != IPPROTO_UDP))
			txq = txq % pi->nqsets;

		return txq;
	}

	if (select_queue) {
		txq = (skb_rx_queue_recorded(skb)
			? skb_get_rx_queue(skb)
			: smp_processor_id());

		while (unlikely(txq >= dev->real_num_tx_queues))
			txq -= dev->real_num_tx_queues;

		return txq;
	}

	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
}

static int closest_timer(const struct sge *s, int time)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
		delta = time - s->timer_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}

static int closest_thres(const struct sge *s, int thres)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
		delta = thres - s->counter_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}

/**
 *	cxgb4_set_rspq_intr_params - set a queue's interrupt holdoff parameters
 *	@q: the Rx queue
 *	@us: the hold-off time in us, or 0 to disable timer
 *	@cnt: the hold-off packet count, or 0 to disable counter
 *
 *	Sets an Rx queue's interrupt hold-off time and packet count.  At least
 *	one of the two needs to be enabled for the queue to generate interrupts.
 */
int cxgb4_set_rspq_intr_params(struct sge_rspq *q,
			       unsigned int us, unsigned int cnt)
{
	struct adapter *adap = q->adap;

	if ((us | cnt) == 0)
		cnt = 1;

	if (cnt) {
		int err;
		u32 v, new_idx;

		new_idx = closest_thres(&adap->sge, cnt);
		if (q->desc && q->pktcnt_idx != new_idx) {
			/* the queue has already been created, update it */
			v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			    FW_PARAMS_PARAM_X_V(
					FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
			    FW_PARAMS_PARAM_YZ_V(q->cntxt_id);
			err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
					    &v, &new_idx);
			if (err)
				return err;
		}
		q->pktcnt_idx = new_idx;
	}

	us = us == 0 ? 6 : closest_timer(&adap->sge, us);
	q->intr_params = QINTR_TIMER_IDX_V(us) | QINTR_CNT_EN_V(cnt > 0);
	return 0;
}

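/* Toggle hardware VLAN Rx tag stripping: only NETIF_F_HW_VLAN_CTAG_RX changes
 * need programming; revert the feature bit if t4_set_rxmode() fails.
 */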
static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;
	const struct port_info *pi = netdev_priv(dev);
	int err;

	if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
		return 0;

	err = t4_set_rxmode(pi->adapter, pi->adapter->mbox, pi->viid,
			    pi->viid_mirror, -1, -1, -1, -1,
			    !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (unlikely(err))
		dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
	return err;
}

static int setup_debugfs(struct adapter *adap)
{
	if (IS_ERR_OR_NULL(adap->debugfs_root))
		return -1;

#ifdef CONFIG_DEBUG_FS
	t4_setup_debugfs(adap);
#endif
	return 0;
}

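/* Tear down a single mirror Rx queue: quiesce its NAPI context (unless the
 * adapter is shutting down), release its MSI-X vector and affinity hint, and
 * free the response queue and free list.
 */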
static void cxgb4_port_mirror_free_rxq(struct adapter *adap,
				       struct sge_eth_rxq *mirror_rxq)
{
	if ((adap->flags & CXGB4_FULL_INIT_DONE) &&
	    !(adap->flags & CXGB4_SHUTTING_DOWN))
		cxgb4_quiesce_rx(&mirror_rxq->rspq);

	if (adap->flags & CXGB4_USING_MSIX) {
		cxgb4_clear_msix_aff(mirror_rxq->msix->vec,
				     mirror_rxq->msix->aff_mask);
		free_irq(mirror_rxq->msix->vec, &mirror_rxq->rspq);
		cxgb4_free_msix_idx_in_bmap(adap, mirror_rxq->msix->idx);
	}

	free_rspq_fl(adap, &mirror_rxq->rspq, &mirror_rxq->fl);
}

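/* Allocate the mirror Rx queues for a port's mirror VI: one queue per mirror
 * qset, each with its own MSI-X vector and NAPI context, followed by an RSS
 * table spreading traffic across them.
 */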
static int cxgb4_port_mirror_alloc_queues(struct net_device *dev)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct sge_eth_rxq *mirror_rxq;
	struct sge *s = &adap->sge;
	int ret = 0, msix = 0;
	u16 i, rxqid;
	u16 *rss;

	if (!pi->vi_mirror_count)
		return 0;

	if (s->mirror_rxq[pi->port_id])
		return 0;

	mirror_rxq = kcalloc(pi->nmirrorqsets, sizeof(*mirror_rxq), GFP_KERNEL);
	if (!mirror_rxq)
		return -ENOMEM;

	s->mirror_rxq[pi->port_id] = mirror_rxq;

	if (!(adap->flags & CXGB4_USING_MSIX))
		msix = -((int)adap->sge.intrq.abs_id + 1);

	for (i = 0, rxqid = 0; i < pi->nmirrorqsets; i++, rxqid++) {
		mirror_rxq = &s->mirror_rxq[pi->port_id][i];

		/* Allocate Mirror Rxqs */
		if (msix >= 0) {
			msix = cxgb4_get_msix_idx_from_bmap(adap);
			if (msix < 0) {
				ret = msix;
				goto out_free_queues;
			}

			mirror_rxq->msix = &adap->msix_info[msix];
			snprintf(mirror_rxq->msix->desc,
				 sizeof(mirror_rxq->msix->desc),
				 "%s-mirrorrxq%d", dev->name, i);
		}

		init_rspq(adap, &mirror_rxq->rspq,
			  CXGB4_MIRROR_RXQ_DEFAULT_INTR_USEC,
			  CXGB4_MIRROR_RXQ_DEFAULT_PKT_CNT,
			  CXGB4_MIRROR_RXQ_DEFAULT_DESC_NUM,
			  CXGB4_MIRROR_RXQ_DEFAULT_DESC_SIZE);

		mirror_rxq->fl.size = CXGB4_MIRROR_FLQ_DEFAULT_DESC_NUM;

		ret = t4_sge_alloc_rxq(adap, &mirror_rxq->rspq, false,
				       dev, msix, &mirror_rxq->fl,
				       t4_ethrx_handler, NULL, 0);
		if (ret)
			goto out_free_msix_idx;

		/* Setup MSI-X vectors for Mirror Rxqs */
		if (adap->flags & CXGB4_USING_MSIX) {
			ret = request_irq(mirror_rxq->msix->vec,
					  t4_sge_intr_msix, 0,
					  mirror_rxq->msix->desc,
					  &mirror_rxq->rspq);
			if (ret)
				goto out_free_rxq;

			cxgb4_set_msix_aff(adap, mirror_rxq->msix->vec,
					   &mirror_rxq->msix->aff_mask, i);
		}

		/* Start NAPI for Mirror Rxqs */
		cxgb4_enable_rx(adap, &mirror_rxq->rspq);
	}

	/* Setup RSS for Mirror Rxqs */
	rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
	if (!rss) {
		ret = -ENOMEM;
		goto out_free_queues;
	}

	mirror_rxq = &s->mirror_rxq[pi->port_id][0];
	for (i = 0; i < pi->rss_size; i++)
		rss[i] = mirror_rxq[i % pi->nmirrorqsets].rspq.abs_id;

	ret = cxgb4_config_rss(pi, rss, pi->rss_size, pi->viid_mirror);
	kfree(rss);
	if (ret)
		goto out_free_queues;

	return 0;

out_free_rxq:
	free_rspq_fl(adap, &mirror_rxq->rspq, &mirror_rxq->fl);

out_free_msix_idx:
	cxgb4_free_msix_idx_in_bmap(adap, mirror_rxq->msix->idx);

out_free_queues:
	while (rxqid-- > 0)
		cxgb4_port_mirror_free_rxq(adap,
					   &s->mirror_rxq[pi->port_id][rxqid]);

	kfree(s->mirror_rxq[pi->port_id]);
	s->mirror_rxq[pi->port_id] = NULL;
	return ret;
}

static void cxgb4_port_mirror_free_queues(struct net_device *dev)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct sge *s = &adap->sge;
	u16 i;

	if (!pi->vi_mirror_count)
		return;

	if (!s->mirror_rxq[pi->port_id])
		return;

	for (i = 0; i < pi->nmirrorqsets; i++)
		cxgb4_port_mirror_free_rxq(adap,
					   &s->mirror_rxq[pi->port_id][i]);

	kfree(s->mirror_rxq[pi->port_id]);
	s->mirror_rxq[pi->port_id] = NULL;
}

static int cxgb4_port_mirror_start(struct net_device *dev)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	int ret, idx = -1;

	if (!pi->vi_mirror_count)
		return 0;

	/* Mirror VIs can be created dynamically after the stack has
	 * already set up Rx modes like MTU, promisc, allmulti, etc.
	 * on the main VI. So, parse what the stack set up on the
	 * main VI and update the same on the mirror VI.
	 */
	ret = t4_set_rxmode(adap, adap->mbox, pi->viid, pi->viid_mirror,
			    dev->mtu, (dev->flags & IFF_PROMISC) ? 1 : 0,
			    (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1,
			    !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (ret) {
		dev_err(adap->pdev_dev,
			"Failed start up Rx mode for Mirror VI 0x%x, ret: %d\n",
			pi->viid_mirror, ret);
		return ret;
	}

	/* Enable replication bit for the device's MAC address
	 * in MPS TCAM, so that the packets for the main VI are
	 * replicated to mirror VI.
	 */
	ret = cxgb4_update_mac_filt(pi, pi->viid_mirror, &idx,
				    dev->dev_addr, true, NULL);
	if (ret) {
		dev_err(adap->pdev_dev,
			"Failed updating MAC filter for Mirror VI 0x%x, ret: %d\n",
			pi->viid_mirror, ret);
		return ret;
	}

	/* Enabling a Virtual Interface can result in an interrupt
	 * during the processing of the VI Enable command and, in some
	 * paths, result in an attempt to issue another command in the
	 * interrupt context. Thus, we disable interrupts during the
	 * course of the VI Enable command ...
	 */
	local_bh_disable();
	ret = t4_enable_vi_params(adap, adap->mbox, pi->viid_mirror, true, true,
				  false);
	local_bh_enable();
	if (ret)
		dev_err(adap->pdev_dev,
			"Failed starting Mirror VI 0x%x, ret: %d\n",
			pi->viid_mirror, ret);

	return ret;
}

static void cxgb4_port_mirror_stop(struct net_device *dev)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);

	if (!pi->vi_mirror_count)
		return;

	t4_enable_vi_params(adap, adap->mbox, pi->viid_mirror, false, false,
			    false);
}

int cxgb4_port_mirror_alloc(struct net_device *dev)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	int ret = 0;

	if (!pi->nmirrorqsets)
		return -EOPNOTSUPP;

	mutex_lock(&pi->vi_mirror_mutex);
	if (pi->viid_mirror) {
		pi->vi_mirror_count++;
		goto out_unlock;
	}

	ret = t4_init_port_mirror(pi, adap->mbox, pi->port_id, adap->pf, 0,
				  &pi->viid_mirror);
	if (ret)
		goto out_unlock;

	pi->vi_mirror_count = 1;

	if (adap->flags & CXGB4_FULL_INIT_DONE) {
		ret = cxgb4_port_mirror_alloc_queues(dev);
		if (ret)
			goto out_free_vi;

		ret = cxgb4_port_mirror_start(dev);
		if (ret)
			goto out_free_queues;
	}

	mutex_unlock(&pi->vi_mirror_mutex);
	return 0;

out_free_queues:
	cxgb4_port_mirror_free_queues(dev);

out_free_vi:
	pi->vi_mirror_count = 0;
	t4_free_vi(adap, adap->mbox, adap->pf, 0, pi->viid_mirror);
	pi->viid_mirror = 0;

out_unlock:
	mutex_unlock(&pi->vi_mirror_mutex);
	return ret;
}

void cxgb4_port_mirror_free(struct net_device *dev)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);

	mutex_lock(&pi->vi_mirror_mutex);
	if (!pi->viid_mirror)
		goto out_unlock;

	if (pi->vi_mirror_count > 1) {
		pi->vi_mirror_count--;
		goto out_unlock;
	}

	cxgb4_port_mirror_stop(dev);
	cxgb4_port_mirror_free_queues(dev);

	pi->vi_mirror_count = 0;
	t4_free_vi(adap, adap->mbox, adap->pf, 0, pi->viid_mirror);
	pi->viid_mirror = 0;

out_unlock:
	mutex_unlock(&pi->vi_mirror_mutex);
}

/*
 * upper-layer driver support
 */

/*
 * Allocate an active-open TID and set it to the supplied value.
 */
int cxgb4_alloc_atid(struct tid_info *t, void *data)
{
	int atid = -1;

	spin_lock_bh(&t->atid_lock);
	if (t->afree) {
		union aopen_entry *p = t->afree;

		atid = (p - t->atid_tab) + t->atid_base;
		t->afree = p->next;
		p->data = data;
		t->atids_in_use++;
	}
	spin_unlock_bh(&t->atid_lock);
	return atid;
}
EXPORT_SYMBOL(cxgb4_alloc_atid);

/*
 * Release an active-open TID.
 */
void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
{
	union aopen_entry *p = &t->atid_tab[atid - t->atid_base];

	spin_lock_bh(&t->atid_lock);
	p->next = t->afree;
	t->afree = p;
	t->atids_in_use--;
	spin_unlock_bh(&t->atid_lock);
}
EXPORT_SYMBOL(cxgb4_free_atid);

/*
 * Allocate a server TID and set it to the supplied value.
 */
int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
{
	int stid;

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET) {
		stid = find_first_zero_bit(t->stid_bmap, t->nstids);
		if (stid < t->nstids)
			__set_bit(stid, t->stid_bmap);
		else
			stid = -1;
	} else {
		stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 1);
		if (stid < 0)
			stid = -1;
	}
	if (stid >= 0) {
		t->stid_tab[stid].data = data;
		stid += t->stid_base;
		/* IPv6 requires max of 520 bits or 16 cells in TCAM
		 * This is equivalent to 4 TIDs. With CLIP enabled it
		 * needs 2 TIDs.
		 */
		if (family == PF_INET6) {
			t->stids_in_use += 2;
			t->v6_stids_in_use += 2;
		} else {
			t->stids_in_use++;
		}
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}
EXPORT_SYMBOL(cxgb4_alloc_stid);

/* Allocate a server filter TID and set it to the supplied value.
 */
int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
{
	int stid;

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET) {
		stid = find_next_zero_bit(t->stid_bmap,
				t->nstids + t->nsftids, t->nstids);
		if (stid < (t->nstids + t->nsftids))
			__set_bit(stid, t->stid_bmap);
		else
			stid = -1;
	} else {
		stid = -1;
	}
	if (stid >= 0) {
		t->stid_tab[stid].data = data;
		stid -= t->nstids;
		stid += t->sftid_base;
		t->sftids_in_use++;
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}
EXPORT_SYMBOL(cxgb4_alloc_sftid);

/* Release a server TID.
 */
void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
{
	/* Is it a server filter TID? */
	if (t->nsftids && (stid >= t->sftid_base)) {
		stid -= t->sftid_base;
		stid += t->nstids;
	} else {
		stid -= t->stid_base;
	}

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET)
		__clear_bit(stid, t->stid_bmap);
	else
		bitmap_release_region(t->stid_bmap, stid, 1);
	t->stid_tab[stid].data = NULL;
	if (stid < t->nstids) {
		if (family == PF_INET6) {
			t->stids_in_use -= 2;
			t->v6_stids_in_use -= 2;
		} else {
			t->stids_in_use--;
		}
	} else {
		t->sftids_in_use--;
	}

	spin_unlock_bh(&t->stid_lock);
}
EXPORT_SYMBOL(cxgb4_free_stid);

/*
 * Populate a TID_RELEASE WR.  Caller must properly size the skb.
 */
static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
			   unsigned int tid)
{
	struct cpl_tid_release *req;

	set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
	req = __skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
}

/*
 * Queue a TID release request and if necessary schedule a work queue to
 * process it.
 */
static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
				    unsigned int tid)
{
	struct adapter *adap = container_of(t, struct adapter, tids);
	void **p = &t->tid_tab[tid - t->tid_base];

	spin_lock_bh(&adap->tid_release_lock);
	*p = adap->tid_release_head;
	/* Low 2 bits encode the Tx channel number */
	adap->tid_release_head = (void **)((uintptr_t)p | chan);
	if (!adap->tid_release_task_busy) {
		adap->tid_release_task_busy = true;
		queue_work(adap->workq, &adap->tid_release_task);
	}
	spin_unlock_bh(&adap->tid_release_lock);
}

/*
 * Process the list of pending TID release requests.
 */
static void process_tid_release_list(struct work_struct *work)
{
	struct sk_buff *skb;
	struct adapter *adap;

	adap = container_of(work, struct adapter, tid_release_task);

	spin_lock_bh(&adap->tid_release_lock);
	while (adap->tid_release_head) {
		void **p = adap->tid_release_head;
		unsigned int chan = (uintptr_t)p & 3;
		p = (void *)p - chan;

		adap->tid_release_head = *p;
		*p = NULL;
		spin_unlock_bh(&adap->tid_release_lock);

		while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
					 GFP_KERNEL)))
			schedule_timeout_uninterruptible(1);

		mk_tid_release(skb, chan, p - adap->tids.tid_tab);
		t4_ofld_send(adap, skb);
		spin_lock_bh(&adap->tid_release_lock);
	}
	adap->tid_release_task_busy = false;
	spin_unlock_bh(&adap->tid_release_lock);
}

/*
 * Release a TID and inform HW.  If we are unable to allocate the release
 * message we defer to a work queue.
 */
void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid,
		      unsigned short family)
{
	struct adapter *adap = container_of(t, struct adapter, tids);
	struct sk_buff *skb;

	WARN_ON(tid_out_of_range(&adap->tids, tid));

	if (t->tid_tab[tid - adap->tids.tid_base]) {
		t->tid_tab[tid - adap->tids.tid_base] = NULL;
		atomic_dec(&t->conns_in_use);
		if (t->hash_base && (tid >= t->hash_base)) {
			if (family == AF_INET6)
				atomic_sub(2, &t->hash_tids_in_use);
			else
				atomic_dec(&t->hash_tids_in_use);
		} else {
			if (family == AF_INET6)
				atomic_sub(2, &t->tids_in_use);
			else
				atomic_dec(&t->tids_in_use);
		}
	}

	skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
	if (likely(skb)) {
		mk_tid_release(skb, chan, tid);
		t4_ofld_send(adap, skb);
	} else
		cxgb4_queue_tid_release(t, chan, tid);
}
EXPORT_SYMBOL(cxgb4_remove_tid);

/*
 * Allocate and initialize the TID tables.  Returns 0 on success.
 */
static int tid_init(struct tid_info *t)
{
	struct adapter *adap = container_of(t, struct adapter, tids);
	unsigned int max_ftids = t->nftids + t->nsftids;
	unsigned int natids = t->natids;
	unsigned int hpftid_bmap_size;
	unsigned int eotid_bmap_size;
	unsigned int stid_bmap_size;
	unsigned int ftid_bmap_size;
	size_t size;

	stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
	ftid_bmap_size = BITS_TO_LONGS(t->nftids);
	hpftid_bmap_size = BITS_TO_LONGS(t->nhpftids);
	eotid_bmap_size = BITS_TO_LONGS(t->neotids);
	size = t->ntids * sizeof(*t->tid_tab) +
	       natids * sizeof(*t->atid_tab) +
	       t->nstids * sizeof(*t->stid_tab) +
	       t->nsftids * sizeof(*t->stid_tab) +
	       stid_bmap_size * sizeof(long) +
	       t->nhpftids * sizeof(*t->hpftid_tab) +
	       hpftid_bmap_size * sizeof(long) +
	       max_ftids * sizeof(*t->ftid_tab) +
	       ftid_bmap_size * sizeof(long) +
	       t->neotids * sizeof(*t->eotid_tab) +
	       eotid_bmap_size * sizeof(long);

	t->tid_tab = kvzalloc(size, GFP_KERNEL);
	if (!t->tid_tab)
		return -ENOMEM;

	t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
	t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
	t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
	t->hpftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
	t->hpftid_bmap = (unsigned long *)&t->hpftid_tab[t->nhpftids];
	t->ftid_tab = (struct filter_entry *)&t->hpftid_bmap[hpftid_bmap_size];
	t->ftid_bmap = (unsigned long *)&t->ftid_tab[max_ftids];
	t->eotid_tab = (struct eotid_entry *)&t->ftid_bmap[ftid_bmap_size];
	t->eotid_bmap = (unsigned long *)&t->eotid_tab[t->neotids];
	spin_lock_init(&t->stid_lock);
	spin_lock_init(&t->atid_lock);
	spin_lock_init(&t->ftid_lock);

	t->stids_in_use = 0;
	t->v6_stids_in_use = 0;
	t->sftids_in_use = 0;
	t->afree = NULL;
	t->atids_in_use = 0;
	atomic_set(&t->tids_in_use, 0);
	atomic_set(&t->conns_in_use, 0);
	atomic_set(&t->hash_tids_in_use, 0);
	atomic_set(&t->eotids_in_use, 0);

	/* Setup the free list for atid_tab and clear the stid bitmap. */
	if (natids) {
		while (--natids)
			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
		t->afree = t->atid_tab;
	}

	if (is_offload(adap)) {
		bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
		/* Reserve stid 0 for T4/T5 adapters */
		if (!t->stid_base &&
		    CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
			__set_bit(0, t->stid_bmap);

		if (t->neotids)
			bitmap_zero(t->eotid_bmap, t->neotids);
	}

	if (t->nhpftids)
		bitmap_zero(t->hpftid_bmap, t->nhpftids);
	bitmap_zero(t->ftid_bmap, t->nftids);
	return 0;
}

/**
 *	cxgb4_create_server - create an IP server
 *	@dev: the device
 *	@stid: the server TID
 *	@sip: local IP address to bind server to
 *	@sport: the server's TCP port
 *	@vlan: the VLAN header information
 *	@queue: queue to direct messages from this server to
 *
 *	Create an IP server for the given port and address.
 *	Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
			__be32 sip, __be16 sport, __be16 vlan,
			unsigned int queue)
{
	unsigned int chan;
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_pass_open_req *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	adap = netdev2adap(dev);
	req = __skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
	req->local_port = sport;
	req->peer_port = htons(0);
	req->local_ip = sip;
	req->peer_ip = htonl(0);
	chan = rxq_to_chan(&adap->sge, queue);
	req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
	req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
				SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_create_server);
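
/* Illustrative usage sketch (caller-side names are hypothetical): a ULD
 * normally reserves a server TID first and then points the hardware
 * listener at a local address/port and an ingress queue, e.g.:
 *
 *	stid = cxgb4_alloc_stid(lldi->tids, PF_INET, listen_ctx);
 *	if (stid >= 0)
 *		ret = cxgb4_create_server(lldi->ports[0], stid, sip, sport,
 *					  0, rxq_id);
 *
 * As the comment above notes, the return value is <0 on error or one of the
 * NET_XMIT_* codes on success.
 */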

/**
 *	cxgb4_create_server6 - create an IPv6 server
 *	@dev: the device
 *	@stid: the server TID
 *	@sip: local IPv6 address to bind server to
 *	@sport: the server's TCP port
 *	@queue: queue to direct messages from this server to
 *
 *	Create an IPv6 server for the given port and address.
 *	Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
			 const struct in6_addr *sip, __be16 sport,
			 unsigned int queue)
{
	unsigned int chan;
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_pass_open_req6 *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	adap = netdev2adap(dev);
	req = __skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
	req->local_port = sport;
	req->peer_port = htons(0);
	req->local_ip_hi = *(__be64 *)(sip->s6_addr);
	req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
	req->peer_ip_hi = cpu_to_be64(0);
	req->peer_ip_lo = cpu_to_be64(0);
	chan = rxq_to_chan(&adap->sge, queue);
	req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
	req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
				SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_create_server6);

int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
			unsigned int queue, bool ipv6)
{
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_close_listsvr_req *req;
	int ret;

	adap = netdev2adap(dev);

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	req = __skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
	req->reply_ctrl = htons(NO_REPLY_V(0) | (ipv6 ? LISTSVR_IPV6_V(1) :
				LISTSVR_IPV6_V(0)) | QUEUENO_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_remove_server);

/**
 *	cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
 *	@mtus: the HW MTU table
 *	@mtu: the target MTU
 *	@idx: index of selected entry in the MTU table
 *
 *	Returns the index and the value in the HW MTU table that is closest to
 *	but does not exceed @mtu, unless @mtu is smaller than any value in the
 *	table, in which case that smallest available value is selected.
 */
unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
			    unsigned int *idx)
{
	unsigned int i = 0;

	while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
		++i;
	if (idx)
		*idx = i;
	return mtus[i];
}
EXPORT_SYMBOL(cxgb4_best_mtu);
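
/* Illustrative example (not used by the driver): selecting the largest HW
 * MTU that does not exceed a path MTU of 1500 bytes.  "adap" is a
 * hypothetical adapter pointer; the MTU table itself normally lives in
 * adap->params.mtus.
 *
 *	unsigned int mtu_idx;
 *	unsigned int mtu = cxgb4_best_mtu(adap->params.mtus, 1500, &mtu_idx);
 *
 * mtu is then the selected table value (or the smallest entry if every entry
 * is larger than 1500) and mtu_idx is the index a connection would program
 * into its MSS/MTU index field.
 */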

/**
 *     cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
 *     @mtus: the HW MTU table
 *     @header_size: Header Size
 *     @data_size_max: maximum Data Segment Size
 *     @data_size_align: desired Data Segment Size Alignment (2^N)
 *     @mtu_idxp: HW MTU Table Index return value pointer (possibly NULL)
 *
 *     Similar to cxgb4_best_mtu() but instead of searching the Hardware
 *     MTU Table based solely on a Maximum MTU parameter, we break that
 *     parameter up into a Header Size and Maximum Data Segment Size, and
 *     provide a desired Data Segment Size Alignment.  If we find an MTU in
 *     the Hardware MTU Table which will result in a Data Segment Size with
 *     the requested alignment _and_ that MTU isn't "too far" from the
 *     closest MTU, then we'll return that rather than the closest MTU.
 */
unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
				    unsigned short header_size,
				    unsigned short data_size_max,
				    unsigned short data_size_align,
				    unsigned int *mtu_idxp)
{
	unsigned short max_mtu = header_size + data_size_max;
	unsigned short data_size_align_mask = data_size_align - 1;
	int mtu_idx, aligned_mtu_idx;

	/* Scan the MTU Table till we find an MTU which is larger than our
	 * Maximum MTU or we reach the end of the table.  Along the way,
	 * record the last MTU found, if any, which will result in a Data
	 * Segment Length matching the requested alignment.
	 */
	for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) {
		unsigned short data_size = mtus[mtu_idx] - header_size;

		/* If this MTU minus the Header Size would result in a
		 * Data Segment Size of the desired alignment, remember it.
		 */
		if ((data_size & data_size_align_mask) == 0)
			aligned_mtu_idx = mtu_idx;

		/* If we're not at the end of the Hardware MTU Table and the
		 * next element is larger than our Maximum MTU, drop out of
		 * the loop.
		 */
		if (mtu_idx+1 < NMTUS && mtus[mtu_idx+1] > max_mtu)
			break;
	}

	/* If we fell out of the loop because we ran to the end of the table,
	 * then we just have to use the last [largest] entry.
	 */
	if (mtu_idx == NMTUS)
		mtu_idx--;

	/* If we found an MTU which resulted in the requested Data Segment
	 * Length alignment and that's "not far" from the largest MTU which is
	 * less than or equal to the maximum MTU, then use that.
	 */
	if (aligned_mtu_idx >= 0 &&
	    mtu_idx - aligned_mtu_idx <= 1)
		mtu_idx = aligned_mtu_idx;

	/* If the caller has passed in an MTU Index pointer, pass the
	 * MTU Index back.  Return the MTU value.
	 */
	if (mtu_idxp)
		*mtu_idxp = mtu_idx;
	return mtus[mtu_idx];
}
EXPORT_SYMBOL(cxgb4_best_aligned_mtu);
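
/* Illustrative example (values are hypothetical): a ULD that wants its data
 * segments 512-byte aligned behind a 40-byte TCP/IP header could call:
 *
 *	unsigned int mtu_idx;
 *	unsigned int mtu = cxgb4_best_aligned_mtu(adap->params.mtus,
 *						  40, 8192, 512, &mtu_idx);
 *
 * The helper prefers a table entry whose (MTU - 40) is a multiple of 512,
 * but only when that entry is within one index of the largest MTU not
 * exceeding 40 + 8192; otherwise it falls back to the plain closest match.
 */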

/**
 *	cxgb4_port_chan - get the HW channel of a port
 *	@dev: the net device for the port
 *
 *	Return the HW Tx channel of the given port.
 */
unsigned int cxgb4_port_chan(const struct net_device *dev)
{
	return netdev2pinfo(dev)->tx_chan;
}
EXPORT_SYMBOL(cxgb4_port_chan);

/**
 *      cxgb4_port_e2cchan - get the HW c-channel of a port
 *      @dev: the net device for the port
 *
 *      Return the HW RX c-channel of the given port.
 */
unsigned int cxgb4_port_e2cchan(const struct net_device *dev)
{
	return netdev2pinfo(dev)->rx_cchan;
}
EXPORT_SYMBOL(cxgb4_port_e2cchan);

unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
{
	struct adapter *adap = netdev2adap(dev);
	u32 v1, v2, lp_count, hp_count;

	v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
	v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
	if (is_t4(adap->params.chip)) {
		lp_count = LP_COUNT_G(v1);
		hp_count = HP_COUNT_G(v1);
	} else {
		lp_count = LP_COUNT_T5_G(v1);
		hp_count = HP_COUNT_T5_G(v2);
	}
	return lpfifo ? lp_count : hp_count;
}
EXPORT_SYMBOL(cxgb4_dbfifo_count);

/**
 *	cxgb4_port_viid - get the VI id of a port
 *	@dev: the net device for the port
 *
 *	Return the VI id of the given port.
 */
unsigned int cxgb4_port_viid(const struct net_device *dev)
{
	return netdev2pinfo(dev)->viid;
}
EXPORT_SYMBOL(cxgb4_port_viid);

/**
 *	cxgb4_port_idx - get the index of a port
 *	@dev: the net device for the port
 *
 *	Return the index of the given port.
 */
unsigned int cxgb4_port_idx(const struct net_device *dev)
{
	return netdev2pinfo(dev)->port_id;
}
EXPORT_SYMBOL(cxgb4_port_idx);

void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
			 struct tp_tcp_stats *v6)
{
	struct adapter *adap = pci_get_drvdata(pdev);

	spin_lock(&adap->stats_lock);
	t4_tp_get_tcp_stats(adap, v4, v6, false);
	spin_unlock(&adap->stats_lock);
}
EXPORT_SYMBOL(cxgb4_get_tcp_stats);

void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
		      const unsigned int *pgsz_order)
{
	struct adapter *adap = netdev2adap(dev);

	t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK_A, tag_mask);
	t4_write_reg(adap, ULP_RX_ISCSI_PSZ_A, HPZ0_V(pgsz_order[0]) |
		     HPZ1_V(pgsz_order[1]) | HPZ2_V(pgsz_order[2]) |
		     HPZ3_V(pgsz_order[3]));
}
EXPORT_SYMBOL(cxgb4_iscsi_init);

int cxgb4_flush_eq_cache(struct net_device *dev)
{
	struct adapter *adap = netdev2adap(dev);

	return t4_sge_ctxt_flush(adap, adap->mbox, CTXT_EGRESS);
}
EXPORT_SYMBOL(cxgb4_flush_eq_cache);

static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
{
	u32 addr = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A) + 24 * qid + 8;
	__be64 indices;
	int ret;

	spin_lock(&adap->win0_lock);
	ret = t4_memory_rw(adap, 0, MEM_EDC0, addr,
			   sizeof(indices), (__be32 *)&indices,
			   T4_MEMORY_READ);
	spin_unlock(&adap->win0_lock);
	if (!ret) {
		*cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
		*pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
	}
	return ret;
}

int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
			u16 size)
{
	struct adapter *adap = netdev2adap(dev);
	u16 hw_pidx, hw_cidx;
	int ret;

	ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
	if (ret)
		goto out;

	if (pidx != hw_pidx) {
		u16 delta;
		u32 val;

		if (pidx >= hw_pidx)
			delta = pidx - hw_pidx;
		else
			delta = size - hw_pidx + pidx;

		if (is_t4(adap->params.chip))
			val = PIDX_V(delta);
		else
			val = PIDX_T5_V(delta);
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
			     QID_V(qid) | val);
	}
out:
	return ret;
}
EXPORT_SYMBOL(cxgb4_sync_txq_pidx);

int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
{
	u32 edc0_size, edc1_size, mc0_size, mc1_size, size;
	u32 edc0_end, edc1_end, mc0_end, mc1_end;
	u32 offset, memtype, memaddr;
	struct adapter *adap;
	u32 hma_size = 0;
	int ret;

	adap = netdev2adap(dev);

	offset = ((stag >> 8) * 32) + adap->vres.stag.start;

	/* Figure out where the offset lands in the Memory Type/Address scheme.
	 * This code assumes that the memory is laid out starting at offset 0
	 * with no breaks as: EDC0, EDC1, MC0, MC1. All cards have both EDC0
	 * and EDC1.  Some cards will have neither MC0 nor MC1, most cards have
	 * MC0, and some have both MC0 and MC1.
	 */
	size = t4_read_reg(adap, MA_EDRAM0_BAR_A);
	edc0_size = EDRAM0_SIZE_G(size) << 20;
	size = t4_read_reg(adap, MA_EDRAM1_BAR_A);
	edc1_size = EDRAM1_SIZE_G(size) << 20;
	size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
	mc0_size = EXT_MEM0_SIZE_G(size) << 20;

	if (t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A) & HMA_MUX_F) {
		size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
		hma_size = EXT_MEM1_SIZE_G(size) << 20;
	}
	edc0_end = edc0_size;
	edc1_end = edc0_end + edc1_size;
	mc0_end = edc1_end + mc0_size;

	if (offset < edc0_end) {
		memtype = MEM_EDC0;
		memaddr = offset;
	} else if (offset < edc1_end) {
		memtype = MEM_EDC1;
		memaddr = offset - edc0_end;
	} else {
		if (hma_size && (offset < (edc1_end + hma_size))) {
			memtype = MEM_HMA;
			memaddr = offset - edc1_end;
		} else if (offset < mc0_end) {
			memtype = MEM_MC0;
			memaddr = offset - edc1_end;
		} else if (is_t5(adap->params.chip)) {
			size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
			mc1_size = EXT_MEM1_SIZE_G(size) << 20;
			mc1_end = mc0_end + mc1_size;
			if (offset < mc1_end) {
				memtype = MEM_MC1;
				memaddr = offset - mc0_end;
			} else {
				/* offset beyond the end of any memory */
				goto err;
			}
		} else {
			/* T4/T6 only has a single memory channel */
			goto err;
		}
	}

	spin_lock(&adap->win0_lock);
	ret = t4_memory_rw(adap, 0, memtype, memaddr, 32, tpte, T4_MEMORY_READ);
	spin_unlock(&adap->win0_lock);
	return ret;

err:
	dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n",
		stag, offset);
	return -EINVAL;
}
EXPORT_SYMBOL(cxgb4_read_tpte);
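
/* Worked example for the address walk above (sizes are hypothetical): with
 * 256 MB of EDC0 and 256 MB of EDC1, edc0_end is 0x10000000 and edc1_end is
 * 0x20000000.  A stag whose computed offset is 0x18000000 is below edc1_end
 * but not below edc0_end, so it resolves to MEM_EDC1 at memaddr 0x08000000;
 * offsets at or beyond edc1_end fall through to HMA, MC0 or (on T5) MC1 in
 * the same fashion.
 */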

u64 cxgb4_read_sge_timestamp(struct net_device *dev)
{
	u32 hi, lo;
	struct adapter *adap;

	adap = netdev2adap(dev);
	lo = t4_read_reg(adap, SGE_TIMESTAMP_LO_A);
	hi = TSVAL_G(t4_read_reg(adap, SGE_TIMESTAMP_HI_A));

	return ((u64)hi << 32) | (u64)lo;
}
EXPORT_SYMBOL(cxgb4_read_sge_timestamp);

int cxgb4_bar2_sge_qregs(struct net_device *dev,
			 unsigned int qid,
			 enum cxgb4_bar2_qtype qtype,
			 int user,
			 u64 *pbar2_qoffset,
			 unsigned int *pbar2_qid)
{
	return t4_bar2_sge_qregs(netdev2adap(dev),
				 qid,
				 (qtype == CXGB4_BAR2_QTYPE_EGRESS
				  ? T4_BAR2_QTYPE_EGRESS
				  : T4_BAR2_QTYPE_INGRESS),
				 user,
				 pbar2_qoffset,
				 pbar2_qid);
}
EXPORT_SYMBOL(cxgb4_bar2_sge_qregs);
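
/* Illustrative usage sketch (caller-side names are hypothetical): a ULD that
 * wants to ring an egress-queue doorbell directly through BAR2 would map the
 * queue once and cache the result:
 *
 *	u64 bar2_qoffset;
 *	unsigned int bar2_qid;
 *
 *	ret = cxgb4_bar2_sge_qregs(netdev, eq_cntxt_id,
 *				   CXGB4_BAR2_QTYPE_EGRESS, 0,
 *				   &bar2_qoffset, &bar2_qid);
 *
 * On success the kernel doorbell register sits at (BAR2 base + bar2_qoffset
 * + SGE_UDB_KDOORBELL) and writes should carry QID_V(bar2_qid), much like
 * the recovery path in process_db_drop() below.
 */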

static struct pci_driver cxgb4_driver;

static void check_neigh_update(struct neighbour *neigh)
{
	const struct device *parent;
	const struct net_device *netdev = neigh->dev;

	if (is_vlan_dev(netdev))
		netdev = vlan_dev_real_dev(netdev);
	parent = netdev->dev.parent;
	if (parent && parent->driver == &cxgb4_driver.driver)
		t4_l2t_update(dev_get_drvdata(parent), neigh);
}

static int netevent_cb(struct notifier_block *nb, unsigned long event,
		       void *data)
{
	switch (event) {
	case NETEVENT_NEIGH_UPDATE:
		check_neigh_update(data);
		break;
	case NETEVENT_REDIRECT:
	default:
		break;
	}
	return 0;
}

static bool netevent_registered;
static struct notifier_block cxgb4_netevent_nb = {
	.notifier_call = netevent_cb
};

static void drain_db_fifo(struct adapter *adap, int usecs)
{
	u32 v1, v2, lp_count, hp_count;

	do {
		v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
		v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
		if (is_t4(adap->params.chip)) {
			lp_count = LP_COUNT_G(v1);
			hp_count = HP_COUNT_G(v1);
		} else {
			lp_count = LP_COUNT_T5_G(v1);
			hp_count = HP_COUNT_T5_G(v2);
		}

		if (lp_count == 0 && hp_count == 0)
			break;
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(usecs_to_jiffies(usecs));
	} while (1);
}

static void disable_txq_db(struct sge_txq *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->db_lock, flags);
	q->db_disabled = 1;
	spin_unlock_irqrestore(&q->db_lock, flags);
}

static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
{
	spin_lock_irq(&q->db_lock);
	if (q->db_pidx_inc) {
		/* Make sure that all writes to the TX descriptors
		 * are committed before we tell HW about them.
		 */
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
			     QID_V(q->cntxt_id) | PIDX_V(q->db_pidx_inc));
		q->db_pidx_inc = 0;
	}
	q->db_disabled = 0;
	spin_unlock_irq(&q->db_lock);
}

static void disable_dbs(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		disable_txq_db(&adap->sge.ethtxq[i].q);
	if (is_offload(adap)) {
		struct sge_uld_txq_info *txq_info =
			adap->sge.uld_txq_info[CXGB4_TX_OFLD];

		if (txq_info) {
			for_each_ofldtxq(&adap->sge, i) {
				struct sge_uld_txq *txq = &txq_info->uldtxq[i];

				disable_txq_db(&txq->q);
			}
		}
	}
	for_each_port(adap, i)
		disable_txq_db(&adap->sge.ctrlq[i].q);
}

static void enable_dbs(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		enable_txq_db(adap, &adap->sge.ethtxq[i].q);
	if (is_offload(adap)) {
		struct sge_uld_txq_info *txq_info =
			adap->sge.uld_txq_info[CXGB4_TX_OFLD];

		if (txq_info) {
			for_each_ofldtxq(&adap->sge, i) {
				struct sge_uld_txq *txq = &txq_info->uldtxq[i];

				enable_txq_db(adap, &txq->q);
			}
		}
	}
	for_each_port(adap, i)
		enable_txq_db(adap, &adap->sge.ctrlq[i].q);
}

static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
{
	enum cxgb4_uld type = CXGB4_ULD_RDMA;

	if (adap->uld && adap->uld[type].handle)
		adap->uld[type].control(adap->uld[type].handle, cmd);
}

static void process_db_full(struct work_struct *work)
{
	struct adapter *adap;

	adap = container_of(work, struct adapter, db_full_task);

	drain_db_fifo(adap, dbfifo_drain_delay);
	enable_dbs(adap);
	notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
		t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
				 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F,
				 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F);
	else
		t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
				 DBFIFO_LP_INT_F, DBFIFO_LP_INT_F);
}

static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
{
	u16 hw_pidx, hw_cidx;
	int ret;

	spin_lock_irq(&q->db_lock);
	ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
	if (ret)
		goto out;
	if (q->db_pidx != hw_pidx) {
		u16 delta;
		u32 val;

		if (q->db_pidx >= hw_pidx)
			delta = q->db_pidx - hw_pidx;
		else
			delta = q->size - hw_pidx + q->db_pidx;

		if (is_t4(adap->params.chip))
			val = PIDX_V(delta);
		else
			val = PIDX_T5_V(delta);
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
			     QID_V(q->cntxt_id) | val);
	}
out:
	q->db_disabled = 0;
	q->db_pidx_inc = 0;
	spin_unlock_irq(&q->db_lock);
	if (ret)
		CH_WARN(adap, "DB drop recovery failed.\n");
}

static void recover_all_queues(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
	if (is_offload(adap)) {
		struct sge_uld_txq_info *txq_info =
			adap->sge.uld_txq_info[CXGB4_TX_OFLD];
		if (txq_info) {
			for_each_ofldtxq(&adap->sge, i) {
				struct sge_uld_txq *txq = &txq_info->uldtxq[i];

				sync_txq_pidx(adap, &txq->q);
			}
		}
	}
	for_each_port(adap, i)
		sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
}

static void process_db_drop(struct work_struct *work)
{
	struct adapter *adap;

	adap = container_of(work, struct adapter, db_drop_task);

	if (is_t4(adap->params.chip)) {
		drain_db_fifo(adap, dbfifo_drain_delay);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
		drain_db_fifo(adap, dbfifo_drain_delay);
		recover_all_queues(adap);
		drain_db_fifo(adap, dbfifo_drain_delay);
		enable_dbs(adap);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
	} else if (is_t5(adap->params.chip)) {
		u32 dropped_db = t4_read_reg(adap, 0x010ac);
		u16 qid = (dropped_db >> 15) & 0x1ffff;
		u16 pidx_inc = dropped_db & 0x1fff;
		u64 bar2_qoffset;
		unsigned int bar2_qid;
		int ret;

		ret = t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS,
					0, &bar2_qoffset, &bar2_qid);
		if (ret)
			dev_err(adap->pdev_dev, "doorbell drop recovery: "
				"qid=%d, pidx_inc=%d\n", qid, pidx_inc);
		else
			writel(PIDX_T5_V(pidx_inc) | QID_V(bar2_qid),
			       adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL);

		/* Re-enable BAR2 WC */
		t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
	}

	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
		t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0);
}

void t4_db_full(struct adapter *adap)
{
	if (is_t4(adap->params.chip)) {
		disable_dbs(adap);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
		t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
				 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F, 0);
		queue_work(adap->workq, &adap->db_full_task);
	}
}

void t4_db_dropped(struct adapter *adap)
{
	if (is_t4(adap->params.chip)) {
		disable_dbs(adap);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
	}
	queue_work(adap->workq, &adap->db_drop_task);
}

void t4_register_netevent_notifier(void)
{
	if (!netevent_registered) {
		register_netevent_notifier(&cxgb4_netevent_nb);
		netevent_registered = true;
	}
}

static void detach_ulds(struct adapter *adap)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	list_del(&adap->list_node);

	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (adap->uld && adap->uld[i].handle)
			adap->uld[i].state_change(adap->uld[i].handle,
					     CXGB4_STATE_DETACH);

	if (netevent_registered && list_empty(&adapter_list)) {
		unregister_netevent_notifier(&cxgb4_netevent_nb);
		netevent_registered = false;
	}
	mutex_unlock(&uld_mutex);
}

static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (adap->uld && adap->uld[i].handle)
			adap->uld[i].state_change(adap->uld[i].handle,
						  new_state);
	mutex_unlock(&uld_mutex);
}

#if IS_ENABLED(CONFIG_IPV6)
static int cxgb4_inet6addr_handler(struct notifier_block *this,
				   unsigned long event, void *data)
{
	struct inet6_ifaddr *ifa = data;
	struct net_device *event_dev = ifa->idev->dev;
	const struct device *parent = NULL;
#if IS_ENABLED(CONFIG_BONDING)
	struct adapter *adap;
#endif
	if (is_vlan_dev(event_dev))
		event_dev = vlan_dev_real_dev(event_dev);
#if IS_ENABLED(CONFIG_BONDING)
	if (event_dev->flags & IFF_MASTER) {
		list_for_each_entry(adap, &adapter_list, list_node) {
			switch (event) {
			case NETDEV_UP:
				cxgb4_clip_get(adap->port[0],
					       (const u32 *)ifa, 1);
				break;
			case NETDEV_DOWN:
				cxgb4_clip_release(adap->port[0],
						   (const u32 *)ifa, 1);
				break;
			default:
				break;
			}
		}
		return NOTIFY_OK;
	}
#endif

	if (event_dev)
		parent = event_dev->dev.parent;

	if (parent && parent->driver == &cxgb4_driver.driver) {
		switch (event) {
		case NETDEV_UP:
			cxgb4_clip_get(event_dev, (const u32 *)ifa, 1);
			break;
		case NETDEV_DOWN:
			cxgb4_clip_release(event_dev, (const u32 *)ifa, 1);
			break;
		default:
			break;
		}
	}
	return NOTIFY_OK;
}

static bool inet6addr_registered;
static struct notifier_block cxgb4_inet6addr_notifier = {
	.notifier_call = cxgb4_inet6addr_handler
};

static void update_clip(const struct adapter *adap)
{
	int i;
	struct net_device *dev;
	int ret;

	rcu_read_lock();

	for (i = 0; i < MAX_NPORTS; i++) {
		dev = adap->port[i];
		ret = 0;

		if (dev)
			ret = cxgb4_update_root_dev_clip(dev);

		if (ret < 0)
			break;
	}
	rcu_read_unlock();
}
#endif /* IS_ENABLED(CONFIG_IPV6) */

/**
 *	cxgb_up - enable the adapter
 *	@adap: adapter being enabled
 *
 *	Called when the first port is enabled, this function performs the
 *	actions necessary to make an adapter operational, such as completing
 *	the initialization of HW modules, and enabling interrupts.
 *
 *	Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int err;

	mutex_lock(&uld_mutex);
	err = setup_sge_queues(adap);
	if (err)
		goto rel_lock;
	err = setup_rss(adap);
	if (err)
		goto freeq;

	if (adap->flags & CXGB4_USING_MSIX) {
		if (s->nd_msix_idx < 0) {
			err = -ENOMEM;
			goto irq_err;
		}

		err = request_irq(adap->msix_info[s->nd_msix_idx].vec,
				  t4_nondata_intr, 0,
				  adap->msix_info[s->nd_msix_idx].desc, adap);
		if (err)
			goto irq_err;

		err = request_msix_queue_irqs(adap);
		if (err)
			goto irq_err_free_nd_msix;
	} else {
		err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
				  (adap->flags & CXGB4_USING_MSI) ? 0
								  : IRQF_SHARED,
				  adap->port[0]->name, adap);
		if (err)
			goto irq_err;
	}

	enable_rx(adap);
	t4_sge_start(adap);
	t4_intr_enable(adap);
	adap->flags |= CXGB4_FULL_INIT_DONE;
	mutex_unlock(&uld_mutex);

	notify_ulds(adap, CXGB4_STATE_UP);
#if IS_ENABLED(CONFIG_IPV6)
	update_clip(adap);
#endif
	return err;

irq_err_free_nd_msix:
	free_irq(adap->msix_info[s->nd_msix_idx].vec, adap);
irq_err:
	dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
freeq:
	t4_free_sge_resources(adap);
rel_lock:
	mutex_unlock(&uld_mutex);
	return err;
}

static void cxgb_down(struct adapter *adapter)
{
	cancel_work_sync(&adapter->tid_release_task);
	cancel_work_sync(&adapter->db_full_task);
	cancel_work_sync(&adapter->db_drop_task);
	adapter->tid_release_task_busy = false;
	adapter->tid_release_head = NULL;

	t4_sge_stop(adapter);
	t4_free_sge_resources(adapter);

	adapter->flags &= ~CXGB4_FULL_INIT_DONE;
}

/*
 * net_device operations
 */
int cxgb_open(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int err;

	netif_carrier_off(dev);

	if (!(adapter->flags & CXGB4_FULL_INIT_DONE)) {
		err = cxgb_up(adapter);
		if (err < 0)
			return err;
	}

	/* It's possible that the basic port information could have
	 * changed since we first read it.
	 */
	err = t4_update_port_info(pi);
	if (err < 0)
		return err;

	err = link_start(dev);
	if (err)
		return err;

	if (pi->nmirrorqsets) {
		mutex_lock(&pi->vi_mirror_mutex);
		err = cxgb4_port_mirror_alloc_queues(dev);
		if (err)
			goto out_unlock;

		err = cxgb4_port_mirror_start(dev);
		if (err)
			goto out_free_queues;
		mutex_unlock(&pi->vi_mirror_mutex);
	}

	netif_tx_start_all_queues(dev);
	return 0;

out_free_queues:
	cxgb4_port_mirror_free_queues(dev);

out_unlock:
	mutex_unlock(&pi->vi_mirror_mutex);
	return err;
}

int cxgb_close(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;

	netif_tx_stop_all_queues(dev);
	netif_carrier_off(dev);
	ret = t4_enable_pi_params(adapter, adapter->pf, pi,
				  false, false, false);
#ifdef CONFIG_CHELSIO_T4_DCB
	cxgb4_dcb_reset(dev);
	dcb_tx_queue_prio_enable(dev, false);
#endif
	if (ret)
		return ret;

	if (pi->nmirrorqsets) {
		mutex_lock(&pi->vi_mirror_mutex);
		cxgb4_port_mirror_stop(dev);
		cxgb4_port_mirror_free_queues(dev);
		mutex_unlock(&pi->vi_mirror_mutex);
	}

	return 0;
}

int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
		__be32 sip, __be16 sport, __be16 vlan,
		unsigned int queue, unsigned char port, unsigned char mask)
{
	int ret;
	struct filter_entry *f;
	struct adapter *adap;
	int i;
	u8 *val;

	adap = netdev2adap(dev);

	/* Adjust stid to correct filter index */
	stid -= adap->tids.sftid_base;
	stid += adap->tids.nftids;

	/* Check to make sure the filter requested is writable ...
	 */
	f = &adap->tids.ftid_tab[stid];
	ret = writable_filter(f);
	if (ret)
		return ret;

	/* Clear out any old resources being used by the filter before
	 * we start constructing the new filter.
	 */
	if (f->valid)
		clear_filter(adap, f);

	/* Clear out filter specifications */
	memset(&f->fs, 0, sizeof(struct ch_filter_specification));
	f->fs.val.lport = be16_to_cpu(sport);
	f->fs.mask.lport  = ~0;
	val = (u8 *)&sip;
	if ((val[0] | val[1] | val[2] | val[3]) != 0) {
		for (i = 0; i < 4; i++) {
			f->fs.val.lip[i] = val[i];
			f->fs.mask.lip[i] = ~0;
		}
		if (adap->params.tp.vlan_pri_map & PORT_F) {
			f->fs.val.iport = port;
			f->fs.mask.iport = mask;
		}
	}

	if (adap->params.tp.vlan_pri_map & PROTOCOL_F) {
		f->fs.val.proto = IPPROTO_TCP;
		f->fs.mask.proto = ~0;
	}

	f->fs.dirsteer = 1;
	f->fs.iq = queue;
	/* Mark filter as locked */
	f->locked = 1;
	f->fs.rpttid = 1;

	/* Save the actual tid. We need this to get the corresponding
	 * filter entry structure in filter_rpl.
	 */
	f->tid = stid + adap->tids.ftid_base;
	ret = set_filter_wr(adap, stid);
	if (ret) {
		clear_filter(adap, f);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(cxgb4_create_server_filter);

int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
		unsigned int queue, bool ipv6)
{
	struct filter_entry *f;
	struct adapter *adap;

	adap = netdev2adap(dev);

	/* Adjust stid to correct filter index */
	stid -= adap->tids.sftid_base;
	stid += adap->tids.nftids;

	f = &adap->tids.ftid_tab[stid];
	/* Unlock the filter */
	f->locked = 0;

	return delete_filter(adap, stid);
}
EXPORT_SYMBOL(cxgb4_remove_server_filter);

static void cxgb_get_stats(struct net_device *dev,
			   struct rtnl_link_stats64 *ns)
{
	struct port_stats stats;
	struct port_info *p = netdev_priv(dev);
	struct adapter *adapter = p->adapter;

	/* Block retrieving statistics during EEH error
	 * recovery. Otherwise, the recovery might fail
	 * and the PCI device will be removed permanently
	 */
	spin_lock(&adapter->stats_lock);
	if (!netif_device_present(dev)) {
		spin_unlock(&adapter->stats_lock);
		return;
	}
	t4_get_port_stats_offset(adapter, p->tx_chan, &stats,
				 &p->stats_base);
	spin_unlock(&adapter->stats_lock);

	ns->tx_bytes   = stats.tx_octets;
	ns->tx_packets = stats.tx_frames;
	ns->rx_bytes   = stats.rx_octets;
	ns->rx_packets = stats.rx_frames;
	ns->multicast  = stats.rx_mcast_frames;

	/* detailed rx_errors */
	ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
			       stats.rx_runt;
	ns->rx_over_errors   = 0;
	ns->rx_crc_errors    = stats.rx_fcs_err;
	ns->rx_frame_errors  = stats.rx_symbol_err;
	ns->rx_dropped	     = stats.rx_ovflow0 + stats.rx_ovflow1 +
			       stats.rx_ovflow2 + stats.rx_ovflow3 +
			       stats.rx_trunc0 + stats.rx_trunc1 +
			       stats.rx_trunc2 + stats.rx_trunc3;
	ns->rx_missed_errors = 0;

	/* detailed tx_errors */
	ns->tx_aborted_errors   = 0;
	ns->tx_carrier_errors   = 0;
	ns->tx_fifo_errors      = 0;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors    = 0;

	ns->tx_errors = stats.tx_error_frames;
	ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
		ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
}

static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	unsigned int mbox;
	int ret = 0, prtad, devad;
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;

	switch (cmd) {
	case SIOCGMIIPHY:
		if (pi->mdio_addr < 0)
			return -EOPNOTSUPP;
		data->phy_id = pi->mdio_addr;
		break;
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (mdio_phy_id_is_c45(data->phy_id)) {
			prtad = mdio_phy_id_prtad(data->phy_id);
			devad = mdio_phy_id_devad(data->phy_id);
		} else if (data->phy_id < 32) {
			prtad = data->phy_id;
			devad = 0;
			data->reg_num &= 0x1f;
		} else
			return -EINVAL;

		mbox = pi->adapter->pf;
		if (cmd == SIOCGMIIREG)
			ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
					 data->reg_num, &data->val_out);
		else
			ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
					 data->reg_num, data->val_in);
		break;
	case SIOCGHWTSTAMP:
		return copy_to_user(req->ifr_data, &pi->tstamp_config,
				    sizeof(pi->tstamp_config)) ?
			-EFAULT : 0;
	case SIOCSHWTSTAMP:
		if (copy_from_user(&pi->tstamp_config, req->ifr_data,
				   sizeof(pi->tstamp_config)))
			return -EFAULT;

		if (!is_t4(adapter->params.chip)) {
			switch (pi->tstamp_config.tx_type) {
			case HWTSTAMP_TX_OFF:
			case HWTSTAMP_TX_ON:
				break;
			default:
				return -ERANGE;
			}

			switch (pi->tstamp_config.rx_filter) {
			case HWTSTAMP_FILTER_NONE:
				pi->rxtstamp = false;
				break;
			case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
			case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
				cxgb4_ptprx_timestamping(pi, pi->port_id,
							 PTP_TS_L4);
				break;
			case HWTSTAMP_FILTER_PTP_V2_EVENT:
				cxgb4_ptprx_timestamping(pi, pi->port_id,
							 PTP_TS_L2_L4);
				break;
			case HWTSTAMP_FILTER_ALL:
			case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
			case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
			case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
			case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
				pi->rxtstamp = true;
				break;
			default:
				pi->tstamp_config.rx_filter =
					HWTSTAMP_FILTER_NONE;
				return -ERANGE;
			}

			if ((pi->tstamp_config.tx_type == HWTSTAMP_TX_OFF) &&
			    (pi->tstamp_config.rx_filter ==
				HWTSTAMP_FILTER_NONE)) {
				if (cxgb4_ptp_txtype(adapter, pi->port_id) >= 0)
					pi->ptp_enable = false;
			}

			if (pi->tstamp_config.rx_filter !=
				HWTSTAMP_FILTER_NONE) {
				if (cxgb4_ptp_redirect_rx_packet(adapter,
								 pi) >= 0)
					pi->ptp_enable = true;
			}
		} else {
			/* For T4 Adapters */
			switch (pi->tstamp_config.rx_filter) {
			case HWTSTAMP_FILTER_NONE:
			pi->rxtstamp = false;
			break;
			case HWTSTAMP_FILTER_ALL:
			pi->rxtstamp = true;
			break;
			default:
			pi->tstamp_config.rx_filter =
			HWTSTAMP_FILTER_NONE;
			return -ERANGE;
			}
		}
		return copy_to_user(req->ifr_data, &pi->tstamp_config,
				    sizeof(pi->tstamp_config)) ?
			-EFAULT : 0;
	default:
		return -EOPNOTSUPP;
	}
	return ret;
}

static void cxgb_set_rxmode(struct net_device *dev)
{
	/* unfortunately we can't return errors to the stack */
	set_rxmode(dev, -1, false);
}

static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
{
	struct port_info *pi = netdev_priv(dev);
	int ret;

	ret = t4_set_rxmode(pi->adapter, pi->adapter->mbox, pi->viid,
			    pi->viid_mirror, new_mtu, -1, -1, -1, -1, true);
	if (!ret)
		dev->mtu = new_mtu;
	return ret;
}

#ifdef CONFIG_PCI_IOV
static int cxgb4_mgmt_open(struct net_device *dev)
{
	/* Turn carrier off since we don't have to transmit anything on this
	 * interface.
	 */
	netif_carrier_off(dev);
	return 0;
}

/* Fill MAC address that will be assigned by the FW */
static void cxgb4_mgmt_fill_vf_station_mac_addr(struct adapter *adap)
{
	u8 hw_addr[ETH_ALEN], macaddr[ETH_ALEN];
	unsigned int i, vf, nvfs;
	u16 a, b;
	int err;
	u8 *na;

	adap->params.pci.vpd_cap_addr = pci_find_capability(adap->pdev,
							    PCI_CAP_ID_VPD);
	err = t4_get_raw_vpd_params(adap, &adap->params.vpd);
	if (err)
		return;

	na = adap->params.vpd.na;
	for (i = 0; i < ETH_ALEN; i++)
		hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
			      hex2val(na[2 * i + 1]));

	a = (hw_addr[0] << 8) | hw_addr[1];
	b = (hw_addr[1] << 8) | hw_addr[2];
	a ^= b;
	a |= 0x0200;    /* locally assigned Ethernet MAC address */
	a &= ~0x0100;   /* not a multicast Ethernet MAC address */
	macaddr[0] = a >> 8;
	macaddr[1] = a & 0xff;

	for (i = 2; i < 5; i++)
		macaddr[i] = hw_addr[i + 1];

	for (vf = 0, nvfs = pci_sriov_get_totalvfs(adap->pdev);
		vf < nvfs; vf++) {
		macaddr[5] = adap->pf * nvfs + vf;
		ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, macaddr);
	}
}
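
/* Worked example for the derivation above (addresses are hypothetical): if
 * the VPD "na" field yields the base address 00:07:43:2a:b0:01, then
 * a = 0x0007, b = 0x0743 and a ^ b = 0x0744; forcing the locally-administered
 * bit on and the multicast bit off gives 0x0644, so each VF is assigned a MAC
 * of the form 06:44:2a:b0:01:NN with NN = pf * totalvfs + vf.
 */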

static int cxgb4_mgmt_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	int ret;

	/* verify MAC addr is valid */
	if (!is_valid_ether_addr(mac)) {
		dev_err(pi->adapter->pdev_dev,
			"Invalid Ethernet address %pM for VF %d\n",
			mac, vf);
		return -EINVAL;
	}

	dev_info(pi->adapter->pdev_dev,
		 "Setting MAC %pM on VF %d\n", mac, vf);
	ret = t4_set_vf_mac_acl(adap, vf + 1, 1, mac);
	if (!ret)
		ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, mac);
	return ret;
}

static int cxgb4_mgmt_get_vf_config(struct net_device *dev,
				    int vf, struct ifla_vf_info *ivi)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct vf_info *vfinfo;

	if (vf >= adap->num_vfs)
		return -EINVAL;
	vfinfo = &adap->vfinfo[vf];

	ivi->vf = vf;
	ivi->max_tx_rate = vfinfo->tx_rate;
	ivi->min_tx_rate = 0;
	ether_addr_copy(ivi->mac, vfinfo->vf_mac_addr);
	ivi->vlan = vfinfo->vlan;
	ivi->linkstate = vfinfo->link_state;
	return 0;
}

static int cxgb4_mgmt_get_phys_port_id(struct net_device *dev,
				       struct netdev_phys_item_id *ppid)
{
	struct port_info *pi = netdev_priv(dev);
	unsigned int phy_port_id;

	phy_port_id = pi->adapter->adap_idx * 10 + pi->port_id;
	ppid->id_len = sizeof(phy_port_id);
	memcpy(ppid->id, &phy_port_id, ppid->id_len);
	return 0;
}

static int cxgb4_mgmt_set_vf_rate(struct net_device *dev, int vf,
				  int min_tx_rate, int max_tx_rate)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	unsigned int link_ok, speed, mtu;
	u32 fw_pfvf, fw_class;
	int class_id = vf;
	int ret;
	u16 pktsize;

	if (vf >= adap->num_vfs)
		return -EINVAL;

	if (min_tx_rate) {
		dev_err(adap->pdev_dev,
			"Min tx rate (%d) (> 0) for VF %d is Invalid.\n",
			min_tx_rate, vf);
		return -EINVAL;
	}

	if (max_tx_rate == 0) {
		/* unbind VF to to any Traffic Class */
		fw_pfvf =
		    (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
		     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH));
		fw_class = 0xffffffff;
		ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1,
				    &fw_pfvf, &fw_class);
		if (ret) {
			dev_err(adap->pdev_dev,
				"Err %d in unbinding PF %d VF %d from TX Rate Limiting\n",
				ret, adap->pf, vf);
			return -EINVAL;
		}
		dev_info(adap->pdev_dev,
			 "PF %d VF %d is unbound from TX Rate Limiting\n",
			 adap->pf, vf);
		adap->vfinfo[vf].tx_rate = 0;
		return 0;
	}

	ret = t4_get_link_params(pi, &link_ok, &speed, &mtu);
	if (ret != FW_SUCCESS) {
		dev_err(adap->pdev_dev,
			"Failed to get link information for VF %d\n", vf);
		return -EINVAL;
	}

	if (!link_ok) {
		dev_err(adap->pdev_dev, "Link down for VF %d\n", vf);
		return -EINVAL;
	}

	if (max_tx_rate > speed) {
		dev_err(adap->pdev_dev,
			"Max tx rate %d for VF %d can't be > link-speed %u",
			max_tx_rate, vf, speed);
		return -EINVAL;
	}

	pktsize = mtu;
	/* subtract ethhdr size and 4 bytes crc since, f/w appends it */
	pktsize = pktsize - sizeof(struct ethhdr) - 4;
	/* subtract ipv4 hdr size, tcp hdr size to get typical IPv4 MSS size */
	pktsize = pktsize - sizeof(struct iphdr) - sizeof(struct tcphdr);
	/* configure Traffic Class for rate-limiting */
	ret = t4_sched_params(adap, SCHED_CLASS_TYPE_PACKET,
			      SCHED_CLASS_LEVEL_CL_RL,
			      SCHED_CLASS_MODE_CLASS,
			      SCHED_CLASS_RATEUNIT_BITS,
			      SCHED_CLASS_RATEMODE_ABS,
			      pi->tx_chan, class_id, 0,
			      max_tx_rate * 1000, 0, pktsize, 0);
	if (ret) {
		dev_err(adap->pdev_dev, "Err %d for Traffic Class config\n",
			ret);
		return -EINVAL;
	}
	dev_info(adap->pdev_dev,
		 "Class %d with MSS %u configured with rate %u\n",
		 class_id, pktsize, max_tx_rate);

	/* bind VF to configured Traffic Class */
	fw_pfvf = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
		   FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH));
	fw_class = class_id;
	ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1, &fw_pfvf,
			    &fw_class);
	if (ret) {
		dev_err(adap->pdev_dev,
			"Err %d in binding PF %d VF %d to Traffic Class %d\n",
			ret, adap->pf, vf, class_id);
		return -EINVAL;
	}
	dev_info(adap->pdev_dev, "PF %d VF %d is bound to Class %d\n",
		 adap->pf, vf, class_id);
	adap->vfinfo[vf].tx_rate = max_tx_rate;
	return 0;
}

static int cxgb4_mgmt_set_vf_vlan(struct net_device *dev, int vf,
				  u16 vlan, u8 qos, __be16 vlan_proto)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	int ret;

	if (vf >= adap->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan_proto != htons(ETH_P_8021Q) || qos != 0)
		return -EPROTONOSUPPORT;

	ret = t4_set_vlan_acl(adap, adap->mbox, vf + 1, vlan);
	if (!ret) {
		adap->vfinfo[vf].vlan = vlan;
		return 0;
	}

	dev_err(adap->pdev_dev, "Err %d %s VLAN ACL for PF/VF %d/%d\n",
		ret, (vlan ? "setting" : "clearing"), adap->pf, vf);
	return ret;
}

static int cxgb4_mgmt_set_vf_link_state(struct net_device *dev, int vf,
					int link)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	u32 param, val;
	int ret = 0;

	if (vf >= adap->num_vfs)
		return -EINVAL;

	switch (link) {
	case IFLA_VF_LINK_STATE_AUTO:
		val = FW_VF_LINK_STATE_AUTO;
		break;

	case IFLA_VF_LINK_STATE_ENABLE:
		val = FW_VF_LINK_STATE_ENABLE;
		break;

	case IFLA_VF_LINK_STATE_DISABLE:
		val = FW_VF_LINK_STATE_DISABLE;
		break;

	default:
		return -EINVAL;
	}

	param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
		 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_LINK_STATE));
	ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1,
			    &param, &val);
	if (ret) {
		dev_err(adap->pdev_dev,
			"Error %d in setting PF %d VF %d link state\n",
			ret, adap->pf, vf);
		return -EINVAL;
	}

	adap->vfinfo[vf].link_state = link;
	return ret;
}
#endif /* CONFIG_PCI_IOV */

static int cxgb_set_mac_addr(struct net_device *dev, void *p)
{
	int ret;
	struct sockaddr *addr = p;
	struct port_info *pi = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	ret = cxgb4_update_mac_filt(pi, pi->viid, &pi->xact_addr_filt,
				    addr->sa_data, true, &pi->smt_idx);
	if (ret < 0)
		return ret;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void cxgb_netpoll(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	if (adap->flags & CXGB4_USING_MSIX) {
		int i;
		struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];

		for (i = pi->nqsets; i; i--, rx++)
			t4_sge_intr_msix(0, &rx->rspq);
	} else
		t4_intr_handler(adap)(0, adap);
}
#endif

static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct ch_sched_queue qe = { 0 };
	struct ch_sched_params p = { 0 };
	struct sched_class *e;
	u32 req_rate;
	int err = 0;

	if (!can_sched(dev))
		return -ENOTSUPP;

	if (index < 0 || index > pi->nqsets - 1)
		return -EINVAL;

	if (!(adap->flags & CXGB4_FULL_INIT_DONE)) {
		dev_err(adap->pdev_dev,
			"Failed to rate limit on queue %d. Link Down?\n",
			index);
		return -EINVAL;
	}

	qe.queue = index;
	e = cxgb4_sched_queue_lookup(dev, &qe);
	if (e && e->info.u.params.level != SCHED_CLASS_LEVEL_CL_RL) {
		dev_err(adap->pdev_dev,
			"Queue %u already bound to class %u of type: %u\n",
			index, e->idx, e->info.u.params.level);
		return -EBUSY;
	}

	/* Convert from Mbps to Kbps */
	req_rate = rate * 1000;

	/* Max rate is 100 Gbps */
	if (req_rate > SCHED_MAX_RATE_KBPS) {
		dev_err(adap->pdev_dev,
			"Invalid rate %u Mbps, Max rate is %u Mbps\n",
			rate, SCHED_MAX_RATE_KBPS / 1000);
		return -ERANGE;
	}

	/* First unbind the queue from any existing class */
	memset(&qe, 0, sizeof(qe));
	qe.queue = index;
	qe.class = SCHED_CLS_NONE;

	err = cxgb4_sched_class_unbind(dev, (void *)(&qe), SCHED_QUEUE);
	if (err) {
		dev_err(adap->pdev_dev,
			"Unbinding Queue %d on port %d fail. Err: %d\n",
			index, pi->port_id, err);
		return err;
	}

	/* Queue already unbound */
	if (!req_rate)
		return 0;

	/* Fetch any available unused or matching scheduling class */
	p.type = SCHED_CLASS_TYPE_PACKET;
	p.u.params.level    = SCHED_CLASS_LEVEL_CL_RL;
	p.u.params.mode     = SCHED_CLASS_MODE_CLASS;
	p.u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS;
	p.u.params.ratemode = SCHED_CLASS_RATEMODE_ABS;
	p.u.params.channel  = pi->tx_chan;
	p.u.params.class    = SCHED_CLS_NONE;
	p.u.params.minrate  = 0;
	p.u.params.maxrate  = req_rate;
	p.u.params.weight   = 0;
	p.u.params.pktsize  = dev->mtu;

	e = cxgb4_sched_class_alloc(dev, &p);
	if (!e)
		return -ENOMEM;

	/* Bind the queue to a scheduling class */
	memset(&qe, 0, sizeof(qe));
	qe.queue = index;
	qe.class = e->idx;

	err = cxgb4_sched_class_bind(dev, (void *)(&qe), SCHED_QUEUE);
	if (err)
		dev_err(adap->pdev_dev,
			"Queue rate limiting failed. Err: %d\n", err);
	return err;
}

static int cxgb_setup_tc_flower(struct net_device *dev,
				struct flow_cls_offload *cls_flower)
{
	switch (cls_flower->command) {
	case FLOW_CLS_REPLACE:
		return cxgb4_tc_flower_replace(dev, cls_flower);
	case FLOW_CLS_DESTROY:
		return cxgb4_tc_flower_destroy(dev, cls_flower);
	case FLOW_CLS_STATS:
		return cxgb4_tc_flower_stats(dev, cls_flower);
	default:
		return -EOPNOTSUPP;
	}
}

static int cxgb_setup_tc_cls_u32(struct net_device *dev,
				 struct tc_cls_u32_offload *cls_u32)
{
	switch (cls_u32->command) {
	case TC_CLSU32_NEW_KNODE:
	case TC_CLSU32_REPLACE_KNODE:
		return cxgb4_config_knode(dev, cls_u32);
	case TC_CLSU32_DELETE_KNODE:
		return cxgb4_delete_knode(dev, cls_u32);
	default:
		return -EOPNOTSUPP;
	}
}

static int cxgb_setup_tc_matchall(struct net_device *dev,
				  struct tc_cls_matchall_offload *cls_matchall,
				  bool ingress)
{
	struct adapter *adap = netdev2adap(dev);

	if (!adap->tc_matchall)
		return -ENOMEM;

	switch (cls_matchall->command) {
	case TC_CLSMATCHALL_REPLACE:
		return cxgb4_tc_matchall_replace(dev, cls_matchall, ingress);
	case TC_CLSMATCHALL_DESTROY:
		return cxgb4_tc_matchall_destroy(dev, cls_matchall, ingress);
	case TC_CLSMATCHALL_STATS:
		if (ingress)
			return cxgb4_tc_matchall_stats(dev, cls_matchall);
		break;
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static int cxgb_setup_tc_block_ingress_cb(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	struct net_device *dev = cb_priv;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);

	if (!(adap->flags & CXGB4_FULL_INIT_DONE)) {
		dev_err(adap->pdev_dev,
			"Failed to setup tc on port %d. Link Down?\n",
			pi->port_id);
		return -EINVAL;
	}

	if (!tc_cls_can_offload_and_chain0(dev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSU32:
		return cxgb_setup_tc_cls_u32(dev, type_data);
	case TC_SETUP_CLSFLOWER:
		return cxgb_setup_tc_flower(dev, type_data);
	case TC_SETUP_CLSMATCHALL:
		return cxgb_setup_tc_matchall(dev, type_data, true);
	default:
		return -EOPNOTSUPP;
	}
}

static int cxgb_setup_tc_block_egress_cb(enum tc_setup_type type,
					 void *type_data, void *cb_priv)
{
	struct net_device *dev = cb_priv;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);

	if (!(adap->flags & CXGB4_FULL_INIT_DONE)) {
		dev_err(adap->pdev_dev,
			"Failed to setup tc on port %d. Link Down?\n",
			pi->port_id);
		return -EINVAL;
	}

	if (!tc_cls_can_offload_and_chain0(dev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		return cxgb_setup_tc_matchall(dev, type_data, false);
	default:
		break;
	}

	return -EOPNOTSUPP;
}

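/* MQPRIO offload needs the ETHOFLD queue resources advertised by the
 * firmware; without them (or without the tc_mqprio state) there is
 * nothing to bind the extra traffic classes to, so fail early.
 */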
static int cxgb_setup_tc_mqprio(struct net_device *dev,
				struct tc_mqprio_qopt_offload *mqprio)
{
	struct adapter *adap = netdev2adap(dev);

	if (!is_ethofld(adap) || !adap->tc_mqprio)
		return -ENOMEM;

	return cxgb4_setup_tc_mqprio(dev, mqprio);
}

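/* Driver-wide list used by flow_block_cb_setup_simple() to track the
 * flow block callbacks registered by this driver.
 */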
static LIST_HEAD(cxgb_block_cb_list);

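/* Bind a TC flow block to the port: the egress block only supports the
 * matchall classifier, while u32, flower and ingress matchall are all
 * dispatched through the ingress callback.
 */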
static int cxgb_setup_tc_block(struct net_device *dev,
			       struct flow_block_offload *f)
{
	struct port_info *pi = netdev_priv(dev);
	flow_setup_cb_t *cb;
	bool ingress_only;

	pi->tc_block_shared = f->block_shared;
	if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
		cb = cxgb_setup_tc_block_egress_cb;
		ingress_only = false;
	} else {
		cb = cxgb_setup_tc_block_ingress_cb;
		ingress_only = true;
	}

	return flow_block_cb_setup_simple(f, &cxgb_block_cb_list,
					  cb, pi, dev, ingress_only);
}

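/* ndo_setup_tc entry point: mqprio is handled directly, all classifier
 * offloads arrive via TC block binding.
 */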
static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type,
			 void *type_data)
{
	switch (type) {
	case TC_SETUP_QDISC_MQPRIO:
		return cxgb_setup_tc_mqprio(dev, type_data);
	case TC_SETUP_BLOCK:
		return cxgb_setup_tc_block(dev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static void cxgb_del_udp_tunnel(struct net_device *netdev,
				struct udp_tunnel_info *ti)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adapter = pi->adapter;
	unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
	u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
	int ret = 0, i;

	if (chip_ver < CHELSIO_T6)
		return;

	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		if (!adapter->vxlan_port_cnt ||
		    adapter->vxlan_port != ti->port)
			return; /* Invalid VxLAN destination port */

		adapter->vxlan_port_cnt--;
		if (adapter->vxlan_port_cnt)
			return;

		adapter->vxlan_port = 0;
		t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A, 0);
		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		if (!adapter->geneve_port_cnt ||
		    adapter->geneve_port != ti->port)
			return; /* Invalid GENEVE destination port */

		adapter->geneve_port_cnt--;
		if (adapter->geneve_port_cnt)
			return;

		adapter->geneve_port = 0;
		t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A, 0);
		break;
	default:
		return;
	}

	/* Matchall mac entries can be deleted only after all tunnel ports
	 * are brought down or removed.
	 */
	if (!adapter->rawf_cnt)
		return;
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		ret = t4_free_raw_mac_filt(adapter, pi->viid,
					   match_all_mac, match_all_mac,
					   adapter->rawf_start +
					    pi->port_id,
					   1, pi->port_id, false);
		if (ret < 0) {
			netdev_info(netdev, "Failed to free mac filter entry for port %d\n",
				    i);
			return;
		}
	}
}

static void cxgb_add_udp_tunnel(struct net_device *netdev,
				struct udp_tunnel_info *ti)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adapter = pi->adapter;
	unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
	u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
	int i, ret;

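	/* UDP tunnel offload needs a T6+ part and raw MAC filter entries. */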
	if (chip_ver < CHELSIO_T6 || !adapter->rawf_cnt)
		return;

	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		/* Callback for adding vxlan port can be called with the same
		 * port for both IPv4 and IPv6. We should not disable the
		 * offloading when the same port for both protocols is added
		 * and later one of them is removed.
		 */
		if (adapter->vxlan_port_cnt &&
		    adapter->vxlan_port == ti->port) {
			adapter->vxlan_port_cnt++;
			return;
		}

		/* We will support only one VxLAN port */
		if (adapter->vxlan_port_cnt) {
			netdev_info(netdev, "UDP port %d already offloaded, not adding port %d\n",
				    be16_to_cpu(adapter->vxlan_port),
				    be16_to_cpu(ti->port));
			return;
		}

		adapter->vxlan_port = ti->port;
		adapter->vxlan_port_cnt = 1;

		t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A,
			     VXLAN_V(be16_to_cpu(ti->port)) | VXLAN_EN_F);
		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		if (adapter->geneve_port_cnt &&
		    adapter->geneve_port == ti->port) {
			adapter->geneve_port_cnt++;
			return;
		}

		/* We will support only one GENEVE port */
		if (adapter->geneve_port_cnt) {
			netdev_info(netdev, "UDP port %d already offloaded, not adding port %d\n",
				    be16_to_cpu(adapter->geneve_port),
				    be16_to_cpu(ti->port));
			return;
		}

		adapter->geneve_port = ti->port;
		adapter->geneve_port_cnt = 1;

		t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A,
			     GENEVE_V(be16_to_cpu(ti->port)) | GENEVE_EN_F);
		break;
	default:
		return;
	}

	/* Create a 'match all' mac filter entry for inner mac,
	 * if raw mac interface is supported. Once the linux kernel provides
	 * driver entry points for adding/deleting the inner mac addresses,
	 * we will remove this 'match all' entry and fallback to adding
	 * exact match filters.
	 */
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);

		ret = t4_alloc_raw_mac_filt(adapter, pi->viid,
					    match_all_mac,
					    match_all_mac,
					    adapter->rawf_start +
					    pi->port_id,
					    1, pi->port_id, false);
		if (ret < 0) {
			netdev_info(netdev, "Failed to allocate a mac filter entry, not adding port %d\n",
				    be16_to_cpu(ti->port));
			cxgb_del_udp_tunnel(netdev, ti);
			return;
		}
	}
}

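/* ndo_features_check: on T6 and later, mask off the checksum and GSO
 * offload bits for encapsulated packets whose tunnel type the hardware
 * cannot offload.
 */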
static netdev_features_t cxgb_features_check(struct sk_buff *skb,
					     struct net_device *dev,
					     netdev_features_t features)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	if (CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6)
		return features;

	/* Check if hw supports offload for this packet */
	if (!skb->encapsulation || cxgb_encap_offload_supported(skb))
		return features;

	/* Offload is not supported for this encapsulated packet */
	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}

static netdev_features_t cxgb_fix_features(struct net_device *dev,
					   netdev_features_t features)
{
	/* Disable GRO, if RX_CSUM is disabled */
	if (!(features & NETIF_F_RXCSUM))
		features &= ~NETIF_F_GRO;

	return features;
}

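/* Net device operations for the regular per-port net devices; the
 * SR-IOV management interface below uses its own minimal set.
 */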
static const struct net_device_ops cxgb4_netdev_ops = {
	.ndo_open             = cxgb_open,
	.ndo_stop             = cxgb_close,
	.ndo_start_xmit       = t4_start_xmit,
	.ndo_select_queue     =	cxgb_select_queue,
	.ndo_get_stats64      = cxgb_get_stats,
	.ndo_set_rx_mode      = cxgb_set_rxmode,
	.ndo_set_mac_address  = cxgb_set_mac_addr,
	.ndo_set_features     = cxgb_set_features,
	.ndo_validate_addr    = eth_validate_addr,
	.ndo_do_ioctl         = cxgb_ioctl,
	.ndo_change_mtu       = cxgb_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller  = cxgb_netpoll,
#endif
#ifdef CONFIG_CHELSIO_T4_FCOE
	.ndo_fcoe_enable      = cxgb_fcoe_enable,
	.ndo_fcoe_disable     = cxgb_fcoe_disable,
#endif /* CONFIG_CHELSIO_T4_FCOE */
	.ndo_set_tx_maxrate   = cxgb_set_tx_maxrate,
	.ndo_setup_tc         = cxgb_setup_tc,
	.ndo_udp_tunnel_add   = cxgb_add_udp_tunnel,
	.ndo_udp_tunnel_del   = cxgb_del_udp_tunnel,
	.ndo_features_check   = cxgb_features_check,
	.ndo_fix_features     = cxgb_fix_features,
};

#ifdef CONFIG_PCI_IOV
static const struct net_device_ops cxgb4_mgmt_netdev_ops = {
	.ndo_open               = cxgb4_mgmt_open,
	.ndo_set_vf_mac         = cxgb4_mgmt_set_vf_mac,
	.ndo_get_vf_config      = cxgb4_mgmt_get_vf_config,
	.ndo_set_vf_rate        = cxgb4_mgmt_set_vf_rate,
	.ndo_get_phys_port_id   = cxgb4_mgmt_get_phys_port_id,
	.ndo_set_vf_vlan        = cxgb4_mgmt_set_vf_vlan,
	.ndo_set_vf_link_state	= cxgb4_mgmt_set_vf_link_state,
};
#endif

static void cxgb4_mgmt_get_drvinfo(struct net_device *dev,
				   struct ethtool_drvinfo *info)
{
	struct adapter *adapter = netdev2adap(dev);

	strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
	strlcpy(info->bus_info, pci_name(adapter->pdev),
		sizeof(info->bus_info));
}

static const struct ethtool_ops cxgb4_mgmt_ethtool_ops = {
	.get_drvinfo       = cxgb4_mgmt_get_drvinfo,
};

static void notify_fatal_err(struct work_struct *work)
{
	struct adapter *adap;

	adap = container_of(work, struct adapter, fatal_err_notify_task);
	notify_ulds(adap, CXGB4_STATE_FATAL_ERROR);
}

void t4_fatal_err(struct adapter *adap)
{
	int port;

	if (pci_channel_offline(adap->pdev))
		return;

	/* Disable the SGE since ULDs are going to free resources that
	 * could be exposed to the adapter.  RDMA MWs for example...
	 */
	t4_shutdown_adapter(adap);
	for_each_port(adap, port) {
		struct net_device *dev = adap->port[port];

		/* If we get here in very early initialization the network
		 * devices may not have been set up yet.
		 */
		if (!dev)
			continue;

		netif_tx_stop_all_queues(dev);
		netif_carrier_off(dev);
	}
	dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
	queue_work(adap->workq, &adap->fatal_err_notify_task);
}

static void setup_memwin(struct adapter *adap)
{
	u32 nic_win_base = t4_get_util_window(adap);

	t4_setup_memwin(adap, nic_win_base, MEMWIN_NIC);
}

static void setup_memwin_rdma(struct adapter *adap)
{
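	/* Expose the on-chip RDMA queue (OCQ) memory, if the firmware
	 * provisioned any, through PCIe memory access window 3.
	 */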
	if (adap->vres.ocq.size) {
		u32 start;
		unsigned int sz_kb;

		start = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_2);
		start &= PCI_BASE_ADDRESS_MEM_MASK;
		start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
		sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
		t4_write_reg(adap,
			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 3),
			     start | BIR_V(1) | WINDOW_V(ilog2(sz_kb)));
		t4_write_reg(adap,
			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3),
			     adap->vres.ocq.start);
		t4_read_reg(adap,
			    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3));
	}
}

/* HMA Definitions */

/* The maximum number of addresses that can be sent in a single FW cmd */
#define HMA_MAX_ADDR_IN_CMD	5

#define HMA_PAGE_SIZE		PAGE_SIZE

#define HMA_MAX_NO_FW_ADDRESS	(16 << 10)  /* FW supports 16K addresses */

#define HMA_PAGE_ORDER					\
	((HMA_PAGE_SIZE < HMA_MAX_NO_FW_ADDRESS) ?	\
	ilog2(HMA_MAX_NO_FW_ADDRESS / HMA_PAGE_SIZE) : 0)

/* The minimum and maximum possible HMA sizes that can be specified in the FW
 * configuration(in units of MB).
 */
#define HMA_MIN_TOTAL_SIZE	1
#define HMA_MAX_TOTAL_SIZE				\
	(((HMA_PAGE_SIZE << HMA_PAGE_ORDER) *		\
	  HMA_MAX_NO_FW_ADDRESS) >> 20)

static void adap_free_hma_mem(struct adapter *adapter)
{
	struct scatterlist *iter;
	struct page *page;
	int i;

	if (!adapter->hma.sgt)
		return;

	if (adapter->hma.flags & HMA_DMA_MAPPED_FLAG) {
		dma_unmap_sg(adapter->pdev_dev, adapter->hma.sgt->sgl,
			     adapter->hma.sgt->nents, PCI_DMA_BIDIRECTIONAL);
		adapter->hma.flags &= ~HMA_DMA_MAPPED_FLAG;
	}

	for_each_sg(adapter->hma.sgt->sgl, iter,
		    adapter->hma.sgt->orig_nents, i) {
		page = sg_page(iter);
		if (page)
			__free_pages(page, HMA_PAGE_ORDER);
	}

	kfree(adapter->hma.phy_addr);
	sg_free_table(adapter->hma.sgt);
	kfree(adapter->hma.sgt);
	adapter->hma.sgt = NULL;
}

static int adap_config_hma(struct adapter *adapter)
{
	struct scatterlist *sgl, *iter;
	struct sg_table *sgt;
	struct page *newpage;
	unsigned int i, j, k;
	u32 param, hma_size;
	unsigned int ncmds;
	size_t page_size;
	u32 page_order;
	int node, ret;

	/* HMA is supported only for T6+ cards.
	 * Avoid initializing HMA in kdump kernels.
	 */
	if (is_kdump_kernel() ||
	    CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6)
		return 0;

	/* Get the HMA region size required by fw */
	param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_HMA_SIZE));
	ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
			      1, &param, &hma_size);
	/* An error means card has its own memory or HMA is not supported by
	 * the firmware. Return without any errors.
	 */
	if (ret || !hma_size)
		return 0;

	if (hma_size < HMA_MIN_TOTAL_SIZE ||
	    hma_size > HMA_MAX_TOTAL_SIZE) {
		dev_err(adapter->pdev_dev,
			"HMA size %uMB beyond bounds (%u-%lu)MB\n",
			hma_size, HMA_MIN_TOTAL_SIZE, HMA_MAX_TOTAL_SIZE);
		return -EINVAL;
	}

	page_size = HMA_PAGE_SIZE;
	page_order = HMA_PAGE_ORDER;
	adapter->hma.sgt = kzalloc(sizeof(*adapter->hma.sgt), GFP_KERNEL);
	if (unlikely(!adapter->hma.sgt)) {
		dev_err(adapter->pdev_dev, "HMA SG table allocation failed\n");
		return -ENOMEM;
	}
	sgt = adapter->hma.sgt;
	/* FW returned value will be in MB's
	 */
	sgt->orig_nents = (hma_size << 20) / (page_size << page_order);
	if (sg_alloc_table(sgt, sgt->orig_nents, GFP_KERNEL)) {
		dev_err(adapter->pdev_dev, "HMA SGL allocation failed\n");
		kfree(adapter->hma.sgt);
		adapter->hma.sgt = NULL;
		return -ENOMEM;
	}

	sgl = adapter->hma.sgt->sgl;
	node = dev_to_node(adapter->pdev_dev);
	for_each_sg(sgl, iter, sgt->orig_nents, i) {
		newpage = alloc_pages_node(node, __GFP_NOWARN | GFP_KERNEL |
					   __GFP_ZERO, page_order);
		if (!newpage) {
			dev_err(adapter->pdev_dev,
				"Not enough memory for HMA page allocation\n");
			ret = -ENOMEM;
			goto free_hma;
		}
		sg_set_page(iter, newpage, page_size << page_order, 0);
	}

	sgt->nents = dma_map_sg(adapter->pdev_dev, sgl, sgt->orig_nents,
				DMA_BIDIRECTIONAL);
	if (!sgt->nents) {
		dev_err(adapter->pdev_dev,
			"Not enough memory for HMA DMA mapping\n");
		ret = -ENOMEM;
		goto free_hma;
	}
	adapter->hma.flags |= HMA_DMA_MAPPED_FLAG;

	adapter->hma.phy_addr = kcalloc(sgt->nents, sizeof(dma_addr_t),
					GFP_KERNEL);
	if (unlikely(!adapter->hma.phy_addr))
		goto free_hma;

	for_each_sg(sgl, iter, sgt->nents, i) {
		newpage = sg_page(iter);
		adapter->hma.phy_addr[i] = sg_dma_address(iter);
	}

	ncmds = DIV_ROUND_UP(sgt->nents, HMA_MAX_ADDR_IN_CMD);
	/* Pass on the addresses to firmware */
	for (i = 0, k = 0; i < ncmds; i++, k += HMA_MAX_ADDR_IN_CMD) {
		struct fw_hma_cmd hma_cmd;
		u8 naddr = HMA_MAX_ADDR_IN_CMD;
		u8 soc = 0, eoc = 0;
		u8 hma_mode = 1; /* Presently we support only Page table mode */

		soc = (i == 0) ? 1 : 0;
		eoc = (i == ncmds - 1) ? 1 : 0;

		/* For last cmd, set naddr corresponding to remaining
		 * addresses
		 */
		if (i == ncmds - 1) {
			naddr = sgt->nents % HMA_MAX_ADDR_IN_CMD;
			naddr = naddr ? naddr : HMA_MAX_ADDR_IN_CMD;
		}
		memset(&hma_cmd, 0, sizeof(hma_cmd));
		hma_cmd.op_pkd = htonl(FW_CMD_OP_V(FW_HMA_CMD) |
				       FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
		hma_cmd.retval_len16 = htonl(FW_LEN16(hma_cmd));

		hma_cmd.mode_to_pcie_params =
			htonl(FW_HMA_CMD_MODE_V(hma_mode) |
			      FW_HMA_CMD_SOC_V(soc) | FW_HMA_CMD_EOC_V(eoc));

		/* HMA cmd size specified in MB's */
		hma_cmd.naddr_size =
			htonl(FW_HMA_CMD_SIZE_V(hma_size) |
			      FW_HMA_CMD_NADDR_V(naddr));

		/* Total Page size specified in units of 4K */
		hma_cmd.addr_size_pkd =
			htonl(FW_HMA_CMD_ADDR_SIZE_V
				((page_size << page_order) >> 12));

		/* Fill the 5 addresses */
		for (j = 0; j < naddr; j++) {
			hma_cmd.phy_address[j] =
				cpu_to_be64(adapter->hma.phy_addr[j + k]);
		}
		ret = t4_wr_mbox(adapter, adapter->mbox, &hma_cmd,
				 sizeof(hma_cmd), &hma_cmd);
		if (ret) {
			dev_err(adapter->pdev_dev,
				"HMA FW command failed with err %d\n", ret);
			goto free_hma;
		}
	}

	if (!ret)
		dev_info(adapter->pdev_dev,
			 "Reserved %uMB host memory for HMA\n", hma_size);
	return ret;

free_hma:
	adap_free_hma_mem(adapter);
	return ret;
}

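/* Finish basic initialization once the firmware is up: query the
 * provisioned resources and device capabilities, program the global RSS
 * mode and PF/VF resource limits, apply a few TP tweaks and finally run
 * t4_early_init().
 */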
static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
{
	u32 v;
	int ret;

	/* Now that we've successfully configured and initialized the adapter
	 * can ask the Firmware what resources it has provisioned for us.
	 */
	ret = t4_get_pfres(adap);
	if (ret) {
		dev_err(adap->pdev_dev,
			"Unable to retrieve resource provisioning information\n");
		return ret;
	}

	/* get device capabilities */
	memset(c, 0, sizeof(*c));
	c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
			       FW_CMD_REQUEST_F | FW_CMD_READ_F);
	c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
	ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), c);
	if (ret < 0)
		return ret;

	c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
			       FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
	ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), NULL);
	if (ret < 0)
		return ret;

	ret = t4_config_glbl_rss(adap, adap->pf,
				 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
				 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F |
				 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F);
	if (ret < 0)
		return ret;

	ret = t4_cfg_pfvf(adap, adap->mbox, adap->pf, 0, adap->sge.egr_sz, 64,
			  MAX_INGQ, 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF,
			  FW_CMD_CAP_PF);
	if (ret < 0)
		return ret;

	t4_sge_init(adap);

	/* tweak some settings */
	t4_write_reg(adap, TP_SHIFT_CNT_A, 0x64f8849);
	t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(PAGE_SHIFT - 12));
	t4_write_reg(adap, TP_PIO_ADDR_A, TP_INGRESS_CONFIG_A);
	v = t4_read_reg(adap, TP_PIO_DATA_A);
	t4_write_reg(adap, TP_PIO_DATA_A, v & ~CSUM_HAS_PSEUDO_HDR_F);

	/* first 4 Tx modulation queues point to consecutive Tx channels */
	adap->params.tp.tx_modq_map = 0xE4;
	t4_write_reg(adap, TP_TX_MOD_QUEUE_REQ_MAP_A,
		     TX_MOD_QUEUE_REQ_MAP_V(adap->params.tp.tx_modq_map));

	/* associate each Tx modulation queue with consecutive Tx channels */
	v = 0x84218421;
	t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
			  &v, 1, TP_TX_SCHED_HDR_A);
	t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
			  &v, 1, TP_TX_SCHED_FIFO_A);
	t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
			  &v, 1, TP_TX_SCHED_PCMD_A);

#define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
	if (is_offload(adap)) {
		t4_write_reg(adap, TP_TX_MOD_QUEUE_WEIGHT0_A,
			     TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
		t4_write_reg(adap, TP_TX_MOD_CHANNEL_WEIGHT_A,
			     TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
	}

	/* get basic stuff going */
	return t4_early_init(adap, adap->pf);
}

/*
 * Max # of ATIDs.  The absolute HW max is 16K but we keep it lower.
 */
#define MAX_ATIDS 8192U

/*
 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
 *
 * If the firmware we're dealing with has Configuration File support, then
 * we use that to perform all configuration
 */

/*
 * Tweak configuration based on module parameters, etc.  Most of these have
 * defaults assigned to them by Firmware Configuration Files (if we're using
 * them) but need to be explicitly set if we're using hard-coded
 * initialization.  But even in the case of using Firmware Configuration
 * Files, we'd like to expose the ability to change these via module
 * parameters so these are essentially common tweaks/settings for
 * Configuration Files and hard-coded initialization ...
 */
static int adap_init0_tweaks(struct adapter *adapter)
{
	/*
	 * Fix up various Host-Dependent Parameters like Page Size, Cache
	 * Line Size, etc.  The firmware default is for a 4KB Page Size and
	 * 64B Cache Line Size ...
	 */
	t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);

	/*
	 * Process module parameters which affect early initialization.
	 */
	if (rx_dma_offset != 2 && rx_dma_offset != 0) {
		dev_err(&adapter->pdev->dev,
			"Ignoring illegal rx_dma_offset=%d, using 2\n",
			rx_dma_offset);
		rx_dma_offset = 2;
	}
	t4_set_reg_field(adapter, SGE_CONTROL_A,
			 PKTSHIFT_V(PKTSHIFT_M),
			 PKTSHIFT_V(rx_dma_offset));

	/*
	 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
	 * adds the pseudo header itself.
	 */
	t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG_A,
			       CSUM_HAS_PSEUDO_HDR_F, 0);

	return 0;
}

/* 10Gb/s-BT PHY Support. chip-external 10Gb/s-BT PHYs are complex chips
 * unto themselves and they contain their own firmware to perform their
 * tasks ...
 */
static int phy_aq1202_version(const u8 *phy_fw_data,
			      size_t phy_fw_size)
{
	int offset;

	/* At offset 0x8 you're looking for the primary image's
	 * starting offset which is 3 Bytes wide
	 *
	 * At offset 0xa of the primary image, you look for the offset
	 * of the DRAM segment which is 3 Bytes wide.
	 *
	 * The FW version is at offset 0x27e of the DRAM and is 2 Bytes
	 * wide
	 */
	#define be16(__p) (((__p)[0] << 8) | (__p)[1])
	#define le16(__p) ((__p)[0] | ((__p)[1] << 8))
	#define le24(__p) (le16(__p) | ((__p)[2] << 16))

	offset = le24(phy_fw_data + 0x8) << 12;
	offset = le24(phy_fw_data + offset + 0xa);
	return be16(phy_fw_data + offset + 0x27e);

	#undef be16
	#undef le16
	#undef le24
}

static struct info_10gbt_phy_fw {
	unsigned int phy_fw_id;		/* PCI Device ID */
	char *phy_fw_file;		/* /lib/firmware/ PHY Firmware file */
	int (*phy_fw_version)(const u8 *phy_fw_data, size_t phy_fw_size);
	int phy_flash;			/* Has FLASH for PHY Firmware */
} phy_info_array[] = {
	{
		PHY_AQ1202_DEVICEID,
		PHY_AQ1202_FIRMWARE,
		phy_aq1202_version,
		1,
	},
	{
		PHY_BCM84834_DEVICEID,
		PHY_BCM84834_FIRMWARE,
		NULL,
		0,
	},
	{ 0, NULL, NULL },
};

static struct info_10gbt_phy_fw *find_phy_info(int devid)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(phy_info_array); i++) {
		if (phy_info_array[i].phy_fw_id == devid)
			return &phy_info_array[i];
	}
	return NULL;
}

/* Handle updating of chip-external 10Gb/s-BT PHY firmware.  This needs to
 * happen after the FW_RESET_CMD but before the FW_INITIALIZE_CMD.  On error
 * we return a negative error number.  If we transfer new firmware we return 1
 * (from t4_load_phy_fw()).  If we don't do anything we return 0.
 */
static int adap_init0_phy(struct adapter *adap)
{
	const struct firmware *phyf;
	int ret;
	struct info_10gbt_phy_fw *phy_info;

	/* Use the device ID to determine which PHY file to flash.
	 */
	phy_info = find_phy_info(adap->pdev->device);
	if (!phy_info) {
		dev_warn(adap->pdev_dev,
			 "No PHY Firmware file found for this PHY\n");
		return -EOPNOTSUPP;
	}

	/* If we have a T4 PHY firmware file under /lib/firmware/cxgb4/, then
	 * use that. The adapter firmware provides us with a memory buffer
	 * where we can load a PHY firmware file from the host if we want to
	 * override the PHY firmware file in flash.
	 */
	ret = request_firmware_direct(&phyf, phy_info->phy_fw_file,
				      adap->pdev_dev);
	if (ret < 0) {
		/* For adapters without FLASH attached to PHY for their
		 * firmware, it's obviously a fatal error if we can't get the
		 * firmware to the adapter.  For adapters with PHY firmware
		 * FLASH storage, it's worth a warning if we can't find the
		 * PHY Firmware but we'll neuter the error ...
		 */
		dev_err(adap->pdev_dev, "unable to find PHY Firmware image "
			"/lib/firmware/%s, error %d\n",
			phy_info->phy_fw_file, -ret);
		if (phy_info->phy_flash) {
			int cur_phy_fw_ver = 0;

			t4_phy_fw_ver(adap, &cur_phy_fw_ver);
			dev_warn(adap->pdev_dev, "continuing with, on-adapter "
				 "FLASH copy, version %#x\n", cur_phy_fw_ver);
			ret = 0;
		}

		return ret;
	}

	/* Load PHY Firmware onto adapter.
	 */
	spin_lock_bh(&adap->win0_lock);
	ret = t4_load_phy_fw(adap, MEMWIN_NIC, phy_info->phy_fw_version,
			     (u8 *)phyf->data, phyf->size);
	spin_unlock_bh(&adap->win0_lock);
	if (ret < 0)
		dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n",
			-ret);
	else if (ret > 0) {
		int new_phy_fw_ver = 0;

		if (phy_info->phy_fw_version)
			new_phy_fw_ver = phy_info->phy_fw_version(phyf->data,
								  phyf->size);
		dev_info(adap->pdev_dev, "Successfully transferred PHY "
			 "Firmware /lib/firmware/%s, version %#x\n",
			 phy_info->phy_fw_file, new_phy_fw_ver);
	}

	release_firmware(phyf);

	return ret;
}

/*
 * Attempt to initialize the adapter via a Firmware Configuration File.
 */
static int adap_init0_config(struct adapter *adapter, int reset)
{
	char *fw_config_file, fw_config_file_path[256];
	u32 finiver, finicsum, cfcsum, param, val;
	struct fw_caps_config_cmd caps_cmd;
	unsigned long mtype = 0, maddr = 0;
	const struct firmware *cf;
	char *config_name = NULL;
	int config_issued = 0;
	int ret;

	/*
	 * Reset device if necessary.
	 */
	if (reset) {
		ret = t4_fw_reset(adapter, adapter->mbox,
				  PIORSTMODE_F | PIORST_F);
		if (ret < 0)
			goto bye;
	}

	/* If this is a 10Gb/s-BT adapter make sure the chip-external
	 * 10Gb/s-BT PHYs have up-to-date firmware.  Note that this step needs
	 * to be performed after any global adapter RESET above since some
	 * PHYs only have local RAM copies of the PHY firmware.
	 */
	if (is_10gbt_device(adapter->pdev->device)) {
		ret = adap_init0_phy(adapter);
		if (ret < 0)
			goto bye;
	}
	/*
	 * If we have a T4 configuration file under /lib/firmware/cxgb4/,
	 * then use that.  Otherwise, use the configuration file stored
	 * in the adapter flash ...
	 */
	switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
	case CHELSIO_T4:
		fw_config_file = FW4_CFNAME;
		break;
	case CHELSIO_T5:
		fw_config_file = FW5_CFNAME;
		break;
	case CHELSIO_T6:
		fw_config_file = FW6_CFNAME;
		break;
	default:
		dev_err(adapter->pdev_dev, "Device %d is not supported\n",
		       adapter->pdev->device);
		ret = -EINVAL;
		goto bye;
	}

	ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
	if (ret < 0) {
		config_name = "On FLASH";
		mtype = FW_MEMTYPE_CF_FLASH;
		maddr = t4_flash_cfg_addr(adapter);
	} else {
		u32 params[7], val[7];

		sprintf(fw_config_file_path,
			"/lib/firmware/%s", fw_config_file);
		config_name = fw_config_file_path;

		if (cf->size >= FLASH_CFG_MAX_SIZE)
			ret = -ENOMEM;
		else {
			params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
			     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
			ret = t4_query_params(adapter, adapter->mbox,
					      adapter->pf, 0, 1, params, val);
			if (ret == 0) {
				/*
				 * For t4_memory_rw() below addresses and
				 * sizes have to be in terms of multiples of 4
				 * bytes.  So, if the Configuration File isn't
				 * a multiple of 4 bytes in length we'll have
				 * to write that out separately since we can't
				 * guarantee that the bytes following the
				 * residual byte in the buffer returned by
				 * request_firmware() are zeroed out ...
				 */
				size_t resid = cf->size & 0x3;
				size_t size = cf->size & ~0x3;
				__be32 *data = (__be32 *)cf->data;

				mtype = FW_PARAMS_PARAM_Y_G(val[0]);
				maddr = FW_PARAMS_PARAM_Z_G(val[0]) << 16;

				spin_lock(&adapter->win0_lock);
				ret = t4_memory_rw(adapter, 0, mtype, maddr,
						   size, data, T4_MEMORY_WRITE);
				if (ret == 0 && resid != 0) {
					union {
						__be32 word;
						char buf[4];
					} last;
					int i;

					last.word = data[size >> 2];
					for (i = resid; i < 4; i++)
						last.buf[i] = 0;
					ret = t4_memory_rw(adapter, 0, mtype,
							   maddr + size,
							   4, &last.word,
							   T4_MEMORY_WRITE);
				}
				spin_unlock(&adapter->win0_lock);
			}
		}

		release_firmware(cf);
		if (ret)
			goto bye;
	}

	val = 0;

	/* Ofld + Hash filter is supported. Older fw will fail this request and
	 * it is fine.
	 */
	param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_HASHFILTER_WITH_OFLD));
	ret = t4_set_params(adapter, adapter->mbox, adapter->pf, 0,
			    1, &param, &val);

	/* FW doesn't know about Hash filter + ofld support,
	 * it's not a problem, don't return an error.
	 */
	if (ret < 0) {
		dev_warn(adapter->pdev_dev,
			 "Hash filter with ofld is not supported by FW\n");
	}

	/*
	 * Issue a Capability Configuration command to the firmware to get it
	 * to parse the Configuration File.  We don't use t4_fw_config_file()
	 * because we want the ability to modify various features after we've
	 * processed the configuration file ...
	 */
	memset(&caps_cmd, 0, sizeof(caps_cmd));
	caps_cmd.op_to_write =
		htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST_F |
		      FW_CMD_READ_F);
	caps_cmd.cfvalid_to_len16 =
		htonl(FW_CAPS_CONFIG_CMD_CFVALID_F |
		      FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) |
		      FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) |
		      FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
			 &caps_cmd);

	/* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
	 * Configuration File in FLASH), our last gasp effort is to use the
	 * Firmware Configuration File which is embedded in the firmware.  A
	 * very few early versions of the firmware didn't have one embedded
	 * but we can ignore those.
	 */
	if (ret == -ENOENT) {
		memset(&caps_cmd, 0, sizeof(caps_cmd));
		caps_cmd.op_to_write =
			htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
					FW_CMD_REQUEST_F |
					FW_CMD_READ_F);
		caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
		ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
				sizeof(caps_cmd), &caps_cmd);
		config_name = "Firmware Default";
	}

	config_issued = 1;
	if (ret < 0)
		goto bye;

	finiver = ntohl(caps_cmd.finiver);
	finicsum = ntohl(caps_cmd.finicsum);
	cfcsum = ntohl(caps_cmd.cfcsum);
	if (finicsum != cfcsum)
		dev_warn(adapter->pdev_dev, "Configuration File checksum "\
			 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
			 finicsum, cfcsum);

	/*
	 * And now tell the firmware to use the configuration we just loaded.
	 */
	caps_cmd.op_to_write =
		htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST_F |
		      FW_CMD_WRITE_F);
	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
			 NULL);
	if (ret < 0)
		goto bye;

	/*
	 * Tweak configuration based on system architecture, module
	 * parameters, etc.
	 */
	ret = adap_init0_tweaks(adapter);
	if (ret < 0)
		goto bye;

	/* We will proceed even if HMA init fails. */
	ret = adap_config_hma(adapter);
	if (ret)
		dev_err(adapter->pdev_dev,
			"HMA configuration failed with error %d\n", ret);

	if (is_t6(adapter->params.chip)) {
		adap_config_hpfilter(adapter);
		ret = setup_ppod_edram(adapter);
		if (!ret)
			dev_info(adapter->pdev_dev, "Successfully enabled "
				 "ppod edram feature\n");
	}

	/*
	 * And finally tell the firmware to initialize itself using the
	 * parameters from the Configuration File.
	 */
	ret = t4_fw_initialize(adapter, adapter->mbox);
	if (ret < 0)
		goto bye;

	/* Emit Firmware Configuration File information and return
	 * successfully.
	 */
	dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
		 "Configuration File \"%s\", version %#x, computed checksum %#x\n",
		 config_name, finiver, cfcsum);
	return 0;

	/*
	 * Something bad happened.  Return the error ...  (If the "error"
	 * is that there's no Configuration File on the adapter we don't
	 * want to issue a warning since this is fairly common.)
	 */
bye:
	if (config_issued && ret != -ENOENT)
		dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n",
			 config_name, -ret);
	return ret;
}

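/* Firmware images known to this driver, one entry per chip generation.
 * find_fw_info() below picks the entry that matches the adapter so
 * t4_prep_fw() can decide whether the flashed firmware needs upgrading.
 */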
static struct fw_info fw_info_array[] = {
	{
		.chip = CHELSIO_T4,
		.fs_name = FW4_CFNAME,
		.fw_mod_name = FW4_FNAME,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T4,
			.fw_ver = __cpu_to_be32(FW_VERSION(T4)),
			.intfver_nic = FW_INTFVER(T4, NIC),
			.intfver_vnic = FW_INTFVER(T4, VNIC),
			.intfver_ri = FW_INTFVER(T4, RI),
			.intfver_iscsi = FW_INTFVER(T4, ISCSI),
			.intfver_fcoe = FW_INTFVER(T4, FCOE),
		},
	}, {
		.chip = CHELSIO_T5,
		.fs_name = FW5_CFNAME,
		.fw_mod_name = FW5_FNAME,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T5,
			.fw_ver = __cpu_to_be32(FW_VERSION(T5)),
			.intfver_nic = FW_INTFVER(T5, NIC),
			.intfver_vnic = FW_INTFVER(T5, VNIC),
			.intfver_ri = FW_INTFVER(T5, RI),
			.intfver_iscsi = FW_INTFVER(T5, ISCSI),
			.intfver_fcoe = FW_INTFVER(T5, FCOE),
		},
	}, {
		.chip = CHELSIO_T6,
		.fs_name = FW6_CFNAME,
		.fw_mod_name = FW6_FNAME,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T6,
			.fw_ver = __cpu_to_be32(FW_VERSION(T6)),
			.intfver_nic = FW_INTFVER(T6, NIC),
			.intfver_vnic = FW_INTFVER(T6, VNIC),
			.intfver_ofld = FW_INTFVER(T6, OFLD),
			.intfver_ri = FW_INTFVER(T6, RI),
			.intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU),
			.intfver_iscsi = FW_INTFVER(T6, ISCSI),
			.intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU),
			.intfver_fcoe = FW_INTFVER(T6, FCOE),
		},
	}

};

static struct fw_info *find_fw_info(int chip)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
		if (fw_info_array[i].chip == chip)
			return &fw_info_array[i];
	}
	return NULL;
}

/*
 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
 */
static int adap_init0(struct adapter *adap, int vpd_skip)
{
	struct fw_caps_config_cmd caps_cmd;
	u32 params[7], val[7];
	enum dev_state state;
	u32 v, port_vec;
	int reset = 1;
	int ret;

	/* Grab Firmware Device Log parameters as early as possible so we have
	 * access to it for debugging, etc.
	 */
	ret = t4_init_devlog_params(adap);
	if (ret < 0)
		return ret;

	/* Contact FW, advertising Master capability */
	ret = t4_fw_hello(adap, adap->mbox, adap->mbox,
			  is_kdump_kernel() ? MASTER_MUST : MASTER_MAY, &state);
	if (ret < 0) {
		dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
			ret);
		return ret;
	}
	if (ret == adap->mbox)
		adap->flags |= CXGB4_MASTER_PF;

	/*
	 * If we're the Master PF Driver and the device is uninitialized,
	 * then let's consider upgrading the firmware ...  (We always want
	 * to check the firmware version number in order to A. get it for
	 * later reporting and B. to warn if the currently loaded firmware
	 * is excessively mismatched relative to the driver.)
	 */

	t4_get_version_info(adap);
	ret = t4_check_fw_version(adap);
	/* If firmware is too old (not supported by driver) force an update. */
	if (ret)
		state = DEV_STATE_UNINIT;
	if ((adap->flags & CXGB4_MASTER_PF) && state != DEV_STATE_INIT) {
		struct fw_info *fw_info;
		struct fw_hdr *card_fw;
		const struct firmware *fw;
		const u8 *fw_data = NULL;
		unsigned int fw_size = 0;

		/* This is the firmware whose headers the driver was compiled
		 * against
		 */
		fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip));
		if (fw_info == NULL) {
			dev_err(adap->pdev_dev,
				"unable to get firmware info for chip %d.\n",
				CHELSIO_CHIP_VERSION(adap->params.chip));
			return -EINVAL;
		}

		/* allocate memory to read the header of the firmware on the
		 * card
		 */
		card_fw = kvzalloc(sizeof(*card_fw), GFP_KERNEL);
		if (!card_fw) {
			ret = -ENOMEM;
			goto bye;
		}

		/* Get FW from /lib/firmware/ */
		ret = request_firmware(&fw, fw_info->fw_mod_name,
				       adap->pdev_dev);
		if (ret < 0) {
			dev_err(adap->pdev_dev,
				"unable to load firmware image %s, error %d\n",
				fw_info->fw_mod_name, ret);
		} else {
			fw_data = fw->data;
			fw_size = fw->size;
		}

		/* upgrade FW logic */
		ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw,
				 state, &reset);

		/* Cleaning up */
		release_firmware(fw);
		kvfree(card_fw);

		if (ret < 0)
			goto bye;
	}

	/* If the firmware is initialized already, emit a simple note to that
	 * effect. Otherwise, it's time to try initializing the adapter.
	 */
	if (state == DEV_STATE_INIT) {
		ret = adap_config_hma(adap);
		if (ret)
			dev_err(adap->pdev_dev,
				"HMA configuration failed with error %d\n",
				ret);
		dev_info(adap->pdev_dev, "Coming up as %s: "\
			 "Adapter already initialized\n",
			 adap->flags & CXGB4_MASTER_PF ? "MASTER" : "SLAVE");
	} else {
		dev_info(adap->pdev_dev, "Coming up as MASTER: "\
			 "Initializing adapter\n");

		/* Find out whether we're dealing with a version of the
		 * firmware which has configuration file support.
		 */
		params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
			     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
				      params, val);

		/* If the firmware doesn't support Configuration Files,
		 * return an error.
		 */
		if (ret < 0) {
			dev_err(adap->pdev_dev, "firmware doesn't support "
				"Firmware Configuration Files\n");
			goto bye;
		}

		/* The firmware provides us with a memory buffer where we can
		 * load a Configuration File from the host if we want to
		 * override the Configuration File in flash.
		 */
		ret = adap_init0_config(adap, reset);
		if (ret == -ENOENT) {
			dev_err(adap->pdev_dev, "no Configuration File "
				"present on adapter.\n");
			goto bye;
		}
		if (ret < 0) {
			dev_err(adap->pdev_dev, "could not initialize "
				"adapter, error %d\n", -ret);
			goto bye;
		}
	}

	/* Now that we've successfully configured and initialized the adapter
	 * (or found it already initialized), we can ask the Firmware what
	 * resources it has provisioned for us.
	 */
	ret = t4_get_pfres(adap);
	if (ret) {
		dev_err(adap->pdev_dev,
			"Unable to retrieve resource provisioning information\n");
		goto bye;
	}

	/* Grab VPD parameters.  This should be done after we establish a
	 * connection to the firmware since some of the VPD parameters
	 * (notably the Core Clock frequency) are retrieved via requests to
	 * the firmware.  On the other hand, we need these fairly early on
	 * so we do this right after getting ahold of the firmware.
	 *
	 * We need to do this after initializing the adapter because someone
	 * could have FLASHed a new VPD which won't be read by the firmware
	 * until we do the RESET ...
	 */
	if (!vpd_skip) {
		ret = t4_get_vpd_params(adap, &adap->params.vpd);
		if (ret < 0)
			goto bye;
	}

	/* Find out what ports are available to us.  Note that we need to do
	 * this before calling adap_init0_no_config() since it needs nports
	 * and portvec ...
	 */
	v =
	    FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
	    FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec);
	if (ret < 0)
		goto bye;

	adap->params.nports = hweight32(port_vec);
	adap->params.portvec = port_vec;

	/* Give the SGE code a chance to pull in anything that it needs ...
	 * Note that this must be called after we retrieve our VPD parameters
	 * in order to know how to convert core ticks to seconds, etc.
	 */
	ret = t4_sge_init(adap);
	if (ret < 0)
		goto bye;

	/* Grab the SGE Doorbell Queue Timer values.  If successful, that
	 * indicates that the Firmware and Hardware support this.
	 */
	params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		    FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DBQ_TIMERTICK));
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
			      1, params, val);

	if (!ret) {
		adap->sge.dbqtimer_tick = val[0];
		ret = t4_read_sge_dbqtimers(adap,
					    ARRAY_SIZE(adap->sge.dbqtimer_val),
					    adap->sge.dbqtimer_val);
	}

	if (!ret)
		adap->flags |= CXGB4_SGE_DBQ_TIMER;

	if (is_bypass_device(adap->pdev->device))
		adap->params.bypass = 1;

	/*
	 * Grab some of our basic fundamental operating parameters.
	 */
	params[0] = FW_PARAM_PFVF(EQ_START);
	params[1] = FW_PARAM_PFVF(L2T_START);
	params[2] = FW_PARAM_PFVF(L2T_END);
	params[3] = FW_PARAM_PFVF(FILTER_START);
	params[4] = FW_PARAM_PFVF(FILTER_END);
	params[5] = FW_PARAM_PFVF(IQFLINT_START);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params, val);
	if (ret < 0)
		goto bye;
	adap->sge.egr_start = val[0];
	adap->l2t_start = val[1];
	adap->l2t_end = val[2];
	adap->tids.ftid_base = val[3];
	adap->tids.nftids = val[4] - val[3] + 1;
	adap->sge.ingr_start = val[5];

	if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
		params[0] = FW_PARAM_PFVF(HPFILTER_START);
		params[1] = FW_PARAM_PFVF(HPFILTER_END);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
				      params, val);
		if (ret < 0)
			goto bye;

		adap->tids.hpftid_base = val[0];
		adap->tids.nhpftids = val[1] - val[0] + 1;

		/* Read the raw mps entries. In T6, the last 2 tcam entries
		 * are reserved for raw mac addresses (rawf = 2, one per port).
		 */
		params[0] = FW_PARAM_PFVF(RAWF_START);
		params[1] = FW_PARAM_PFVF(RAWF_END);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
				      params, val);
		if (ret == 0) {
			adap->rawf_start = val[0];
			adap->rawf_cnt = val[1] - val[0] + 1;
		}

		adap->tids.tid_base =
			t4_read_reg(adap, LE_DB_ACTIVE_TABLE_START_INDEX_A);
	}

	/* qids (ingress/egress) returned from firmware can be anywhere
	 * in the range from EQ(IQFLINT)_START to EQ(IQFLINT)_END.
	 * Hence driver needs to allocate memory for this range to
	 * store the queue info. Get the highest IQFLINT/EQ index returned
	 * in FW_EQ_*_CMD.alloc command.
	 */
	params[0] = FW_PARAM_PFVF(EQ_END);
	params[1] = FW_PARAM_PFVF(IQFLINT_END);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
	if (ret < 0)
		goto bye;
	adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1;
	adap->sge.ingr_sz = val[1] - adap->sge.ingr_start + 1;

	adap->sge.egr_map = kcalloc(adap->sge.egr_sz,
				    sizeof(*adap->sge.egr_map), GFP_KERNEL);
	if (!adap->sge.egr_map) {
		ret = -ENOMEM;
		goto bye;
	}

	adap->sge.ingr_map = kcalloc(adap->sge.ingr_sz,
				     sizeof(*adap->sge.ingr_map), GFP_KERNEL);
	if (!adap->sge.ingr_map) {
		ret = -ENOMEM;
		goto bye;
	}

	/* Allocate the memory for the various egress queue bitmaps,
	 * i.e. starving_fl, txq_maperr and blocked_fl.
	 */
	adap->sge.starving_fl =	kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
					sizeof(long), GFP_KERNEL);
	if (!adap->sge.starving_fl) {
		ret = -ENOMEM;
		goto bye;
	}

	adap->sge.txq_maperr = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
				       sizeof(long), GFP_KERNEL);
	if (!adap->sge.txq_maperr) {
		ret = -ENOMEM;
		goto bye;
	}

#ifdef CONFIG_DEBUG_FS
	adap->sge.blocked_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
				       sizeof(long), GFP_KERNEL);
	if (!adap->sge.blocked_fl) {
		ret = -ENOMEM;
		goto bye;
	}
#endif

	params[0] = FW_PARAM_PFVF(CLIP_START);
	params[1] = FW_PARAM_PFVF(CLIP_END);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
	if (ret < 0)
		goto bye;
	adap->clipt_start = val[0];
	adap->clipt_end = val[1];

	/* Get the supported number of traffic classes */
	params[0] = FW_PARAM_DEV(NUM_TM_CLASS);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params, val);
	if (ret < 0) {
		/* We couldn't retrieve the number of Traffic Classes
		 * supported by the hardware/firmware. So we hard
		 * code it here.
		 */
		adap->params.nsched_cls = is_t4(adap->params.chip) ? 15 : 16;
	} else {
		adap->params.nsched_cls = val[0];
	}

	/* query params related to active filter region */
	params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
	params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
	/* If Active filter size is set we enable establishing
	 * offload connection through firmware work request
	 */
	if ((val[0] != val[1]) && (ret >= 0)) {
		adap->flags |= CXGB4_FW_OFLD_CONN;
		adap->tids.aftid_base = val[0];
		adap->tids.aftid_end = val[1];
	}

	/* If we're running on newer firmware, let it know that we're
	 * prepared to deal with encapsulated CPL messages.  Older
	 * firmware won't understand this and we'll just get
	 * unencapsulated messages ...
	 */
	params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
	val[0] = 1;
	(void)t4_set_params(adap, adap->mbox, adap->pf, 0, 1, params, val);

	/*
	 * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
	 * capability.  Earlier versions of the firmware didn't have the
	 * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
	 * permission to use ULPTX MEMWRITE DSGL.
	 */
	if (is_t4(adap->params.chip)) {
		adap->params.ulptx_memwrite_dsgl = false;
	} else {
		params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
				      1, params, val);
		adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
	}

	/* See if FW supports FW_RI_FR_NSMR_TPTE_WR work request */
	params[0] = FW_PARAM_DEV(RI_FR_NSMR_TPTE_WR);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
			      1, params, val);
	adap->params.fr_nsmr_tpte_wr_support = (ret == 0 && val[0] != 0);

	/* See if FW supports FW_FILTER2 work request */
	if (is_t4(adap->params.chip)) {
		adap->params.filter2_wr_support = 0;
	} else {
		params[0] = FW_PARAM_DEV(FILTER2_WR);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
				      1, params, val);
		adap->params.filter2_wr_support = (ret == 0 && val[0] != 0);
	}

	/* Check if FW supports returning vin and smt index.
	 * If this is not supported, driver will interpret
	 * these values from viid.
	 */
	params[0] = FW_PARAM_DEV(OPAQUE_VIID_SMT_EXTN);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
			      1, params, val);
	adap->params.viid_smt_extn_support = (ret == 0 && val[0] != 0);

	/*
	 * Get device capabilities so we can determine what resources we need
	 * to manage.
	 */
	memset(&caps_cmd, 0, sizeof(caps_cmd));
	caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
				     FW_CMD_REQUEST_F | FW_CMD_READ_F);
	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
			 &caps_cmd);
	if (ret < 0)
		goto bye;

	/* hash filter has some mandatory register settings to be tested and for
	 * that it needs to test whether offload is enabled or not, hence
	 * checking and setting it here.
	 */
	if (caps_cmd.ofldcaps)
		adap->params.offload = 1;

	if (caps_cmd.ofldcaps ||
	    (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_HASHFILTER)) ||
	    (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_ETHOFLD))) {
		/* query offload-related parameters */
		params[0] = FW_PARAM_DEV(NTID);
		params[1] = FW_PARAM_PFVF(SERVER_START);
		params[2] = FW_PARAM_PFVF(SERVER_END);
		params[3] = FW_PARAM_PFVF(TDDP_START);
		params[4] = FW_PARAM_PFVF(TDDP_END);
		params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->tids.ntids = val[0];
		adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
		adap->tids.stid_base = val[1];
		adap->tids.nstids = val[2] - val[1] + 1;
		/*
		 * Setup server filter region. Divide the available filter
		 * region into two parts. Regular filters get 1/3rd and server
		 * filters get 2/3rd part. This is only enabled if workaround
		 * path is enabled.
		 * 1. For regular filters.
		 * 2. Server filters: These are special filters which are used
		 * to redirect SYN packets to the offload queue.
		 */
		if (adap->flags & CXGB4_FW_OFLD_CONN && !is_bypass(adap)) {
			adap->tids.sftid_base = adap->tids.ftid_base +
					DIV_ROUND_UP(adap->tids.nftids, 3);
			adap->tids.nsftids = adap->tids.nftids -
					 DIV_ROUND_UP(adap->tids.nftids, 3);
			adap->tids.nftids = adap->tids.sftid_base -
						adap->tids.ftid_base;
		}
		adap->vres.ddp.start = val[3];
		adap->vres.ddp.size = val[4] - val[3] + 1;
		adap->params.ofldq_wr_cred = val[5];

		if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_HASHFILTER)) {
			init_hash_filter(adap);
		} else {
			adap->num_ofld_uld += 1;
		}

		if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_ETHOFLD)) {
			params[0] = FW_PARAM_PFVF(ETHOFLD_START);
			params[1] = FW_PARAM_PFVF(ETHOFLD_END);
			ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
					      params, val);
			if (!ret) {
				adap->tids.eotid_base = val[0];
				adap->tids.neotids = min_t(u32, MAX_ATIDS,
							   val[1] - val[0] + 1);
				adap->params.ethofld = 1;
			}
		}
	}
	if (caps_cmd.rdmacaps) {
		params[0] = FW_PARAM_PFVF(STAG_START);
		params[1] = FW_PARAM_PFVF(STAG_END);
		params[2] = FW_PARAM_PFVF(RQ_START);
		params[3] = FW_PARAM_PFVF(RQ_END);
		params[4] = FW_PARAM_PFVF(PBL_START);
		params[5] = FW_PARAM_PFVF(PBL_END);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->vres.stag.start = val[0];
		adap->vres.stag.size = val[1] - val[0] + 1;
		adap->vres.rq.start = val[2];
		adap->vres.rq.size = val[3] - val[2] + 1;
		adap->vres.pbl.start = val[4];
		adap->vres.pbl.size = val[5] - val[4] + 1;

		params[0] = FW_PARAM_PFVF(SRQ_START);
		params[1] = FW_PARAM_PFVF(SRQ_END);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
				      params, val);
		if (!ret) {
			adap->vres.srq.start = val[0];
			adap->vres.srq.size = val[1] - val[0] + 1;
		}
		if (adap->vres.srq.size) {
			adap->srq = t4_init_srq(adap->vres.srq.size);
			if (!adap->srq)
				dev_warn(&adap->pdev->dev, "could not allocate SRQ, continuing\n");
		}

5317 5318 5319 5320
		params[0] = FW_PARAM_PFVF(SQRQ_START);
		params[1] = FW_PARAM_PFVF(SQRQ_END);
		params[2] = FW_PARAM_PFVF(CQ_START);
		params[3] = FW_PARAM_PFVF(CQ_END);
5321 5322
		params[4] = FW_PARAM_PFVF(OCQ_START);
		params[5] = FW_PARAM_PFVF(OCQ_END);
5323
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params,
5324
				      val);
5325 5326 5327 5328 5329 5330
		if (ret < 0)
			goto bye;
		adap->vres.qp.start = val[0];
		adap->vres.qp.size = val[1] - val[0] + 1;
		adap->vres.cq.start = val[2];
		adap->vres.cq.size = val[3] - val[2] + 1;
5331 5332
		adap->vres.ocq.start = val[4];
		adap->vres.ocq.size = val[5] - val[4] + 1;
5333 5334 5335

		params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
		params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
5336
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params,
5337
				      val);
5338 5339 5340 5341 5342 5343 5344 5345 5346 5347 5348 5349
		if (ret < 0) {
			adap->params.max_ordird_qp = 8;
			adap->params.max_ird_adapter = 32 * adap->tids.ntids;
			ret = 0;
		} else {
			adap->params.max_ordird_qp = val[0];
			adap->params.max_ird_adapter = val[1];
		}
		dev_info(adap->pdev_dev,
			 "max_ordird_qp %d max_ird_adapter %d\n",
			 adap->params.max_ordird_qp,
			 adap->params.max_ird_adapter);
5350 5351 5352 5353 5354 5355

		/* Enable write_with_immediate if FW supports it */
		params[0] = FW_PARAM_DEV(RDMA_WRITE_WITH_IMM);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params,
				      val);
		adap->params.write_w_imm_support = (ret == 0 && val[0] != 0);
5356 5357 5358 5359 5360 5361

		/* Enable write_cmpl if FW supports it */
		params[0] = FW_PARAM_DEV(RI_WRITE_CMPL_WR);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params,
				      val);
		adap->params.write_cmpl_support = (ret == 0 && val[0] != 0);
5362
		adap->num_ofld_uld += 2;
5363
	}
5364
	if (caps_cmd.iscsicaps) {
5365 5366
		params[0] = FW_PARAM_PFVF(ISCSI_START);
		params[1] = FW_PARAM_PFVF(ISCSI_END);
5367
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
5368
				      params, val);
5369 5370 5371 5372
		if (ret < 0)
			goto bye;
		adap->vres.iscsi.start = val[0];
		adap->vres.iscsi.size = val[1] - val[0] + 1;
5373 5374 5375 5376 5377 5378 5379 5380 5381 5382 5383 5384 5385 5386 5387 5388
		if (is_t6(adap->params.chip)) {
			params[0] = FW_PARAM_PFVF(PPOD_EDRAM_START);
			params[1] = FW_PARAM_PFVF(PPOD_EDRAM_END);
			ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
					      params, val);
			if (!ret) {
				adap->vres.ppod_edram.start = val[0];
				adap->vres.ppod_edram.size =
					val[1] - val[0] + 1;

				dev_info(adap->pdev_dev,
					 "ppod edram start 0x%x end 0x%x size 0x%x\n",
					 val[0], val[1],
					 adap->vres.ppod_edram.size);
			}
		}
5389 5390
		/* LIO target and cxgb4i initiaitor */
		adap->num_ofld_uld += 2;
5391
	}
5392
	if (caps_cmd.cryptocaps) {
5393 5394 5395 5396 5397 5398 5399 5400 5401 5402 5403 5404 5405 5406 5407 5408 5409 5410 5411 5412
		if (ntohs(caps_cmd.cryptocaps) &
		    FW_CAPS_CONFIG_CRYPTO_LOOKASIDE) {
			params[0] = FW_PARAM_PFVF(NCRYPTO_LOOKASIDE);
			ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
					      2, params, val);
			if (ret < 0) {
				if (ret != -EINVAL)
					goto bye;
			} else {
				adap->vres.ncrypto_fc = val[0];
			}
			adap->num_ofld_uld += 1;
		}
		if (ntohs(caps_cmd.cryptocaps) &
		    FW_CAPS_CONFIG_TLS_INLINE) {
			params[0] = FW_PARAM_PFVF(TLS_START);
			params[1] = FW_PARAM_PFVF(TLS_END);
			ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
					      2, params, val);
			if (ret < 0)
H
Harsh Jain 已提交
5413
				goto bye;
5414 5415 5416
			adap->vres.key.start = val[0];
			adap->vres.key.size = val[1] - val[0] + 1;
			adap->num_uld += 1;
H
Harsh Jain 已提交
5417
		}
5418
		adap->params.crypto = ntohs(caps_cmd.cryptocaps);
5419
	}
5420

5421 5422 5423 5424
	/* The MTU/MSS Table is initialized by now, so load their values.  If
	 * we're initializing the adapter, then we'll make any modifications
	 * we want to the MTU/MSS Table and also initialize the congestion
	 * parameters.
5425
	 */
5426
	t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
5427 5428 5429 5430 5431 5432 5433 5434 5435 5436 5437 5438 5439 5440 5441 5442 5443 5444 5445 5446 5447 5448 5449 5450 5451
	if (state != DEV_STATE_INIT) {
		int i;

		/* The default MTU Table contains values 1492 and 1500.
		 * However, for TCP, it's better to have two values which are
		 * a multiple of 8 +/- 4 bytes apart near this popular MTU.
		 * This allows us to have a TCP Data Payload which is a
		 * multiple of 8 regardless of what combination of TCP Options
		 * are in use (always a multiple of 4 bytes) which is
		 * important for performance reasons.  For instance, if no
		 * options are in use, then we have a 20-byte IP header and a
		 * 20-byte TCP header.  In this case, a 1500-byte MSS would
		 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
		 * which is not a multiple of 8.  So using an MSS of 1488 in
		 * this case results in a TCP Data Payload of 1448 bytes which
		 * is a multiple of 8.  On the other hand, if 12-byte TCP Time
		 * Stamps have been negotiated, then an MTU of 1500 bytes
		 * results in a TCP Data Payload of 1448 bytes which, as
		 * above, is a multiple of 8 bytes ...
		 */
		for (i = 0; i < NMTUS; i++)
			if (adap->params.mtus[i] == 1492) {
				adap->params.mtus[i] = 1488;
				break;
			}
5452

5453 5454 5455
		t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
			     adap->params.b_wnd);
	}
5456
	t4_init_sge_params(adap);
5457
	adap->flags |= CXGB4_FW_OK;
5458
	t4_init_tp_params(adap, true);
5459 5460 5461
	return 0;

	/*
5462 5463 5464
	 * Something bad happened.  If a command timed out or failed with EIO
	 * FW does not operate within its spec or something catastrophic
	 * happened to HW/FW, stop issuing commands.
5465
	 */
5466
bye:
A
Arjun Vynipadath 已提交
5467
	adap_free_hma_mem(adap);
5468 5469 5470 5471
	kfree(adap->sge.egr_map);
	kfree(adap->sge.ingr_map);
	kfree(adap->sge.starving_fl);
	kfree(adap->sge.txq_maperr);
5472 5473 5474
#ifdef CONFIG_DEBUG_FS
	kfree(adap->sge.blocked_fl);
#endif
5475 5476
	if (ret != -ETIMEDOUT && ret != -EIO)
		t4_fw_bye(adap, adap->mbox);
5477 5478 5479
	return ret;
}

D
Dimitris Michailidis 已提交
5480 5481 5482 5483 5484 5485 5486 5487 5488 5489 5490 5491
/* EEH callbacks */

static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
					 pci_channel_state_t state)
{
	int i;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap)
		goto out;

	rtnl_lock();
5492
	adap->flags &= ~CXGB4_FW_OK;
D
Dimitris Michailidis 已提交
5493
	notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
5494
	spin_lock(&adap->stats_lock);
D
Dimitris Michailidis 已提交
5495 5496
	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
5497 5498 5499 5500
		if (dev) {
			netif_device_detach(dev);
			netif_carrier_off(dev);
		}
D
Dimitris Michailidis 已提交
5501
	}
5502
	spin_unlock(&adap->stats_lock);
5503
	disable_interrupts(adap);
5504
	if (adap->flags & CXGB4_FULL_INIT_DONE)
D
Dimitris Michailidis 已提交
5505 5506
		cxgb_down(adap);
	rtnl_unlock();
5507
	if ((adap->flags & CXGB4_DEV_ENABLED)) {
5508
		pci_disable_device(pdev);
5509
		adap->flags &= ~CXGB4_DEV_ENABLED;
5510
	}
D
Dimitris Michailidis 已提交
5511 5512 5513 5514 5515 5516 5517 5518 5519 5520 5521 5522 5523 5524 5525 5526
out:	return state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
{
	int i, ret;
	struct fw_caps_config_cmd c;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap) {
		pci_restore_state(pdev);
		pci_save_state(pdev);
		return PCI_ERS_RESULT_RECOVERED;
	}

5527
	if (!(adap->flags & CXGB4_DEV_ENABLED)) {
5528 5529 5530 5531 5532
		if (pci_enable_device(pdev)) {
			dev_err(&pdev->dev, "Cannot reenable PCI "
					    "device after reset\n");
			return PCI_ERS_RESULT_DISCONNECT;
		}
5533
		adap->flags |= CXGB4_DEV_ENABLED;
D
Dimitris Michailidis 已提交
5534 5535 5536 5537 5538 5539
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

5540
	if (t4_wait_dev_ready(adap->regs) < 0)
D
Dimitris Michailidis 已提交
5541
		return PCI_ERS_RESULT_DISCONNECT;
5542
	if (t4_fw_hello(adap, adap->mbox, adap->pf, MASTER_MUST, NULL) < 0)
D
Dimitris Michailidis 已提交
5543
		return PCI_ERS_RESULT_DISCONNECT;
5544
	adap->flags |= CXGB4_FW_OK;
D
Dimitris Michailidis 已提交
5545 5546 5547 5548
	if (adap_init1(adap, &c))
		return PCI_ERS_RESULT_DISCONNECT;

	for_each_port(adap, i) {
5549 5550
		struct port_info *pi = adap2pinfo(adap, i);
		u8 vivld = 0, vin = 0;
D
Dimitris Michailidis 已提交
5551

5552 5553
		ret = t4_alloc_vi(adap, adap->mbox, pi->tx_chan, adap->pf, 0, 1,
				  NULL, NULL, &vivld, &vin);
D
Dimitris Michailidis 已提交
5554 5555
		if (ret < 0)
			return PCI_ERS_RESULT_DISCONNECT;
5556 5557 5558 5559 5560 5561 5562 5563 5564 5565 5566 5567 5568
		pi->viid = ret;
		pi->xact_addr_filt = -1;
		/* If fw supports returning the VIN as part of FW_VI_CMD,
		 * save the returned values.
		 */
		if (adap->params.viid_smt_extn_support) {
			pi->vivld = vivld;
			pi->vin = vin;
		} else {
			/* Retrieve the values from VIID */
			pi->vivld = FW_VIID_VIVLD_G(pi->viid);
			pi->vin = FW_VIID_VIN_G(pi->viid);
		}
D
Dimitris Michailidis 已提交
5569 5570 5571 5572
	}

	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
		     adap->params.b_wnd);
5573
	setup_memwin(adap);
D
Dimitris Michailidis 已提交
5574 5575 5576 5577 5578 5579 5580 5581 5582 5583 5584 5585 5586 5587 5588 5589
	if (cxgb_up(adap))
		return PCI_ERS_RESULT_DISCONNECT;
	return PCI_ERS_RESULT_RECOVERED;
}

static void eeh_resume(struct pci_dev *pdev)
{
	int i;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap)
		return;

	rtnl_lock();
	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
5590 5591 5592 5593 5594 5595
		if (dev) {
			if (netif_running(dev)) {
				link_start(dev);
				cxgb_set_rxmode(dev);
			}
			netif_device_attach(dev);
D
Dimitris Michailidis 已提交
5596 5597 5598 5599 5600
		}
	}
	rtnl_unlock();
}

V
Vishal Kulkarni 已提交
5601 5602 5603 5604 5605 5606 5607 5608 5609 5610 5611 5612 5613 5614 5615 5616 5617 5618 5619 5620 5621 5622 5623 5624 5625 5626 5627 5628 5629 5630 5631 5632 5633 5634 5635 5636 5637 5638 5639 5640 5641 5642 5643 5644 5645 5646 5647 5648 5649 5650 5651 5652 5653 5654 5655 5656 5657 5658 5659 5660 5661 5662 5663 5664 5665 5666 5667 5668 5669 5670 5671 5672 5673 5674 5675 5676 5677 5678 5679 5680 5681
static void eeh_reset_prepare(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);
	int i;

	if (adapter->pf != 4)
		return;

	adapter->flags &= ~CXGB4_FW_OK;

	notify_ulds(adapter, CXGB4_STATE_DOWN);

	for_each_port(adapter, i)
		if (adapter->port[i]->reg_state == NETREG_REGISTERED)
			cxgb_close(adapter->port[i]);

	disable_interrupts(adapter);
	cxgb4_free_mps_ref_entries(adapter);

	adap_free_hma_mem(adapter);

	if (adapter->flags & CXGB4_FULL_INIT_DONE)
		cxgb_down(adapter);
}

static void eeh_reset_done(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);
	int err, i;

	if (adapter->pf != 4)
		return;

	err = t4_wait_dev_ready(adapter->regs);
	if (err < 0) {
		dev_err(adapter->pdev_dev,
			"Device not ready, err %d", err);
		return;
	}

	setup_memwin(adapter);

	err = adap_init0(adapter, 1);
	if (err) {
		dev_err(adapter->pdev_dev,
			"Adapter init failed, err %d", err);
		return;
	}

	setup_memwin_rdma(adapter);

	if (adapter->flags & CXGB4_FW_OK) {
		err = t4_port_init(adapter, adapter->pf, adapter->pf, 0);
		if (err) {
			dev_err(adapter->pdev_dev,
				"Port init failed, err %d", err);
			return;
		}
	}

	err = cfg_queues(adapter);
	if (err) {
		dev_err(adapter->pdev_dev,
			"Config queues failed, err %d", err);
		return;
	}

	cxgb4_init_mps_ref_entries(adapter);

	err = setup_fw_sge_queues(adapter);
	if (err) {
		dev_err(adapter->pdev_dev,
			"FW sge queue allocation failed, err %d", err);
		return;
	}

	for_each_port(adapter, i)
		if (adapter->port[i]->reg_state == NETREG_REGISTERED)
			cxgb_open(adapter->port[i]);
}

5682
static const struct pci_error_handlers cxgb4_eeh = {
D
Dimitris Michailidis 已提交
5683 5684 5685
	.error_detected = eeh_err_detected,
	.slot_reset     = eeh_slot_reset,
	.resume         = eeh_resume,
V
Vishal Kulkarni 已提交
5686 5687
	.reset_prepare  = eeh_reset_prepare,
	.reset_done     = eeh_reset_done,
D
Dimitris Michailidis 已提交
5688 5689
};

5690 5691 5692
/* Return true if the Link Configuration supports "High Speeds" (those greater
 * than 1Gb/s).
 */
5693
static inline bool is_x_10g_port(const struct link_config *lc)
5694
{
5695 5696
	unsigned int speeds, high_speeds;

5697 5698 5699
	speeds = FW_PORT_CAP32_SPEED_V(FW_PORT_CAP32_SPEED_G(lc->pcaps));
	high_speeds = speeds &
			~(FW_PORT_CAP32_SPEED_100M | FW_PORT_CAP32_SPEED_1G);
5700 5701

	return high_speeds != 0;
5702 5703
}

5704
/* Perform default configuration of DMA queues depending on the number and type
5705 5706 5707
 * of ports we found and the number of available CPUs.  Most settings can be
 * modified by the admin prior to actual use.
 */
5708
static int cfg_queues(struct adapter *adap)
5709
{
5710
	u32 avail_qsets, avail_eth_qsets, avail_uld_qsets;
5711
	u32 ncpus = num_online_cpus();
5712
	u32 niqflint, neq, num_ulds;
5713
	struct sge *s = &adap->sge;
5714
	u32 i, n10g = 0, qidx = 0;
5715
	u32 q10g = 0, q1g;
5716

5717
	/* Reduce memory usage in kdump environment, disable all offload. */
5718
	if (is_kdump_kernel() || (is_uld(adap) && t4_uld_mem_alloc(adap))) {
5719
		adap->params.offload = 0;
5720
		adap->params.crypto = 0;
5721
		adap->params.ethofld = 0;
5722 5723
	}

5724 5725 5726 5727 5728 5729 5730 5731 5732 5733 5734 5735 5736
	/* Calculate the number of Ethernet Queue Sets available based on
	 * resources provisioned for us.  We always have an Asynchronous
	 * Firmware Event Ingress Queue.  If we're operating in MSI or Legacy
	 * IRQ Pin Interrupt mode, then we'll also have a Forwarded Interrupt
	 * Ingress Queue.  Meanwhile, we need two Egress Queues for each
	 * Queue Set: one for the Free List and one for the Ethernet TX Queue.
	 *
	 * Note that we should also take into account all of the various
	 * Offload Queues.  But, in any situation where we're operating in
	 * a Resource Constrained Provisioning environment, doing any Offload
	 * at all is problematic ...
	 */
	niqflint = adap->params.pfres.niqflint - 1;
5737
	if (!(adap->flags & CXGB4_USING_MSIX))
5738 5739
		niqflint--;
	neq = adap->params.pfres.neq / 2;
5740
	avail_qsets = min(niqflint, neq);
5741

5742
	if (avail_qsets < adap->params.nports) {
5743
		dev_err(adap->pdev_dev, "avail_eth_qsets=%d < nports=%d\n",
5744
			avail_qsets, adap->params.nports);
5745 5746 5747 5748 5749 5750 5751
		return -ENOMEM;
	}

	/* Count the number of 10Gb/s or better ports */
	for_each_port(adap, i)
		n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);

5752
	avail_eth_qsets = min_t(u32, avail_qsets, MAX_ETH_QSETS);
5753 5754 5755 5756 5757 5758 5759

	/* We default to 1 queue per non-10G port and up to # of cores queues
	 * per 10G port.
	 */
	if (n10g)
		q10g = (avail_eth_qsets - (adap->params.nports - n10g)) / n10g;

5760 5761 5762 5763 5764
#ifdef CONFIG_CHELSIO_T4_DCB
	/* For Data Center Bridging support we need to be able to support up
	 * to 8 Traffic Priorities; each of which will be assigned to its
	 * own TX Queue in order to prevent Head-Of-Line Blocking.
	 */
5765
	q1g = 8;
5766 5767 5768 5769
	if (adap->params.nports * 8 > avail_eth_qsets) {
		dev_err(adap->pdev_dev, "DCB avail_eth_qsets=%d < %d!\n",
			avail_eth_qsets, adap->params.nports * 8);
		return -ENOMEM;
5770
	}
5771

5772 5773 5774 5775
	if (adap->params.nports * ncpus < avail_eth_qsets)
		q10g = max(8U, ncpus);
	else
		q10g = max(8U, q10g);
5776

5777 5778
	while ((q10g * n10g) >
	       (avail_eth_qsets - (adap->params.nports - n10g) * q1g))
5779
		q10g--;
5780

5781 5782 5783 5784 5785
#else /* !CONFIG_CHELSIO_T4_DCB */
	q1g = 1;
	q10g = min(q10g, ncpus);
#endif /* !CONFIG_CHELSIO_T4_DCB */
	if (is_kdump_kernel()) {
5786
		q10g = 1;
5787 5788
		q1g = 1;
	}
5789

5790 5791 5792 5793
	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = qidx;
5794
		pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : q1g;
5795 5796 5797 5798 5799
		qidx += pi->nqsets;
	}

	s->ethqsets = qidx;
	s->max_ethqsets = qidx;   /* MSI-X may lower it later */
5800
	avail_qsets -= qidx;
5801

5802
	if (is_uld(adap)) {
5803
		/* For offload we use 1 queue/channel if all ports are up to 1G,
5804 5805 5806
		 * otherwise we divide all available queues amongst the channels
		 * capped by the number of available cores.
		 */
5807
		num_ulds = adap->num_uld + adap->num_ofld_uld;
5808
		i = min_t(u32, MAX_OFLD_QSETS, ncpus);
5809 5810 5811 5812 5813 5814
		avail_uld_qsets = roundup(i, adap->params.nports);
		if (avail_qsets < num_ulds * adap->params.nports) {
			adap->params.offload = 0;
			adap->params.crypto = 0;
			s->ofldqsets = 0;
		} else if (avail_qsets < num_ulds * avail_uld_qsets || !n10g) {
5815
			s->ofldqsets = adap->params.nports;
5816 5817
		} else {
			s->ofldqsets = avail_uld_qsets;
5818
		}
5819 5820

		avail_qsets -= num_ulds * s->ofldqsets;
5821 5822
	}

5823 5824 5825 5826 5827 5828 5829 5830 5831 5832 5833 5834 5835
	/* ETHOFLD Queues used for QoS offload should follow same
	 * allocation scheme as normal Ethernet Queues.
	 */
	if (is_ethofld(adap)) {
		if (avail_qsets < s->max_ethqsets) {
			adap->params.ethofld = 0;
			s->eoqsets = 0;
		} else {
			s->eoqsets = s->max_ethqsets;
		}
		avail_qsets -= s->eoqsets;
	}

5836 5837 5838 5839 5840 5841 5842 5843 5844 5845 5846 5847 5848
	/* Mirror queues must follow same scheme as normal Ethernet
	 * Queues, when there are enough queues available. Otherwise,
	 * allocate at least 1 queue per port. If even 1 queue is not
	 * available, then disable mirror queues support.
	 */
	if (avail_qsets >= s->max_ethqsets)
		s->mirrorqsets = s->max_ethqsets;
	else if (avail_qsets >= adap->params.nports)
		s->mirrorqsets = adap->params.nports;
	else
		s->mirrorqsets = 0;
	avail_qsets -= s->mirrorqsets;

5849 5850 5851
	for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
		struct sge_eth_rxq *r = &s->ethrxq[i];

5852
		init_rspq(adap, &r->rspq, 5, 10, 1024, 64);
5853 5854 5855 5856 5857 5858 5859 5860 5861
		r->fl.size = 72;
	}

	for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
		s->ethtxq[i].q.size = 1024;

	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
		s->ctrlq[i].q.size = 512;

5862 5863 5864
	if (!is_t4(adap->params.chip))
		s->ptptxq.q.size = 8;

5865
	init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
5866
	init_rspq(adap, &s->intrq, 0, 1, 512, 64);
5867 5868

	return 0;
5869 5870 5871 5872 5873 5874
}

/*
 * Reduce the number of Ethernet queues across all ports to at most n.
 * n provides at least one queue per port.
 */
B
Bill Pemberton 已提交
5875
static void reduce_ethqs(struct adapter *adap, int n)
5876 5877 5878 5879 5880 5881 5882 5883 5884 5885 5886 5887 5888 5889 5890 5891 5892 5893 5894 5895 5896 5897 5898
{
	int i;
	struct port_info *pi;

	while (n < adap->sge.ethqsets)
		for_each_port(adap, i) {
			pi = adap2pinfo(adap, i);
			if (pi->nqsets > 1) {
				pi->nqsets--;
				adap->sge.ethqsets--;
				if (adap->sge.ethqsets <= n)
					break;
			}
		}

	n = 0;
	for_each_port(adap, i) {
		pi = adap2pinfo(adap, i);
		pi->first_qset = n;
		n += pi->nqsets;
	}
}

5899
static int alloc_msix_info(struct adapter *adap, u32 num_vec)
5900
{
5901
	struct msix_info *msix_info;
5902

5903
	msix_info = kcalloc(num_vec, sizeof(*msix_info), GFP_KERNEL);
5904 5905 5906
	if (!msix_info)
		return -ENOMEM;

5907 5908 5909
	adap->msix_bmap.msix_bmap = kcalloc(BITS_TO_LONGS(num_vec),
					    sizeof(long), GFP_KERNEL);
	if (!adap->msix_bmap.msix_bmap) {
5910 5911 5912
		kfree(msix_info);
		return -ENOMEM;
	}
5913 5914 5915 5916 5917

	spin_lock_init(&adap->msix_bmap.lock);
	adap->msix_bmap.mapsize = num_vec;

	adap->msix_info = msix_info;
5918 5919 5920 5921 5922
	return 0;
}

static void free_msix_info(struct adapter *adap)
{
5923 5924 5925 5926 5927 5928 5929 5930 5931
	kfree(adap->msix_bmap.msix_bmap);
	kfree(adap->msix_info);
}

int cxgb4_get_msix_idx_from_bmap(struct adapter *adap)
{
	struct msix_bmap *bmap = &adap->msix_bmap;
	unsigned int msix_idx;
	unsigned long flags;
5932

5933 5934 5935 5936 5937 5938 5939 5940 5941 5942 5943 5944 5945 5946 5947 5948 5949 5950 5951 5952 5953 5954
	spin_lock_irqsave(&bmap->lock, flags);
	msix_idx = find_first_zero_bit(bmap->msix_bmap, bmap->mapsize);
	if (msix_idx < bmap->mapsize) {
		__set_bit(msix_idx, bmap->msix_bmap);
	} else {
		spin_unlock_irqrestore(&bmap->lock, flags);
		return -ENOSPC;
	}

	spin_unlock_irqrestore(&bmap->lock, flags);
	return msix_idx;
}

void cxgb4_free_msix_idx_in_bmap(struct adapter *adap,
				 unsigned int msix_idx)
{
	struct msix_bmap *bmap = &adap->msix_bmap;
	unsigned long flags;

	spin_lock_irqsave(&bmap->lock, flags);
	__clear_bit(msix_idx, bmap->msix_bmap);
	spin_unlock_irqrestore(&bmap->lock, flags);
5955 5956
}

5957 5958 5959
/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
#define EXTRA_VECS 2

B
Bill Pemberton 已提交
5960
static int enable_msix(struct adapter *adap)
5961
{
5962 5963
	u32 eth_need, uld_need = 0, ethofld_need = 0, mirror_need = 0;
	u32 ethqsets = 0, ofldqsets = 0, eoqsets = 0, mirrorqsets = 0;
5964 5965
	u8 num_uld = 0, nchan = adap->params.nports;
	u32 i, want, need, num_vec;
5966
	struct sge *s = &adap->sge;
5967
	struct msix_entry *entries;
5968 5969
	struct port_info *pi;
	int allocated, ret;
5970

5971
	want = s->max_ethqsets;
5972 5973 5974 5975
#ifdef CONFIG_CHELSIO_T4_DCB
	/* For Data Center Bridging we need 8 Ethernet TX Priority Queues for
	 * each port.
	 */
5976
	need = 8 * nchan;
5977
#else
5978
	need = nchan;
5979
#endif
5980 5981 5982 5983 5984 5985 5986 5987
	eth_need = need;
	if (is_uld(adap)) {
		num_uld = adap->num_ofld_uld + adap->num_uld;
		want += num_uld * s->ofldqsets;
		uld_need = num_uld * nchan;
		need += uld_need;
	}

5988 5989 5990 5991 5992 5993
	if (is_ethofld(adap)) {
		want += s->eoqsets;
		ethofld_need = eth_need;
		need += ethofld_need;
	}

5994 5995 5996 5997 5998 5999
	if (s->mirrorqsets) {
		want += s->mirrorqsets;
		mirror_need = nchan;
		need += mirror_need;
	}

6000 6001 6002 6003 6004 6005 6006 6007 6008 6009
	want += EXTRA_VECS;
	need += EXTRA_VECS;

	entries = kmalloc_array(want, sizeof(*entries), GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	for (i = 0; i < want; i++)
		entries[i].entry = i;

6010 6011
	allocated = pci_enable_msix_range(adap->pdev, entries, need, want);
	if (allocated < 0) {
6012 6013 6014 6015 6016 6017 6018 6019 6020 6021 6022 6023 6024 6025 6026 6027 6028 6029 6030 6031
		/* Disable offload and attempt to get vectors for NIC
		 * only mode.
		 */
		want = s->max_ethqsets + EXTRA_VECS;
		need = eth_need + EXTRA_VECS;
		allocated = pci_enable_msix_range(adap->pdev, entries,
						  need, want);
		if (allocated < 0) {
			dev_info(adap->pdev_dev,
				 "Disabling MSI-X due to insufficient MSI-X vectors\n");
			ret = allocated;
			goto out_free;
		}

		dev_info(adap->pdev_dev,
			 "Disabling offload due to insufficient MSI-X vectors\n");
		adap->params.offload = 0;
		adap->params.crypto = 0;
		adap->params.ethofld = 0;
		s->ofldqsets = 0;
6032
		s->eoqsets = 0;
6033
		s->mirrorqsets = 0;
6034
		uld_need = 0;
6035
		ethofld_need = 0;
6036
		mirror_need = 0;
6037
	}
6038

6039 6040 6041 6042 6043 6044 6045 6046 6047
	num_vec = allocated;
	if (num_vec < want) {
		/* Distribute available vectors to the various queue groups.
		 * Every group gets its minimum requirement and NIC gets top
		 * priority for leftovers.
		 */
		ethqsets = eth_need;
		if (is_uld(adap))
			ofldqsets = nchan;
6048 6049
		if (is_ethofld(adap))
			eoqsets = ethofld_need;
6050 6051
		if (s->mirrorqsets)
			mirrorqsets = mirror_need;
6052 6053 6054

		num_vec -= need;
		while (num_vec) {
6055
			if (num_vec < eth_need + ethofld_need ||
6056 6057 6058 6059 6060 6061 6062 6063 6064 6065
			    ethqsets > s->max_ethqsets)
				break;

			for_each_port(adap, i) {
				pi = adap2pinfo(adap, i);
				if (pi->nqsets < 2)
					continue;

				ethqsets++;
				num_vec--;
6066 6067 6068 6069
				if (ethofld_need) {
					eoqsets++;
					num_vec--;
				}
6070 6071 6072 6073 6074 6075 6076 6077 6078 6079 6080 6081 6082
			}
		}

		if (is_uld(adap)) {
			while (num_vec) {
				if (num_vec < uld_need ||
				    ofldqsets > s->ofldqsets)
					break;

				ofldqsets++;
				num_vec -= uld_need;
			}
		}
6083 6084 6085 6086 6087 6088 6089 6090 6091 6092 6093

		if (s->mirrorqsets) {
			while (num_vec) {
				if (num_vec < mirror_need ||
				    mirrorqsets > s->mirrorqsets)
					break;

				mirrorqsets++;
				num_vec -= mirror_need;
			}
		}
6094 6095 6096 6097
	} else {
		ethqsets = s->max_ethqsets;
		if (is_uld(adap))
			ofldqsets = s->ofldqsets;
6098 6099
		if (is_ethofld(adap))
			eoqsets = s->eoqsets;
6100 6101
		if (s->mirrorqsets)
			mirrorqsets = s->mirrorqsets;
6102 6103 6104 6105 6106
	}

	if (ethqsets < s->max_ethqsets) {
		s->max_ethqsets = ethqsets;
		reduce_ethqs(adap, ethqsets);
6107
	}
6108

6109
	if (is_uld(adap)) {
6110 6111
		s->ofldqsets = ofldqsets;
		s->nqs_per_uld = s->ofldqsets;
6112 6113
	}

6114 6115 6116
	if (is_ethofld(adap))
		s->eoqsets = eoqsets;

6117 6118 6119 6120 6121 6122 6123 6124 6125
	if (s->mirrorqsets) {
		s->mirrorqsets = mirrorqsets;
		for_each_port(adap, i) {
			pi = adap2pinfo(adap, i);
			pi->nmirrorqsets = s->mirrorqsets / nchan;
			mutex_init(&pi->vi_mirror_mutex);
		}
	}

6126 6127 6128 6129 6130 6131
	/* map for msix */
	ret = alloc_msix_info(adap, allocated);
	if (ret)
		goto out_disable_msix;

	for (i = 0; i < allocated; i++) {
6132
		adap->msix_info[i].vec = entries[i].vector;
6133
		adap->msix_info[i].idx = i;
6134
	}
6135 6136

	dev_info(adap->pdev_dev,
6137 6138 6139
		 "%d MSI-X vectors allocated, nic %d eoqsets %d per uld %d mirrorqsets %d\n",
		 allocated, s->max_ethqsets, s->eoqsets, s->nqs_per_uld,
		 s->mirrorqsets);
6140

6141
	kfree(entries);
6142
	return 0;
6143 6144 6145 6146 6147 6148 6149

out_disable_msix:
	pci_disable_msix(adap->pdev);

out_free:
	kfree(entries);
	return ret;
6150 6151 6152 6153
}

#undef EXTRA_VECS

B
Bill Pemberton 已提交
6154
static int init_rss(struct adapter *adap)
6155
{
6156 6157 6158 6159 6160 6161
	unsigned int i;
	int err;

	err = t4_init_rss_mode(adap, adap->mbox);
	if (err)
		return err;
6162 6163 6164 6165 6166 6167 6168 6169 6170 6171 6172

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
		if (!pi->rss)
			return -ENOMEM;
	}
	return 0;
}

6173 6174 6175
/* Dump basic information about the adapter */
static void print_adapter_info(struct adapter *adapter)
{
6176 6177
	/* Hardware/Firmware/etc. Version/Revision IDs */
	t4_dump_version_info(adapter);
6178 6179 6180 6181

	/* Software/Hardware configuration */
	dev_info(adapter->pdev_dev, "Configuration: %sNIC %s, %s capable\n",
		 is_offload(adapter) ? "R" : "",
6182 6183
		 ((adapter->flags & CXGB4_USING_MSIX) ? "MSI-X" :
		  (adapter->flags & CXGB4_USING_MSI) ? "MSI" : ""),
6184 6185 6186
		 is_offload(adapter) ? "Offload" : "non-Offload");
}

B
Bill Pemberton 已提交
6187
static void print_port_info(const struct net_device *dev)
6188 6189
{
	char buf[80];
6190 6191 6192
	char *bufp = buf;
	const struct port_info *pi = netdev_priv(dev);
	const struct adapter *adap = pi->adapter;
6193

6194
	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100M)
6195
		bufp += sprintf(bufp, "100M/");
6196
	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_1G)
6197
		bufp += sprintf(bufp, "1G/");
6198
	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_10G)
6199
		bufp += sprintf(bufp, "10G/");
6200
	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_25G)
6201
		bufp += sprintf(bufp, "25G/");
6202
	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_40G)
6203
		bufp += sprintf(bufp, "40G/");
6204 6205 6206
	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_50G)
		bufp += sprintf(bufp, "50G/");
	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100G)
6207
		bufp += sprintf(bufp, "100G/");
6208 6209 6210 6211
	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_200G)
		bufp += sprintf(bufp, "200G/");
	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_400G)
		bufp += sprintf(bufp, "400G/");
6212 6213
	if (bufp != buf)
		--bufp;
6214
	sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));
6215

6216 6217
	netdev_info(dev, "%s: Chelsio %s (%s) %s\n",
		    dev->name, adap->params.vpd.id, adap->name, buf);
6218 6219
}

6220 6221 6222 6223 6224 6225 6226 6227 6228 6229 6230
/*
 * Free the following resources:
 * - memory used for tables
 * - MSI/MSI-X
 * - net devices
 * - resources FW is holding for us
 */
static void free_some_resources(struct adapter *adapter)
{
	unsigned int i;

6231
	kvfree(adapter->smt);
6232
	kvfree(adapter->l2t);
6233
	kvfree(adapter->srq);
6234
	t4_cleanup_sched(adapter);
6235
	kvfree(adapter->tids.tid_tab);
6236
	cxgb4_cleanup_tc_matchall(adapter);
6237
	cxgb4_cleanup_tc_mqprio(adapter);
6238
	cxgb4_cleanup_tc_flower(adapter);
6239
	cxgb4_cleanup_tc_u32(adapter);
6240
	cxgb4_cleanup_ethtool_filters(adapter);
6241 6242 6243 6244
	kfree(adapter->sge.egr_map);
	kfree(adapter->sge.ingr_map);
	kfree(adapter->sge.starving_fl);
	kfree(adapter->sge.txq_maperr);
6245 6246 6247
#ifdef CONFIG_DEBUG_FS
	kfree(adapter->sge.blocked_fl);
#endif
6248 6249 6250
	disable_msi(adapter);

	for_each_port(adapter, i)
6251
		if (adapter->port[i]) {
6252 6253 6254 6255 6256
			struct port_info *pi = adap2pinfo(adapter, i);

			if (pi->viid != 0)
				t4_free_vi(adapter, adapter->mbox, adapter->pf,
					   0, pi->viid);
6257
			kfree(adap2pinfo(adapter, i)->rss);
6258
			free_netdev(adapter->port[i]);
6259
		}
6260
	if (adapter->flags & CXGB4_FW_OK)
6261
		t4_fw_bye(adapter, adapter->pf);
6262 6263
}

6264 6265
#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN | \
		   NETIF_F_GSO_UDP_L4)
6266
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
6267
		   NETIF_F_GRO | NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
6268
#define SEGMENT_SIZE 128
6269

G
Ganesh Goudar 已提交
6270
static int t4_get_chip_type(struct adapter *adap, int ver)
6271
{
G
Ganesh Goudar 已提交
6272
	u32 pl_rev = REV_G(t4_read_reg(adap, PL_REV_A));
6273

G
Ganesh Goudar 已提交
6274
	switch (ver) {
6275
	case CHELSIO_T4:
6276
		return CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
6277
	case CHELSIO_T5:
6278
		return CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
6279
	case CHELSIO_T6:
6280
		return CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
6281
	default:
G
Ganesh Goudar 已提交
6282
		break;
6283
	}
6284
	return -EINVAL;
6285 6286
}

6287
#ifdef CONFIG_PCI_IOV
G
Ganesh Goudar 已提交
6288
static void cxgb4_mgmt_setup(struct net_device *dev)
6289 6290 6291 6292 6293 6294 6295 6296 6297 6298 6299 6300 6301 6302
{
	dev->type = ARPHRD_NONE;
	dev->mtu = 0;
	dev->hard_header_len = 0;
	dev->addr_len = 0;
	dev->tx_queue_len = 0;
	dev->flags |= IFF_NOARP;
	dev->priv_flags |= IFF_NO_QUEUE;

	/* Initialize the device structure. */
	dev->netdev_ops = &cxgb4_mgmt_netdev_ops;
	dev->ethtool_ops = &cxgb4_mgmt_ethtool_ops;
}

6303 6304
static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
{
6305
	struct adapter *adap = pci_get_drvdata(pdev);
6306 6307 6308 6309
	int err = 0;
	int current_vfs = pci_num_vf(pdev);
	u32 pcie_fw;

6310
	pcie_fw = readl(adap->regs + PCIE_FW_A);
6311 6312 6313
	/* Check if fw is initialized */
	if (!(pcie_fw & PCIE_FW_INIT_F)) {
		dev_warn(&pdev->dev, "Device not initialized\n");
6314 6315 6316 6317 6318 6319 6320 6321 6322
		return -EOPNOTSUPP;
	}

	/* If any of the VF's is already assigned to Guest OS, then
	 * SRIOV for the same cannot be modified
	 */
	if (current_vfs && pci_vfs_assigned(pdev)) {
		dev_err(&pdev->dev,
			"Cannot modify SR-IOV while VFs are assigned\n");
G
Ganesh Goudar 已提交
6323
		return current_vfs;
6324
	}
G
Ganesh Goudar 已提交
6325 6326 6327
	/* Note that the upper-level code ensures that we're never called with
	 * a non-zero "num_vfs" when we already have VFs instantiated.  But
	 * it never hurts to code defensively.
6328
	 */
G
Ganesh Goudar 已提交
6329 6330 6331 6332 6333 6334 6335 6336
	if (num_vfs != 0 && current_vfs != 0)
		return -EBUSY;

	/* Nothing to do for no change. */
	if (num_vfs == current_vfs)
		return num_vfs;

	/* Disable SRIOV when zero is passed. */
6337 6338
	if (!num_vfs) {
		pci_disable_sriov(pdev);
G
Ganesh Goudar 已提交
6339 6340 6341 6342 6343
		/* free VF Management Interface */
		unregister_netdev(adap->port[0]);
		free_netdev(adap->port[0]);
		adap->port[0] = NULL;

6344
		/* free VF resources */
G
Ganesh Goudar 已提交
6345
		adap->num_vfs = 0;
6346 6347
		kfree(adap->vfinfo);
		adap->vfinfo = NULL;
G
Ganesh Goudar 已提交
6348
		return 0;
6349 6350
	}

G
Ganesh Goudar 已提交
6351 6352 6353 6354 6355 6356 6357 6358 6359 6360 6361 6362 6363 6364 6365 6366
	if (!current_vfs) {
		struct fw_pfvf_cmd port_cmd, port_rpl;
		struct net_device *netdev;
		unsigned int pmask, port;
		struct pci_dev *pbridge;
		struct port_info *pi;
		char name[IFNAMSIZ];
		u32 devcap2;
		u16 flags;

		/* If we want to instantiate Virtual Functions, then our
		 * parent bridge's PCI-E needs to support Alternative Routing
		 * ID (ARI) because our VFs will show up at function offset 8
		 * and above.
		 */
		pbridge = pdev->bus->self;
6367 6368
		pcie_capability_read_word(pbridge, PCI_EXP_FLAGS, &flags);
		pcie_capability_read_dword(pbridge, PCI_EXP_DEVCAP2, &devcap2);
G
Ganesh Goudar 已提交
6369 6370 6371 6372 6373 6374 6375 6376 6377 6378 6379 6380 6381 6382 6383 6384 6385 6386 6387 6388 6389

		if ((flags & PCI_EXP_FLAGS_VERS) < 2 ||
		    !(devcap2 & PCI_EXP_DEVCAP2_ARI)) {
			/* Our parent bridge does not support ARI so issue a
			 * warning and skip instantiating the VFs.  They
			 * won't be reachable.
			 */
			dev_warn(&pdev->dev, "Parent bridge %02x:%02x.%x doesn't support ARI; can't instantiate Virtual Functions\n",
				 pbridge->bus->number, PCI_SLOT(pbridge->devfn),
				 PCI_FUNC(pbridge->devfn));
			return -ENOTSUPP;
		}
		memset(&port_cmd, 0, sizeof(port_cmd));
		port_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) |
						 FW_CMD_REQUEST_F |
						 FW_CMD_READ_F |
						 FW_PFVF_CMD_PFN_V(adap->pf) |
						 FW_PFVF_CMD_VFN_V(0));
		port_cmd.retval_len16 = cpu_to_be32(FW_LEN16(port_cmd));
		err = t4_wr_mbox(adap, adap->mbox, &port_cmd, sizeof(port_cmd),
				 &port_rpl);
6390 6391
		if (err)
			return err;
G
Ganesh Goudar 已提交
6392 6393 6394 6395 6396 6397 6398 6399 6400
		pmask = FW_PFVF_CMD_PMASK_G(be32_to_cpu(port_rpl.type_to_neq));
		port = ffs(pmask) - 1;
		/* Allocate VF Management Interface. */
		snprintf(name, IFNAMSIZ, "mgmtpf%d,%d", adap->adap_idx,
			 adap->pf);
		netdev = alloc_netdev(sizeof(struct port_info),
				      name, NET_NAME_UNKNOWN, cxgb4_mgmt_setup);
		if (!netdev)
			return -ENOMEM;
6401

G
Ganesh Goudar 已提交
6402 6403 6404 6405 6406 6407 6408 6409 6410 6411 6412 6413 6414 6415
		pi = netdev_priv(netdev);
		pi->adapter = adap;
		pi->lport = port;
		pi->tx_chan = port;
		SET_NETDEV_DEV(netdev, &pdev->dev);

		adap->port[0] = netdev;
		pi->port_id = 0;

		err = register_netdev(adap->port[0]);
		if (err) {
			pr_info("Unable to register VF mgmt netdev %s\n", name);
			free_netdev(adap->port[0]);
			adap->port[0] = NULL;
6416
			return err;
G
Ganesh Goudar 已提交
6417 6418 6419 6420 6421 6422 6423 6424 6425 6426 6427 6428 6429 6430 6431 6432 6433 6434 6435 6436 6437 6438 6439 6440
		}
		/* Allocate and set up VF Information. */
		adap->vfinfo = kcalloc(pci_sriov_get_totalvfs(pdev),
				       sizeof(struct vf_info), GFP_KERNEL);
		if (!adap->vfinfo) {
			unregister_netdev(adap->port[0]);
			free_netdev(adap->port[0]);
			adap->port[0] = NULL;
			return -ENOMEM;
		}
		cxgb4_mgmt_fill_vf_station_mac_addr(adap);
	}
	/* Instantiate the requested number of VFs. */
	err = pci_enable_sriov(pdev, num_vfs);
	if (err) {
		pr_info("Unable to instantiate %d VFs\n", num_vfs);
		if (!current_vfs) {
			unregister_netdev(adap->port[0]);
			free_netdev(adap->port[0]);
			adap->port[0] = NULL;
			kfree(adap->vfinfo);
			adap->vfinfo = NULL;
		}
		return err;
6441
	}
6442

G
Ganesh Goudar 已提交
6443
	adap->num_vfs = num_vfs;
6444 6445
	return num_vfs;
}
G
Ganesh Goudar 已提交
6446
#endif /* CONFIG_PCI_IOV */
6447

6448 6449 6450 6451 6452 6453 6454 6455 6456 6457 6458 6459 6460 6461 6462 6463 6464 6465 6466 6467 6468 6469 6470 6471 6472 6473 6474 6475 6476 6477 6478 6479 6480 6481 6482 6483 6484 6485 6486 6487 6488 6489 6490 6491 6492 6493 6494 6495 6496 6497 6498 6499 6500 6501 6502 6503 6504 6505 6506 6507 6508 6509 6510 6511 6512 6513 6514 6515 6516 6517 6518 6519 6520
#if defined(CONFIG_CHELSIO_TLS_DEVICE)

static int cxgb4_ktls_dev_add(struct net_device *netdev, struct sock *sk,
			      enum tls_offload_ctx_dir direction,
			      struct tls_crypto_info *crypto_info,
			      u32 tcp_sn)
{
	struct adapter *adap = netdev2adap(netdev);
	int ret = 0;

	mutex_lock(&uld_mutex);
	if (!adap->uld[CXGB4_ULD_CRYPTO].handle) {
		dev_err(adap->pdev_dev, "chcr driver is not loaded\n");
		ret = -EOPNOTSUPP;
		goto out_unlock;
	}

	if (!adap->uld[CXGB4_ULD_CRYPTO].tlsdev_ops) {
		dev_err(adap->pdev_dev,
			"chcr driver has no registered tlsdev_ops()\n");
		ret = -EOPNOTSUPP;
		goto out_unlock;
	}

	ret = cxgb4_set_ktls_feature(adap, FW_PARAMS_PARAM_DEV_KTLS_HW_ENABLE);
	if (ret)
		goto out_unlock;

	ret = adap->uld[CXGB4_ULD_CRYPTO].tlsdev_ops->tls_dev_add(netdev, sk,
								  direction,
								  crypto_info,
								  tcp_sn);
	/* if there is a failure, clear the refcount */
	if (ret)
		cxgb4_set_ktls_feature(adap,
				       FW_PARAMS_PARAM_DEV_KTLS_HW_DISABLE);
out_unlock:
	mutex_unlock(&uld_mutex);
	return ret;
}

static void cxgb4_ktls_dev_del(struct net_device *netdev,
			       struct tls_context *tls_ctx,
			       enum tls_offload_ctx_dir direction)
{
	struct adapter *adap = netdev2adap(netdev);

	mutex_lock(&uld_mutex);
	if (!adap->uld[CXGB4_ULD_CRYPTO].handle) {
		dev_err(adap->pdev_dev, "chcr driver is not loaded\n");
		goto out_unlock;
	}

	if (!adap->uld[CXGB4_ULD_CRYPTO].tlsdev_ops) {
		dev_err(adap->pdev_dev,
			"chcr driver has no registered tlsdev_ops\n");
		goto out_unlock;
	}

	adap->uld[CXGB4_ULD_CRYPTO].tlsdev_ops->tls_dev_del(netdev, tls_ctx,
							    direction);
	cxgb4_set_ktls_feature(adap, FW_PARAMS_PARAM_DEV_KTLS_HW_DISABLE);

out_unlock:
	mutex_unlock(&uld_mutex);
}

static const struct tlsdev_ops cxgb4_ktls_ops = {
	.tls_dev_add = cxgb4_ktls_dev_add,
	.tls_dev_del = cxgb4_ktls_dev_del,
};
#endif /* CONFIG_CHELSIO_TLS_DEVICE */

6521
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6522
{
G
Ganesh Goudar 已提交
6523 6524 6525 6526
	struct net_device *netdev;
	struct adapter *adapter;
	static int adap_idx = 1;
	int s_qpp, qpp, num_seg;
6527
	struct port_info *pi;
6528
	bool highdma = false;
6529
	enum chip_type chip;
G
Ganesh Goudar 已提交
6530 6531 6532 6533 6534
	void __iomem *regs;
	int func, chip_ver;
	u16 device_id;
	int i, err;
	u32 whoami;
6535 6536 6537 6538 6539 6540 6541 6542 6543 6544 6545 6546 6547 6548

	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}

6549 6550 6551 6552 6553 6554 6555
	regs = pci_ioremap_bar(pdev, 0);
	if (!regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_disable_device;
	}

G
Ganesh Goudar 已提交
6556 6557 6558 6559 6560 6561 6562
	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_unmap_bar0;
	}

	adapter->regs = regs;
6563 6564
	err = t4_wait_dev_ready(regs);
	if (err < 0)
6565
		goto out_free_adapter;
6566

6567
	/* We control everything through one PF */
G
Ganesh Goudar 已提交
6568 6569 6570
	whoami = t4_read_reg(adapter, PL_WHOAMI_A);
	pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
	chip = t4_get_chip_type(adapter, CHELSIO_PCI_ID_VER(device_id));
6571
	if ((int)chip < 0) {
G
Ganesh Goudar 已提交
6572 6573 6574 6575 6576 6577 6578
		dev_err(&pdev->dev, "Device %d is not supported\n", device_id);
		err = chip;
		goto out_free_adapter;
	}
	chip_ver = CHELSIO_CHIP_VERSION(chip);
	func = chip_ver <= CHELSIO_T5 ?
	       SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
G
Ganesh Goudar 已提交
6579 6580 6581 6582 6583 6584

	adapter->pdev = pdev;
	adapter->pdev_dev = &pdev->dev;
	adapter->name = pci_name(pdev);
	adapter->mbox = func;
	adapter->pf = func;
6585 6586
	adapter->params.chip = chip;
	adapter->adap_idx = adap_idx;
G
Ganesh Goudar 已提交
6587 6588 6589 6590 6591 6592 6593 6594 6595 6596 6597
	adapter->msg_enable = DFLT_MSG_ENABLE;
	adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
				    (sizeof(struct mbox_cmd) *
				     T4_OS_LOG_MBOX_CMDS),
				    GFP_KERNEL);
	if (!adapter->mbox_log) {
		err = -ENOMEM;
		goto out_free_adapter;
	}
	spin_lock_init(&adapter->mbox_lock);
	INIT_LIST_HEAD(&adapter->mlist.list);
6598
	adapter->mbox_log->size = T4_OS_LOG_MBOX_CMDS;
G
Ganesh Goudar 已提交
6599 6600
	pci_set_drvdata(pdev, adapter);

6601 6602 6603
	if (func != ent->driver_data) {
		pci_disable_device(pdev);
		pci_save_state(pdev);        /* to restore SR-IOV later */
G
Ganesh Goudar 已提交
6604
		return 0;
6605 6606
	}

6607
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
6608
		highdma = true;
6609 6610 6611 6612
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
G
Ganesh Goudar 已提交
6613
			goto out_free_adapter;
6614 6615 6616 6617 6618
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "no usable DMA configuration\n");
G
Ganesh Goudar 已提交
6619
			goto out_free_adapter;
6620 6621 6622 6623 6624 6625
		}
	}

	pci_enable_pcie_error_reporting(pdev);
	pci_set_master(pdev);
	pci_save_state(pdev);
6626
	adap_idx++;
6627 6628 6629 6630 6631 6632
	adapter->workq = create_singlethread_workqueue("cxgb4");
	if (!adapter->workq) {
		err = -ENOMEM;
		goto out_free_adapter;
	}

6633
	/* PCI device has been enabled */
6634
	adapter->flags |= CXGB4_DEV_ENABLED;
6635 6636
	memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));

6637 6638 6639 6640 6641 6642 6643 6644 6645 6646 6647 6648 6649 6650 6651
	/* If possible, we use PCIe Relaxed Ordering Attribute to deliver
	 * Ingress Packet Data to Free List Buffers in order to allow for
	 * chipset performance optimizations between the Root Complex and
	 * Memory Controllers.  (Messages to the associated Ingress Queue
	 * notifying new Packet Placement in the Free Lists Buffers will be
	 * send without the Relaxed Ordering Attribute thus guaranteeing that
	 * all preceding PCIe Transaction Layer Packets will be processed
	 * first.)  But some Root Complexes have various issues with Upstream
	 * Transaction Layer Packets with the Relaxed Ordering Attribute set.
	 * The PCIe devices which under the Root Complexes will be cleared the
	 * Relaxed Ordering bit in the configuration space, So we check our
	 * PCIe configuration space to see if it's flagged with advice against
	 * using Relaxed Ordering.
	 */
	if (!pcie_relaxed_ordering_enabled(pdev))
6652
		adapter->flags |= CXGB4_ROOT_NO_RELAXED_ORDERING;
6653

6654 6655
	spin_lock_init(&adapter->stats_lock);
	spin_lock_init(&adapter->tid_release_lock);
6656
	spin_lock_init(&adapter->win0_lock);
6657 6658

	INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
6659 6660
	INIT_WORK(&adapter->db_full_task, process_db_full);
	INIT_WORK(&adapter->db_drop_task, process_db_drop);
6661
	INIT_WORK(&adapter->fatal_err_notify_task, notify_fatal_err);
6662 6663 6664

	err = t4_prep_adapter(adapter);
	if (err)
6665 6666
		goto out_free_adapter;

6667 6668 6669 6670 6671 6672 6673 6674 6675 6676
	if (is_kdump_kernel()) {
		/* Collect hardware state and append to /proc/vmcore */
		err = cxgb4_cudbg_vmcore_add_dump(adapter);
		if (err) {
			dev_warn(adapter->pdev_dev,
				 "Fail collecting vmcore device dump, err: %d. Continuing\n",
				 err);
			err = 0;
		}
	}
6677

6678
	if (!is_t4(adapter->params.chip)) {
6679 6680
		s_qpp = (QUEUESPERPAGEPF0_S +
			(QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) *
6681
			adapter->pf);
6682 6683
		qpp = 1 << QUEUESPERPAGEPF0_G(t4_read_reg(adapter,
		      SGE_EGRESS_QUEUES_PER_PAGE_PF_A) >> s_qpp);
6684 6685 6686 6687 6688 6689 6690 6691 6692 6693 6694
		num_seg = PAGE_SIZE / SEGMENT_SIZE;

		/* Each segment size is 128B. Write coalescing is enabled only
		 * when SGE_EGRESS_QUEUES_PER_PAGE_PF reg value for the
		 * queue is less no of segments that can be accommodated in
		 * a page size.
		 */
		if (qpp > num_seg) {
			dev_err(&pdev->dev,
				"Incorrect number of egress queues per page\n");
			err = -EINVAL;
6695
			goto out_free_adapter;
6696 6697 6698 6699 6700 6701
		}
		adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
		pci_resource_len(pdev, 2));
		if (!adapter->bar2) {
			dev_err(&pdev->dev, "cannot map device bar2 region\n");
			err = -ENOMEM;
6702
			goto out_free_adapter;
6703 6704 6705
		}
	}

6706
	setup_memwin(adapter);
V
Vishal Kulkarni 已提交
6707
	err = adap_init0(adapter, 0);
6708 6709 6710
#ifdef CONFIG_DEBUG_FS
	bitmap_zero(adapter->sge.blocked_fl, adapter->sge.egr_sz);
#endif
6711
	setup_memwin_rdma(adapter);
6712 6713 6714
	if (err)
		goto out_unmap_bar;

6715 6716
	/* configure SGE_STAT_CFG_A to read WC stats */
	if (!is_t4(adapter->params.chip))
6717 6718 6719
		t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7) |
			     (is_t5(adapter->params.chip) ? STATMODE_V(0) :
			      T6_STATMODE_V(0)));
6720

6721 6722 6723
	/* Initialize hash mac addr list */
	INIT_LIST_HEAD(&adapter->mac_hlist);

6724
	for_each_port(adapter, i) {
6725 6726 6727 6728 6729 6730
		/* For supporting MQPRIO Offload, need some extra
		 * queues for each ETHOFLD TIDs. Keep it equal to
		 * MAX_ATIDs for now. Once we connect to firmware
		 * later and query the EOTID params, we'll come to
		 * know the actual # of EOTIDs supported.
		 */
6731
		netdev = alloc_etherdev_mq(sizeof(struct port_info),
6732
					   MAX_ETH_QSETS + MAX_ATIDS);
6733 6734 6735 6736 6737 6738 6739 6740 6741 6742 6743 6744 6745 6746
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->xact_addr_filt = -1;
		pi->port_id = i;
		netdev->irq = pdev->irq;

6747 6748
		netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
6749
			NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_GRO |
6750
			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
6751
			NETIF_F_HW_TC | NETIF_F_NTUPLE;
6752

G
Ganesh Goudar 已提交
6753
		if (chip_ver > CHELSIO_T5) {
6754 6755 6756 6757
			netdev->hw_enc_features |= NETIF_F_IP_CSUM |
						   NETIF_F_IPV6_CSUM |
						   NETIF_F_RXCSUM |
						   NETIF_F_GSO_UDP_TUNNEL |
6758
						   NETIF_F_GSO_UDP_TUNNEL_CSUM |
6759 6760
						   NETIF_F_TSO | NETIF_F_TSO6;

A
Atul Gupta 已提交
6761
			netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
6762
					       NETIF_F_GSO_UDP_TUNNEL_CSUM |
A
Atul Gupta 已提交
6763
					       NETIF_F_HW_TLS_RECORD;
6764
		}
6765

6766 6767 6768
		if (highdma)
			netdev->hw_features |= NETIF_F_HIGHDMA;
		netdev->features |= netdev->hw_features;
6769
		netdev->vlan_features = netdev->features & VLAN_FEAT;
6770 6771 6772 6773 6774 6775 6776 6777
#if defined(CONFIG_CHELSIO_TLS_DEVICE)
		if (pi->adapter->params.crypto & FW_CAPS_CONFIG_TLS_HW) {
			netdev->hw_features |= NETIF_F_HW_TLS_TX;
			netdev->tlsdev_ops = &cxgb4_ktls_ops;
			/* initialize the refcount */
			refcount_set(&pi->adapter->chcr_ktls.ktls_refcount, 0);
		}
#endif
6778 6779
		netdev->priv_flags |= IFF_UNICAST_FLT;

6780
		/* MTU range: 81 - 9600 */
6781
		netdev->min_mtu = 81;              /* accommodate SACK */
6782 6783
		netdev->max_mtu = MAX_MTU;

6784
		netdev->netdev_ops = &cxgb4_netdev_ops;
6785 6786 6787
#ifdef CONFIG_CHELSIO_T4_DCB
		netdev->dcbnl_ops = &cxgb4_dcb_ops;
		cxgb4_dcb_state_init(netdev);
6788
		cxgb4_dcb_version_init(netdev);
6789
#endif
6790
		cxgb4_set_ethtool_ops(netdev);
6791 6792
	}

6793 6794
	cxgb4_init_ethtool_dump(adapter);

6795 6796
	pci_set_drvdata(pdev, adapter);

6797
	if (adapter->flags & CXGB4_FW_OK) {
6798
		err = t4_port_init(adapter, func, func, 0);
6799 6800
		if (err)
			goto out_free_dev;
6801 6802 6803 6804 6805 6806 6807 6808 6809 6810 6811 6812 6813 6814 6815 6816
	} else if (adapter->params.nports == 1) {
		/* If we don't have a connection to the firmware -- possibly
		 * because of an error -- grab the raw VPD parameters so we
		 * can set the proper MAC Address on the debug network
		 * interface that we've created.
		 */
		u8 hw_addr[ETH_ALEN];
		u8 *na = adapter->params.vpd.na;

		err = t4_get_raw_vpd_params(adapter, &adapter->params.vpd);
		if (!err) {
			for (i = 0; i < ETH_ALEN; i++)
				hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
					      hex2val(na[2 * i + 1]));
			t4_set_hw_addr(adapter, 0, hw_addr);
		}
6817 6818
	}

6819
	if (!(adapter->flags & CXGB4_FW_OK))
6820 6821
		goto fw_attach_fail;

6822
	/* Configure queues and allocate tables now, they can be needed as
6823 6824
	 * soon as the first register_netdev completes.
	 */
6825 6826 6827
	err = cfg_queues(adapter);
	if (err)
		goto out_free_dev;
6828

6829 6830 6831 6832 6833 6834
	adapter->smt = t4_init_smt();
	if (!adapter->smt) {
		/* We tolerate a lack of SMT, giving up some functionality */
		dev_warn(&pdev->dev, "could not allocate SMT, continuing\n");
	}

6835
	adapter->l2t = t4_init_l2t(adapter->l2t_start, adapter->l2t_end);
6836 6837 6838 6839 6840 6841
	if (!adapter->l2t) {
		/* We tolerate a lack of L2T, giving up some functionality */
		dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
		adapter->params.offload = 0;
	}

6842
#if IS_ENABLED(CONFIG_IPV6)
G
Ganesh Goudar 已提交
6843
	if (chip_ver <= CHELSIO_T5 &&
6844 6845 6846
	    (!(t4_read_reg(adapter, LE_DB_CONFIG_A) & ASLIPCOMPEN_F))) {
		/* CLIP functionality is not present in hardware,
		 * hence disable all offload features
6847 6848
		 */
		dev_warn(&pdev->dev,
6849
			 "CLIP not enabled in hardware, continuing\n");
6850
		adapter->params.offload = 0;
6851 6852 6853 6854 6855 6856 6857 6858 6859 6860 6861
	} else {
		adapter->clipt = t4_init_clip_tbl(adapter->clipt_start,
						  adapter->clipt_end);
		if (!adapter->clipt) {
			/* We tolerate a lack of clip_table, giving up
			 * some functionality
			 */
			dev_warn(&pdev->dev,
				 "could not allocate Clip table, continuing\n");
			adapter->params.offload = 0;
		}
6862 6863
	}
#endif
6864 6865 6866 6867 6868 6869 6870 6871 6872 6873

	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		pi->sched_tbl = t4_init_sched(adapter->params.nsched_cls);
		if (!pi->sched_tbl)
			dev_warn(&pdev->dev,
				 "could not activate scheduling on port %d\n",
				 i);
	}

6874 6875 6876 6877 6878 6879 6880 6881 6882 6883 6884 6885 6886 6887 6888 6889 6890 6891
	if (is_offload(adapter) || is_hashfilter(adapter)) {
		if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) {
			u32 v;

			v = t4_read_reg(adapter, LE_DB_HASH_CONFIG_A);
			if (chip_ver <= CHELSIO_T5) {
				adapter->tids.nhash = 1 << HASHTIDSIZE_G(v);
				v = t4_read_reg(adapter, LE_DB_TID_HASHBASE_A);
				adapter->tids.hash_base = v / 4;
			} else {
				adapter->tids.nhash = HASHTBLSIZE_G(v) << 3;
				v = t4_read_reg(adapter,
						T6_LE_DB_HASH_TID_BASE_A);
				adapter->tids.hash_base = v;
			}
		}
	}

6892
	if (tid_init(&adapter->tids) < 0) {
6893 6894 6895
		dev_warn(&pdev->dev, "could not allocate TID table, "
			 "continuing\n");
		adapter->params.offload = 0;
6896
	} else {
6897
		adapter->tc_u32 = cxgb4_init_tc_u32(adapter);
6898 6899 6900
		if (!adapter->tc_u32)
			dev_warn(&pdev->dev,
				 "could not offload tc u32, continuing\n");
6901

6902 6903 6904
		if (cxgb4_init_tc_flower(adapter))
			dev_warn(&pdev->dev,
				 "could not offload tc flower, continuing\n");
6905 6906 6907 6908

		if (cxgb4_init_tc_mqprio(adapter))
			dev_warn(&pdev->dev,
				 "could not offload tc mqprio, continuing\n");
6909 6910 6911 6912

		if (cxgb4_init_tc_matchall(adapter))
			dev_warn(&pdev->dev,
				 "could not offload tc matchall, continuing\n");
6913 6914 6915
		if (cxgb4_init_ethtool_filters(adapter))
			dev_warn(&pdev->dev,
				 "could not initialize ethtool filters, continuing\n");
6916 6917
	}

6918 6919
	/* See what interrupts we'll be using */
	if (msi > 1 && enable_msix(adapter) == 0)
6920
		adapter->flags |= CXGB4_USING_MSIX;
6921
	else if (msi > 0 && pci_enable_msi(pdev) == 0) {
6922
		adapter->flags |= CXGB4_USING_MSI;
6923 6924 6925
		if (msi > 1)
			free_msix_info(adapter);
	}
6926

6927
	/* check for PCI Express bandwidth capabiltites */
6928
	pcie_print_link_status(pdev);
6929

6930 6931
	cxgb4_init_mps_ref_entries(adapter);

6932 6933 6934 6935
	err = init_rss(adapter);
	if (err)
		goto out_free_dev;

6936 6937 6938 6939 6940 6941 6942
	err = setup_non_data_intr(adapter);
	if (err) {
		dev_err(adapter->pdev_dev,
			"Non Data interrupt allocation failed, err: %d\n", err);
		goto out_free_dev;
	}

6943 6944 6945 6946 6947 6948 6949
	err = setup_fw_sge_queues(adapter);
	if (err) {
		dev_err(adapter->pdev_dev,
			"FW sge queue allocation failed, err %d", err);
		goto out_free_dev;
	}

6950
fw_attach_fail:
6951 6952 6953 6954 6955 6956 6957
	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
6958
		pi = adap2pinfo(adapter, i);
6959
		adapter->port[i]->dev_port = pi->lport;
6960 6961 6962
		netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
		netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);

6963 6964
		netif_carrier_off(adapter->port[i]);

6965 6966
		err = register_netdev(adapter->port[i]);
		if (err)
6967 6968 6969
			break;
		adapter->chan_map[pi->tx_chan] = i;
		print_port_info(adapter->port[i]);
6970
	}
6971
	if (i == 0) {
6972 6973 6974
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}
6975 6976 6977
	if (err) {
		dev_warn(&pdev->dev, "only %d net devices registered\n", i);
		err = 0;
6978
	}
6979 6980 6981 6982 6983 6984 6985

	if (cxgb4_debugfs_root) {
		adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
							   cxgb4_debugfs_root);
		setup_debugfs(adapter);
	}

D
Divy Le Ray 已提交
6986 6987 6988
	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
	pdev->needs_freset = 1;

6989 6990
	if (is_uld(adapter))
		cxgb4_uld_enable(adapter);
6991

6992 6993 6994
	if (!is_t4(adapter->params.chip))
		cxgb4_ptp_init(adapter);

6995
	if (IS_REACHABLE(CONFIG_THERMAL) &&
6996
	    !is_t4(adapter->params.chip) && (adapter->flags & CXGB4_FW_OK))
G
Ganesh Goudar 已提交
6997 6998
		cxgb4_thermal_init(adapter);

6999
	print_adapter_info(adapter);
7000
	return 0;
7001

7002
 out_free_dev:
7003
	t4_free_sge_resources(adapter);
7004
	free_some_resources(adapter);
7005
	if (adapter->flags & CXGB4_USING_MSIX)
7006
		free_msix_info(adapter);
7007 7008
	if (adapter->num_uld || adapter->num_ofld_uld)
		t4_uld_mem_free(adapter);
7009
 out_unmap_bar:
7010
	if (!is_t4(adapter->params.chip))
7011
		iounmap(adapter->bar2);
7012
 out_free_adapter:
7013 7014 7015
	if (adapter->workq)
		destroy_workqueue(adapter->workq);

7016
	kfree(adapter->mbox_log);
7017
	kfree(adapter);
7018 7019
 out_unmap_bar0:
	iounmap(regs);
7020 7021 7022 7023 7024 7025 7026 7027
 out_disable_device:
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
 out_release_regions:
	pci_release_regions(pdev);
	return err;
}

B
Bill Pemberton 已提交
7028
static void remove_one(struct pci_dev *pdev)
7029 7030
{
	struct adapter *adapter = pci_get_drvdata(pdev);
7031
	struct hash_mac_addr *entry, *tmp;
7032

7033 7034 7035 7036
	if (!adapter) {
		pci_release_regions(pdev);
		return;
	}
7037

7038 7039 7040 7041 7042
	/* If we allocated filters, free up state associated with any
	 * valid filters ...
	 */
	clear_all_filters(adapter);

7043
	adapter->flags |= CXGB4_SHUTTING_DOWN;
7044

7045
	if (adapter->pf == 4) {
7046 7047
		int i;

7048 7049 7050 7051 7052
		/* Tear down per-adapter Work Queue first since it can contain
		 * references to our adapter data structure.
		 */
		destroy_workqueue(adapter->workq);

		if (is_uld(adapter)) {
			detach_ulds(adapter);
			t4_uld_clean_up(adapter);
		}

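		/* Release any host memory allocated for the adapter's HMA region. */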
		adap_free_hma_mem(adapter);

		disable_interrupts(adapter);

		cxgb4_free_mps_ref_entries(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i]->reg_state == NETREG_REGISTERED)
				unregister_netdev(adapter->port[i]);

		debugfs_remove_recursive(adapter->debugfs_root);

		if (!is_t4(adapter->params.chip))
			cxgb4_ptp_stop(adapter);
		if (IS_REACHABLE(CONFIG_THERMAL))
			cxgb4_thermal_remove(adapter);

		if (adapter->flags & CXGB4_FULL_INIT_DONE)
			cxgb_down(adapter);

		if (adapter->flags & CXGB4_USING_MSIX)
			free_msix_info(adapter);
		if (adapter->num_uld || adapter->num_ofld_uld)
			t4_uld_mem_free(adapter);
		free_some_resources(adapter);
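		/* Free the MAC address entries still on the adapter's hash list. */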
		list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist,
					 list) {
			list_del(&entry->list);
			kfree(entry);
		}

#if IS_ENABLED(CONFIG_IPV6)
		t4_cleanup_clip_tbl(adapter);
#endif
		if (!is_t4(adapter->params.chip))
			iounmap(adapter->bar2);
	}
#ifdef CONFIG_PCI_IOV
	else {
		cxgb4_iov_configure(adapter->pdev, 0);
	}
#endif
	iounmap(adapter->regs);
	pci_disable_pcie_error_reporting(pdev);
	if (adapter->flags & CXGB4_DEV_ENABLED) {
		pci_disable_device(pdev);
		adapter->flags &= ~CXGB4_DEV_ENABLED;
	}
	pci_release_regions(pdev);
	kfree(adapter->mbox_log);
	synchronize_rcu();
	kfree(adapter);
}

/* "Shutdown" quiesces the device, stopping Ingress Packet and Interrupt
 * delivery.  This is essentially a stripped down version of the PCI remove()
 * function where we do the minimal amount of work necessary to shut down any
 * further activity.
 */
static void shutdown_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	/* As with remove_one() above (see extended comment), we only want to
	 * do cleanup on PCI Devices which went all the way through init_one()
	 * ...
	 */
	if (!adapter) {
		pci_release_regions(pdev);
		return;
	}

	adapter->flags |= CXGB4_SHUTTING_DOWN;

	if (adapter->pf == 4) {
		int i;

		for_each_port(adapter, i)
			if (adapter->port[i]->reg_state == NETREG_REGISTERED)
				cxgb_close(adapter->port[i]);

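		/* Stop TC-MQPRIO offload; this must run under the RTNL lock. */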
		rtnl_lock();
		cxgb4_mqprio_stop_offload(adapter);
		rtnl_unlock();

		if (is_uld(adapter)) {
			detach_ulds(adapter);
			t4_uld_clean_up(adapter);
		}

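		/* Mask interrupts, stop the SGE, and send the firmware a
		 * final FW_BYE if it is still responsive.
		 */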
		disable_interrupts(adapter);
		disable_msi(adapter);

		t4_sge_stop(adapter);
		if (adapter->flags & CXGB4_FW_OK)
			t4_fw_bye(adapter, adapter->mbox);
	}
}

static struct pci_driver cxgb4_driver = {
	.name     = KBUILD_MODNAME,
	.id_table = cxgb4_pci_tbl,
	.probe    = init_one,
	.remove   = remove_one,
	.shutdown = shutdown_one,
#ifdef CONFIG_PCI_IOV
	.sriov_configure = cxgb4_iov_configure,
#endif
	.err_handler = &cxgb4_eeh,
};

static int __init cxgb4_init_module(void)
{
	int ret;

	cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);

	ret = pci_register_driver(&cxgb4_driver);
	if (ret < 0)
		goto err_pci;

#if IS_ENABLED(CONFIG_IPV6)
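	/* Watch for IPv6 address changes so the hardware CLIP table can
	 * be kept in sync for offloaded connections.
	 */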
	if (!inet6addr_registered) {
		ret = register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
		if (ret)
			pci_unregister_driver(&cxgb4_driver);
		else
			inet6addr_registered = true;
	}
#endif

	if (ret == 0)
		return ret;

err_pci:
	debugfs_remove(cxgb4_debugfs_root);

	return ret;
}

static void __exit cxgb4_cleanup_module(void)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (inet6addr_registered) {
		unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
		inet6addr_registered = false;
	}
#endif
	pci_unregister_driver(&cxgb4_driver);
	debugfs_remove(cxgb4_debugfs_root);  /* NULL ok */
}

module_init(cxgb4_init_module);
module_exit(cxgb4_cleanup_module);