tg3.c 399.5 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3 4 5 6
/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
7
 * Copyright (C) 2005-2010 Broadcom Corporation.
L
Linus Torvalds 已提交
8 9
 *
 * Firmware is:
M
Michael Chan 已提交
10 11 12 13 14 15
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
L
Linus Torvalds 已提交
16 17 18 19 20 21 22 23 24 25
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
26
#include <linux/in.h>
L
Linus Torvalds 已提交
27 28 29 30 31 32 33 34
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
M
Matt Carlson 已提交
35
#include <linux/phy.h>
M
Matt Carlson 已提交
36
#include <linux/brcmphy.h>
L
Linus Torvalds 已提交
37 38 39 40
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
41
#include <linux/prefetch.h>
42
#include <linux/dma-mapping.h>
43
#include <linux/firmware.h>
L
Linus Torvalds 已提交
44 45

#include <net/checksum.h>
46
#include <net/ip.h>
L
Linus Torvalds 已提交
47 48 49 50 51 52

#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

53
#ifdef CONFIG_SPARC
L
Linus Torvalds 已提交
54
#include <asm/idprom.h>
55
#include <asm/prom.h>
L
Linus Torvalds 已提交
56 57
#endif

M
Matt Carlson 已提交
58 59 60
#define BAR_0	0
#define BAR_2	2

L
Linus Torvalds 已提交
61 62 63 64 65 66 67 68 69
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define TG3_VLAN_TAG_USED 1
#else
#define TG3_VLAN_TAG_USED 0
#endif

#include "tg3.h"

#define DRV_MODULE_NAME		"tg3"
M
Matt Carlson 已提交
70 71
#define DRV_MODULE_VERSION	"3.108"
#define DRV_MODULE_RELDATE	"February 17, 2010"
L
Linus Torvalds 已提交
72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93

#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
94
	((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) ? 9000 : 1500)
L
Linus Torvalds 已提交
95 96 97 98 99 100 101 102 103

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE		512
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JUMBO_RING_SIZE		256
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
M
Matt Carlson 已提交
104
#define TG3_RSS_INDIR_TBL_SIZE 128
L
Linus Torvalds 已提交
105 106 107 108 109 110 111 112

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)	\
M
Matt Carlson 已提交
113
	(((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) && \
114
	  !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) ? 1024 : 512)
L
Linus Torvalds 已提交
115 116 117 118 119 120

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_RING_SIZE)
M
Matt Carlson 已提交
121 122
#define TG3_RX_JUMBO_RING_BYTES	(sizeof(struct tg3_ext_rx_buffer_desc) * \
				 TG3_RX_JUMBO_RING_SIZE)
L
Linus Torvalds 已提交
123
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
M
Matt Carlson 已提交
124
				 TG3_RX_RCB_RING_SIZE(tp))
L
Linus Torvalds 已提交
125 126 127 128
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))

129 130 131 132 133 134 135 136 137
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
L
Linus Torvalds 已提交
138

139 140 141 142 143 144
#define TG3_RX_STD_BUFF_RING_SIZE \
	(sizeof(struct ring_info) * TG3_RX_RING_SIZE)

#define TG3_RX_JMB_BUFF_RING_SIZE \
	(sizeof(struct ring_info) * TG3_RX_JUMBO_RING_SIZE)

L
Linus Torvalds 已提交
145
/* minimum number of free TX descriptors required to wake up TX process */
146
#define TG3_TX_WAKEUP_THRESH(tnapi)		((tnapi)->tx_pending / 4)
L
Linus Torvalds 已提交
147

M
Matt Carlson 已提交
148 149
#define TG3_RAW_IP_ALIGN 2

L
Linus Torvalds 已提交
150 151 152
/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS		(sizeof(struct tg3_ethtool_stats)/sizeof(u64))

153 154
#define TG3_NUM_TEST		6

155 156 157 158
#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

L
Linus Torvalds 已提交
159
static char version[] __devinitdata =
160
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
L
Linus Torvalds 已提交
161 162 163 164 165

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
166 167 168 169
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

M
Matt Carlson 已提交
170
#define TG3_RSS_MIN_NUM_MSIX_VECS	2
L
Linus Torvalds 已提交
171 172 173 174 175

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

176
static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
M
Michael Chan 已提交
201
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
202 203 204 205 206 207 208 209 210 211 212 213 214 215
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
M
Michael Chan 已提交
216
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
217 218 219
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
M
Michael Chan 已提交
220
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
221 222 223 224 225 226 227
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
M
Michael Chan 已提交
228 229
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
M
Matt Carlson 已提交
230 231
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
M
Matt Carlson 已提交
232
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
M
Matt Carlson 已提交
233 234
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
M
Matt Carlson 已提交
235 236
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
237 238
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
M
Matt Carlson 已提交
239 240 241
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
M
Matt Carlson 已提交
242
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
M
Matt Carlson 已提交
243 244 245
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5724)},
246 247 248 249 250 251
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
252 253 254 255 256 257 258 259
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{}
L
Linus Torvalds 已提交
260 261 262 263
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

A
Andreas Mohr 已提交
264
static const struct {
L
Linus Torvalds 已提交
265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};

A
Andreas Mohr 已提交
345
static const struct {
346 347 348 349 350 351 352 353 354 355
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "loopback test  (offline)" },
	{ "interrupt test (offline)" },
};

356 357 358 359 360 361 362
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
363
	return (readl(tp->regs + off));
364 365
}

M
Matt Carlson 已提交
366 367 368 369 370 371 372 373 374 375
static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return (readl(tp->aperegs + off));
}

L
Linus Torvalds 已提交
376 377
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
378 379 380
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
381 382
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
383
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
384 385 386 387 388 389
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
L
Linus Torvalds 已提交
390 391
}

392
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
L
Linus Torvalds 已提交
393
{
394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
413
	if (off == TG3_RX_STD_PROD_IDX_REG) {
414 415 416
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
L
Linus Torvalds 已提交
417
	}
418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

446 447 448 449 450 451
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
452
{
453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
	    (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
L
Linus Torvalds 已提交
469 470
}

M
Michael Chan 已提交
471 472 473
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
474 475 476
	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
M
Michael Chan 已提交
477 478
}

479
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
L
Linus Torvalds 已提交
480 481 482 483 484 485 486 487 488
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);
}

M
Michael Chan 已提交
489 490 491 492 493 494 495 496 497 498
static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return (readl(tp->regs + off + GRCMBOX_BASE));
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

499
#define tw32_mailbox(reg, val)	tp->write32_mbox(tp, reg, val)
M
Michael Chan 已提交
500
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
501 502
#define tw32_rx_mbox(reg, val)	tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)	tp->write32_tx_mbox(tp, reg, val)
M
Michael Chan 已提交
503
#define tr32_mailbox(reg)	tp->read32_mbox(tp, reg)
504 505

#define tw32(reg,val)		tp->write32(tp, reg, val)
506 507
#define tw32_f(reg,val)		_tw32_flush(tp,(reg),(val), 0)
#define tw32_wait_f(reg,val,us)	_tw32_flush(tp,(reg),(val), (us))
508
#define tr32(reg)		tp->read32(tp, reg)
L
Linus Torvalds 已提交
509 510 511

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
512 513
	unsigned long flags;

M
Michael Chan 已提交
514 515 516 517
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

518
	spin_lock_irqsave(&tp->indirect_lock, flags);
519 520 521
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
L
Linus Torvalds 已提交
522

523 524 525 526 527
		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);
M
Michael Chan 已提交
528

529 530 531 532
		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
533 534
}

L
Linus Torvalds 已提交
535 536
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
537 538
	unsigned long flags;

M
Michael Chan 已提交
539 540 541 542 543 544
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

545
	spin_lock_irqsave(&tp->indirect_lock, flags);
546 547 548
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
L
Linus Torvalds 已提交
549

550 551 552 553 554 555 556 557 558
		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
559
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
L
Linus Torvalds 已提交
560 561
}

M
Matt Carlson 已提交
562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;

	/* Make sure the driver hasn't any stale locks. */
	for (i = 0; i < 8; i++)
		tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
				APE_LOCK_GRANT_DRIVER);
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return 0;

	switch (locknum) {
582
		case TG3_APE_LOCK_GRC:
M
Matt Carlson 已提交
583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619
		case TG3_APE_LOCK_MEM:
			break;
		default:
			return -EINVAL;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
		if (status == APE_LOCK_GRANT_DRIVER)
			break;
		udelay(10);
	}

	if (status != APE_LOCK_GRANT_DRIVER) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
				APE_LOCK_GRANT_DRIVER);

		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	int off;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return;

	switch (locknum) {
620
		case TG3_APE_LOCK_GRC:
M
Matt Carlson 已提交
621 622 623 624 625 626 627 628 629 630
		case TG3_APE_LOCK_MEM:
			break;
		default:
			return;
	}

	off = 4 * locknum;
	tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
}

L
Linus Torvalds 已提交
631 632
static void tg3_disable_ints(struct tg3 *tp)
{
633 634
	int i;

L
Linus Torvalds 已提交
635 636
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
637 638
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
L
Linus Torvalds 已提交
639 640 641 642
}

static void tg3_enable_ints(struct tg3 *tp)
{
643 644
	int i;

645 646 647
	tp->irq_sync = 0;
	wmb();

L
Linus Torvalds 已提交
648 649
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
650

M
Matt Carlson 已提交
651
	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
652 653
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
654
		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
655 656
		if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
M
Matt Carlson 已提交
657

M
Matt Carlson 已提交
658
		tp->coal_now |= tnapi->coal_now;
659
	}
M
Matt Carlson 已提交
660 661 662 663 664 665

	/* Force an initial interrupt */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
M
Matt Carlson 已提交
666 667 668
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
L
Linus Torvalds 已提交
669 670
}

671
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
M
Michael Chan 已提交
672
{
673
	struct tg3 *tp = tnapi->tp;
674
	struct tg3_hw_status *sblk = tnapi->hw_status;
M
Michael Chan 已提交
675 676 677 678 679 680 681 682 683 684
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
685
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
686
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
M
Michael Chan 已提交
687 688 689 690 691
		work_exists = 1;

	return work_exists;
}

692
/* tg3_int_reenable
M
Michael Chan 已提交
693 694
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
695
 *  which reenables interrupts
L
Linus Torvalds 已提交
696
 */
697
static void tg3_int_reenable(struct tg3_napi *tnapi)
L
Linus Torvalds 已提交
698
{
699 700
	struct tg3 *tp = tnapi->tp;

701
	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
L
Linus Torvalds 已提交
702 703
	mmiowb();

704 705 706 707 708
	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
709
	    tg3_has_work(tnapi))
M
Michael Chan 已提交
710
		tw32(HOSTCC_MODE, tp->coalesce_mode |
711
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
L
Linus Torvalds 已提交
712 713
}

714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729
static void tg3_napi_disable(struct tg3 *tp)
{
	int i;

	for (i = tp->irq_cnt - 1; i >= 0; i--)
		napi_disable(&tp->napi[i].napi);
}

static void tg3_napi_enable(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		napi_enable(&tp->napi[i].napi);
}

L
Linus Torvalds 已提交
730 731
static inline void tg3_netif_stop(struct tg3 *tp)
{
732
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
733
	tg3_napi_disable(tp);
L
Linus Torvalds 已提交
734 735 736 737 738
	netif_tx_disable(tp->dev);
}

static inline void tg3_netif_start(struct tg3 *tp)
{
M
Matt Carlson 已提交
739 740 741
	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
L
Linus Torvalds 已提交
742
	 */
M
Matt Carlson 已提交
743 744
	netif_tx_wake_all_queues(tp->dev);

745 746
	tg3_napi_enable(tp);
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
747
	tg3_enable_ints(tp);
L
Linus Torvalds 已提交
748 749 750 751
}

static void tg3_switch_clocks(struct tg3 *tp)
{
M
Matt Carlson 已提交
752
	u32 clock_ctrl;
L
Linus Torvalds 已提交
753 754
	u32 orig_clock_ctrl;

755 756
	if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
M
Michael Chan 已提交
757 758
		return;

M
Matt Carlson 已提交
759 760
	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

L
Linus Torvalds 已提交
761 762 763 764 765 766 767 768
	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
769 770
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
L
Linus Torvalds 已提交
771 772
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
773 774 775 776 777 778 779
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
L
Linus Torvalds 已提交
780
	}
781
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
L
Linus Torvalds 已提交
782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799
}

#define PHY_BUSY_LOOPS	5000

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

800
	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
L
Linus Torvalds 已提交
801 802 803 804
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);
805

L
Linus Torvalds 已提交
806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840
	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

841
	if ((tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) &&
M
Michael Chan 已提交
842 843 844
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

L
Linus Torvalds 已提交
845 846 847 848 849 850
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

851
	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
L
Linus Torvalds 已提交
852 853 854 855 856
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
857

L
Linus Torvalds 已提交
858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883
	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

M
Matt Carlson 已提交
884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
R
Roel Kluin 已提交
909
	if (limit < 0)
M
Matt Carlson 已提交
910 911 912 913 914
		return -EBUSY;

	return 0;
}

M
Matt Carlson 已提交
915 916
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
F
Francois Romieu 已提交
917
	struct tg3 *tp = bp->priv;
M
Matt Carlson 已提交
918 919
	u32 val;

M
Matt Carlson 已提交
920
	spin_lock_bh(&tp->lock);
M
Matt Carlson 已提交
921 922

	if (tg3_readphy(tp, reg, &val))
M
Matt Carlson 已提交
923 924 925
		val = -EIO;

	spin_unlock_bh(&tp->lock);
M
Matt Carlson 已提交
926 927 928 929 930 931

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
F
Francois Romieu 已提交
932
	struct tg3 *tp = bp->priv;
M
Matt Carlson 已提交
933
	u32 ret = 0;
M
Matt Carlson 已提交
934

M
Matt Carlson 已提交
935
	spin_lock_bh(&tp->lock);
M
Matt Carlson 已提交
936 937

	if (tg3_writephy(tp, reg, val))
M
Matt Carlson 已提交
938
		ret = -EIO;
M
Matt Carlson 已提交
939

M
Matt Carlson 已提交
940 941 942
	spin_unlock_bh(&tp->lock);

	return ret;
M
Matt Carlson 已提交
943 944 945 946 947 948 949
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}

M
Matt Carlson 已提交
950
static void tg3_mdio_config_5785(struct tg3 *tp)
M
Matt Carlson 已提交
951 952
{
	u32 val;
M
Matt Carlson 已提交
953
	struct phy_device *phydev;
M
Matt Carlson 已提交
954

955
	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
M
Matt Carlson 已提交
956
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
957 958
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
M
Matt Carlson 已提交
959 960
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
961
	case PHY_ID_BCMAC131:
M
Matt Carlson 已提交
962 963
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
964
	case PHY_ID_RTL8211C:
M
Matt Carlson 已提交
965 966
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
967
	case PHY_ID_RTL8201E:
M
Matt Carlson 已提交
968 969 970
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
M
Matt Carlson 已提交
971
		return;
M
Matt Carlson 已提交
972 973 974 975 976 977
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
M
Matt Carlson 已提交
978 979 980
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
M
Matt Carlson 已提交
981 982 983 984 985
		tw32(MAC_PHYCFG1, val);

		return;
	}

986
	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE))
M
Matt Carlson 已提交
987 988 989 990 991 992 993 994
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);
M
Matt Carlson 已提交
995

M
Matt Carlson 已提交
996 997 998
	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
999
	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) {
M
Matt Carlson 已提交
1000 1001 1002 1003 1004
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
M
Matt Carlson 已提交
1005 1006 1007
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);
M
Matt Carlson 已提交
1008 1009 1010 1011 1012 1013 1014 1015 1016

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
1017
	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) {
M
Matt Carlson 已提交
1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

M
Matt Carlson 已提交
1031 1032 1033 1034 1035
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);
M
Matt Carlson 已提交
1036

M
Matt Carlson 已提交
1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047
	if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

1048 1049 1050 1051 1052 1053 1054 1055 1056
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
		u32 funcnum, is_serdes;

		funcnum = tr32(TG3_CPMU_STATUS) & TG3_CPMU_STATUS_PCIE_FUNC;
		if (funcnum)
			tp->phy_addr = 2;
		else
			tp->phy_addr = 1;

1057 1058 1059 1060 1061
		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
1062 1063 1064
		if (is_serdes)
			tp->phy_addr += 7;
	} else
1065
		tp->phy_addr = TG3_PHY_MII_ADDR;
1066

M
Matt Carlson 已提交
1067 1068 1069 1070 1071 1072
	tg3_mdio_start(tp);

	if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
	    (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
		return 0;

1073 1074 1075
	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;
M
Matt Carlson 已提交
1076

1077 1078
	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
M
Matt Carlson 已提交
1079
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1080 1081 1082 1083 1084
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
1085
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
1086
	tp->mdio_bus->irq      = &tp->mdio_irq[0];
M
Matt Carlson 已提交
1087 1088

	for (i = 0; i < PHY_MAX_ADDR; i++)
1089
		tp->mdio_bus->irq[i] = PHY_POLL;
M
Matt Carlson 已提交
1090 1091 1092 1093 1094 1095 1096 1097 1098

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

1099
	i = mdiobus_register(tp->mdio_bus);
M
Matt Carlson 已提交
1100
	if (i) {
1101
		netdev_warn(tp->dev, "mdiobus_reg failed (0x%x)\n", i);
M
Matt Carlson 已提交
1102
		mdiobus_free(tp->mdio_bus);
M
Matt Carlson 已提交
1103 1104
		return i;
	}
M
Matt Carlson 已提交
1105

1106
	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
M
Matt Carlson 已提交
1107

M
Matt Carlson 已提交
1108
	if (!phydev || !phydev->drv) {
1109
		netdev_warn(tp->dev, "No PHY devices\n");
M
Matt Carlson 已提交
1110 1111 1112 1113 1114 1115
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1116
	case PHY_ID_BCM57780:
M
Matt Carlson 已提交
1117
		phydev->interface = PHY_INTERFACE_MODE_GMII;
1118
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
M
Matt Carlson 已提交
1119
		break;
1120 1121
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
1122
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1123
				     PHY_BRCM_RX_REFCLK_UNUSED |
1124
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
1125
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
1126
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)
M
Matt Carlson 已提交
1127 1128 1129 1130 1131
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
M
Matt Carlson 已提交
1132
		/* fallthru */
1133
	case PHY_ID_RTL8211C:
M
Matt Carlson 已提交
1134
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
M
Matt Carlson 已提交
1135
		break;
1136 1137
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
M
Matt Carlson 已提交
1138
		phydev->interface = PHY_INTERFACE_MODE_MII;
1139
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1140
		tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET;
M
Matt Carlson 已提交
1141 1142 1143
		break;
	}

M
Matt Carlson 已提交
1144 1145 1146 1147
	tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
M
Matt Carlson 已提交
1148 1149

	return 0;
M
Matt Carlson 已提交
1150 1151 1152 1153 1154 1155
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
		tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
1156 1157
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
M
Matt Carlson 已提交
1158 1159 1160
	}
}

M
Matt Carlson 已提交
1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174
/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

M
Matt Carlson 已提交
1175 1176 1177 1178
/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
M
Matt Carlson 已提交
1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;
M
Matt Carlson 已提交
1194

M
Matt Carlson 已提交
1195
	for (i = 0; i < delay_cnt; i++) {
M
Matt Carlson 已提交
1196 1197
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
M
Matt Carlson 已提交
1198
		udelay(8);
M
Matt Carlson 已提交
1199 1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246
	}
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
	    !(tp->tg3_flags  & TG3_FLAG_ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	val = 0;
	if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

M
Matt Carlson 已提交
1247
	tg3_generate_fw_event(tp);
M
Matt Carlson 已提交
1248 1249 1250 1251 1252
}

static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
1253
		netif_info(tp, link, tp->dev, "Link is down\n");
M
Matt Carlson 已提交
1254 1255
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266 1267 1268
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");
M
Matt Carlson 已提交
1269 1270 1271 1272 1273 1274 1275 1276
		tg3_ump_link_report(tp);
	}
}

static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
	u16 miireg;

1277
	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
M
Matt Carlson 已提交
1278
		miireg = ADVERTISE_PAUSE_CAP;
1279
	else if (flow_ctrl & FLOW_CTRL_TX)
M
Matt Carlson 已提交
1280
		miireg = ADVERTISE_PAUSE_ASYM;
1281
	else if (flow_ctrl & FLOW_CTRL_RX)
M
Matt Carlson 已提交
1282 1283 1284 1285 1286 1287 1288 1289 1290 1291 1292
		miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

1293
	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
M
Matt Carlson 已提交
1294
		miireg = ADVERTISE_1000XPAUSE;
1295
	else if (flow_ctrl & FLOW_CTRL_TX)
M
Matt Carlson 已提交
1296
		miireg = ADVERTISE_1000XPSE_ASYM;
1297
	else if (flow_ctrl & FLOW_CTRL_RX)
M
Matt Carlson 已提交
1298 1299 1300 1301 1302 1303 1304 1305 1306 1307 1308 1309 1310 1311
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & ADVERTISE_1000XPAUSE) {
		if (lcladv & ADVERTISE_1000XPSE_ASYM) {
			if (rmtadv & LPA_1000XPAUSE)
1312
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
M
Matt Carlson 已提交
1313
			else if (rmtadv & LPA_1000XPAUSE_ASYM)
1314
				cap = FLOW_CTRL_RX;
M
Matt Carlson 已提交
1315 1316
		} else {
			if (rmtadv & LPA_1000XPAUSE)
1317
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
M
Matt Carlson 已提交
1318 1319 1320
		}
	} else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
		if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1321
			cap = FLOW_CTRL_TX;
M
Matt Carlson 已提交
1322 1323 1324 1325 1326
	}

	return cap;
}

M
Matt Carlson 已提交
1327
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
M
Matt Carlson 已提交
1328
{
M
Matt Carlson 已提交
1329
	u8 autoneg;
M
Matt Carlson 已提交
1330
	u8 flowctrl = 0;
M
Matt Carlson 已提交
1331 1332 1333
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

M
Matt Carlson 已提交
1334
	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
1335
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
M
Matt Carlson 已提交
1336 1337 1338 1339
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE &&
M
Matt Carlson 已提交
1340 1341
	    (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
		if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
M
Matt Carlson 已提交
1342
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
M
Matt Carlson 已提交
1343
		else
1344
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
M
Matt Carlson 已提交
1345 1346
	} else
		flowctrl = tp->link_config.flowctrl;
M
Matt Carlson 已提交
1347

M
Matt Carlson 已提交
1348
	tp->link_config.active_flowctrl = flowctrl;
M
Matt Carlson 已提交
1349

1350
	if (flowctrl & FLOW_CTRL_RX)
M
Matt Carlson 已提交
1351 1352 1353 1354
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

M
Matt Carlson 已提交
1355
	if (old_rx_mode != tp->rx_mode)
M
Matt Carlson 已提交
1356 1357
		tw32_f(MAC_RX_MODE, tp->rx_mode);

1358
	if (flowctrl & FLOW_CTRL_TX)
M
Matt Carlson 已提交
1359 1360 1361 1362
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

M
Matt Carlson 已提交
1363
	if (old_tx_mode != tp->tx_mode)
M
Matt Carlson 已提交
1364 1365 1366
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}

M
Matt Carlson 已提交
1367 1368 1369 1370 1371
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
1372
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
M
Matt Carlson 已提交
1373

M
Matt Carlson 已提交
1374
	spin_lock_bh(&tp->lock);
M
Matt Carlson 已提交
1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
1387 1388
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
M
Matt Carlson 已提交
1389
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
1390 1391
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;
M
Matt Carlson 已提交
1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404 1405 1406 1407 1408 1409 1410 1411 1412 1413 1414

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

M
Matt Carlson 已提交
1415 1416 1417 1418 1419 1420 1421 1422 1423
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

M
Matt Carlson 已提交
1424 1425 1426 1427 1428 1429 1430 1431 1432 1433 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444
	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
	    linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

M
Matt Carlson 已提交
1445
	spin_unlock_bh(&tp->lock);
M
Matt Carlson 已提交
1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 1458 1459 1460

	if (linkmesg)
		tg3_link_report(tp);
}

static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

1461
	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
M
Matt Carlson 已提交
1462 1463

	/* Attach the MAC to the PHY. */
1464
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
M
Matt Carlson 已提交
1465
			     phydev->dev_flags, phydev->interface);
M
Matt Carlson 已提交
1466
	if (IS_ERR(phydev)) {
1467
		netdev_err(tp->dev, "Could not attach to PHY\n");
M
Matt Carlson 已提交
1468 1469 1470 1471
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
M
Matt Carlson 已提交
1472 1473 1474
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
M
Matt Carlson 已提交
1475 1476 1477 1478 1479 1480 1481
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthru */
M
Matt Carlson 已提交
1482 1483 1484 1485 1486 1487
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
1488
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
M
Matt Carlson 已提交
1489 1490 1491 1492
		return -EINVAL;
	}

	tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED;
M
Matt Carlson 已提交
1493 1494 1495 1496 1497 1498 1499 1500 1501 1502 1503 1504 1505

	phydev->advertising = phydev->supported;

	return 0;
}

static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
		return;

1506
	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
M
Matt Carlson 已提交
1507 1508 1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524 1525

	if (tp->link_config.phy_is_low_power) {
		tp->link_config.phy_is_low_power = 0;
		phydev->speed = tp->link_config.orig_speed;
		phydev->duplex = tp->link_config.orig_duplex;
		phydev->autoneg = tp->link_config.orig_autoneg;
		phydev->advertising = tp->link_config.orig_advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}

static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
		return;

1526
	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
M
Matt Carlson 已提交
1527 1528 1529 1530 1531
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
1532
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
M
Matt Carlson 已提交
1533 1534 1535 1536
		tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
	}
}

1537 1538 1539 1540 1541 1542
static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
}

1543 1544 1545 1546 1547 1548 1549 1550 1551 1552 1553 1554 1555 1556 1557 1558 1559 1560 1561 1562
static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}

1563 1564 1565 1566
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

1567 1568 1569
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
		(GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
	     (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
1570 1571
		return;

1572 1573 1574 1575 1576
	if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

1577 1578 1579 1580 1581 1582 1583 1584 1585 1586 1587 1588 1589 1590 1591 1592 1593 1594 1595 1596 1597
	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}

M
Matt Carlson 已提交
1598 1599 1600 1601 1602 1603 1604 1605
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
		return;

1606
	if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
M
Matt Carlson 已提交
1607 1608
		u32 ephy;

M
Matt Carlson 已提交
1609 1610 1611 1612 1613 1614
		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
M
Matt Carlson 已提交
1615
				if (enable)
M
Matt Carlson 已提交
1616
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
M
Matt Carlson 已提交
1617
				else
M
Matt Carlson 已提交
1618 1619
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
M
Matt Carlson 已提交
1620
			}
M
Matt Carlson 已提交
1621
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
M
Matt Carlson 已提交
1622 1623 1624 1625 1626 1627 1628 1629 1630 1631 1632 1633 1634 1635 1636 1637
		}
	} else {
		phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
		      MII_TG3_AUXCTL_SHDWSEL_MISC;
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			phy |= MII_TG3_AUXCTL_MISC_WREN;
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
		}
	}
}

L
Linus Torvalds 已提交
1638 1639 1640 1641 1642 1643 1644 1645 1646 1647 1648 1649 1650
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
		return;

	if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
	    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
		tg3_writephy(tp, MII_TG3_AUX_CTRL,
			     (val | (1 << 15) | (1 << 4)));
}

1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 1684 1685 1686 1687 1688 1689 1690 1691 1692 1693
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	/* Enable SM_DSP clock and tx 6dB coding. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	/* Turn off SM_DSP clock. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
}

L
Linus Torvalds 已提交
1694 1695 1696 1697 1698 1699 1700 1701 1702 1703 1704 1705
static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, 0x16, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
R
Roel Kluin 已提交
1706
	if (limit < 0)
L
Linus Torvalds 已提交
1707 1708 1709 1710 1711 1712 1713 1714 1715 1716 1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 1754 1755 1756 1757 1758 1759 1760 1761 1762 1763 1764 1765 1766 1767 1768 1769 1770 1771 1772 1773 1774 1775 1776 1777 1778 1779 1780 1781 1782 1783 1784 1785 1786 1787 1788 1789 1790 1791 1792 1793 1794 1795 1796 1797 1798 1799 1800 1801 1802 1803 1804 1805 1806 1807 1808 1809 1810 1811 1812 1813 1814 1815 1816 1817 1818 1819 1820 1821 1822 1823 1824 1825 1826 1827 1828 1829 1830 1831 1832 1833 1834 1835 1836 1837 1838 1839 1840 1841 1842 1843 1844 1845 1846 1847 1848 1849 1850 1851 1852 1853 1854 1855 1856 1857 1858 1859 1860 1861 1862 1863 1864 1865 1866 1867 1868 1869 1870 1871 1872 1873 1874 1875 1876 1877 1878
		return -EBUSY;

	return 0;
}

static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, 0x16, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}

static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}

static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode.  */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		/* Enable SM_DSP_CLOCK and 6dB.  */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

		/* Block the PHY control access.  */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, 0x16, 0x0000);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		/* Set Extended packet length bit for jumbo frames */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
	}
	else {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}

	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}

/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
1879
	u32 cpmuctrl;
L
Linus Torvalds 已提交
1880 1881 1882
	u32 phy_status;
	int err;

1883 1884 1885 1886 1887 1888 1889
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
L
Linus Torvalds 已提交
1890 1891 1892 1893 1894
	err  = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
	if (err != 0)
		return -EBUSY;

1895 1896 1897 1898 1899
	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

L
Linus Torvalds 已提交
1900 1901 1902 1903 1904 1905 1906 1907 1908
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

1909 1910 1911 1912 1913 1914 1915 1916 1917
	cpmuctrl = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

L
Linus Torvalds 已提交
1918 1919 1920 1921
	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

1922 1923 1924 1925 1926 1927 1928 1929 1930
	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		u32 phy;

		phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

1931 1932
	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
1933 1934 1935 1936 1937 1938 1939 1940 1941 1942 1943
		u32 val;

		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

1944 1945 1946 1947
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
	    (tp->tg3_flags2 & TG3_FLG2_MII_SERDES))
		return 0;

1948 1949
	tg3_phy_apply_otp(tp);

1950 1951 1952 1953 1954
	if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

L
Linus Torvalds 已提交
1955 1956 1957 1958 1959 1960 1961 1962 1963 1964 1965 1966 1967 1968 1969 1970 1971 1972 1973 1974 1975 1976 1977
out:
	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
M
Michael Chan 已提交
1978 1979 1980
	else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1981 1982 1983 1984 1985 1986
		if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
			tg3_writephy(tp, MII_TG3_TEST1,
				     MII_TG3_TEST1_TRIM_EN | 0x4);
		} else
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
M
Michael Chan 已提交
1987 1988
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
L
Linus Torvalds 已提交
1989 1990
	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
1991
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
L
Linus Torvalds 已提交
1992 1993
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1994
	} else if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
L
Linus Torvalds 已提交
1995 1996 1997 1998 1999 2000 2001 2002 2003 2004 2005
		u32 phy_reg;

		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
2006
	if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
L
Linus Torvalds 已提交
2007 2008 2009 2010 2011 2012 2013
		u32 phy_reg;

		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
		    tg3_writephy(tp, MII_TG3_EXT_CTRL,
				 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

M
Michael Chan 已提交
2014 2015
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
M
Matt Carlson 已提交
2016
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
M
Michael Chan 已提交
2017 2018
	}

M
Matt Carlson 已提交
2019
	tg3_phy_toggle_automdix(tp, 1);
L
Linus Torvalds 已提交
2020 2021 2022 2023 2024 2025 2026 2027
	tg3_phy_set_wirespeed(tp);
	return 0;
}

static void tg3_frob_aux_power(struct tg3 *tp)
{
	struct tg3 *tp_peer = tp;

2028 2029 2030
	/* The GPIOs do something completely different on 57765. */
	if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
L
Linus Torvalds 已提交
2031 2032
		return;

M
Matt Carlson 已提交
2033 2034 2035
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
M
Michael Chan 已提交
2036 2037 2038
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);
M
Michael Chan 已提交
2039
		/* remove_one() may have been run on the peer. */
M
Michael Chan 已提交
2040
		if (!dev_peer)
M
Michael Chan 已提交
2041 2042 2043
			tp_peer = tp;
		else
			tp_peer = netdev_priv(dev_peer);
L
Linus Torvalds 已提交
2044 2045 2046
	}

	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
M
Michael Chan 已提交
2047 2048 2049
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
L
Linus Torvalds 已提交
2050 2051
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2052 2053 2054 2055 2056 2057 2058
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1),
				    100);
2059 2060
		} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
			   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
M
Matt Carlson 已提交
2061 2062 2063 2064 2065 2066 2067 2068 2069 2070 2071 2072 2073 2074
			/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
			u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
					     GRC_LCLCTRL_GPIO_OE1 |
					     GRC_LCLCTRL_GPIO_OE2 |
					     GRC_LCLCTRL_GPIO_OUTPUT0 |
					     GRC_LCLCTRL_GPIO_OUTPUT1 |
					     tp->grc_local_ctrl;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
L
Linus Torvalds 已提交
2075 2076
		} else {
			u32 no_gpio2;
M
Michael Chan 已提交
2077
			u32 grc_local_ctrl = 0;
L
Linus Torvalds 已提交
2078 2079 2080 2081 2082

			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

M
Michael Chan 已提交
2083 2084 2085 2086
			/* Workaround to prevent overdrawing Amps. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2087 2088
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
M
Michael Chan 已提交
2089 2090
			}

L
Linus Torvalds 已提交
2091 2092 2093 2094
			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				    NIC_SRAM_DATA_CFG_NO_GPIO2;

M
Michael Chan 已提交
2095
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
L
Linus Torvalds 已提交
2096 2097 2098 2099 2100 2101 2102 2103
					 GRC_LCLCTRL_GPIO_OE1 |
					 GRC_LCLCTRL_GPIO_OE2 |
					 GRC_LCLCTRL_GPIO_OUTPUT1 |
					 GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
2104 2105
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);
L
Linus Torvalds 已提交
2106 2107 2108

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

2109 2110
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);
L
Linus Torvalds 已提交
2111 2112 2113

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2114 2115
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
L
Linus Torvalds 已提交
2116 2117 2118 2119 2120 2121 2122 2123 2124
			}
		}
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

2125 2126 2127
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
L
Linus Torvalds 已提交
2128

2129 2130
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    GRC_LCLCTRL_GPIO_OE1, 100);
L
Linus Torvalds 已提交
2131

2132 2133 2134
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
L
Linus Torvalds 已提交
2135 2136 2137 2138
		}
	}
}

M
Matt Carlson 已提交
2139 2140 2141 2142
static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
{
	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
		return 1;
2143
	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
M
Matt Carlson 已提交
2144 2145 2146 2147 2148 2149 2150 2151
		if (speed != SPEED_10)
			return 1;
	} else if (speed == SPEED_10)
		return 1;

	return 0;
}

L
Linus Torvalds 已提交
2152 2153 2154 2155 2156 2157 2158 2159 2160
static int tg3_setup_phy(struct tg3 *, int);

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

static void tg3_write_sig_post_reset(struct tg3 *, int);
static int tg3_halt_cpu(struct tg3 *, u32);

2161
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2162
{
2163 2164
	u32 val;

2165 2166 2167 2168 2169 2170 2171 2172 2173 2174
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
M
Michael Chan 已提交
2175
		return;
2176
	}
M
Michael Chan 已提交
2177

2178 2179 2180 2181 2182 2183
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
M
Matt Carlson 已提交
2184 2185 2186 2187 2188 2189 2190 2191 2192 2193 2194 2195 2196 2197 2198 2199 2200 2201 2202 2203
	} else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
		u32 phytest;
		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
			u32 phy;

			tg3_writephy(tp, MII_ADVERTISE, 0);
			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     phytest | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
		}
		return;
2204
	} else if (do_low_power) {
M
Michael Chan 已提交
2205 2206
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2207 2208 2209 2210 2211 2212

		tg3_writephy(tp, MII_TG3_AUX_CTRL,
			     MII_TG3_AUXCTL_SHDWSEL_PWRCTL |
			     MII_TG3_AUXCTL_PCTL_100TX_LPWR |
			     MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
			     MII_TG3_AUXCTL_PCTL_VREG_11V);
M
Michael Chan 已提交
2213
	}
M
Michael Chan 已提交
2214

2215 2216 2217 2218 2219 2220 2221 2222
	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
		return;
2223

2224 2225
	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2226 2227 2228 2229 2230 2231
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

2232 2233 2234
	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}

2235 2236 2237 2238 2239 2240 2241 2242 2243 2244 2245 2246 2247 2248 2249 2250 2251 2252 2253 2254 2255 2256 2257 2258 2259 2260 2261 2262 2263 2264 2265 2266 2267 2268 2269 2270 2271 2272
/* tp->lock is held. */
static int tg3_nvram_lock(struct tg3 *tp)
{
	if (tp->tg3_flags & TG3_FLAG_NVRAM) {
		int i;

		if (tp->nvram_lock_cnt == 0) {
			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
			for (i = 0; i < 8000; i++) {
				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
					break;
				udelay(20);
			}
			if (i == 8000) {
				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
				return -ENODEV;
			}
		}
		tp->nvram_lock_cnt++;
	}
	return 0;
}

/* tp->lock is held. */
static void tg3_nvram_unlock(struct tg3 *tp)
{
	if (tp->tg3_flags & TG3_FLAG_NVRAM) {
		if (tp->nvram_lock_cnt > 0)
			tp->nvram_lock_cnt--;
		if (tp->nvram_lock_cnt == 0)
			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
	}
}

/* tp->lock is held. */
static void tg3_enable_nvram_access(struct tg3 *tp)
{
	if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2273
	    !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
2274 2275 2276 2277 2278 2279 2280 2281 2282 2283
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
	}
}

/* tp->lock is held. */
static void tg3_disable_nvram_access(struct tg3 *tp)
{
	if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2284
	    !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
2285 2286 2287 2288 2289 2290 2291 2292 2293 2294 2295 2296 2297 2298 2299 2300 2301 2302 2303 2304 2305 2306 2307 2308 2309 2310 2311 2312 2313 2314 2315 2316 2317 2318 2319
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
	}
}

static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
					u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

M
Matt Carlson 已提交
2320 2321 2322 2323 2324 2325 2326 2327
	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);

2328 2329 2330 2331 2332 2333 2334 2335 2336 2337 2338 2339 2340 2341 2342 2343 2344 2345 2346 2347 2348 2349 2350 2351 2352 2353 2354 2355 2356 2357 2358 2359 2360 2361 2362 2363 2364 2365 2366 2367 2368 2369 2370 2371 2372 2373 2374 2375 2376 2377 2378 2379 2380 2381
	return 0;
}

#define NVRAM_CMD_TIMEOUT 10000

static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
	int i;

	tw32(NVRAM_CMD, nvram_cmd);
	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
		udelay(10);
		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
			udelay(10);
			break;
		}
	}

	if (i == NVRAM_CMD_TIMEOUT)
		return -EBUSY;

	return 0;
}

static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
{
	if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
	    (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
	    (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
	   !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr / tp->nvram_pagesize) <<
			ATMEL_AT45DB0X1B_PAGE_POS) +
		       (addr % tp->nvram_pagesize);

	return addr;
}

static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
{
	if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
	    (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
	    (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
	   !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
			tp->nvram_pagesize) +
		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));

	return addr;
}

2382 2383 2384 2385 2386 2387
/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 */
2388 2389 2390 2391 2392 2393 2394 2395 2396 2397 2398 2399 2400 2401 2402 2403 2404 2405 2406 2407 2408 2409 2410
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
2411
		*val = tr32(NVRAM_RDDATA);
2412 2413 2414 2415 2416 2417 2418 2419

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}

2420 2421
/* Ensures NVRAM data is in bytestream format. */
static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2422 2423
{
	u32 v;
2424
	int res = tg3_nvram_read(tp, offset, &v);
2425
	if (!res)
2426
		*val = cpu_to_be32(v);
2427 2428 2429
	return res;
}

2430 2431 2432 2433 2434 2435 2436 2437 2438 2439 2440 2441 2442 2443 2444 2445 2446 2447 2448 2449 2450 2451 2452 2453 2454 2455 2456 2457 2458 2459 2460 2461 2462 2463 2464 2465 2466
/* tp->lock is held. */
static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
{
	u32 addr_high, addr_low;
	int i;

	addr_high = ((tp->dev->dev_addr[0] << 8) |
		     tp->dev->dev_addr[1]);
	addr_low = ((tp->dev->dev_addr[2] << 24) |
		    (tp->dev->dev_addr[3] << 16) |
		    (tp->dev->dev_addr[4] <<  8) |
		    (tp->dev->dev_addr[5] <<  0));
	for (i = 0; i < 4; i++) {
		if (i == 1 && skip_mac_1)
			continue;
		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		for (i = 0; i < 12; i++) {
			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
		}
	}

	addr_high = (tp->dev->dev_addr[0] +
		     tp->dev->dev_addr[1] +
		     tp->dev->dev_addr[2] +
		     tp->dev->dev_addr[3] +
		     tp->dev->dev_addr[4] +
		     tp->dev->dev_addr[5]) &
		TX_BACKOFF_SEED_MASK;
	tw32(MAC_TX_BACKOFF_SEED, addr_high);
}

M
Michael Chan 已提交
2467
static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
L
Linus Torvalds 已提交
2468 2469
{
	u32 misc_host_ctrl;
2470
	bool device_should_wake, do_low_power;
L
Linus Torvalds 已提交
2471 2472 2473 2474 2475 2476 2477 2478 2479

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	switch (state) {
M
Michael Chan 已提交
2480
	case PCI_D0:
2481 2482
		pci_enable_wake(tp->pdev, state, false);
		pci_set_power_state(tp->pdev, PCI_D0);
M
Michael Chan 已提交
2483

M
Michael Chan 已提交
2484 2485
		/* Switch out of Vaux if it is a NIC */
		if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
2486
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
L
Linus Torvalds 已提交
2487 2488 2489

		return 0;

M
Michael Chan 已提交
2490 2491 2492
	case PCI_D1:
	case PCI_D2:
	case PCI_D3hot:
L
Linus Torvalds 已提交
2493 2494 2495
		break;

	default:
2496 2497
		netdev_err(tp->dev, "Invalid power state (D%d) requested\n",
			   state);
L
Linus Torvalds 已提交
2498
		return -EINVAL;
2499
	}
2500 2501 2502 2503 2504 2505 2506 2507 2508 2509 2510 2511 2512 2513

	/* Restore the CLKREQ setting. */
	if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
		u16 lnkctl;

		pci_read_config_word(tp->pdev,
				     tp->pcie_cap + PCI_EXP_LNKCTL,
				     &lnkctl);
		lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
		pci_write_config_word(tp->pdev,
				      tp->pcie_cap + PCI_EXP_LNKCTL,
				      lnkctl);
	}

L
Linus Torvalds 已提交
2514 2515 2516 2517
	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

2518 2519 2520 2521
	device_should_wake = pci_pme_capable(tp->pdev, state) &&
			     device_may_wakeup(&tp->pdev->dev) &&
			     (tp->tg3_flags & TG3_FLAG_WOL_ENABLE);

M
Matt Carlson 已提交
2522
	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
2523
		do_low_power = false;
M
Matt Carlson 已提交
2524 2525 2526
		if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) &&
		    !tp->link_config.phy_is_low_power) {
			struct phy_device *phydev;
2527
			u32 phyid, advertising;
M
Matt Carlson 已提交
2528

2529
			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
M
Matt Carlson 已提交
2530 2531 2532 2533 2534 2535 2536 2537 2538 2539 2540 2541 2542 2543

			tp->link_config.phy_is_low_power = 1;

			tp->link_config.orig_speed = phydev->speed;
			tp->link_config.orig_duplex = phydev->duplex;
			tp->link_config.orig_autoneg = phydev->autoneg;
			tp->link_config.orig_advertising = phydev->advertising;

			advertising = ADVERTISED_TP |
				      ADVERTISED_Pause |
				      ADVERTISED_Autoneg |
				      ADVERTISED_10baseT_Half;

			if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2544
			    device_should_wake) {
M
Matt Carlson 已提交
2545 2546 2547 2548 2549 2550 2551 2552 2553 2554 2555 2556
				if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
					advertising |=
						ADVERTISED_100baseT_Half |
						ADVERTISED_100baseT_Full |
						ADVERTISED_10baseT_Full;
				else
					advertising |= ADVERTISED_10baseT_Full;
			}

			phydev->advertising = advertising;

			phy_start_aneg(phydev);
2557 2558

			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2559 2560 2561 2562 2563
			if (phyid != PHY_ID_BCMAC131) {
				phyid &= PHY_BCM_OUI_MASK;
				if (phyid == PHY_BCM_OUI_1 ||
				    phyid == PHY_BCM_OUI_2 ||
				    phyid == PHY_BCM_OUI_3)
2564 2565
					do_low_power = true;
			}
M
Matt Carlson 已提交
2566
		}
M
Matt Carlson 已提交
2567
	} else {
M
Matt Carlson 已提交
2568
		do_low_power = true;
2569

M
Matt Carlson 已提交
2570 2571 2572 2573 2574 2575
		if (tp->link_config.phy_is_low_power == 0) {
			tp->link_config.phy_is_low_power = 1;
			tp->link_config.orig_speed = tp->link_config.speed;
			tp->link_config.orig_duplex = tp->link_config.duplex;
			tp->link_config.orig_autoneg = tp->link_config.autoneg;
		}
L
Linus Torvalds 已提交
2576

M
Matt Carlson 已提交
2577 2578 2579 2580 2581 2582
		if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
			tp->link_config.speed = SPEED_10;
			tp->link_config.duplex = DUPLEX_HALF;
			tp->link_config.autoneg = AUTONEG_ENABLE;
			tg3_setup_phy(tp, 0);
		}
L
Linus Torvalds 已提交
2583 2584
	}

M
Michael Chan 已提交
2585 2586 2587 2588 2589 2590
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
M
Michael Chan 已提交
2591 2592 2593 2594 2595 2596 2597 2598 2599 2600
		int i;
		u32 val;

		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
G
Gary Zambrano 已提交
2601 2602 2603 2604 2605
	if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);
M
Michael Chan 已提交
2606

2607
	if (device_should_wake) {
L
Linus Torvalds 已提交
2608 2609 2610
		u32 mac_mode;

		if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
2611
			if (do_low_power) {
M
Matt Carlson 已提交
2612 2613 2614
				tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
				udelay(40);
			}
L
Linus Torvalds 已提交
2615

M
Michael Chan 已提交
2616 2617 2618 2619
			if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else
				mac_mode = MAC_MODE_PORT_MODE_MII;
L
Linus Torvalds 已提交
2620

M
Matt Carlson 已提交
2621 2622 2623 2624 2625 2626 2627 2628 2629 2630 2631
			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700) {
				u32 speed = (tp->tg3_flags &
					     TG3_FLAG_WOL_SPEED_100MB) ?
					     SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
L
Linus Torvalds 已提交
2632 2633 2634 2635
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

2636
		if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
L
Linus Torvalds 已提交
2637 2638
			tw32(MAC_LED_CTRL, tp->led_ctrl);

2639 2640 2641 2642 2643 2644
		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
		if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
		    !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) &&
		    ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
		     (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
L
Linus Torvalds 已提交
2645

2646 2647 2648 2649 2650 2651 2652
		if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
			mac_mode |= tp->mac_mode &
				    (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
			if (mac_mode & MAC_MODE_APE_TX_EN)
				mac_mode |= MAC_MODE_TDE_ENABLE;
		}

L
Linus Torvalds 已提交
2653 2654 2655 2656 2657 2658 2659 2660 2661 2662 2663 2664 2665 2666 2667 2668
		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

2669 2670
		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
2671
	} else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
2672
		   (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
2673
		   (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
M
Michael Chan 已提交
2674
		/* do nothing */
M
Michael Chan 已提交
2675
	} else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
L
Linus Torvalds 已提交
2676 2677 2678 2679 2680 2681 2682 2683 2684 2685 2686 2687 2688 2689 2690 2691 2692
		     (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

2693 2694
		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);
L
Linus Torvalds 已提交
2695

2696 2697
		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);
L
Linus Torvalds 已提交
2698 2699 2700 2701 2702 2703 2704 2705 2706 2707 2708 2709 2710

		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

2711 2712
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
L
Linus Torvalds 已提交
2713 2714 2715
		}
	}

2716
	if (!(device_should_wake) &&
M
Matt Carlson 已提交
2717
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
2718
		tg3_power_down_phy(tp, do_low_power);
M
Michael Chan 已提交
2719

L
Linus Torvalds 已提交
2720 2721 2722 2723 2724 2725 2726 2727 2728
	tg3_frob_aux_power(tp);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
M
Michael Chan 已提交
2729
		if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
M
Michael Chan 已提交
2730 2731 2732
			int err;

			err = tg3_nvram_lock(tp);
L
Linus Torvalds 已提交
2733
			tg3_halt_cpu(tp, RX_CPU_BASE);
M
Michael Chan 已提交
2734 2735
			if (!err)
				tg3_nvram_unlock(tp);
M
Michael Chan 已提交
2736
		}
L
Linus Torvalds 已提交
2737 2738
	}

2739 2740
	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

2741
	if (device_should_wake)
2742 2743
		pci_enable_wake(tp->pdev, state, true);

L
Linus Torvalds 已提交
2744
	/* Finally, set the new power state. */
2745
	pci_set_power_state(tp->pdev, state);
L
Linus Torvalds 已提交
2746 2747 2748 2749 2750 2751 2752 2753 2754 2755 2756 2757 2758 2759 2760 2761 2762 2763 2764 2765 2766 2767 2768 2769 2770 2771 2772 2773 2774 2775 2776 2777 2778 2779 2780 2781 2782 2783

	return 0;
}

static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
{
	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
	case MII_TG3_AUX_STAT_10HALF:
		*speed = SPEED_10;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_10FULL:
		*speed = SPEED_10;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_100HALF:
		*speed = SPEED_100;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_100FULL:
		*speed = SPEED_100;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_1000HALF:
		*speed = SPEED_1000;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_1000FULL:
		*speed = SPEED_1000;
		*duplex = DUPLEX_FULL;
		break;

	default:
2784
		if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
M
Michael Chan 已提交
2785 2786 2787 2788 2789 2790
			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
				 SPEED_10;
			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
				  DUPLEX_HALF;
			break;
		}
L
Linus Torvalds 已提交
2791 2792 2793
		*speed = SPEED_INVALID;
		*duplex = DUPLEX_INVALID;
		break;
2794
	}
L
Linus Torvalds 已提交
2795 2796 2797 2798 2799 2800 2801 2802 2803 2804 2805 2806 2807 2808 2809 2810 2811 2812 2813 2814 2815 2816 2817 2818 2819
}

static void tg3_phy_copper_begin(struct tg3 *tp)
{
	u32 new_adv;
	int i;

	if (tp->link_config.phy_is_low_power) {
		/* Entering low power mode.  Disable gigabit and
		 * 100baseT advertisements.
		 */
		tg3_writephy(tp, MII_TG3_CTRL, 0);

		new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
			new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);
	} else if (tp->link_config.speed == SPEED_INVALID) {
		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			tp->link_config.advertising &=
				~(ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full);

2820
		new_adv = ADVERTISE_CSMA;
L
Linus Torvalds 已提交
2821 2822 2823 2824 2825 2826 2827 2828
		if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
			new_adv |= ADVERTISE_10HALF;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
			new_adv |= ADVERTISE_10FULL;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
			new_adv |= ADVERTISE_100HALF;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
			new_adv |= ADVERTISE_100FULL;
2829 2830 2831

		new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);

L
Linus Torvalds 已提交
2832 2833 2834 2835 2836 2837 2838 2839 2840 2841 2842 2843 2844 2845 2846 2847 2848 2849 2850
		tg3_writephy(tp, MII_ADVERTISE, new_adv);

		if (tp->link_config.advertising &
		    (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
			new_adv = 0;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
				new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
				new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
			    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			tg3_writephy(tp, MII_TG3_CTRL, 0);
		}
	} else {
2851 2852 2853
		new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
		new_adv |= ADVERTISE_CSMA;

L
Linus Torvalds 已提交
2854 2855 2856 2857 2858 2859 2860 2861 2862 2863 2864 2865 2866 2867 2868 2869 2870 2871 2872 2873 2874 2875 2876 2877 2878
		/* Asking for a specific link mode. */
		if (tp->link_config.speed == SPEED_1000) {
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = MII_TG3_CTRL_ADV_1000_FULL;
			else
				new_adv = MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
		} else {
			if (tp->link_config.speed == SPEED_100) {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_100FULL;
				else
					new_adv |= ADVERTISE_100HALF;
			} else {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_10FULL;
				else
					new_adv |= ADVERTISE_10HALF;
			}
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
2879 2880

			new_adv = 0;
L
Linus Torvalds 已提交
2881
		}
2882 2883

		tg3_writephy(tp, MII_TG3_CTRL, new_adv);
L
Linus Torvalds 已提交
2884 2885 2886 2887 2888 2889 2890 2891 2892 2893 2894 2895 2896 2897 2898 2899 2900 2901 2902 2903 2904 2905
	}

	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
	    tp->link_config.speed != SPEED_INVALID) {
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= TG3_BMCR_SPEED1000;
			break;
2906
		}
L
Linus Torvalds 已提交
2907 2908 2909 2910 2911 2912 2913 2914 2915 2916 2917 2918 2919 2920 2921 2922 2923 2924 2925 2926 2927 2928 2929 2930 2931 2932 2933 2934 2935 2936 2937 2938 2939 2940 2941 2942 2943 2944 2945 2946 2947 2948 2949 2950 2951 2952 2953 2954 2955 2956 2957 2958 2959 2960 2961 2962

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	} else {
		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	}
}

static int tg3_init_5401phy_dsp(struct tg3 *tp)
{
	int err;

	/* Turn off tap power management. */
	/* Set Extended packet length bit */
	err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);

	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);

	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);

	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);

	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);

	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);

	udelay(40);

	return err;
}

2963
static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
L
Linus Torvalds 已提交
2964
{
2965 2966 2967 2968 2969 2970 2971 2972 2973 2974
	u32 adv_reg, all_mask = 0;

	if (mask & ADVERTISED_10baseT_Half)
		all_mask |= ADVERTISE_10HALF;
	if (mask & ADVERTISED_10baseT_Full)
		all_mask |= ADVERTISE_10FULL;
	if (mask & ADVERTISED_100baseT_Half)
		all_mask |= ADVERTISE_100HALF;
	if (mask & ADVERTISED_100baseT_Full)
		all_mask |= ADVERTISE_100FULL;
L
Linus Torvalds 已提交
2975 2976 2977 2978 2979 2980 2981 2982 2983

	if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
		return 0;

	if ((adv_reg & all_mask) != all_mask)
		return 0;
	if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
		u32 tg3_ctrl;

2984 2985 2986 2987 2988 2989
		all_mask = 0;
		if (mask & ADVERTISED_1000baseT_Half)
			all_mask |= ADVERTISE_1000HALF;
		if (mask & ADVERTISED_1000baseT_Full)
			all_mask |= ADVERTISE_1000FULL;

L
Linus Torvalds 已提交
2990 2991 2992 2993 2994 2995 2996 2997 2998
		if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
			return 0;

		if ((tg3_ctrl & all_mask) != all_mask)
			return 0;
	}
	return 1;
}

2999 3000 3001 3002 3003 3004 3005 3006 3007 3008 3009 3010 3011 3012 3013 3014 3015 3016 3017 3018 3019 3020 3021 3022 3023 3024 3025 3026 3027 3028 3029 3030 3031
static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
{
	u32 curadv, reqadv;

	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
		return 1;

	curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
	reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);

	if (tp->link_config.active_duplex == DUPLEX_FULL) {
		if (curadv != reqadv)
			return 0;

		if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
			tg3_readphy(tp, MII_LPA, rmtadv);
	} else {
		/* Reprogram the advertisement register, even if it
		 * does not affect the current link.  If the link
		 * gets renegotiated in the future, we can save an
		 * additional renegotiation cycle by advertising
		 * it correctly in the first place.
		 */
		if (curadv != reqadv) {
			*lcladv &= ~(ADVERTISE_PAUSE_CAP |
				     ADVERTISE_PAUSE_ASYM);
			tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
		}
	}

	return 1;
}

L
Linus Torvalds 已提交
3032 3033 3034 3035
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, dummy;
3036
	u32 lcl_adv, rmt_adv;
L
Linus Torvalds 已提交
3037 3038 3039 3040 3041 3042 3043 3044 3045 3046 3047 3048 3049
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

3050 3051 3052 3053 3054
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}
L
Linus Torvalds 已提交
3055 3056 3057 3058 3059 3060 3061 3062 3063 3064 3065 3066 3067 3068 3069 3070 3071 3072

	tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

3073
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
L
Linus Torvalds 已提交
3074 3075 3076 3077 3078 3079 3080 3081 3082 3083 3084 3085 3086 3087 3088 3089 3090 3091 3092 3093
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

3094 3095
			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
			    TG3_PHY_REV_BCM5401_B0 &&
L
Linus Torvalds 已提交
3096 3097 3098 3099 3100 3101 3102 3103 3104 3105 3106 3107 3108 3109 3110 3111 3112 3113 3114 3115 3116 3117 3118 3119
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, 0x1c, 0x8c68);
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);

	if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3120
	else if (!(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET))
L
Linus Torvalds 已提交
3121 3122 3123 3124 3125 3126 3127 3128 3129 3130 3131 3132 3133 3134 3135 3136 3137 3138 3139 3140 3141 3142 3143 3144 3145 3146 3147 3148 3149 3150 3151 3152 3153 3154 3155 3156 3157 3158 3159 3160 3161 3162 3163 3164 3165 3166 3167 3168 3169 3170 3171 3172 3173 3174 3175 3176 3177 3178 3179 3180 3181
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
		u32 val;

		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
		tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
		if (!(val & (1 << 10))) {
			val |= (1 << 10);
			tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
			goto relink;
		}
	}

	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

3182 3183
		lcl_adv = 0;
		rmt_adv = 0;
L
Linus Torvalds 已提交
3184

3185 3186 3187 3188 3189 3190 3191 3192 3193 3194
		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			if ((bmcr & BMCR_ANENABLE) &&
			    tg3_copper_is_advertising_all(tp,
						tp->link_config.advertising)) {
				if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
								  &rmt_adv))
					current_link_up = 1;
L
Linus Torvalds 已提交
3195 3196 3197 3198
			}
		} else {
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
3199 3200 3201
			    tp->link_config.duplex == current_duplex &&
			    tp->link_config.flowctrl ==
			    tp->link_config.active_flowctrl) {
L
Linus Torvalds 已提交
3202 3203 3204 3205
				current_link_up = 1;
			}
		}

3206 3207 3208
		if (current_link_up == 1 &&
		    tp->link_config.active_duplex == DUPLEX_FULL)
			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
L
Linus Torvalds 已提交
3209 3210 3211
	}

relink:
M
Michael Chan 已提交
3212
	if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
L
Linus Torvalds 已提交
3213 3214 3215 3216 3217 3218 3219 3220 3221 3222 3223 3224 3225 3226 3227 3228 3229
		u32 tmp;

		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &tmp);
		if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
		    (tmp & BMSR_LSTATUS))
			current_link_up = 1;
	}

	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3230 3231 3232
	} else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)
		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
	else
L
Linus Torvalds 已提交
3233 3234 3235 3236 3237 3238 3239
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
M
Matt Carlson 已提交
3240 3241
		if (current_link_up == 1 &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
L
Linus Torvalds 已提交
3242
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
M
Matt Carlson 已提交
3243 3244
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
L
Linus Torvalds 已提交
3245 3246 3247 3248 3249
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
3250
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
L
Linus Torvalds 已提交
3251 3252 3253 3254 3255 3256 3257 3258 3259 3260 3261 3262 3263 3264 3265 3266 3267 3268 3269 3270 3271 3272 3273 3274 3275 3276 3277 3278 3279 3280 3281 3282
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
	     (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

3283 3284 3285 3286 3287 3288 3289 3290 3291 3292 3293 3294 3295 3296 3297 3298 3299 3300
	/* Prevent send BD corruption. */
	if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
		u16 oldlnkctl, newlnkctl;

		pci_read_config_word(tp->pdev,
				     tp->pcie_cap + PCI_EXP_LNKCTL,
				     &oldlnkctl);
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
		else
			newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
		if (newlnkctl != oldlnkctl)
			pci_write_config_word(tp->pdev,
					      tp->pcie_cap + PCI_EXP_LNKCTL,
					      newlnkctl);
	}

L
Linus Torvalds 已提交
3301 3302 3303 3304 3305 3306 3307 3308 3309 3310 3311 3312 3313 3314 3315 3316 3317 3318 3319 3320 3321 3322 3323 3324 3325 3326 3327 3328 3329 3330 3331 3332 3333 3334 3335 3336 3337 3338 3339 3340 3341 3342 3343 3344 3345 3346 3347 3348 3349 3350 3351 3352 3353 3354 3355 3356 3357 3358 3359 3360 3361 3362 3363 3364 3365 3366 3367 3368 3369 3370 3371 3372 3373 3374 3375 3376 3377 3378
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}

struct tg3_fiber_aneginfo {
	int state;
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	u32 flags;
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	unsigned long link_time, cur_time;

	u32 ability_match_cfg;
	int ability_match_count;

	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;
#define ANEG_CFG_NP		0x00000080
#define ANEG_CFG_ACK		0x00000040
#define ANEG_CFG_RF2		0x00000020
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000
#define ANEG_CFG_FD		0x00002000
#define ANEG_CFG_INVAL		0x00001f06

};
#define ANEG_OK		0
#define ANEG_DONE	1
#define ANEG_TIMER_ENAB	2
#define ANEG_FAILED	-1

#define ANEG_STATE_SETTLE_TIME	10000

static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
3379
	u16 flowctrl;
L
Linus Torvalds 已提交
3380 3381 3382 3383 3384 3385 3386 3387 3388 3389 3390 3391 3392 3393 3394 3395 3396 3397 3398 3399 3400 3401 3402 3403 3404 3405 3406 3407 3408 3409 3410 3411 3412 3413 3414 3415 3416 3417 3418 3419 3420 3421 3422 3423 3424 3425 3426 3427 3428 3429 3430 3431 3432 3433 3434 3435 3436 3437 3438 3439 3440 3441 3442 3443 3444 3445 3446 3447 3448 3449 3450 3451 3452 3453 3454 3455 3456 3457 3458 3459 3460 3461 3462 3463 3464 3465 3466 3467 3468 3469 3470 3471 3472 3473 3474 3475 3476 3477 3478
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch(ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		} else {
			ret = ANEG_TIMER_ENAB;
		}
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		ap->flags &= ~(MR_TOGGLE_TX);
3479 3480 3481 3482 3483 3484
		ap->txconfig = ANEG_CFG_FD;
		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		if (flowctrl & ADVERTISE_1000XPAUSE)
			ap->txconfig |= ANEG_CFG_PS1;
		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
			ap->txconfig |= ANEG_CFG_PS2;
L
Linus Torvalds 已提交
3485 3486 3487 3488 3489 3490 3491 3492 3493 3494 3495 3496 3497 3498 3499 3500 3501 3502 3503 3504 3505 3506 3507 3508 3509 3510 3511 3512 3513 3514 3515 3516 3517 3518 3519 3520 3521 3522 3523 3524 3525 3526 3527 3528 3529 3530 3531 3532 3533 3534 3535 3536 3537 3538 3539 3540 3541 3542 3543 3544 3545 3546 3547 3548 3549 3550 3551 3552 3553 3554 3555 3556 3557 3558 3559 3560 3561 3562 3563 3564 3565 3566 3567 3568 3569 3570 3571 3572 3573 3574 3575 3576 3577 3578 3579 3580 3581 3582 3583 3584 3585 3586 3587 3588 3589 3590 3591 3592 3593 3594 3595 3596 3597 3598 3599 3600 3601 3602 3603 3604 3605 3606 3607 3608 3609 3610 3611 3612 3613 3614 3615 3616 3617 3618 3619 3620 3621 3622 3623 3624
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		if (ap->ability_match != 0 && ap->rxconfig != 0) {
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		}
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
3625
	}
L
Linus Torvalds 已提交
3626 3627 3628 3629

	return ret;
}

3630
static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
L
Linus Torvalds 已提交
3631 3632 3633 3634 3635 3636 3637 3638 3639 3640 3641 3642 3643 3644 3645 3646 3647 3648 3649 3650 3651 3652 3653 3654 3655 3656 3657 3658 3659 3660 3661 3662 3663
{
	int res = 0;
	struct tg3_fiber_aneginfo aninfo;
	int status = ANEG_FAILED;
	unsigned int tick;
	u32 tmp;

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
	udelay(40);

	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
	udelay(40);

	memset(&aninfo, 0, sizeof(aninfo));
	aninfo.flags |= MR_AN_ENABLE;
	aninfo.state = ANEG_STATE_UNKNOWN;
	aninfo.cur_time = 0;
	tick = 0;
	while (++tick < 195000) {
		status = tg3_fiber_aneg_smachine(tp, &aninfo);
		if (status == ANEG_DONE || status == ANEG_FAILED)
			break;

		udelay(1);
	}

	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

3664 3665
	*txflags = aninfo.txconfig;
	*rxflags = aninfo.flags;
L
Linus Torvalds 已提交
3666 3667 3668 3669 3670 3671 3672 3673 3674 3675 3676 3677 3678 3679 3680 3681 3682 3683 3684 3685 3686 3687 3688 3689 3690 3691 3692 3693 3694 3695 3696 3697 3698 3699 3700 3701 3702 3703 3704 3705 3706 3707 3708 3709 3710 3711 3712 3713 3714 3715 3716 3717 3718 3719 3720 3721 3722 3723 3724 3725 3726

	if (status == ANEG_DONE &&
	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
			     MR_LP_ADV_FULL_DUPLEX)))
		res = 1;

	return res;
}

static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}

static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
3727
	u16 flowctrl;
L
Linus Torvalds 已提交
3728 3729 3730 3731 3732 3733 3734 3735 3736 3737 3738 3739 3740 3741 3742 3743 3744 3745 3746 3747 3748 3749 3750 3751 3752
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3753
		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
L
Linus Torvalds 已提交
3754 3755 3756 3757 3758 3759 3760 3761 3762
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}
3763 3764

			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
L
Linus Torvalds 已提交
3765 3766 3767 3768 3769 3770 3771 3772 3773
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation.  */
3774
	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
L
Linus Torvalds 已提交
3775

3776 3777 3778 3779 3780
	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
	if (flowctrl & ADVERTISE_1000XPAUSE)
		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
L
Linus Torvalds 已提交
3781 3782

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
M
Michael Chan 已提交
3783 3784 3785 3786 3787 3788 3789 3790 3791 3792
		if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = 1;
			goto out;
		}
restart_autoneg:
L
Linus Torvalds 已提交
3793 3794
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3795
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
L
Linus Torvalds 已提交
3796 3797 3798
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

M
Michael Chan 已提交
3799 3800
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
L
Linus Torvalds 已提交
3801 3802
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
M
Michael Chan 已提交
3803
		sg_dig_status = tr32(SG_DIG_STATUS);
L
Linus Torvalds 已提交
3804 3805
		mac_status = tr32(MAC_STATUS);

3806
		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
L
Linus Torvalds 已提交
3807
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
3808 3809 3810 3811 3812 3813
			u32 local_adv = 0, remote_adv = 0;

			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
				local_adv |= ADVERTISE_1000XPSE_ASYM;
L
Linus Torvalds 已提交
3814

3815
			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
3816
				remote_adv |= LPA_1000XPAUSE;
3817
			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
3818
				remote_adv |= LPA_1000XPAUSE_ASYM;
L
Linus Torvalds 已提交
3819 3820 3821

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
M
Michael Chan 已提交
3822 3823
			tp->serdes_counter = 0;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3824
		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
M
Michael Chan 已提交
3825 3826
			if (tp->serdes_counter)
				tp->serdes_counter--;
L
Linus Torvalds 已提交
3827 3828 3829 3830 3831 3832 3833 3834 3835 3836 3837 3838
			else {
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

3839
				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
L
Linus Torvalds 已提交
3840 3841 3842 3843 3844 3845 3846 3847 3848 3849
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
M
Michael Chan 已提交
3850 3851 3852 3853 3854 3855
					tp->tg3_flags2 |=
						TG3_FLG2_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
L
Linus Torvalds 已提交
3856 3857
			}
		}
M
Michael Chan 已提交
3858 3859 3860
	} else {
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
L
Linus Torvalds 已提交
3861 3862 3863 3864 3865 3866 3867 3868 3869 3870
	}

out:
	return current_link_up;
}

static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

3871
	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
L
Linus Torvalds 已提交
3872 3873 3874
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3875
		u32 txflags, rxflags;
L
Linus Torvalds 已提交
3876
		int i;
3877

3878 3879
		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			u32 local_adv = 0, remote_adv = 0;
L
Linus Torvalds 已提交
3880

3881 3882 3883 3884 3885 3886 3887 3888 3889
			if (txflags & ANEG_CFG_PS1)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (txflags & ANEG_CFG_PS2)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (rxflags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE;
			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;
L
Linus Torvalds 已提交
3890 3891 3892 3893 3894 3895 3896 3897 3898 3899 3900 3901 3902 3903 3904 3905 3906 3907 3908 3909 3910 3911 3912

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = 1;
		}
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
3913 3914
		tg3_setup_flow_control(tp, 0, 0);

L
Linus Torvalds 已提交
3915 3916 3917 3918 3919
		/* Forcing 1000FD link up. */
		current_link_up = 1;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);
M
Matt Carlson 已提交
3920 3921 3922

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
L
Linus Torvalds 已提交
3923 3924 3925 3926 3927 3928 3929 3930 3931 3932 3933 3934 3935 3936 3937
	}

out:
	return current_link_up;
}

static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

3938
	orig_pause_cfg = tp->link_config.active_flowctrl;
L
Linus Torvalds 已提交
3939 3940 3941 3942 3943 3944 3945 3946 3947 3948 3949 3950 3951 3952 3953 3954 3955 3956 3957 3958 3959 3960 3961 3962 3963 3964
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

3965
	if (tp->phy_id == TG3_PHY_ID_BCM8002)
L
Linus Torvalds 已提交
3966 3967 3968 3969 3970 3971 3972 3973 3974 3975 3976 3977 3978 3979
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

3980
	tp->napi[0].hw_status->status =
L
Linus Torvalds 已提交
3981
		(SD_STATUS_UPDATED |
3982
		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
L
Linus Torvalds 已提交
3983 3984 3985 3986 3987 3988

	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
M
Michael Chan 已提交
3989 3990
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
L
Linus Torvalds 已提交
3991 3992 3993 3994 3995 3996
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
M
Michael Chan 已提交
3997 3998
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
L
Linus Torvalds 已提交
3999 4000 4001 4002 4003 4004 4005 4006 4007 4008 4009 4010 4011 4012 4013 4014 4015 4016 4017 4018 4019 4020 4021 4022 4023 4024 4025 4026
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
4027
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
L
Linus Torvalds 已提交
4028 4029 4030 4031 4032 4033 4034 4035 4036
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}

M
Michael Chan 已提交
4037 4038 4039 4040 4041 4042
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;
4043
	u32 local_adv, remote_adv;
M
Michael Chan 已提交
4044 4045 4046 4047 4048 4049 4050 4051 4052 4053 4054 4055 4056 4057 4058 4059 4060 4061 4062 4063 4064 4065 4066

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4067 4068 4069 4070 4071 4072
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}
M
Michael Chan 已提交
4073 4074 4075 4076

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4077
	    (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
M
Michael Chan 已提交
4078 4079 4080 4081 4082 4083 4084 4085 4086 4087
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, new_adv;

		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				  ADVERTISE_1000XPAUSE |
				  ADVERTISE_1000XPSE_ASYM |
				  ADVERTISE_SLCT);

4088
		new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
M
Michael Chan 已提交
4089 4090 4091 4092 4093 4094 4095 4096 4097 4098 4099 4100

		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
			new_adv |= ADVERTISE_1000XHALF;
		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
			new_adv |= ADVERTISE_1000XFULL;

		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
M
Michael Chan 已提交
4101
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
M
Michael Chan 已提交
4102 4103 4104 4105 4106 4107 4108 4109 4110 4111 4112 4113 4114 4115 4116 4117 4118 4119 4120 4121 4122 4123 4124 4125 4126 4127 4128 4129 4130 4131 4132 4133 4134 4135 4136 4137 4138 4139
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4140 4141 4142 4143 4144 4145 4146
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
M
Michael Chan 已提交
4147 4148 4149 4150 4151 4152 4153 4154 4155 4156 4157 4158
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

4159 4160 4161
		local_adv = 0;
		remote_adv = 0;

M
Michael Chan 已提交
4162
		if (bmcr & BMCR_ANENABLE) {
4163
			u32 common;
M
Michael Chan 已提交
4164 4165 4166 4167 4168 4169 4170 4171 4172 4173 4174 4175 4176 4177 4178 4179

			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;
			}
			else
				current_link_up = 0;
		}
	}

4180 4181 4182
	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

M
Michael Chan 已提交
4183 4184 4185 4186 4187 4188 4189 4190 4191 4192 4193 4194 4195 4196 4197 4198 4199 4200 4201 4202 4203 4204 4205 4206 4207 4208
	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}

static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
M
Michael Chan 已提交
4209
	if (tp->serdes_counter) {
M
Michael Chan 已提交
4210
		/* Give autoneg time to complete. */
M
Michael Chan 已提交
4211
		tp->serdes_counter--;
M
Michael Chan 已提交
4212 4213 4214 4215 4216 4217 4218 4219 4220 4221 4222 4223 4224 4225 4226 4227 4228 4229 4230 4231 4232 4233 4234 4235 4236 4237 4238 4239 4240 4241 4242 4243 4244 4245 4246 4247 4248 4249 4250 4251 4252 4253 4254 4255 4256 4257 4258 4259 4260 4261 4262 4263 4264
		return;
	}
	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, 0x1c, 0x7c00);
			tg3_readphy(tp, 0x1c, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, 0x17, 0x0f01);
			tg3_readphy(tp, 0x15, &phy2);
			tg3_readphy(tp, 0x15, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
			}
		}
	}
	else if (netif_carrier_ok(tp->dev) &&
		 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, 0x17, 0x0f01);
		tg3_readphy(tp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

		}
	}
}

L
Linus Torvalds 已提交
4265 4266 4267 4268 4269 4270
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	int err;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		err = tg3_setup_fiber_phy(tp, force_reset);
M
Michael Chan 已提交
4271 4272
	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
L
Linus Torvalds 已提交
4273 4274 4275 4276
	} else {
		err = tg3_setup_copper_phy(tp, force_reset);
	}

4277
	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
M
Matt Carlson 已提交
4278 4279 4280 4281 4282 4283 4284 4285 4286 4287 4288 4289 4290 4291 4292
		u32 val, scale;

		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

L
Linus Torvalds 已提交
4293 4294 4295 4296 4297 4298 4299 4300 4301 4302 4303 4304 4305 4306 4307
	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		if (netif_carrier_ok(tp->dev)) {
			tw32(HOSTCC_STAT_COAL_TICKS,
4308
			     tp->coal.stats_block_coalesce_usecs);
L
Linus Torvalds 已提交
4309 4310 4311 4312 4313
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

M
Matt Carlson 已提交
4314 4315 4316 4317 4318 4319 4320 4321 4322 4323
	if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
		u32 val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!netif_carrier_ok(tp->dev))
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

L
Linus Torvalds 已提交
4324 4325 4326
	return err;
}

4327 4328 4329 4330 4331 4332 4333 4334 4335 4336 4337
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

4338 4339
	netdev_warn(tp->dev, "The system may be re-ordering memory-mapped I/O cycles to the network device, attempting to recover\n"
		    "Please report the problem to the driver maintainer and include system chipset information.\n");
4340 4341 4342 4343 4344 4345

	spin_lock(&tp->lock);
	tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
	spin_unlock(&tp->lock);
}

4346
static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
M
Michael Chan 已提交
4347 4348
{
	smp_mb();
4349 4350
	return tnapi->tx_pending -
	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
M
Michael Chan 已提交
4351 4352
}

L
Linus Torvalds 已提交
4353 4354 4355 4356
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
4357
static void tg3_tx(struct tg3_napi *tnapi)
L
Linus Torvalds 已提交
4358
{
4359
	struct tg3 *tp = tnapi->tp;
4360
	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4361
	u32 sw_idx = tnapi->tx_cons;
M
Matt Carlson 已提交
4362 4363 4364
	struct netdev_queue *txq;
	int index = tnapi - tp->napi;

4365
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
M
Matt Carlson 已提交
4366 4367 4368
		index--;

	txq = netdev_get_tx_queue(tp->dev, index);
L
Linus Torvalds 已提交
4369 4370

	while (sw_idx != hw_idx) {
4371
		struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
L
Linus Torvalds 已提交
4372
		struct sk_buff *skb = ri->skb;
4373 4374 4375 4376 4377 4378
		int i, tx_bug = 0;

		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}
L
Linus Torvalds 已提交
4379

4380 4381 4382 4383
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);
L
Linus Torvalds 已提交
4384 4385 4386 4387 4388 4389

		ri->skb = NULL;

		sw_idx = NEXT_TX(sw_idx);

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4390
			ri = &tnapi->tx_buffers[sw_idx];
4391 4392
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;
4393 4394 4395 4396 4397

			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(ri, mapping),
				       skb_shinfo(skb)->frags[i].size,
				       PCI_DMA_TODEVICE);
L
Linus Torvalds 已提交
4398 4399 4400
			sw_idx = NEXT_TX(sw_idx);
		}

4401
		dev_kfree_skb(skb);
4402 4403 4404 4405 4406

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
L
Linus Torvalds 已提交
4407 4408
	}

4409
	tnapi->tx_cons = sw_idx;
L
Linus Torvalds 已提交
4410

M
Michael Chan 已提交
4411 4412 4413 4414 4415 4416 4417
	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

M
Matt Carlson 已提交
4418
	if (unlikely(netif_tx_queue_stopped(txq) &&
4419
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
M
Matt Carlson 已提交
4420 4421
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
4422
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
M
Matt Carlson 已提交
4423 4424
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
4425
	}
L
Linus Torvalds 已提交
4426 4427
}

4428 4429 4430 4431 4432 4433 4434 4435 4436 4437 4438
static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
{
	if (!ri->skb)
		return;

	pci_unmap_single(tp->pdev, pci_unmap_addr(ri, mapping),
			 map_sz, PCI_DMA_FROMDEVICE);
	dev_kfree_skb_any(ri->skb);
	ri->skb = NULL;
}

L
Linus Torvalds 已提交
4439 4440 4441 4442 4443 4444 4445 4446 4447 4448 4449
/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful assymetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
M
Matt Carlson 已提交
4450
static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4451
			    u32 opaque_key, u32 dest_idx_unmasked)
L
Linus Torvalds 已提交
4452 4453 4454 4455 4456 4457 4458 4459 4460 4461 4462
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map, *src_map;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int skb_size, dest_idx;

	src_map = NULL;
	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4463 4464
		desc = &tpr->rx_std[dest_idx];
		map = &tpr->rx_std_buffers[dest_idx];
4465
		skb_size = tp->rx_pkt_map_sz;
L
Linus Torvalds 已提交
4466 4467 4468 4469
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
M
Matt Carlson 已提交
4470
		desc = &tpr->rx_jmb[dest_idx].std;
4471
		map = &tpr->rx_jmb_buffers[dest_idx];
4472
		skb_size = TG3_RX_JMB_MAP_SZ;
L
Linus Torvalds 已提交
4473 4474 4475 4476
		break;

	default:
		return -EINVAL;
4477
	}
L
Linus Torvalds 已提交
4478 4479 4480 4481 4482 4483 4484

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
4485
	skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
L
Linus Torvalds 已提交
4486 4487 4488 4489 4490
	if (skb == NULL)
		return -ENOMEM;

	skb_reserve(skb, tp->rx_offset);

4491
	mapping = pci_map_single(tp->pdev, skb->data, skb_size,
L
Linus Torvalds 已提交
4492
				 PCI_DMA_FROMDEVICE);
4493 4494 4495 4496
	if (pci_dma_mapping_error(tp->pdev, mapping)) {
		dev_kfree_skb(skb);
		return -EIO;
	}
L
Linus Torvalds 已提交
4497 4498 4499 4500 4501 4502 4503 4504 4505 4506 4507 4508 4509 4510

	map->skb = skb;
	pci_unmap_addr_set(map, mapping, mapping);

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return skb_size;
}

/* We only need to move over in the address because the other
 * members of the RX descriptor are invariant.  See notes above
 * tg3_alloc_rx_skb for full details.
 */
4511 4512 4513 4514
static void tg3_recycle_rx(struct tg3_napi *tnapi,
			   struct tg3_rx_prodring_set *dpr,
			   u32 opaque_key, int src_idx,
			   u32 dest_idx_unmasked)
L
Linus Torvalds 已提交
4515
{
4516
	struct tg3 *tp = tnapi->tp;
L
Linus Torvalds 已提交
4517 4518 4519
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	int dest_idx;
4520
	struct tg3_rx_prodring_set *spr = &tp->prodring[0];
L
Linus Torvalds 已提交
4521 4522 4523 4524

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4525 4526 4527 4528
		dest_desc = &dpr->rx_std[dest_idx];
		dest_map = &dpr->rx_std_buffers[dest_idx];
		src_desc = &spr->rx_std[src_idx];
		src_map = &spr->rx_std_buffers[src_idx];
L
Linus Torvalds 已提交
4529 4530 4531 4532
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4533 4534 4535 4536
		dest_desc = &dpr->rx_jmb[dest_idx].std;
		dest_map = &dpr->rx_jmb_buffers[dest_idx];
		src_desc = &spr->rx_jmb[src_idx].std;
		src_map = &spr->rx_jmb_buffers[src_idx];
L
Linus Torvalds 已提交
4537 4538 4539 4540
		break;

	default:
		return;
4541
	}
L
Linus Torvalds 已提交
4542 4543 4544 4545 4546 4547

	dest_map->skb = src_map->skb;
	pci_unmap_addr_set(dest_map, mapping,
			   pci_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;
4548 4549 4550 4551 4552 4553

	/* Ensure that the update to the skb happens after the physical
	 * addresses have been transferred to the new BD location.
	 */
	smp_wmb();

L
Linus Torvalds 已提交
4554 4555 4556 4557 4558 4559 4560 4561 4562 4563 4564 4565 4566 4567 4568 4569 4570 4571 4572 4573 4574 4575 4576 4577 4578 4579 4580
	src_map->skb = NULL;
}

/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
4581
static int tg3_rx(struct tg3_napi *tnapi, int budget)
L
Linus Torvalds 已提交
4582
{
4583
	struct tg3 *tp = tnapi->tp;
M
Michael Chan 已提交
4584
	u32 work_mask, rx_std_posted = 0;
4585
	u32 std_prod_idx, jmb_prod_idx;
4586
	u32 sw_idx = tnapi->rx_rcb_ptr;
M
Michael Chan 已提交
4587
	u16 hw_idx;
L
Linus Torvalds 已提交
4588
	int received;
M
Matt Carlson 已提交
4589
	struct tg3_rx_prodring_set *tpr = tnapi->prodring;
L
Linus Torvalds 已提交
4590

4591
	hw_idx = *(tnapi->rx_rcb_prod_idx);
L
Linus Torvalds 已提交
4592 4593 4594 4595 4596 4597 4598
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
4599 4600
	std_prod_idx = tpr->rx_std_prod_idx;
	jmb_prod_idx = tpr->rx_jmb_prod_idx;
L
Linus Torvalds 已提交
4601
	while (sw_idx != hw_idx && budget > 0) {
4602
		struct ring_info *ri;
4603
		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
L
Linus Torvalds 已提交
4604 4605 4606 4607 4608 4609 4610 4611
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
M
Matt Carlson 已提交
4612
			ri = &tp->prodring[0].rx_std_buffers[desc_idx];
4613 4614
			dma_addr = pci_unmap_addr(ri, mapping);
			skb = ri->skb;
4615
			post_ptr = &std_prod_idx;
M
Michael Chan 已提交
4616
			rx_std_posted++;
L
Linus Torvalds 已提交
4617
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
M
Matt Carlson 已提交
4618
			ri = &tp->prodring[0].rx_jmb_buffers[desc_idx];
4619 4620
			dma_addr = pci_unmap_addr(ri, mapping);
			skb = ri->skb;
4621
			post_ptr = &jmb_prod_idx;
4622
		} else
L
Linus Torvalds 已提交
4623 4624 4625 4626 4627 4628 4629
			goto next_pkt_nopost;

		work_mask |= opaque_key;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
4630
			tg3_recycle_rx(tnapi, tpr, opaque_key,
L
Linus Torvalds 已提交
4631 4632 4633 4634 4635 4636 4637
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->net_stats.rx_dropped++;
			goto next_pkt;
		}

M
Matt Carlson 已提交
4638 4639
		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
		      ETH_FCS_LEN;
L
Linus Torvalds 已提交
4640

4641 4642 4643 4644 4645 4646
		if (len > RX_COPY_THRESHOLD &&
		    tp->rx_offset == NET_IP_ALIGN) {
		    /* rx_offset will likely not equal NET_IP_ALIGN
		     * if this is a 5701 card running in PCI-X mode
		     * [see tg3_get_invariants()]
		     */
L
Linus Torvalds 已提交
4647 4648
			int skb_size;

M
Matt Carlson 已提交
4649
			skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
4650
						    *post_ptr);
L
Linus Torvalds 已提交
4651 4652 4653
			if (skb_size < 0)
				goto drop_it;

4654
			pci_unmap_single(tp->pdev, dma_addr, skb_size,
L
Linus Torvalds 已提交
4655 4656
					 PCI_DMA_FROMDEVICE);

4657 4658 4659 4660 4661 4662 4663
			/* Ensure that the update to the skb happens
			 * after the usage of the old DMA mapping.
			 */
			smp_wmb();

			ri->skb = NULL;

L
Linus Torvalds 已提交
4664 4665 4666 4667
			skb_put(skb, len);
		} else {
			struct sk_buff *copy_skb;

4668
			tg3_recycle_rx(tnapi, tpr, opaque_key,
L
Linus Torvalds 已提交
4669 4670
				       desc_idx, *post_ptr);

M
Matt Carlson 已提交
4671 4672
			copy_skb = netdev_alloc_skb(tp->dev,
						    len + TG3_RAW_IP_ALIGN);
L
Linus Torvalds 已提交
4673 4674 4675
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

M
Matt Carlson 已提交
4676
			skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
L
Linus Torvalds 已提交
4677 4678
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4679
			skb_copy_from_linear_data(skb, copy_skb->data, len);
L
Linus Torvalds 已提交
4680 4681 4682 4683 4684 4685 4686 4687 4688 4689 4690 4691 4692 4693 4694
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		skb->protocol = eth_type_trans(skb, tp->dev);
4695 4696 4697 4698 4699 4700 4701

		if (len > (tp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(ETH_P_8021Q)) {
			dev_kfree_skb(skb);
			goto next_pkt;
		}

L
Linus Torvalds 已提交
4702 4703 4704
#if TG3_VLAN_TAG_USED
		if (tp->vlgrp != NULL &&
		    desc->type_flags & RXD_FLAG_VLAN) {
4705
			vlan_gro_receive(&tnapi->napi, tp->vlgrp,
M
Matt Carlson 已提交
4706
					 desc->err_vlan & RXD_VLAN_MASK, skb);
L
Linus Torvalds 已提交
4707 4708
		} else
#endif
4709
			napi_gro_receive(&tnapi->napi, skb);
L
Linus Torvalds 已提交
4710 4711 4712 4713 4714 4715

		received++;
		budget--;

next_pkt:
		(*post_ptr)++;
M
Michael Chan 已提交
4716 4717

		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
4718 4719 4720
			tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
M
Michael Chan 已提交
4721 4722 4723
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
L
Linus Torvalds 已提交
4724
next_pkt_nopost:
M
Michael Chan 已提交
4725
		sw_idx++;
E
Eric Dumazet 已提交
4726
		sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
4727 4728 4729

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
4730
			hw_idx = *(tnapi->rx_rcb_prod_idx);
4731 4732
			rmb();
		}
L
Linus Torvalds 已提交
4733 4734 4735
	}

	/* ACK the status ring. */
4736 4737
	tnapi->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(tnapi->consmbox, sw_idx);
L
Linus Torvalds 已提交
4738 4739

	/* Refill RX ring(s). */
4740
	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) {
M
Matt Carlson 已提交
4741 4742 4743 4744 4745 4746 4747 4748 4749 4750 4751 4752 4753 4754 4755 4756 4757 4758
		if (work_mask & RXD_OPAQUE_RING_STD) {
			tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
		}
		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
			tpr->rx_jmb_prod_idx = jmb_prod_idx %
					       TG3_RX_JUMBO_RING_SIZE;
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     tpr->rx_jmb_prod_idx);
		}
		mmiowb();
	} else if (work_mask) {
		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
		 * updated before the producer indices can be updated.
		 */
		smp_wmb();

4759 4760
		tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
		tpr->rx_jmb_prod_idx = jmb_prod_idx % TG3_RX_JUMBO_RING_SIZE;
M
Matt Carlson 已提交
4761

4762 4763
		if (tnapi != &tp->napi[1])
			napi_schedule(&tp->napi[1].napi);
L
Linus Torvalds 已提交
4764 4765 4766 4767 4768
	}

	return received;
}

4769
static void tg3_poll_link(struct tg3 *tp)
L
Linus Torvalds 已提交
4770 4771 4772 4773 4774
{
	/* handle link change and other phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
4775 4776
		struct tg3_hw_status *sblk = tp->napi[0].hw_status;

L
Linus Torvalds 已提交
4777 4778
		if (sblk->status & SD_STATUS_LINK_CHG) {
			sblk->status = SD_STATUS_UPDATED |
4779
				       (sblk->status & ~SD_STATUS_LINK_CHG);
4780
			spin_lock(&tp->lock);
M
Matt Carlson 已提交
4781 4782 4783 4784 4785 4786 4787 4788 4789
			if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
				tw32_f(MAC_STATUS,
				     (MAC_STATUS_SYNC_CHANGED |
				      MAC_STATUS_CFG_CHANGED |
				      MAC_STATUS_MI_COMPLETION |
				      MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, 0);
4790
			spin_unlock(&tp->lock);
L
Linus Torvalds 已提交
4791 4792
		}
	}
4793 4794
}

M
Matt Carlson 已提交
4795 4796 4797
static int tg3_rx_prodring_xfer(struct tg3 *tp,
				struct tg3_rx_prodring_set *dpr,
				struct tg3_rx_prodring_set *spr)
M
Matt Carlson 已提交
4798 4799
{
	u32 si, di, cpycnt, src_prod_idx;
M
Matt Carlson 已提交
4800
	int i, err = 0;
M
Matt Carlson 已提交
4801 4802 4803 4804 4805 4806 4807 4808 4809 4810 4811 4812 4813 4814 4815 4816 4817 4818 4819 4820 4821 4822

	while (1) {
		src_prod_idx = spr->rx_std_prod_idx;

		/* Make sure updates to the rx_std_buffers[] entries and the
		 * standard producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_std_cons_idx == src_prod_idx)
			break;

		if (spr->rx_std_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
		else
			cpycnt = TG3_RX_RING_SIZE - spr->rx_std_cons_idx;

		cpycnt = min(cpycnt, TG3_RX_RING_SIZE - dpr->rx_std_prod_idx);

		si = spr->rx_std_cons_idx;
		di = dpr->rx_std_prod_idx;

4823 4824 4825
		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_std_buffers[i].skb) {
				cpycnt = i - di;
M
Matt Carlson 已提交
4826
				err = -ENOSPC;
4827 4828 4829 4830 4831 4832 4833 4834 4835 4836 4837 4838 4839
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_std_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

M
Matt Carlson 已提交
4840 4841 4842 4843 4844 4845 4846 4847 4848 4849 4850 4851 4852 4853 4854 4855 4856 4857 4858 4859 4860 4861 4862 4863 4864 4865 4866 4867 4868 4869 4870 4871 4872 4873 4874 4875 4876 4877 4878 4879
		memcpy(&dpr->rx_std_buffers[di],
		       &spr->rx_std_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_std[si];
			dbd = &dpr->rx_std[di];
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) %
				       TG3_RX_RING_SIZE;
		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) %
				       TG3_RX_RING_SIZE;
	}

	while (1) {
		src_prod_idx = spr->rx_jmb_prod_idx;

		/* Make sure updates to the rx_jmb_buffers[] entries and
		 * the jumbo producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_jmb_cons_idx == src_prod_idx)
			break;

		if (spr->rx_jmb_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
		else
			cpycnt = TG3_RX_JUMBO_RING_SIZE - spr->rx_jmb_cons_idx;

		cpycnt = min(cpycnt,
			     TG3_RX_JUMBO_RING_SIZE - dpr->rx_jmb_prod_idx);

		si = spr->rx_jmb_cons_idx;
		di = dpr->rx_jmb_prod_idx;

4880 4881 4882
		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_jmb_buffers[i].skb) {
				cpycnt = i - di;
M
Matt Carlson 已提交
4883
				err = -ENOSPC;
4884 4885 4886 4887 4888 4889 4890 4891 4892 4893 4894 4895 4896
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_jmb_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

M
Matt Carlson 已提交
4897 4898 4899 4900 4901 4902 4903 4904 4905 4906 4907 4908 4909 4910 4911 4912 4913
		memcpy(&dpr->rx_jmb_buffers[di],
		       &spr->rx_jmb_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_jmb[si].std;
			dbd = &dpr->rx_jmb[di].std;
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) %
				       TG3_RX_JUMBO_RING_SIZE;
		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) %
				       TG3_RX_JUMBO_RING_SIZE;
	}
M
Matt Carlson 已提交
4914 4915

	return err;
M
Matt Carlson 已提交
4916 4917
}

4918 4919 4920
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
	struct tg3 *tp = tnapi->tp;
L
Linus Torvalds 已提交
4921 4922

	/* run TX completion thread */
4923
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
4924
		tg3_tx(tnapi);
4925
		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
M
Michael Chan 已提交
4926
			return work_done;
L
Linus Torvalds 已提交
4927 4928 4929 4930
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
4931
	 * code synchronizes with tg3->napi.poll()
L
Linus Torvalds 已提交
4932
	 */
4933
	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
4934
		work_done += tg3_rx(tnapi, budget - work_done);
L
Linus Torvalds 已提交
4935

M
Matt Carlson 已提交
4936
	if ((tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) && tnapi == &tp->napi[1]) {
4937
		struct tg3_rx_prodring_set *dpr = &tp->prodring[0];
M
Matt Carlson 已提交
4938
		int i, err = 0;
4939 4940
		u32 std_prod_idx = dpr->rx_std_prod_idx;
		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
M
Matt Carlson 已提交
4941

4942
		for (i = 1; i < tp->irq_cnt; i++)
M
Matt Carlson 已提交
4943 4944
			err |= tg3_rx_prodring_xfer(tp, dpr,
						    tp->napi[i].prodring);
M
Matt Carlson 已提交
4945 4946 4947

		wmb();

4948 4949 4950
		if (std_prod_idx != dpr->rx_std_prod_idx)
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     dpr->rx_std_prod_idx);
M
Matt Carlson 已提交
4951

4952 4953 4954
		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     dpr->rx_jmb_prod_idx);
M
Matt Carlson 已提交
4955 4956

		mmiowb();
M
Matt Carlson 已提交
4957 4958 4959

		if (err)
			tw32_f(HOSTCC_MODE, tp->coal_now);
M
Matt Carlson 已提交
4960 4961
	}

4962 4963 4964
	return work_done;
}

4965 4966 4967 4968 4969 4970 4971 4972 4973 4974 4975 4976 4977 4978 4979 4980 4981 4982 4983 4984 4985 4986 4987 4988 4989 4990 4991 4992 4993 4994 4995 4996 4997 4998 4999 5000 5001 5002 5003 5004 5005 5006 5007 5008
static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		/* tp->last_tag is used in tg3_restart_ints() below
		 * to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
		tnapi->last_tag = sblk->status_tag;
		tnapi->last_irq_tag = tnapi->last_tag;
		rmb();

		/* check for RX/TX work to do */
		if (sblk->idx[0].tx_consumer == tnapi->tx_cons &&
		    *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr) {
			napi_complete(napi);
			/* Reenable interrupts. */
			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
			mmiowb();
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	schedule_work(&tp->reset_task);
	return work_done;
}

5009 5010
static int tg3_poll(struct napi_struct *napi, int budget)
{
M
Matt Carlson 已提交
5011 5012
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
5013
	int work_done = 0;
5014
	struct tg3_hw_status *sblk = tnapi->hw_status;
5015 5016

	while (1) {
5017 5018
		tg3_poll_link(tp);

5019
		work_done = tg3_poll_work(tnapi, work_done, budget);
5020 5021 5022 5023 5024 5025 5026

		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

M
Michael Chan 已提交
5027
		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
5028
			/* tp->last_tag is used in tg3_int_reenable() below
M
Michael Chan 已提交
5029 5030 5031
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
5032 5033
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
M
Michael Chan 已提交
5034 5035 5036
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;
5037

5038
		if (likely(!tg3_has_work(tnapi))) {
5039
			napi_complete(napi);
5040
			tg3_int_reenable(tnapi);
5041 5042
			break;
		}
L
Linus Torvalds 已提交
5043 5044
	}

5045
	return work_done;
5046 5047

tx_recovery:
M
Michael Chan 已提交
5048
	/* work_done is guaranteed to be less than budget. */
5049
	napi_complete(napi);
5050
	schedule_work(&tp->reset_task);
M
Michael Chan 已提交
5051
	return work_done;
L
Linus Torvalds 已提交
5052 5053
}

5054 5055
static void tg3_irq_quiesce(struct tg3 *tp)
{
5056 5057
	int i;

5058 5059 5060 5061 5062
	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();

5063 5064
	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);
5065 5066 5067 5068 5069 5070 5071 5072 5073 5074 5075 5076 5077 5078
}

static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}

/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
5079
	spin_lock_bh(&tp->lock);
5080 5081 5082 5083 5084 5085 5086 5087 5088
	if (irq_sync)
		tg3_irq_quiesce(tp);
}

static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}

5089 5090 5091
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
5092
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
5093
{
5094 5095
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
5096

5097
	prefetch(tnapi->hw_status);
5098 5099
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5100 5101

	if (likely(!tg3_irq_sync(tp)))
5102
		napi_schedule(&tnapi->napi);
5103 5104 5105 5106

	return IRQ_HANDLED;
}

M
Michael Chan 已提交
5107 5108 5109 5110
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
5111
static irqreturn_t tg3_msi(int irq, void *dev_id)
M
Michael Chan 已提交
5112
{
5113 5114
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
M
Michael Chan 已提交
5115

5116
	prefetch(tnapi->hw_status);
5117 5118
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
M
Michael Chan 已提交
5119
	/*
5120
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
M
Michael Chan 已提交
5121
	 * chip-internal interrupt pending events.
5122
	 * Writing non-zero to intr-mbox-0 additional tells the
M
Michael Chan 已提交
5123 5124 5125 5126
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5127
	if (likely(!tg3_irq_sync(tp)))
5128
		napi_schedule(&tnapi->napi);
5129

M
Michael Chan 已提交
5130 5131 5132
	return IRQ_RETVAL(1);
}

5133
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
L
Linus Torvalds 已提交
5134
{
5135 5136
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
5137
	struct tg3_hw_status *sblk = tnapi->hw_status;
L
Linus Torvalds 已提交
5138 5139 5140 5141 5142 5143 5144
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
5145 5146 5147 5148
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
5149
			goto out;
5150
		}
5151 5152 5153 5154 5155 5156 5157 5158
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
5159 5160 5161 5162
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
5163
	 */
5164
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5165 5166 5167
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
5168
	if (likely(tg3_has_work(tnapi))) {
5169
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5170
		napi_schedule(&tnapi->napi);
5171 5172 5173 5174 5175 5176
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
5177
	}
5178
out:
5179 5180 5181
	return IRQ_RETVAL(handled);
}

5182
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
5183
{
5184 5185
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
5186
	struct tg3_hw_status *sblk = tnapi->hw_status;
5187 5188 5189 5190 5191 5192 5193
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
5194
	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
5195 5196 5197
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
5198
			goto out;
L
Linus Torvalds 已提交
5199
		}
5200 5201 5202 5203 5204 5205 5206 5207
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
5208 5209 5210 5211
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
5212
	 */
5213
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5214 5215 5216 5217 5218 5219 5220

	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream.  We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled.  Eventually they will be silenced.
	 */
5221
	tnapi->last_irq_tag = sblk->status_tag;
5222

5223 5224
	if (tg3_irq_sync(tp))
		goto out;
5225

5226
	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5227

5228
	napi_schedule(&tnapi->napi);
5229

5230
out:
L
Linus Torvalds 已提交
5231 5232 5233
	return IRQ_RETVAL(handled);
}

M
Michael Chan 已提交
5234
/* ISR for interrupt test */
5235
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
M
Michael Chan 已提交
5236
{
5237 5238
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
5239
	struct tg3_hw_status *sblk = tnapi->hw_status;
M
Michael Chan 已提交
5240

M
Michael Chan 已提交
5241 5242
	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5243
		tg3_disable_ints(tp);
M
Michael Chan 已提交
5244 5245 5246 5247 5248
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}

5249
static int tg3_init_hw(struct tg3 *, int);
M
Michael Chan 已提交
5250
static int tg3_halt(struct tg3 *, int, int);
L
Linus Torvalds 已提交
5251

M
Michael Chan 已提交
5252 5253 5254 5255
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
E
Eric Dumazet 已提交
5256 5257
	__releases(tp->lock)
	__acquires(tp->lock)
M
Michael Chan 已提交
5258 5259 5260 5261 5262
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
5263
		netdev_err(tp->dev, "Failed to re-initialize device, aborting\n");
M
Michael Chan 已提交
5264 5265 5266 5267
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		del_timer_sync(&tp->timer);
		tp->irq_sync = 0;
5268
		tg3_napi_enable(tp);
M
Michael Chan 已提交
5269 5270 5271 5272 5273 5274
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}

L
Linus Torvalds 已提交
5275 5276 5277
#ifdef CONFIG_NET_POLL_CONTROLLER
static void tg3_poll_controller(struct net_device *dev)
{
5278
	int i;
M
Michael Chan 已提交
5279 5280
	struct tg3 *tp = netdev_priv(dev);

5281 5282
	for (i = 0; i < tp->irq_cnt; i++)
		tg3_interrupt(tp->napi[i].irq_vec, dev);
L
Linus Torvalds 已提交
5283 5284 5285
}
#endif

D
David Howells 已提交
5286
static void tg3_reset_task(struct work_struct *work)
L
Linus Torvalds 已提交
5287
{
D
David Howells 已提交
5288
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
M
Matt Carlson 已提交
5289
	int err;
L
Linus Torvalds 已提交
5290 5291
	unsigned int restart_timer;

M
Michael Chan 已提交
5292 5293 5294 5295 5296 5297 5298 5299 5300
	tg3_full_lock(tp, 0);

	if (!netif_running(tp->dev)) {
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

M
Matt Carlson 已提交
5301 5302
	tg3_phy_stop(tp);

L
Linus Torvalds 已提交
5303 5304
	tg3_netif_stop(tp);

5305
	tg3_full_lock(tp, 1);
L
Linus Torvalds 已提交
5306 5307 5308 5309

	restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
	tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;

5310 5311 5312 5313 5314 5315 5316
	if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
		tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
	}

M
Michael Chan 已提交
5317
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
M
Matt Carlson 已提交
5318 5319
	err = tg3_init_hw(tp, 1);
	if (err)
M
Michael Chan 已提交
5320
		goto out;
L
Linus Torvalds 已提交
5321 5322 5323 5324 5325

	tg3_netif_start(tp);

	if (restart_timer)
		mod_timer(&tp->timer, jiffies + 1);
M
Michael Chan 已提交
5326

M
Michael Chan 已提交
5327
out:
M
Michael Chan 已提交
5328
	tg3_full_unlock(tp);
M
Matt Carlson 已提交
5329 5330 5331

	if (!err)
		tg3_phy_start(tp);
L
Linus Torvalds 已提交
5332 5333
}

5334 5335
static void tg3_dump_short_state(struct tg3 *tp)
{
5336 5337 5338 5339
	netdev_err(tp->dev, "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
		   tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
	netdev_err(tp->dev, "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
		   tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
5340 5341
}

L
Linus Torvalds 已提交
5342 5343 5344 5345
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

5346
	if (netif_msg_tx_err(tp)) {
5347
		netdev_err(dev, "transmit timed out, resetting\n");
5348 5349
		tg3_dump_short_state(tp);
	}
L
Linus Torvalds 已提交
5350 5351 5352 5353

	schedule_work(&tp->reset_task);
}

5354 5355 5356 5357 5358 5359 5360 5361 5362
/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
{
	u32 base = (u32) mapping & 0xffffffff;

	return ((base > 0xffffdcc0) &&
		(base + len + 8 < base));
}

M
Michael Chan 已提交
5363 5364 5365 5366 5367
/* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5368
	if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
5369
		return (((u64) mapping + len) > DMA_BIT_MASK(40));
M
Michael Chan 已提交
5370 5371 5372 5373 5374 5375
	return 0;
#else
	return 0;
#endif
}

5376
static void tg3_set_txd(struct tg3_napi *, int, dma_addr_t, int, u32, u32);
L
Linus Torvalds 已提交
5377

M
Michael Chan 已提交
5378
/* Workaround 4GB and 40-bit hardware DMA bugs. */
5379 5380 5381
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
				       struct sk_buff *skb, u32 last_plus_one,
				       u32 *start, u32 base_flags, u32 mss)
L
Linus Torvalds 已提交
5382
{
5383
	struct tg3 *tp = tnapi->tp;
M
Matt Carlson 已提交
5384
	struct sk_buff *new_skb;
5385
	dma_addr_t new_addr = 0;
L
Linus Torvalds 已提交
5386
	u32 entry = *start;
5387
	int i, ret = 0;
L
Linus Torvalds 已提交
5388

M
Matt Carlson 已提交
5389 5390 5391 5392 5393 5394 5395 5396 5397 5398
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

L
Linus Torvalds 已提交
5399
	if (!new_skb) {
5400 5401 5402 5403
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		entry = *start;
5404 5405 5406 5407 5408 5409 5410
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure the mapping succeeded */
		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
			ret = -1;
			dev_kfree_skb(new_skb);
			new_skb = NULL;
5411

5412 5413 5414
		/* Make sure new skb does not cross any 4G boundaries.
		 * Drop the packet if it does.
		 */
5415 5416 5417 5418
		} else if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
			    tg3_4g_overflow_test(new_addr, new_skb->len)) {
			pci_unmap_single(tp->pdev, new_addr, new_skb->len,
					 PCI_DMA_TODEVICE);
5419 5420 5421 5422
			ret = -1;
			dev_kfree_skb(new_skb);
			new_skb = NULL;
		} else {
5423
			tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
5424 5425 5426
				    base_flags, 1 | (mss << 1));
			*start = NEXT_TX(entry);
		}
L
Linus Torvalds 已提交
5427 5428 5429 5430 5431
	}

	/* Now clean up the sw ring entries. */
	i = 0;
	while (entry != last_plus_one) {
5432 5433
		int len;

5434
		if (i == 0)
5435
			len = skb_headlen(skb);
5436
		else
5437 5438 5439 5440 5441 5442 5443 5444 5445 5446 5447
			len = skb_shinfo(skb)->frags[i-1].size;

		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(&tnapi->tx_buffers[entry],
						mapping),
				 len, PCI_DMA_TODEVICE);
		if (i == 0) {
			tnapi->tx_buffers[entry].skb = new_skb;
			pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   new_addr);
		} else {
5448
			tnapi->tx_buffers[entry].skb = NULL;
5449
		}
L
Linus Torvalds 已提交
5450 5451 5452 5453 5454 5455
		entry = NEXT_TX(entry);
		i++;
	}

	dev_kfree_skb(skb);

5456
	return ret;
L
Linus Torvalds 已提交
5457 5458
}

5459
static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
L
Linus Torvalds 已提交
5460 5461 5462
			dma_addr_t mapping, int len, u32 flags,
			u32 mss_and_is_end)
{
5463
	struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
L
Linus Torvalds 已提交
5464 5465 5466 5467 5468 5469 5470 5471 5472 5473 5474 5475 5476 5477 5478 5479 5480 5481
	int is_end = (mss_and_is_end & 0x1);
	u32 mss = (mss_and_is_end >> 1);
	u32 vlan_tag = 0;

	if (is_end)
		flags |= TXD_FLAG_END;
	if (flags & TXD_FLAG_VLAN) {
		vlan_tag = flags >> 16;
		flags &= 0xffff;
	}
	vlan_tag |= (mss << TXD_MSS_SHIFT);

	txd->addr_hi = ((u64) mapping >> 32);
	txd->addr_lo = ((u64) mapping & 0xffffffff);
	txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
	txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
}

M
Michael Chan 已提交
5482
/* hard_start_xmit for devices that don't have any bugs and
M
Matt Carlson 已提交
5483
 * support TG3_FLG2_HW_TSO_2 and TG3_FLG2_HW_TSO_3 only.
M
Michael Chan 已提交
5484
 */
5485 5486
static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
				  struct net_device *dev)
M
Michael Chan 已提交
5487 5488 5489
{
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss;
5490
	dma_addr_t mapping;
M
Matt Carlson 已提交
5491 5492
	struct tg3_napi *tnapi;
	struct netdev_queue *txq;
5493 5494
	unsigned int i, last;

M
Michael Chan 已提交
5495

M
Matt Carlson 已提交
5496 5497
	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5498
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
M
Matt Carlson 已提交
5499
		tnapi++;
M
Michael Chan 已提交
5500

M
Michael Chan 已提交
5501
	/* We are running in BH disabled context with netif_tx_lock
5502
	 * and TX reclaim runs via tp->napi.poll inside of a software
M
Michael Chan 已提交
5503 5504 5505
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
5506
	if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
M
Matt Carlson 已提交
5507 5508
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);
M
Michael Chan 已提交
5509 5510

			/* This is a hard error, log it. */
5511
			netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
M
Michael Chan 已提交
5512 5513 5514 5515
		}
		return NETDEV_TX_BUSY;
	}

5516
	entry = tnapi->tx_prod;
M
Michael Chan 已提交
5517 5518
	base_flags = 0;
	mss = 0;
M
Matt Carlson 已提交
5519
	if ((mss = skb_shinfo(skb)->gso_size) != 0) {
M
Michael Chan 已提交
5520
		int tcp_opt_len, ip_tcp_len;
M
Matt Carlson 已提交
5521
		u32 hdrlen;
M
Michael Chan 已提交
5522 5523 5524 5525 5526 5527 5528

		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

M
Michael Chan 已提交
5529
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
M
Matt Carlson 已提交
5530
			hdrlen = skb_headlen(skb) - ETH_HLEN;
M
Michael Chan 已提交
5531
		else {
5532 5533
			struct iphdr *iph = ip_hdr(skb);

5534
			tcp_opt_len = tcp_optlen(skb);
5535
			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
M
Michael Chan 已提交
5536

5537 5538
			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
M
Matt Carlson 已提交
5539
			hdrlen = ip_tcp_len + tcp_opt_len;
M
Michael Chan 已提交
5540
		}
M
Michael Chan 已提交
5541

M
Matt Carlson 已提交
5542
		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
M
Matt Carlson 已提交
5543 5544 5545 5546 5547 5548 5549
			mss |= (hdrlen & 0xc) << 12;
			if (hdrlen & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdrlen & 0x3e0) << 5;
		} else
			mss |= hdrlen << 9;

M
Michael Chan 已提交
5550 5551 5552
		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

5553
		tcp_hdr(skb)->check = 0;
M
Michael Chan 已提交
5554 5555

	}
5556
	else if (skb->ip_summed == CHECKSUM_PARTIAL)
M
Michael Chan 已提交
5557 5558 5559 5560 5561 5562 5563
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
#if TG3_VLAN_TAG_USED
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif

5564 5565 5566 5567 5568
	len = skb_headlen(skb);

	/* Queue skb data, a.k.a. the main skb fragment. */
	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping)) {
5569 5570 5571 5572
		dev_kfree_skb(skb);
		goto out_unlock;
	}

5573
	tnapi->tx_buffers[entry].skb = skb;
5574
	pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
M
Matt Carlson 已提交
5575

M
Matt Carlson 已提交
5576
	if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
M
Matt Carlson 已提交
5577 5578 5579
	    !mss && skb->len > ETH_DATA_LEN)
		base_flags |= TXD_FLAG_JMB_PKT;

5580
	tg3_set_txd(tnapi, entry, mapping, len, base_flags,
M
Michael Chan 已提交
5581 5582 5583 5584 5585 5586 5587 5588 5589 5590 5591
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
5592 5593 5594 5595 5596 5597 5598
			mapping = pci_map_page(tp->pdev,
					       frag->page,
					       frag->page_offset,
					       len, PCI_DMA_TODEVICE);
			if (pci_dma_mapping_error(tp->pdev, mapping))
				goto dma_error;

5599
			tnapi->tx_buffers[entry].skb = NULL;
5600 5601
			pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   mapping);
M
Michael Chan 已提交
5602

5603
			tg3_set_txd(tnapi, entry, mapping, len,
M
Michael Chan 已提交
5604 5605 5606 5607 5608 5609 5610
				    base_flags, (i == last) | (mss << 1));

			entry = NEXT_TX(entry);
		}
	}

	/* Packets are ready, update Tx producer idx local and on card. */
5611
	tw32_tx_mbox(tnapi->prodmbox, entry);
M
Michael Chan 已提交
5612

5613 5614
	tnapi->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
M
Matt Carlson 已提交
5615
		netif_tx_stop_queue(txq);
5616
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
M
Matt Carlson 已提交
5617
			netif_tx_wake_queue(txq);
M
Michael Chan 已提交
5618 5619 5620
	}

out_unlock:
E
Eric Dumazet 已提交
5621
	mmiowb();
M
Michael Chan 已提交
5622 5623

	return NETDEV_TX_OK;
5624 5625 5626 5627 5628 5629 5630 5631 5632 5633 5634 5635 5636 5637 5638 5639 5640 5641 5642 5643 5644

dma_error:
	last = i;
	entry = tnapi->tx_prod;
	tnapi->tx_buffers[entry].skb = NULL;
	pci_unmap_single(tp->pdev,
			 pci_unmap_addr(&tnapi->tx_buffers[entry], mapping),
			 skb_headlen(skb),
			 PCI_DMA_TODEVICE);
	for (i = 0; i <= last; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		entry = NEXT_TX(entry);

		pci_unmap_page(tp->pdev,
			       pci_unmap_addr(&tnapi->tx_buffers[entry],
					      mapping),
			       frag->size, PCI_DMA_TODEVICE);
	}

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
M
Michael Chan 已提交
5645 5646
}

5647 5648
static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *,
					  struct net_device *);
5649 5650 5651 5652 5653 5654 5655

/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;
5656
	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
5657 5658

	/* Estimate the number of fragments in the worst case */
5659
	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
5660
		netif_stop_queue(tp->dev);
5661
		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
M
Michael Chan 已提交
5662 5663 5664
			return NETDEV_TX_BUSY;

		netif_wake_queue(tp->dev);
5665 5666 5667
	}

	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
5668
	if (IS_ERR(segs))
5669 5670 5671 5672 5673 5674 5675 5676 5677 5678 5679 5680 5681 5682 5683
		goto tg3_tso_bug_end;

	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit_dma_bug(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}

M
Michael Chan 已提交
5684 5685 5686
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
 */
5687 5688
static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
					  struct net_device *dev)
L
Linus Torvalds 已提交
5689 5690 5691 5692
{
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss;
	int would_hit_hwbug;
5693
	dma_addr_t mapping;
5694 5695
	struct tg3_napi *tnapi;
	struct netdev_queue *txq;
5696 5697
	unsigned int i, last;

L
Linus Torvalds 已提交
5698

5699 5700
	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5701
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
5702
		tnapi++;
L
Linus Torvalds 已提交
5703

M
Michael Chan 已提交
5704
	/* We are running in BH disabled context with netif_tx_lock
5705
	 * and TX reclaim runs via tp->napi.poll inside of a software
5706 5707
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
L
Linus Torvalds 已提交
5708
	 */
5709
	if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5710 5711
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);
S
Stephen Hemminger 已提交
5712 5713

			/* This is a hard error, log it. */
5714
			netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
S
Stephen Hemminger 已提交
5715
		}
L
Linus Torvalds 已提交
5716 5717 5718
		return NETDEV_TX_BUSY;
	}

5719
	entry = tnapi->tx_prod;
L
Linus Torvalds 已提交
5720
	base_flags = 0;
5721
	if (skb->ip_summed == CHECKSUM_PARTIAL)
L
Linus Torvalds 已提交
5722
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
5723

M
Matt Carlson 已提交
5724
	if ((mss = skb_shinfo(skb)->gso_size) != 0) {
5725
		struct iphdr *iph;
M
Matt Carlson 已提交
5726
		u32 tcp_opt_len, ip_tcp_len, hdr_len;
L
Linus Torvalds 已提交
5727 5728 5729 5730 5731 5732 5733

		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

5734
		tcp_opt_len = tcp_optlen(skb);
5735
		ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
L
Linus Torvalds 已提交
5736

5737 5738
		hdr_len = ip_tcp_len + tcp_opt_len;
		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
M
Michael Chan 已提交
5739
			     (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
5740 5741
			return (tg3_tso_bug(tp, skb));

L
Linus Torvalds 已提交
5742 5743 5744
		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

5745 5746 5747
		iph = ip_hdr(skb);
		iph->check = 0;
		iph->tot_len = htons(mss + hdr_len);
L
Linus Torvalds 已提交
5748
		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
5749
			tcp_hdr(skb)->check = 0;
L
Linus Torvalds 已提交
5750
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
5751 5752 5753 5754 5755
		} else
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
L
Linus Torvalds 已提交
5756

5757 5758 5759 5760 5761 5762
		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)
M
Matt Carlson 已提交
5763 5764 5765
			mss |= hdr_len << 9;
		else if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_1) ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5766
			if (tcp_opt_len || iph->ihl > 5) {
L
Linus Torvalds 已提交
5767 5768
				int tsflags;

5769
				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
L
Linus Torvalds 已提交
5770 5771 5772
				mss |= (tsflags << 11);
			}
		} else {
5773
			if (tcp_opt_len || iph->ihl > 5) {
L
Linus Torvalds 已提交
5774 5775
				int tsflags;

5776
				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
L
Linus Torvalds 已提交
5777 5778 5779 5780 5781 5782 5783 5784 5785 5786
				base_flags |= tsflags << 12;
			}
		}
	}
#if TG3_VLAN_TAG_USED
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif

M
Matt Carlson 已提交
5787
	if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
5788 5789 5790
	    !mss && skb->len > ETH_DATA_LEN)
		base_flags |= TXD_FLAG_JMB_PKT;

5791 5792 5793 5794
	len = skb_headlen(skb);

	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping)) {
5795 5796 5797 5798
		dev_kfree_skb(skb);
		goto out_unlock;
	}

5799
	tnapi->tx_buffers[entry].skb = skb;
5800
	pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
L
Linus Torvalds 已提交
5801 5802 5803

	would_hit_hwbug = 0;

M
Matt Carlson 已提交
5804 5805 5806
	if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) && len <= 8)
		would_hit_hwbug = 1;

5807 5808 5809 5810 5811 5812
	if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
	    tg3_4g_overflow_test(mapping, len))
		would_hit_hwbug = 1;

	if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) &&
	    tg3_40bit_overflow_test(tp, mapping, len))
M
Matt Carlson 已提交
5813
		would_hit_hwbug = 1;
5814 5815

	if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
5816
		would_hit_hwbug = 1;
L
Linus Torvalds 已提交
5817

5818
	tg3_set_txd(tnapi, entry, mapping, len, base_flags,
L
Linus Torvalds 已提交
5819 5820 5821 5822 5823 5824 5825 5826 5827 5828 5829
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
5830 5831 5832 5833
			mapping = pci_map_page(tp->pdev,
					       frag->page,
					       frag->page_offset,
					       len, PCI_DMA_TODEVICE);
L
Linus Torvalds 已提交
5834

5835
			tnapi->tx_buffers[entry].skb = NULL;
5836 5837 5838 5839
			pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   mapping);
			if (pci_dma_mapping_error(tp->pdev, mapping))
				goto dma_error;
L
Linus Torvalds 已提交
5840

M
Matt Carlson 已提交
5841 5842 5843 5844
			if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) &&
			    len <= 8)
				would_hit_hwbug = 1;

5845 5846
			if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
			    tg3_4g_overflow_test(mapping, len))
5847
				would_hit_hwbug = 1;
L
Linus Torvalds 已提交
5848

5849 5850
			if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) &&
			    tg3_40bit_overflow_test(tp, mapping, len))
M
Michael Chan 已提交
5851 5852
				would_hit_hwbug = 1;

L
Linus Torvalds 已提交
5853
			if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5854
				tg3_set_txd(tnapi, entry, mapping, len,
L
Linus Torvalds 已提交
5855 5856
					    base_flags, (i == last)|(mss << 1));
			else
5857
				tg3_set_txd(tnapi, entry, mapping, len,
L
Linus Torvalds 已提交
5858 5859 5860 5861 5862 5863 5864 5865 5866 5867
					    base_flags, (i == last));

			entry = NEXT_TX(entry);
		}
	}

	if (would_hit_hwbug) {
		u32 last_plus_one = entry;
		u32 start;

5868 5869
		start = entry - 1 - skb_shinfo(skb)->nr_frags;
		start &= (TG3_TX_RING_SIZE - 1);
L
Linus Torvalds 已提交
5870 5871 5872 5873

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
5874
		if (tigon3_dma_hwbug_workaround(tnapi, skb, last_plus_one,
5875
						&start, base_flags, mss))
L
Linus Torvalds 已提交
5876 5877 5878 5879 5880 5881
			goto out_unlock;

		entry = start;
	}

	/* Packets are ready, update Tx producer idx local and on card. */
5882
	tw32_tx_mbox(tnapi->prodmbox, entry);
L
Linus Torvalds 已提交
5883

5884 5885
	tnapi->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
5886
		netif_tx_stop_queue(txq);
5887
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
5888
			netif_tx_wake_queue(txq);
5889
	}
L
Linus Torvalds 已提交
5890 5891

out_unlock:
E
Eric Dumazet 已提交
5892
	mmiowb();
L
Linus Torvalds 已提交
5893 5894

	return NETDEV_TX_OK;
5895 5896 5897 5898 5899 5900 5901 5902 5903 5904 5905 5906 5907 5908 5909 5910 5911 5912 5913 5914 5915

dma_error:
	last = i;
	entry = tnapi->tx_prod;
	tnapi->tx_buffers[entry].skb = NULL;
	pci_unmap_single(tp->pdev,
			 pci_unmap_addr(&tnapi->tx_buffers[entry], mapping),
			 skb_headlen(skb),
			 PCI_DMA_TODEVICE);
	for (i = 0; i <= last; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		entry = NEXT_TX(entry);

		pci_unmap_page(tp->pdev,
			       pci_unmap_addr(&tnapi->tx_buffers[entry],
					      mapping),
			       frag->size, PCI_DMA_TODEVICE);
	}

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
L
Linus Torvalds 已提交
5916 5917 5918 5919 5920 5921 5922
}

static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
			       int new_mtu)
{
	dev->mtu = new_mtu;

M
Michael Chan 已提交
5923
	if (new_mtu > ETH_DATA_LEN) {
M
Michael Chan 已提交
5924
		if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
M
Michael Chan 已提交
5925 5926 5927 5928 5929 5930
			tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
			ethtool_op_set_tso(dev, 0);
		}
		else
			tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
	} else {
M
Michael Chan 已提交
5931
		if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
M
Michael Chan 已提交
5932
			tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
5933
		tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
M
Michael Chan 已提交
5934
	}
L
Linus Torvalds 已提交
5935 5936 5937 5938 5939
}

static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
M
Michael Chan 已提交
5940
	int err;
L
Linus Torvalds 已提交
5941 5942 5943 5944 5945 5946 5947 5948 5949 5950 5951 5952

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

M
Matt Carlson 已提交
5953 5954
	tg3_phy_stop(tp);

L
Linus Torvalds 已提交
5955
	tg3_netif_stop(tp);
5956 5957

	tg3_full_lock(tp, 1);
L
Linus Torvalds 已提交
5958

M
Michael Chan 已提交
5959
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
L
Linus Torvalds 已提交
5960 5961 5962

	tg3_set_mtu(dev, tp, new_mtu);

M
Michael Chan 已提交
5963
	err = tg3_restart_hw(tp, 0);
L
Linus Torvalds 已提交
5964

M
Michael Chan 已提交
5965 5966
	if (!err)
		tg3_netif_start(tp);
L
Linus Torvalds 已提交
5967

5968
	tg3_full_unlock(tp);
L
Linus Torvalds 已提交
5969

M
Matt Carlson 已提交
5970 5971 5972
	if (!err)
		tg3_phy_start(tp);

M
Michael Chan 已提交
5973
	return err;
L
Linus Torvalds 已提交
5974 5975
}

5976 5977
static void tg3_rx_prodring_free(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
L
Linus Torvalds 已提交
5978 5979 5980
{
	int i;

M
Matt Carlson 已提交
5981 5982 5983 5984 5985 5986 5987 5988 5989 5990 5991 5992 5993 5994 5995
	if (tpr != &tp->prodring[0]) {
		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
		     i = (i + 1) % TG3_RX_RING_SIZE)
			tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
					tp->rx_pkt_map_sz);

		if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
			for (i = tpr->rx_jmb_cons_idx;
			     i != tpr->rx_jmb_prod_idx;
			     i = (i + 1) % TG3_RX_JUMBO_RING_SIZE) {
				tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
						TG3_RX_JMB_MAP_SZ);
			}
		}

5996
		return;
M
Matt Carlson 已提交
5997
	}
L
Linus Torvalds 已提交
5998

5999 6000 6001
	for (i = 0; i < TG3_RX_RING_SIZE; i++)
		tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
				tp->rx_pkt_map_sz);
L
Linus Torvalds 已提交
6002

6003
	if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
6004 6005 6006
		for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++)
			tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
					TG3_RX_JMB_MAP_SZ);
L
Linus Torvalds 已提交
6007 6008 6009 6010 6011 6012 6013 6014 6015 6016
	}
}

/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
6017 6018
static int tg3_rx_prodring_alloc(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
L
Linus Torvalds 已提交
6019
{
6020
	u32 i, rx_pkt_dma_sz;
L
Linus Torvalds 已提交
6021

M
Matt Carlson 已提交
6022 6023 6024 6025 6026
	tpr->rx_std_cons_idx = 0;
	tpr->rx_std_prod_idx = 0;
	tpr->rx_jmb_cons_idx = 0;
	tpr->rx_jmb_prod_idx = 0;

6027 6028 6029 6030 6031 6032 6033 6034
	if (tpr != &tp->prodring[0]) {
		memset(&tpr->rx_std_buffers[0], 0, TG3_RX_STD_BUFF_RING_SIZE);
		if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE)
			memset(&tpr->rx_jmb_buffers[0], 0,
			       TG3_RX_JMB_BUFF_RING_SIZE);
		goto done;
	}

L
Linus Torvalds 已提交
6035
	/* Zero out all descriptors. */
6036
	memset(tpr->rx_std, 0, TG3_RX_RING_BYTES);
L
Linus Torvalds 已提交
6037

6038
	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
M
Michael Chan 已提交
6039
	if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
6040 6041 6042
	    tp->dev->mtu > ETH_DATA_LEN)
		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
6043

L
Linus Torvalds 已提交
6044 6045 6046 6047 6048 6049 6050
	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		struct tg3_rx_buffer_desc *rxd;

6051
		rxd = &tpr->rx_std[i];
6052
		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
L
Linus Torvalds 已提交
6053 6054 6055 6056 6057 6058 6059
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
M
Matt Carlson 已提交
6060
		if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
6061 6062
			netdev_warn(tp->dev, "Using a smaller RX standard ring, only %d out of %d buffers were allocated successfully\n",
				    i, tp->rx_pending);
6063
			if (i == 0)
6064
				goto initfail;
6065
			tp->rx_pending = i;
L
Linus Torvalds 已提交
6066
			break;
6067
		}
L
Linus Torvalds 已提交
6068 6069
	}

6070 6071 6072
	if (!(tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE))
		goto done;

6073
	memset(tpr->rx_jmb, 0, TG3_RX_JUMBO_RING_BYTES);
6074

6075 6076
	if (!(tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE))
		goto done;
6077

6078 6079 6080 6081 6082 6083 6084 6085 6086 6087 6088 6089 6090
	for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_jmb[i].std;
		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				  RXD_FLAG_JUMBO;
		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
		       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	for (i = 0; i < tp->rx_jumbo_pending; i++) {
		if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
6091 6092
			netdev_warn(tp->dev, "Using a smaller RX jumbo ring, only %d out of %d buffers were allocated successfully\n",
				    i, tp->rx_jumbo_pending);
6093 6094 6095 6096
			if (i == 0)
				goto initfail;
			tp->rx_jumbo_pending = i;
			break;
L
Linus Torvalds 已提交
6097 6098
		}
	}
6099 6100

done:
6101
	return 0;
6102 6103

initfail:
6104
	tg3_rx_prodring_free(tp, tpr);
6105
	return -ENOMEM;
L
Linus Torvalds 已提交
6106 6107
}

6108 6109
static void tg3_rx_prodring_fini(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
L
Linus Torvalds 已提交
6110
{
6111 6112 6113 6114 6115
	kfree(tpr->rx_std_buffers);
	tpr->rx_std_buffers = NULL;
	kfree(tpr->rx_jmb_buffers);
	tpr->rx_jmb_buffers = NULL;
	if (tpr->rx_std) {
L
Linus Torvalds 已提交
6116
		pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
6117 6118
				    tpr->rx_std, tpr->rx_std_mapping);
		tpr->rx_std = NULL;
L
Linus Torvalds 已提交
6119
	}
6120
	if (tpr->rx_jmb) {
L
Linus Torvalds 已提交
6121
		pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
6122 6123
				    tpr->rx_jmb, tpr->rx_jmb_mapping);
		tpr->rx_jmb = NULL;
L
Linus Torvalds 已提交
6124
	}
6125 6126
}

6127 6128
static int tg3_rx_prodring_init(struct tg3 *tp,
				struct tg3_rx_prodring_set *tpr)
6129
{
6130
	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE, GFP_KERNEL);
6131
	if (!tpr->rx_std_buffers)
6132 6133
		return -ENOMEM;

6134 6135 6136
	tpr->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
					   &tpr->rx_std_mapping);
	if (!tpr->rx_std)
6137 6138 6139
		goto err_out;

	if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
6140
		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE,
6141 6142
					      GFP_KERNEL);
		if (!tpr->rx_jmb_buffers)
6143 6144
			goto err_out;

6145 6146 6147 6148
		tpr->rx_jmb = pci_alloc_consistent(tp->pdev,
						   TG3_RX_JUMBO_RING_BYTES,
						   &tpr->rx_jmb_mapping);
		if (!tpr->rx_jmb)
6149 6150 6151 6152 6153 6154
			goto err_out;
	}

	return 0;

err_out:
6155
	tg3_rx_prodring_fini(tp, tpr);
6156 6157 6158 6159 6160 6161 6162 6163 6164 6165 6166 6167
	return -ENOMEM;
}

/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
6168
	int i, j;
6169

6170 6171
	for (j = 0; j < tp->irq_cnt; j++) {
		struct tg3_napi *tnapi = &tp->napi[j];
6172

6173 6174 6175
		if (!tnapi->tx_buffers)
			continue;

6176
		for (i = 0; i < TG3_TX_RING_SIZE; ) {
6177
			struct ring_info *txp;
6178
			struct sk_buff *skb;
6179
			unsigned int k;
6180

6181 6182
			txp = &tnapi->tx_buffers[i];
			skb = txp->skb;
6183

6184 6185 6186 6187
			if (skb == NULL) {
				i++;
				continue;
			}
6188

6189 6190 6191 6192
			pci_unmap_single(tp->pdev,
					 pci_unmap_addr(txp, mapping),
					 skb_headlen(skb),
					 PCI_DMA_TODEVICE);
6193
			txp->skb = NULL;
6194

6195 6196 6197 6198 6199 6200 6201 6202 6203 6204
			i++;

			for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
				txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
				pci_unmap_page(tp->pdev,
					       pci_unmap_addr(txp, mapping),
					       skb_shinfo(skb)->frags[k].size,
					       PCI_DMA_TODEVICE);
				i++;
			}
6205 6206 6207

			dev_kfree_skb_any(skb);
		}
6208

6209
		tg3_rx_prodring_free(tp, &tp->prodring[j]);
6210
	}
6211 6212 6213 6214 6215 6216 6217 6218 6219 6220 6221
}

/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_init_rings(struct tg3 *tp)
{
6222
	int i;
6223

6224 6225 6226
	/* Free up all the SKBs. */
	tg3_free_rings(tp);

6227 6228 6229 6230 6231 6232 6233 6234
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
		tnapi->hw_status->status = 0;
		tnapi->hw_status->status_tag = 0;
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6235

6236 6237
		tnapi->tx_prod = 0;
		tnapi->tx_cons = 0;
6238 6239
		if (tnapi->tx_ring)
			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
6240 6241

		tnapi->rx_rcb_ptr = 0;
6242 6243
		if (tnapi->rx_rcb)
			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6244

6245 6246
		if (tg3_rx_prodring_alloc(tp, &tp->prodring[i])) {
			tg3_free_rings(tp);
6247
			return -ENOMEM;
6248
		}
6249
	}
6250

6251
	return 0;
6252 6253 6254 6255 6256 6257 6258 6259
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.
 */
static void tg3_free_consistent(struct tg3 *tp)
{
6260
	int i;
6261

6262 6263 6264 6265 6266 6267 6268 6269 6270 6271 6272 6273 6274 6275 6276 6277 6278 6279 6280 6281 6282 6283 6284 6285 6286
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->tx_ring) {
			pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
				tnapi->tx_ring, tnapi->tx_desc_mapping);
			tnapi->tx_ring = NULL;
		}

		kfree(tnapi->tx_buffers);
		tnapi->tx_buffers = NULL;

		if (tnapi->rx_rcb) {
			pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
					    tnapi->rx_rcb,
					    tnapi->rx_rcb_mapping);
			tnapi->rx_rcb = NULL;
		}

		if (tnapi->hw_status) {
			pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
					    tnapi->hw_status,
					    tnapi->status_mapping);
			tnapi->hw_status = NULL;
		}
L
Linus Torvalds 已提交
6287
	}
6288

L
Linus Torvalds 已提交
6289 6290 6291 6292 6293
	if (tp->hw_stats) {
		pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
				    tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
6294

6295
	for (i = 0; i < tp->irq_cnt; i++)
6296
		tg3_rx_prodring_fini(tp, &tp->prodring[i]);
L
Linus Torvalds 已提交
6297 6298 6299 6300 6301 6302 6303 6304
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.  Can sleep.
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
6305
	int i;
6306

6307
	for (i = 0; i < tp->irq_cnt; i++) {
6308 6309 6310
		if (tg3_rx_prodring_init(tp, &tp->prodring[i]))
			goto err_out;
	}
L
Linus Torvalds 已提交
6311

6312 6313 6314 6315
	tp->hw_stats = pci_alloc_consistent(tp->pdev,
					    sizeof(struct tg3_hw_stats),
					    &tp->stats_mapping);
	if (!tp->hw_stats)
L
Linus Torvalds 已提交
6316 6317
		goto err_out;

6318
	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
L
Linus Torvalds 已提交
6319

6320 6321
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
6322
		struct tg3_hw_status *sblk;
L
Linus Torvalds 已提交
6323

6324 6325 6326 6327 6328
		tnapi->hw_status = pci_alloc_consistent(tp->pdev,
							TG3_HW_STATUS_SIZE,
							&tnapi->status_mapping);
		if (!tnapi->hw_status)
			goto err_out;
6329

6330
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6331 6332
		sblk = tnapi->hw_status;

6333 6334 6335 6336 6337 6338 6339 6340 6341 6342 6343 6344 6345 6346 6347 6348 6349 6350
		/* If multivector TSS is enabled, vector 0 does not handle
		 * tx interrupts.  Don't allocate any resources for it.
		 */
		if ((!i && !(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)) ||
		    (i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS))) {
			tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) *
						    TG3_TX_RING_SIZE,
						    GFP_KERNEL);
			if (!tnapi->tx_buffers)
				goto err_out;

			tnapi->tx_ring = pci_alloc_consistent(tp->pdev,
							      TG3_TX_RING_BYTES,
						       &tnapi->tx_desc_mapping);
			if (!tnapi->tx_ring)
				goto err_out;
		}

6351 6352 6353 6354 6355 6356 6357 6358 6359 6360 6361 6362 6363 6364 6365 6366 6367 6368 6369 6370
		/*
		 * When RSS is enabled, the status block format changes
		 * slightly.  The "rx_jumbo_consumer", "reserved",
		 * and "rx_mini_consumer" members get mapped to the
		 * other three rx return ring producer indexes.
		 */
		switch (i) {
		default:
			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
			break;
		case 2:
			tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
			break;
		case 3:
			tnapi->rx_rcb_prod_idx = &sblk->reserved;
			break;
		case 4:
			tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
			break;
		}
6371

6372
		tnapi->prodring = &tp->prodring[i];
M
Matt Carlson 已提交
6373

6374 6375 6376 6377 6378 6379 6380
		/*
		 * If multivector RSS is enabled, vector 0 does not handle
		 * rx or tx interrupts.  Don't allocate any resources for it.
		 */
		if (!i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS))
			continue;

6381 6382 6383 6384 6385
		tnapi->rx_rcb = pci_alloc_consistent(tp->pdev,
						     TG3_RX_RCB_RING_BYTES(tp),
						     &tnapi->rx_rcb_mapping);
		if (!tnapi->rx_rcb)
			goto err_out;
6386

6387 6388
		memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
	}
L
Linus Torvalds 已提交
6389 6390 6391 6392 6393 6394 6395 6396 6397 6398 6399 6400 6401

	return 0;

err_out:
	tg3_free_consistent(tp);
	return -ENOMEM;
}

#define MAX_WAIT_CNT 1000

/* To stop a block, clear the enable bit and poll till it
 * clears.  tp->lock is held.
 */
6402
static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
L
Linus Torvalds 已提交
6403 6404 6405 6406 6407 6408 6409 6410 6411 6412 6413 6414 6415 6416 6417 6418 6419 6420
{
	unsigned int i;
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		switch (ofs) {
		case RCVLSC_MODE:
		case DMAC_MODE:
		case MBFREE_MODE:
		case BUFMGR_MODE:
		case MEMARB_MODE:
			/* We can't enable/disable these bits of the
			 * 5705/5750, just say success.
			 */
			return 0;

		default:
			break;
6421
		}
L
Linus Torvalds 已提交
6422 6423 6424 6425 6426 6427 6428 6429 6430 6431 6432 6433 6434
	}

	val = tr32(ofs);
	val &= ~enable_bit;
	tw32_f(ofs, val);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		val = tr32(ofs);
		if ((val & enable_bit) == 0)
			break;
	}

6435
	if (i == MAX_WAIT_CNT && !silent) {
6436
		pr_err("tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
L
Linus Torvalds 已提交
6437 6438 6439 6440 6441 6442 6443 6444
		       ofs, enable_bit);
		return -ENODEV;
	}

	return 0;
}

/* tp->lock is held. */
6445
static int tg3_abort_hw(struct tg3 *tp, int silent)
L
Linus Torvalds 已提交
6446 6447 6448 6449 6450 6451 6452 6453 6454
{
	int i, err;

	tg3_disable_ints(tp);

	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

6455 6456 6457 6458 6459 6460 6461 6462 6463 6464 6465 6466 6467 6468
	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
L
Linus Torvalds 已提交
6469 6470 6471 6472 6473 6474 6475 6476 6477 6478 6479 6480 6481 6482

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
6483 6484
		netdev_err(tp->dev, "%s timed out, TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
			   __func__, tr32(MAC_TX_MODE));
6485
		err |= -ENODEV;
L
Linus Torvalds 已提交
6486 6487
	}

6488
	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
6489 6490
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
L
Linus Torvalds 已提交
6491 6492 6493 6494

	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

6495 6496
	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
L
Linus Torvalds 已提交
6497

6498 6499 6500 6501 6502
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status)
			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}
L
Linus Torvalds 已提交
6503 6504 6505 6506 6507 6508
	if (tp->hw_stats)
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return err;
}

M
Matt Carlson 已提交
6509 6510 6511 6512 6513 6514 6515 6516 6517 6518
static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int i;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
M
Matt Carlson 已提交
6519
	if (!(apedata & APE_FW_STATUS_READY))
M
Matt Carlson 已提交
6520 6521 6522 6523 6524 6525 6526 6527 6528 6529 6530 6531 6532 6533 6534 6535 6536 6537 6538 6539 6540 6541 6542 6543 6544 6545 6546 6547 6548 6549 6550 6551 6552 6553 6554 6555 6556 6557 6558 6559 6560 6561 6562 6563 6564 6565 6566 6567 6568
		return;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	for (i = 0; i < 10; i++) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
					event | APE_EVENT_STATUS_EVENT_PENDING);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(100);
	}

	if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return;

	switch (kind) {
		case RESET_KIND_INIT:
			tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
					APE_HOST_SEG_SIG_MAGIC);
			tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
					APE_HOST_SEG_LEN_MAGIC);
			apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
			tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
			tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
					APE_HOST_DRIVER_ID_MAGIC);
			tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
					APE_HOST_BEHAV_NO_PHYLOCK);

			event = APE_EVENT_STATUS_STATE_START;
			break;
		case RESET_KIND_SHUTDOWN:
6569 6570 6571 6572 6573 6574 6575
			/* With the interface we are currently using,
			 * APE does not track driver state.  Wiping
			 * out the HOST SEGMENT SIGNATURE forces
			 * the APE to assume OS absent status.
			 */
			tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

M
Matt Carlson 已提交
6576 6577 6578 6579 6580 6581 6582 6583 6584 6585 6586 6587 6588 6589
			event = APE_EVENT_STATUS_STATE_UNLOAD;
			break;
		case RESET_KIND_SUSPEND:
			event = APE_EVENT_STATUS_STATE_SUSPEND;
			break;
		default:
			return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}

L
Linus Torvalds 已提交
6590 6591 6592
/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
6593 6594
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
L
Linus Torvalds 已提交
6595 6596 6597 6598 6599 6600 6601 6602 6603 6604 6605 6606 6607 6608 6609 6610 6611 6612 6613 6614

	if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
6615
		}
L
Linus Torvalds 已提交
6616
	}
M
Matt Carlson 已提交
6617 6618 6619 6620

	if (kind == RESET_KIND_INIT ||
	    kind == RESET_KIND_SUSPEND)
		tg3_ape_driver_state_change(tp, kind);
L
Linus Torvalds 已提交
6621 6622 6623 6624 6625 6626 6627 6628 6629 6630 6631 6632 6633 6634 6635 6636 6637 6638 6639
}

/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
6640
		}
L
Linus Torvalds 已提交
6641
	}
M
Matt Carlson 已提交
6642 6643 6644

	if (kind == RESET_KIND_SHUTDOWN)
		tg3_ape_driver_state_change(tp, kind);
L
Linus Torvalds 已提交
6645 6646 6647 6648 6649 6650 6651 6652 6653 6654 6655 6656 6657 6658 6659 6660 6661 6662 6663 6664 6665 6666 6667 6668
}

/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
6669
		}
L
Linus Torvalds 已提交
6670 6671 6672
	}
}

M
Michael Chan 已提交
6673 6674 6675 6676 6677
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

M
Michael Chan 已提交
6678
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6679 6680
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
M
Michael Chan 已提交
6681 6682
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
6683
			udelay(100);
M
Michael Chan 已提交
6684 6685 6686 6687
		}
		return -ENODEV;
	}

M
Michael Chan 已提交
6688 6689 6690 6691 6692 6693 6694 6695 6696 6697 6698 6699 6700 6701 6702 6703 6704
	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 &&
	    !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
		tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;

6705
		netdev_info(tp->dev, "No firmware running\n");
M
Michael Chan 已提交
6706 6707
	}

6708 6709 6710 6711 6712 6713 6714
	if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

M
Michael Chan 已提交
6715 6716 6717
	return 0;
}

6718 6719 6720
/* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
M
Matt Carlson 已提交
6721
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
6722 6723 6724 6725 6726 6727 6728 6729 6730 6731 6732 6733 6734 6735 6736 6737
}

/* Restore PCI state after chip reset */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
M
Matt Carlson 已提交
6738 6739 6740 6741
	/* Allow reads and writes to the APE register and memory space. */
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR;
6742 6743
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

M
Matt Carlson 已提交
6744
	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
6745

M
Matt Carlson 已提交
6746 6747 6748 6749 6750 6751 6752 6753 6754
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
		if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
			pcie_set_readrq(tp->pdev, 4096);
		else {
			pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
					      tp->pci_cacheline_sz);
			pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
					      tp->pci_lat_timer);
		}
6755
	}
M
Matt Carlson 已提交
6756

6757
	/* Make sure PCI-X relaxed ordering bit is clear. */
M
Matt Carlson 已提交
6758
	if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
M
Matt Carlson 已提交
6759 6760 6761 6762 6763 6764 6765 6766
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}
6767 6768 6769 6770 6771 6772 6773 6774 6775 6776 6777 6778 6779 6780 6781 6782 6783 6784 6785 6786 6787

	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}

L
Linus Torvalds 已提交
6788 6789 6790 6791 6792 6793
static void tg3_stop_fw(struct tg3 *);

/* tp->lock is held. */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
6794
	void (*write_op)(struct tg3 *, u32, u32);
6795
	int i, err;
L
Linus Torvalds 已提交
6796

6797 6798
	tg3_nvram_lock(tp);

6799 6800
	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

6801 6802 6803 6804
	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;
L
Linus Torvalds 已提交
6805

6806 6807 6808 6809 6810 6811
	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

6812
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
M
Matt Carlson 已提交
6813
	    (tp->tg3_flags3 & TG3_FLG3_5755_PLUS))
6814 6815
		tw32(GRC_FASTBOOT_PC, 0);

L
Linus Torvalds 已提交
6816 6817 6818 6819 6820 6821
	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
6822 6823 6824
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;
L
Linus Torvalds 已提交
6825

6826 6827 6828 6829 6830 6831 6832
	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
6833 6834 6835 6836 6837 6838 6839 6840
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status) {
			tnapi->hw_status->status = 0;
			tnapi->hw_status->status_tag = 0;
		}
		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
6841
	}
6842
	smp_mb();
6843 6844 6845

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);
6846

M
Matt Carlson 已提交
6847 6848 6849 6850 6851
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

L
Linus Torvalds 已提交
6852 6853 6854 6855 6856 6857 6858 6859 6860 6861 6862 6863 6864
	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		if (tr32(0x7e2c) == 0x60) {
			tw32(0x7e2c, 0x20);
		}
		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

M
Michael Chan 已提交
6865 6866 6867 6868 6869 6870
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

L
Linus Torvalds 已提交
6871 6872 6873 6874
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
	tw32(GRC_MISC_CFG, val);

6875 6876
	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;
L
Linus Torvalds 已提交
6877 6878 6879 6880 6881 6882 6883 6884 6885 6886 6887 6888 6889 6890 6891 6892 6893 6894 6895 6896 6897 6898 6899 6900 6901 6902

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

6903
	if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && tp->pcie_cap) {
6904 6905
		u16 val16;

L
Linus Torvalds 已提交
6906 6907 6908 6909 6910 6911 6912 6913 6914 6915 6916 6917
		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
			int i;
			u32 cfg_val;

			/* Wait for link training to complete.  */
			for (i = 0; i < 5000; i++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}
6918

6919 6920 6921 6922 6923 6924 6925 6926 6927
		/* Clear the "no snoop" and "relaxed ordering" bits. */
		pci_read_config_word(tp->pdev,
				     tp->pcie_cap + PCI_EXP_DEVCTL,
				     &val16);
		val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
			   PCI_EXP_DEVCTL_NOSNOOP_EN);
		/*
		 * Older PCIe devices only support the 128 byte
		 * MPS setting.  Enforce the restriction.
6928
		 */
6929 6930 6931
		if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784))
			val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
6932 6933
		pci_write_config_word(tp->pdev,
				      tp->pcie_cap + PCI_EXP_DEVCTL,
6934
				      val16);
6935 6936 6937 6938 6939 6940 6941 6942 6943 6944

		pcie_set_readrq(tp->pdev, 4096);

		/* Clear error status */
		pci_write_config_word(tp->pdev,
				      tp->pcie_cap + PCI_EXP_DEVSTA,
				      PCI_EXP_DEVSTA_CED |
				      PCI_EXP_DEVSTA_NFED |
				      PCI_EXP_DEVSTA_FED |
				      PCI_EXP_DEVSTA_URD);
L
Linus Torvalds 已提交
6945 6946
	}

6947
	tg3_restore_pci_state(tp);
L
Linus Torvalds 已提交
6948

6949 6950
	tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;

6951 6952
	val = 0;
	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
M
Michael Chan 已提交
6953
		val = tr32(MEMARB_MODE);
6954
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
L
Linus Torvalds 已提交
6955 6956 6957 6958 6959 6960 6961 6962 6963

	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	tw32(GRC_MODE, tp->grc_mode);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
A
Andy Gospodarek 已提交
6964
		val = tr32(0xc4);
L
Linus Torvalds 已提交
6965 6966 6967 6968 6969 6970 6971 6972 6973 6974 6975 6976 6977 6978 6979

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		tw32_f(MAC_MODE, tp->mac_mode);
M
Michael Chan 已提交
6980 6981 6982
	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		tw32_f(MAC_MODE, tp->mac_mode);
6983 6984 6985 6986 6987
	} else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
		tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
		if (tp->mac_mode & MAC_MODE_APE_TX_EN)
			tp->mac_mode |= MAC_MODE_TDE_ENABLE;
		tw32_f(MAC_MODE, tp->mac_mode);
L
Linus Torvalds 已提交
6988 6989 6990 6991
	} else
		tw32_f(MAC_MODE, 0);
	udelay(40);

6992 6993
	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

M
Michael Chan 已提交
6994 6995 6996
	err = tg3_poll_fw(tp);
	if (err)
		return err;
L
Linus Torvalds 已提交
6997

6998 6999
	tg3_mdio_start(tp);

M
Matt Carlson 已提交
7000 7001 7002 7003 7004 7005 7006 7007 7008 7009 7010 7011 7012 7013 7014 7015 7016 7017 7018 7019 7020 7021 7022 7023
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		u8 phy_addr;

		phy_addr = tp->phy_addr;
		tp->phy_addr = TG3_PHY_PCIE_ADDR;

		tg3_writephy(tp, TG3_PCIEPHY_BLOCK_ADDR,
			     TG3_PCIEPHY_TXB_BLK << TG3_PCIEPHY_BLOCK_SHIFT);
		val = TG3_PCIEPHY_TX0CTRL1_TXOCM | TG3_PCIEPHY_TX0CTRL1_RDCTL |
		      TG3_PCIEPHY_TX0CTRL1_TXCMV | TG3_PCIEPHY_TX0CTRL1_TKSEL |
		      TG3_PCIEPHY_TX0CTRL1_NB_EN;
		tg3_writephy(tp, TG3_PCIEPHY_TX0CTRL1, val);
		udelay(10);

		tg3_writephy(tp, TG3_PCIEPHY_BLOCK_ADDR,
			     TG3_PCIEPHY_XGXS_BLK1 << TG3_PCIEPHY_BLOCK_SHIFT);
		val = TG3_PCIEPHY_PWRMGMT4_LOWPWR_EN |
		      TG3_PCIEPHY_PWRMGMT4_L1PLLPD_EN;
		tg3_writephy(tp, TG3_PCIEPHY_PWRMGMT4, val);
		udelay(10);

		tp->phy_addr = phy_addr;
	}

L
Linus Torvalds 已提交
7024
	if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
M
Matt Carlson 已提交
7025 7026
	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
M
Matt Carlson 已提交
7027 7028
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765) {
A
Andy Gospodarek 已提交
7029
		val = tr32(0x7c00);
L
Linus Torvalds 已提交
7030 7031 7032 7033 7034 7035 7036 7037 7038 7039 7040 7041 7042 7043

		tw32(0x7c00, val | (1 << 25));
	}

	/* Reprobe ASF enable state.  */
	tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
	tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
M
Matt Carlson 已提交
7044
			tp->last_event_jiffies = jiffies;
7045
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
L
Linus Torvalds 已提交
7046 7047 7048 7049 7050 7051 7052 7053 7054 7055
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}
	}

	return 0;
}

/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
M
Matt Carlson 已提交
7056 7057
	if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
	   !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
7058 7059
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);
L
Linus Torvalds 已提交
7060 7061

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
M
Matt Carlson 已提交
7062 7063

		tg3_generate_fw_event(tp);
L
Linus Torvalds 已提交
7064

7065 7066
		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
L
Linus Torvalds 已提交
7067 7068 7069 7070
	}
}

/* tp->lock is held. */
M
Michael Chan 已提交
7071
static int tg3_halt(struct tg3 *tp, int kind, int silent)
L
Linus Torvalds 已提交
7072 7073 7074 7075 7076
{
	int err;

	tg3_stop_fw(tp);

M
Michael Chan 已提交
7077
	tg3_write_sig_pre_reset(tp, kind);
L
Linus Torvalds 已提交
7078

7079
	tg3_abort_hw(tp, silent);
L
Linus Torvalds 已提交
7080 7081
	err = tg3_chip_reset(tp);

7082 7083
	__tg3_set_mac_addr(tp, 0);

M
Michael Chan 已提交
7084 7085
	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);
L
Linus Torvalds 已提交
7086 7087 7088 7089 7090 7091 7092 7093 7094 7095 7096 7097 7098 7099 7100 7101 7102

	if (err)
		return err;

	return 0;
}

#define RX_CPU_SCRATCH_BASE	0x30000
#define RX_CPU_SCRATCH_SIZE	0x04000
#define TX_CPU_SCRATCH_BASE	0x34000
#define TX_CPU_SCRATCH_SIZE	0x04000

/* tp->lock is held. */
static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
{
	int i;

7103 7104
	BUG_ON(offset == TX_CPU_BASE &&
	    (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
L
Linus Torvalds 已提交
7105

M
Michael Chan 已提交
7106 7107 7108 7109 7110 7111
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val = tr32(GRC_VCPU_EXT_CTRL);

		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
		return 0;
	}
L
Linus Torvalds 已提交
7112 7113 7114 7115 7116 7117 7118 7119 7120 7121 7122 7123 7124 7125 7126 7127 7128 7129 7130 7131 7132
	if (offset == RX_CPU_BASE) {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}

		tw32(offset + CPU_STATE, 0xffffffff);
		tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
		udelay(10);
	} else {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}
	}

	if (i >= 10000) {
7133 7134
		netdev_err(tp->dev, "%s timed out, %s CPU\n",
			   __func__, offset == RX_CPU_BASE ? "RX" : "TX");
L
Linus Torvalds 已提交
7135 7136
		return -ENODEV;
	}
M
Michael Chan 已提交
7137 7138 7139 7140

	/* Clear firmware's nvram arbitration. */
	if (tp->tg3_flags & TG3_FLAG_NVRAM)
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
L
Linus Torvalds 已提交
7141 7142 7143 7144
	return 0;
}

struct fw_info {
7145 7146 7147
	unsigned int fw_base;
	unsigned int fw_len;
	const __be32 *fw_data;
L
Linus Torvalds 已提交
7148 7149 7150 7151 7152 7153
};

/* tp->lock is held. */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
				 int cpu_scratch_size, struct fw_info *info)
{
M
Michael Chan 已提交
7154
	int err, lock_err, i;
L
Linus Torvalds 已提交
7155 7156 7157 7158
	void (*write_op)(struct tg3 *, u32, u32);

	if (cpu_base == TX_CPU_BASE &&
	    (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7159 7160
		netdev_err(tp->dev, "%s: Trying to load TX cpu firmware which is 5705\n",
			   __func__);
L
Linus Torvalds 已提交
7161 7162 7163 7164 7165 7166 7167 7168
		return -EINVAL;
	}

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		write_op = tg3_write_mem;
	else
		write_op = tg3_write_indirect_reg32;

7169 7170 7171
	/* It is possible that bootcode is still loading at this point.
	 * Get the nvram lock first before halting the cpu.
	 */
M
Michael Chan 已提交
7172
	lock_err = tg3_nvram_lock(tp);
L
Linus Torvalds 已提交
7173
	err = tg3_halt_cpu(tp, cpu_base);
M
Michael Chan 已提交
7174 7175
	if (!lock_err)
		tg3_nvram_unlock(tp);
L
Linus Torvalds 已提交
7176 7177 7178 7179 7180 7181 7182
	if (err)
		goto out;

	for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
		write_op(tp, cpu_scratch_base + i, 0);
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
7183
	for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
L
Linus Torvalds 已提交
7184
		write_op(tp, (cpu_scratch_base +
7185
			      (info->fw_base & 0xffff) +
L
Linus Torvalds 已提交
7186
			      (i * sizeof(u32))),
7187
			      be32_to_cpu(info->fw_data[i]));
L
Linus Torvalds 已提交
7188 7189 7190 7191 7192 7193 7194 7195 7196 7197 7198

	err = 0;

out:
	return err;
}

/* tp->lock is held. */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	struct fw_info info;
7199
	const __be32 *fw_data;
L
Linus Torvalds 已提交
7200 7201
	int err, i;

7202 7203 7204 7205 7206 7207 7208 7209 7210 7211 7212
	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	info.fw_base = be32_to_cpu(fw_data[1]);
	info.fw_len = tp->fw->size - 12;
	info.fw_data = &fw_data[3];
L
Linus Torvalds 已提交
7213 7214 7215 7216 7217 7218 7219 7220 7221 7222 7223 7224 7225 7226 7227

	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7228
	tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
L
Linus Torvalds 已提交
7229 7230

	for (i = 0; i < 5; i++) {
7231
		if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
L
Linus Torvalds 已提交
7232 7233 7234
			break;
		tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
		tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
7235
		tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
L
Linus Torvalds 已提交
7236 7237 7238
		udelay(1000);
	}
	if (i >= 5) {
7239 7240
		netdev_err(tp->dev, "tg3_load_firmware fails to set RX CPU PC, is %08x should be %08x\n",
			   tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
L
Linus Torvalds 已提交
7241 7242 7243 7244 7245 7246 7247 7248 7249 7250 7251 7252 7253 7254
		return -ENODEV;
	}
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);

	return 0;
}

/* 5705 needs a special version of the TSO firmware.  */

/* tp->lock is held. */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	struct fw_info info;
7255
	const __be32 *fw_data;
L
Linus Torvalds 已提交
7256 7257 7258 7259 7260 7261
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err, i;

	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
		return 0;

7262 7263 7264 7265 7266 7267 7268 7269 7270 7271 7272 7273 7274
	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	info.fw_base = be32_to_cpu(fw_data[1]);
	cpu_scratch_size = tp->fw_len;
	info.fw_len = tp->fw->size - 12;
	info.fw_data = &fw_data[3];

L
Linus Torvalds 已提交
7275 7276 7277 7278 7279 7280 7281 7282 7283 7284 7285 7286 7287 7288 7289 7290 7291
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
	} else {
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    &info);
	if (err)
		return err;

	/* Now startup the cpu. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
7292
	tw32_f(cpu_base + CPU_PC, info.fw_base);
L
Linus Torvalds 已提交
7293 7294

	for (i = 0; i < 5; i++) {
7295
		if (tr32(cpu_base + CPU_PC) == info.fw_base)
L
Linus Torvalds 已提交
7296 7297 7298
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
7299
		tw32_f(cpu_base + CPU_PC, info.fw_base);
L
Linus Torvalds 已提交
7300 7301 7302
		udelay(1000);
	}
	if (i >= 5) {
7303 7304
		netdev_err(tp->dev, "%s fails to set CPU PC, is %08x should be %08x\n",
			   __func__, tr32(cpu_base + CPU_PC), info.fw_base);
L
Linus Torvalds 已提交
7305 7306 7307 7308 7309 7310 7311 7312 7313 7314 7315 7316
		return -ENODEV;
	}
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE,  0x00000000);
	return 0;
}


static int tg3_set_mac_addr(struct net_device *dev, void *p)
{
	struct tg3 *tp = netdev_priv(dev);
	struct sockaddr *addr = p;
7317
	int err = 0, skip_mac_1 = 0;
L
Linus Torvalds 已提交
7318

M
Michael Chan 已提交
7319 7320 7321
	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

L
Linus Torvalds 已提交
7322 7323
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

7324 7325 7326
	if (!netif_running(dev))
		return 0;

7327
	if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
7328
		u32 addr0_high, addr0_low, addr1_high, addr1_low;
7329

7330 7331 7332 7333 7334 7335 7336 7337 7338
		addr0_high = tr32(MAC_ADDR_0_HIGH);
		addr0_low = tr32(MAC_ADDR_0_LOW);
		addr1_high = tr32(MAC_ADDR_1_HIGH);
		addr1_low = tr32(MAC_ADDR_1_LOW);

		/* Skip MAC addr 1 if ASF is using it. */
		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
		    !(addr1_high == 0 && addr1_low == 0))
			skip_mac_1 = 1;
7339
	}
7340 7341 7342
	spin_lock_bh(&tp->lock);
	__tg3_set_mac_addr(tp, skip_mac_1);
	spin_unlock_bh(&tp->lock);
L
Linus Torvalds 已提交
7343

M
Michael Chan 已提交
7344
	return err;
L
Linus Torvalds 已提交
7345 7346 7347 7348 7349 7350 7351 7352 7353 7354 7355 7356 7357 7358 7359 7360 7361 7362 7363 7364 7365 7366 7367 7368
}

/* tp->lock is held. */
static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
			   dma_addr_t mapping, u32 maxlen_flags,
			   u32 nic_addr)
{
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
		      ((u64) mapping >> 32));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
		      ((u64) mapping & 0xffffffff));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
		       maxlen_flags);

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
		tg3_write_mem(tp,
			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
			      nic_addr);
}

static void __tg3_set_rx_mode(struct net_device *);
M
Michael Chan 已提交
7369
static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7370
{
7371 7372
	int i;

7373
	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)) {
7374 7375 7376 7377 7378 7379 7380
		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
	} else {
		tw32(HOSTCC_TXCOL_TICKS, 0);
		tw32(HOSTCC_TXMAX_FRAMES, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
7381
	}
7382

7383 7384 7385 7386 7387
	if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSIX)) {
		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
	} else {
7388 7389 7390
		tw32(HOSTCC_RXCOL_TICKS, 0);
		tw32(HOSTCC_RXMAX_FRAMES, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
7391
	}
7392

7393 7394 7395
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		u32 val = ec->stats_block_coalesce_usecs;

7396 7397 7398
		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);

7399 7400 7401 7402 7403
		if (!netif_carrier_ok(tp->dev))
			val = 0;

		tw32(HOSTCC_STAT_COAL_TICKS, val);
	}
7404 7405 7406 7407 7408 7409 7410 7411 7412 7413

	for (i = 0; i < tp->irq_cnt - 1; i++) {
		u32 reg;

		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
		tw32(reg, ec->rx_coalesce_usecs);
		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames);
		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames_irq);
7414 7415 7416 7417 7418 7419 7420 7421 7422

		if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) {
			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
			tw32(reg, ec->tx_coalesce_usecs);
			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames);
			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames_irq);
		}
7423 7424 7425 7426 7427 7428
	}

	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7429 7430 7431 7432 7433 7434

		if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) {
			tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
			tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
			tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
		}
7435
	}
7436
}
L
Linus Torvalds 已提交
7437

M
Matt Carlson 已提交
7438 7439 7440 7441
/* tp->lock is held. */
static void tg3_rings_reset(struct tg3 *tp)
{
	int i;
7442
	u32 stblk, txrcb, rxrcb, limit;
M
Matt Carlson 已提交
7443 7444 7445 7446 7447
	struct tg3_napi *tnapi = &tp->napi[0];

	/* Disable all transmit rings but the first. */
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
M
Matt Carlson 已提交
7448 7449
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
M
Matt Carlson 已提交
7450 7451 7452 7453 7454 7455 7456 7457 7458 7459
	else
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;

	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);


	/* Disable all receive return rings but the first. */
M
Matt Carlson 已提交
7460 7461 7462
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
	else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
M
Matt Carlson 已提交
7463
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
M
Matt Carlson 已提交
7464 7465
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
M
Matt Carlson 已提交
7466 7467 7468 7469 7470 7471 7472 7473 7474 7475 7476 7477 7478
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
	else
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;

	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);

	/* Disable interrupts */
	tw32_mailbox_f(tp->napi[0].int_mbox, 1);

	/* Zero mailbox registers. */
7479 7480 7481 7482
	if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) {
		for (i = 1; i < TG3_IRQ_MAX_VECS; i++) {
			tp->napi[i].tx_prod = 0;
			tp->napi[i].tx_cons = 0;
7483 7484
			if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
				tw32_mailbox(tp->napi[i].prodmbox, 0);
7485 7486 7487
			tw32_rx_mbox(tp->napi[i].consmbox, 0);
			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
		}
7488 7489
		if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS))
			tw32_mailbox(tp->napi[0].prodmbox, 0);
7490 7491 7492 7493 7494 7495
	} else {
		tp->napi[0].tx_prod = 0;
		tp->napi[0].tx_cons = 0;
		tw32_mailbox(tp->napi[0].prodmbox, 0);
		tw32_rx_mbox(tp->napi[0].consmbox, 0);
	}
M
Matt Carlson 已提交
7496 7497 7498 7499 7500 7501 7502 7503 7504 7505 7506 7507 7508 7509 7510 7511 7512 7513 7514 7515

	/* Make sure the NIC-based send BD rings are disabled. */
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
		for (i = 0; i < 16; i++)
			tw32_tx_mbox(mbox + i * 8, 0);
	}

	txrcb = NIC_SRAM_SEND_RCB;
	rxrcb = NIC_SRAM_RCV_RET_RCB;

	/* Clear status block in ram. */
	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

	/* Set status block DMA address */
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tnapi->status_mapping >> 32));
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tnapi->status_mapping & 0xffffffff));

7516 7517 7518 7519 7520 7521 7522 7523 7524 7525 7526 7527 7528 7529 7530 7531
	if (tnapi->tx_ring) {
		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
			       (TG3_TX_RING_SIZE <<
				BDINFO_FLAGS_MAXLEN_SHIFT),
			       NIC_SRAM_TX_BUFFER_DESC);
		txrcb += TG3_BDINFO_SIZE;
	}

	if (tnapi->rx_rcb) {
		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       (TG3_RX_RCB_RING_SIZE(tp) <<
				BDINFO_FLAGS_MAXLEN_SHIFT), 0);
		rxrcb += TG3_BDINFO_SIZE;
	}

	stblk = HOSTCC_STATBLCK_RING1;
M
Matt Carlson 已提交
7532

7533 7534 7535 7536 7537 7538 7539 7540
	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
		u64 mapping = (u64)tnapi->status_mapping;
		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);

		/* Clear status block in ram. */
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

7541 7542 7543 7544 7545 7546 7547
		if (tnapi->tx_ring) {
			tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
				       (TG3_TX_RING_SIZE <<
					BDINFO_FLAGS_MAXLEN_SHIFT),
				       NIC_SRAM_TX_BUFFER_DESC);
			txrcb += TG3_BDINFO_SIZE;
		}
7548 7549 7550 7551 7552 7553 7554 7555

		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       (TG3_RX_RCB_RING_SIZE(tp) <<
				BDINFO_FLAGS_MAXLEN_SHIFT), 0);

		stblk += 8;
		rxrcb += TG3_BDINFO_SIZE;
	}
M
Matt Carlson 已提交
7556 7557
}

L
Linus Torvalds 已提交
7558
/* tp->lock is held. */
7559
static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
L
Linus Torvalds 已提交
7560 7561 7562
{
	u32 val, rdmac_mode;
	int i, err, limit;
7563
	struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
L
Linus Torvalds 已提交
7564 7565 7566 7567 7568 7569 7570 7571

	tg3_disable_ints(tp);

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);

	if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
7572
		tg3_abort_hw(tp, 1);
L
Linus Torvalds 已提交
7573 7574
	}

7575
	if (reset_phy)
7576 7577
		tg3_phy_reset(tp);

L
Linus Torvalds 已提交
7578 7579 7580 7581 7582 7583
	err = tg3_chip_reset(tp);
	if (err)
		return err;

	tg3_write_sig_legacy(tp, RESET_KIND_INIT);

7584
	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
M
Matt Carlson 已提交
7585 7586 7587
		val = tr32(TG3_CPMU_CTRL);
		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
		tw32(TG3_CPMU_CTRL, val);
7588 7589 7590 7591 7592 7593 7594 7595 7596 7597 7598 7599 7600 7601 7602

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);

		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
		val |= CPMU_LNK_AWARE_MACCLK_6_25;
		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);

		val = tr32(TG3_CPMU_HST_ACC);
		val &= ~CPMU_HST_ACC_MACCLK_MASK;
		val |= CPMU_HST_ACC_MACCLK_6_25;
		tw32(TG3_CPMU_HST_ACC, val);
M
Matt Carlson 已提交
7603 7604
	}

M
Matt Carlson 已提交
7605 7606 7607 7608 7609
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
		       PCIE_PWR_MGMT_L1_THRESH_4MS;
		tw32(PCIE_PWR_MGMT_THRESH, val);
7610 7611 7612 7613 7614

		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);

		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
M
Matt Carlson 已提交
7615

M
Matt Carlson 已提交
7616 7617
		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
M
Matt Carlson 已提交
7618 7619
	}

7620 7621 7622 7623 7624 7625 7626 7627 7628 7629 7630 7631 7632 7633
	if (tp->tg3_flags3 & TG3_FLG3_L1PLLPD_EN) {
		u32 grc_mode = tr32(GRC_MODE);

		/* Access the lower 1K of PL PCIE block registers. */
		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);

		tw32(GRC_MODE, grc_mode);
	}

L
Linus Torvalds 已提交
7634 7635 7636
	/* This works around an issue with Athlon chipsets on
	 * B3 tigon3 silicon.  This bit has no effect on any
	 * other revision.  But do not set this on PCI Express
7637
	 * chips and don't even touch the clocks if the CPMU is present.
L
Linus Torvalds 已提交
7638
	 */
7639 7640 7641 7642 7643
	if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
		if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}
L
Linus Torvalds 已提交
7644 7645 7646 7647 7648 7649 7650 7651

	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_RETRY_SAME_DMA;
		tw32(TG3PCI_PCISTATE, val);
	}

M
Matt Carlson 已提交
7652 7653 7654 7655 7656 7657 7658 7659 7660 7661
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR;
		tw32(TG3PCI_PCISTATE, val);
	}

L
Linus Torvalds 已提交
7662 7663 7664 7665 7666 7667 7668 7669 7670 7671 7672 7673
	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
		/* Enable some hw fixes.  */
		val = tr32(TG3PCI_MSI_DATA);
		val |= (1 << 26) | (1 << 28) | (1 << 29);
		tw32(TG3PCI_MSI_DATA, val);
	}

	/* Descriptor ring init may make accesses to the
	 * NIC SRAM area to setup the TX descriptors, so we
	 * can only do this after the hardware has been
	 * successfully reset.
	 */
7674 7675 7676
	err = tg3_init_rings(tp);
	if (err)
		return err;
L
Linus Torvalds 已提交
7677

M
Matt Carlson 已提交
7678 7679
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
7680 7681 7682 7683 7684
		val = tr32(TG3PCI_DMA_RW_CTRL) &
		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
		   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
M
Matt Carlson 已提交
7685 7686 7687 7688 7689
		/* This value is determined during the probe time DMA
		 * engine test, tg3_test_dma.
		 */
		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}
L
Linus Torvalds 已提交
7690 7691 7692 7693 7694 7695

	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
			  GRC_MODE_4X_NIC_SEND_RINGS |
			  GRC_MODE_NO_TX_PHDR_CSUM |
			  GRC_MODE_NO_RX_PHDR_CSUM);
	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
7696 7697 7698 7699 7700 7701 7702 7703

	/* Pseudo-header checksum is done by hardware logic and not
	 * the offload processers, so make the chip do the pseudo-
	 * header checksums on receive.  For transmit it is more
	 * convenient to do the pseudo-header checksum in software
	 * as Linux does that on transmit for us in all cases.
	 */
	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
L
Linus Torvalds 已提交
7704 7705 7706 7707 7708 7709 7710 7711 7712 7713 7714 7715

	tw32(GRC_MODE,
	     tp->grc_mode |
	     (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));

	/* Setup the timer prescalar register.  Clock is always 66Mhz. */
	val = tr32(GRC_MISC_CFG);
	val &= ~0xff;
	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
	tw32(GRC_MISC_CFG, val);

	/* Initialize MBUF/DESC pool. */
7716
	if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
L
Linus Torvalds 已提交
7717 7718 7719 7720 7721 7722 7723 7724 7725 7726 7727 7728 7729
		/* Do nothing.  */
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
		else
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
	}
	else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
		int fw_len;

7730
		fw_len = tp->fw_len;
L
Linus Torvalds 已提交
7731 7732 7733 7734 7735 7736 7737
		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
		tw32(BUFMGR_MB_POOL_ADDR,
		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
		tw32(BUFMGR_MB_POOL_SIZE,
		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
	}

7738
	if (tp->dev->mtu <= ETH_DATA_LEN) {
L
Linus Torvalds 已提交
7739 7740 7741 7742 7743 7744 7745 7746 7747 7748 7749 7750 7751 7752 7753 7754 7755 7756 7757 7758 7759 7760 7761 7762 7763 7764
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water);
	} else {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water_jumbo);
	}
	tw32(BUFMGR_DMA_LOW_WATER,
	     tp->bufmgr_config.dma_low_water);
	tw32(BUFMGR_DMA_HIGH_WATER,
	     tp->bufmgr_config.dma_high_water);

	tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
	for (i = 0; i < 2000; i++) {
		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
			break;
		udelay(10);
	}
	if (i >= 2000) {
7765
		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
L
Linus Torvalds 已提交
7766 7767 7768 7769
		return -ENODEV;
	}

	/* Setup replenish threshold. */
M
Michael Chan 已提交
7770 7771 7772 7773 7774
	val = tp->rx_pending / 8;
	if (val == 0)
		val = 1;
	else if (val > tp->rx_std_max_post)
		val = tp->rx_std_max_post;
M
Michael Chan 已提交
7775 7776 7777 7778 7779 7780 7781
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
			tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);

		if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
			val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
	}
M
Michael Chan 已提交
7782 7783

	tw32(RCVBDI_STD_THRESH, val);
L
Linus Torvalds 已提交
7784 7785 7786 7787 7788 7789 7790 7791 7792 7793 7794 7795 7796 7797 7798 7799 7800 7801 7802

	/* Initialize TG3_BDINFO's at:
	 *  RCVDBDI_STD_BD:	standard eth size rx ring
	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
	 *
	 * like so:
	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
	 *                              ring attribute flags
	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
	 *
	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
	 *
	 * The size of each ring is fixed in the firmware, but the location is
	 * configurable.
	 */
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7803
	     ((u64) tpr->rx_std_mapping >> 32));
L
Linus Torvalds 已提交
7804
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7805
	     ((u64) tpr->rx_std_mapping & 0xffffffff));
7806
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
M
Matt Carlson 已提交
7807 7808
		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
		     NIC_SRAM_RX_BUFFER_DESC);
L
Linus Torvalds 已提交
7809

7810 7811
	/* Disable the mini ring */
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
L
Linus Torvalds 已提交
7812 7813 7814
		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
		     BDINFO_FLAGS_DISABLED);

7815 7816 7817
	/* Program the jumbo buffer descriptor ring control
	 * blocks on those devices that have them.
	 */
7818
	if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
7819
	    !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
L
Linus Torvalds 已提交
7820 7821 7822
		/* Setup replenish threshold. */
		tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);

7823
		if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
L
Linus Torvalds 已提交
7824
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7825
			     ((u64) tpr->rx_jmb_mapping >> 32));
L
Linus Torvalds 已提交
7826
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7827
			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
L
Linus Torvalds 已提交
7828
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
M
Matt Carlson 已提交
7829 7830
			     (RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT) |
			     BDINFO_FLAGS_USE_EXT_RECV);
7831
			if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
M
Matt Carlson 已提交
7832 7833
				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
L
Linus Torvalds 已提交
7834 7835 7836 7837 7838
		} else {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     BDINFO_FLAGS_DISABLED);
		}

M
Matt Carlson 已提交
7839 7840
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
M
Matt Carlson 已提交
7841 7842 7843 7844
			val = (RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT) |
			      (RX_STD_MAX_SIZE << 2);
		else
			val = RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT;
7845 7846 7847 7848
	} else
		val = RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT;

	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
L
Linus Torvalds 已提交
7849

M
Matt Carlson 已提交
7850
	tpr->rx_std_prod_idx = tp->rx_pending;
7851
	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
L
Linus Torvalds 已提交
7852

M
Matt Carlson 已提交
7853
	tpr->rx_jmb_prod_idx = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
7854
			  tp->rx_jumbo_pending : 0;
7855
	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
L
Linus Torvalds 已提交
7856

M
Matt Carlson 已提交
7857 7858
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
M
Matt Carlson 已提交
7859 7860 7861 7862
		tw32(STD_REPLENISH_LWM, 32);
		tw32(JMB_REPLENISH_LWM, 16);
	}

M
Matt Carlson 已提交
7863 7864
	tg3_rings_reset(tp);

L
Linus Torvalds 已提交
7865
	/* Initialize MAC address and backoff seed. */
7866
	__tg3_set_mac_addr(tp, 0);
L
Linus Torvalds 已提交
7867 7868

	/* MTU + ethernet header + FCS + optional VLAN tag */
7869 7870
	tw32(MAC_RX_MTU_SIZE,
	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
L
Linus Torvalds 已提交
7871 7872 7873 7874 7875 7876 7877 7878 7879 7880 7881 7882 7883 7884 7885 7886 7887 7888 7889 7890 7891

	/* The slot time is changed by tg3_setup_phy if we
	 * run at gigabit with half duplex.
	 */
	tw32(MAC_TX_LENGTHS,
	     (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	     (6 << TX_LENGTHS_IPG_SHIFT) |
	     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));

	/* Receive rules. */
	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
	tw32(RCVLPC_CONFIG, 0x0181);

	/* Calculate RDMAC_MODE setting early, we need it to determine
	 * the RCVLPC_STATE_ENABLE mask.
	 */
	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
		      RDMAC_MODE_LNGREAD_ENAB);
M
Michael Chan 已提交
7892

7893 7894 7895
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;

M
Matt Carlson 已提交
7896
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
M
Matt Carlson 已提交
7897 7898
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
M
Matt Carlson 已提交
7899 7900 7901 7902
		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;

M
Michael Chan 已提交
7903 7904 7905 7906
	/* If statement applies to 5705 and 5750 PCI devices only */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	     tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
L
Linus Torvalds 已提交
7907
		if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
M
Matt Carlson 已提交
7908
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
L
Linus Torvalds 已提交
7909 7910 7911 7912 7913 7914 7915
			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
		}
	}

M
Michael Chan 已提交
7916 7917 7918
	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;

L
Linus Torvalds 已提交
7919
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
M
Matt Carlson 已提交
7920 7921
		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;

M
Matt Carlson 已提交
7922 7923
	if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
M
Matt Carlson 已提交
7924 7925
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
L
Linus Torvalds 已提交
7926 7927

	/* Receive/send statistics. */
7928 7929 7930 7931 7932 7933
	if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_DACK_FIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
		   (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
L
Linus Torvalds 已提交
7934 7935 7936 7937 7938 7939 7940 7941 7942 7943 7944 7945 7946 7947 7948 7949 7950 7951 7952 7953
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else {
		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
	}
	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
	tw32(SNDDATAI_STATSENAB, 0xffffff);
	tw32(SNDDATAI_STATSCTRL,
	     (SNDDATAI_SCTRL_ENABLE |
	      SNDDATAI_SCTRL_FASTUPD));

	/* Setup host coalescing engine. */
	tw32(HOSTCC_MODE, 0);
	for (i = 0; i < 2000; i++) {
		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
			break;
		udelay(10);
	}

M
Michael Chan 已提交
7954
	__tg3_set_coalesce(tp, &tp->coal);
L
Linus Torvalds 已提交
7955 7956 7957 7958 7959 7960 7961 7962 7963 7964 7965

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		/* Status/statistics block address.  See tg3_timer,
		 * the tg3_periodic_fetch_stats call there, and
		 * tg3_get_stats to see how this works for 5705/5750 chips.
		 */
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
		     ((u64) tp->stats_mapping >> 32));
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
		     ((u64) tp->stats_mapping & 0xffffffff));
		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
M
Matt Carlson 已提交
7966

L
Linus Torvalds 已提交
7967
		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
M
Matt Carlson 已提交
7968 7969 7970 7971 7972 7973 7974 7975

		/* Clear statistics and status block memory areas */
		for (i = NIC_SRAM_STATS_BLK;
		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
		     i += sizeof(u32)) {
			tg3_write_mem(tp, i, 0);
			udelay(40);
		}
L
Linus Torvalds 已提交
7976 7977 7978 7979 7980 7981 7982 7983 7984
	}

	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);

	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);

M
Michael Chan 已提交
7985 7986 7987 7988 7989 7990 7991
	if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		/* reset to prevent losing 1st rx packet intermittently */
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
	}

7992 7993 7994 7995 7996
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
		tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	else
		tp->mac_mode = 0;
	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
L
Linus Torvalds 已提交
7997
		MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
M
Matt Carlson 已提交
7998 7999 8000 8001
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
	    !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
L
Linus Torvalds 已提交
8002 8003 8004
	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
	udelay(40);

M
Michael Chan 已提交
8005
	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
M
Michael Chan 已提交
8006
	 * If TG3_FLG2_IS_NIC is zero, we should read the
M
Michael Chan 已提交
8007 8008 8009 8010
	 * register to preserve the GPIO settings for LOMs. The GPIOs,
	 * whether used as inputs or outputs, are set by boot code after
	 * reset.
	 */
M
Michael Chan 已提交
8011
	if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
M
Michael Chan 已提交
8012 8013
		u32 gpio_mask;

M
Michael Chan 已提交
8014 8015 8016
		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
M
Michael Chan 已提交
8017 8018 8019 8020 8021

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
				     GRC_LCLCTRL_GPIO_OUTPUT3;

M
Michael Chan 已提交
8022 8023 8024
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;

8025
		tp->grc_local_ctrl &= ~gpio_mask;
M
Michael Chan 已提交
8026 8027 8028
		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;

		/* GPIO1 must be driven high for eeprom write protect */
M
Michael Chan 已提交
8029 8030 8031
		if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
					       GRC_LCLCTRL_GPIO_OUTPUT1);
M
Michael Chan 已提交
8032
	}
L
Linus Torvalds 已提交
8033 8034 8035
	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	udelay(100);

M
Matt Carlson 已提交
8036 8037 8038 8039 8040 8041
	if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX) {
		val = tr32(MSGINT_MODE);
		val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
		tw32(MSGINT_MODE, val);
	}

L
Linus Torvalds 已提交
8042 8043 8044 8045 8046 8047 8048 8049 8050 8051 8052
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
		udelay(40);
	}

	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
	       WDMAC_MODE_LNGREAD_ENAB);

M
Michael Chan 已提交
8053 8054 8055 8056
	/* If statement applies to 5705 and 5750 PCI devices only */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	     tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
8057
		if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
L
Linus Torvalds 已提交
8058 8059 8060 8061 8062 8063 8064 8065 8066 8067
		    (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
		     tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
			/* nothing */
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
			   !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
			val |= WDMAC_MODE_RX_ACCEL;
		}
	}

8068
	/* Enable host coalescing bug fix */
M
Matt Carlson 已提交
8069
	if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
M
Matt Carlson 已提交
8070
		val |= WDMAC_MODE_STATUS_TAG_FIX;
8071

M
Matt Carlson 已提交
8072 8073 8074
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		val |= WDMAC_MODE_BURST_ALL_DATA;

L
Linus Torvalds 已提交
8075 8076 8077
	tw32_f(WDMAC_MODE, val);
	udelay(40);

M
Matt Carlson 已提交
8078 8079 8080 8081 8082
	if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
L
Linus Torvalds 已提交
8083
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
M
Matt Carlson 已提交
8084 8085
			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
			pcix_cmd |= PCI_X_CMD_READ_2K;
L
Linus Torvalds 已提交
8086
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
M
Matt Carlson 已提交
8087 8088
			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
			pcix_cmd |= PCI_X_CMD_READ_2K;
L
Linus Torvalds 已提交
8089
		}
M
Matt Carlson 已提交
8090 8091
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
L
Linus Torvalds 已提交
8092 8093 8094 8095 8096 8097 8098 8099
	}

	tw32_f(RDMAC_MODE, rdmac_mode);
	udelay(40);

	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
M
Matt Carlson 已提交
8100 8101 8102 8103 8104 8105 8106

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		tw32(SNDDATAC_MODE,
		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
	else
		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);

L
Linus Torvalds 已提交
8107 8108 8109 8110 8111 8112
	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
	tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
M
Matt Carlson 已提交
8113
	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8114
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
M
Matt Carlson 已提交
8115 8116
		val |= SNDBDI_MODE_MULTI_TXQ_EN;
	tw32(SNDBDI_MODE, val);
L
Linus Torvalds 已提交
8117 8118 8119 8120 8121 8122 8123 8124 8125 8126 8127 8128 8129 8130 8131 8132 8133 8134
	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
		err = tg3_load_5701_a0_firmware_fix(tp);
		if (err)
			return err;
	}

	if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
		err = tg3_load_tso_firmware(tp);
		if (err)
			return err;
	}

	tp->tx_mode = TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);
	udelay(100);

M
Matt Carlson 已提交
8135 8136 8137 8138 8139 8140 8141 8142 8143 8144 8145 8146 8147 8148 8149 8150 8151 8152 8153 8154 8155 8156 8157 8158 8159 8160 8161 8162
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) {
		u32 reg = MAC_RSS_INDIR_TBL_0;
		u8 *ent = (u8 *)&val;

		/* Setup the indirection table */
		for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
			int idx = i % sizeof(val);

			ent[idx] = i % (tp->irq_cnt - 1);
			if (idx == sizeof(val) - 1) {
				tw32(reg, val);
				reg += 4;
			}
		}

		/* Setup the "secret" hash key. */
		tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
		tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
		tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
		tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
		tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
		tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
		tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
		tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
		tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
		tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
	}

L
Linus Torvalds 已提交
8163
	tp->rx_mode = RX_MODE_ENABLE;
M
Matt Carlson 已提交
8164
	if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
M
Michael Chan 已提交
8165 8166
		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;

M
Matt Carlson 已提交
8167 8168 8169 8170 8171 8172 8173 8174
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)
		tp->rx_mode |= RX_MODE_RSS_ENABLE |
			       RX_MODE_RSS_ITBL_HASH_BITS_7 |
			       RX_MODE_RSS_IPV6_HASH_EN |
			       RX_MODE_RSS_TCP_IPV6_HASH_EN |
			       RX_MODE_RSS_IPV4_HASH_EN |
			       RX_MODE_RSS_TCP_IPV4_HASH_EN;

L
Linus Torvalds 已提交
8175 8176 8177 8178 8179 8180
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	tw32(MAC_LED_CTRL, tp->led_ctrl);

	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
M
Michael Chan 已提交
8181
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
L
Linus Torvalds 已提交
8182 8183 8184 8185 8186 8187 8188 8189 8190 8191 8192 8193 8194 8195 8196 8197 8198 8199 8200 8201 8202 8203 8204
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
	}
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
			!(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
			/* Set drive transmission level to 1.2V  */
			/* only if the signal pre-emphasis bit is not set  */
			val = tr32(MAC_SERDES_CFG);
			val &= 0xfffff000;
			val |= 0x880;
			tw32(MAC_SERDES_CFG, val);
		}
		if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
			tw32(MAC_SERDES_CFG, 0x616000);
	}

	/* Prevent chip from dropping frames when flow control
	 * is enabled.
	 */
8205 8206 8207 8208 8209
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		val = 1;
	else
		val = 2;
	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
L
Linus Torvalds 已提交
8210 8211 8212 8213 8214 8215 8216

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
	    (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
		/* Use hardware link auto-negotiation */
		tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
	}

8217 8218 8219 8220 8221 8222 8223 8224 8225 8226 8227
	if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
		u32 tmp;

		tmp = tr32(SERDES_RX_CTRL);
		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	}

M
Matt Carlson 已提交
8228 8229 8230 8231 8232 8233 8234
	if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
		if (tp->link_config.phy_is_low_power) {
			tp->link_config.phy_is_low_power = 0;
			tp->link_config.speed = tp->link_config.orig_speed;
			tp->link_config.duplex = tp->link_config.orig_duplex;
			tp->link_config.autoneg = tp->link_config.orig_autoneg;
		}
L
Linus Torvalds 已提交
8235

M
Matt Carlson 已提交
8236 8237 8238
		err = tg3_setup_phy(tp, 0);
		if (err)
			return err;
L
Linus Torvalds 已提交
8239

M
Matt Carlson 已提交
8240
		if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
8241
		    !(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)) {
M
Matt Carlson 已提交
8242 8243 8244 8245 8246 8247 8248 8249
			u32 tmp;

			/* Clear CRC stats. */
			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
				tg3_writephy(tp, MII_TG3_TEST1,
					     tmp | MII_TG3_TEST1_CRC_EN);
				tg3_readphy(tp, 0x14, &tmp);
			}
L
Linus Torvalds 已提交
8250 8251 8252 8253 8254 8255 8256 8257 8258 8259 8260
		}
	}

	__tg3_set_rx_mode(tp->dev);

	/* Initialize receive rules. */
	tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);

M
Michael Chan 已提交
8261
	if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
M
Michael Chan 已提交
8262
	    !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
L
Linus Torvalds 已提交
8263 8264 8265 8266 8267 8268 8269 8270 8271 8272 8273 8274 8275 8276 8277 8278 8279 8280 8281 8282 8283 8284 8285 8286 8287 8288 8289 8290 8291 8292 8293 8294 8295 8296 8297 8298 8299 8300 8301
		limit = 8;
	else
		limit = 16;
	if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
		limit -= 4;
	switch (limit) {
	case 16:
		tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
	case 15:
		tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
	case 14:
		tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
	case 13:
		tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
	case 12:
		tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
	case 11:
		tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
	case 10:
		tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
	case 9:
		tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
	case 8:
		tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
	case 7:
		tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
	case 6:
		tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
	case 5:
		tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
	case 4:
		/* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
	case 3:
		/* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
	case 2:
	case 1:

	default:
		break;
8302
	}
L
Linus Torvalds 已提交
8303

M
Matt Carlson 已提交
8304 8305 8306 8307
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
		/* Write our heartbeat update interval to APE. */
		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
				APE_HOST_HEARTBEAT_INT_DISABLE);
M
Matt Carlson 已提交
8308

L
Linus Torvalds 已提交
8309 8310 8311 8312 8313 8314 8315 8316
	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);

	return 0;
}

/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 */
8317
static int tg3_init_hw(struct tg3 *tp, int reset_phy)
L
Linus Torvalds 已提交
8318 8319 8320 8321 8322
{
	tg3_switch_clocks(tp);

	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

8323
	return tg3_reset_hw(tp, reset_phy);
L
Linus Torvalds 已提交
8324 8325 8326 8327 8328 8329 8330 8331 8332 8333 8334 8335 8336 8337 8338 8339 8340 8341 8342 8343 8344 8345 8346 8347 8348 8349 8350 8351 8352 8353 8354 8355 8356 8357 8358 8359 8360 8361 8362 8363 8364 8365 8366 8367
}

#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)

static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!netif_carrier_ok(tp->dev))
		return;

	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
8368 8369 8370 8371

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
L
Linus Torvalds 已提交
8372 8373 8374 8375 8376 8377
}

static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

8378 8379 8380
	if (tp->irq_sync)
		goto restart_timer;

8381
	spin_lock(&tp->lock);
L
Linus Torvalds 已提交
8382

8383 8384 8385 8386 8387
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
8388
		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
8389 8390 8391 8392
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			tw32(HOSTCC_MODE, tp->coalesce_mode |
8393
			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
8394
		}
L
Linus Torvalds 已提交
8395

8396 8397
		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
8398
			spin_unlock(&tp->lock);
8399 8400 8401
			schedule_work(&tp->reset_task);
			return;
		}
L
Linus Torvalds 已提交
8402 8403 8404 8405
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
8406 8407 8408
		if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
			tg3_periodic_fetch_stats(tp);

L
Linus Torvalds 已提交
8409 8410 8411 8412 8413 8414 8415 8416 8417 8418 8419 8420 8421 8422 8423 8424 8425 8426 8427 8428 8429 8430 8431 8432 8433 8434 8435 8436 8437
		if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			if (netif_carrier_ok(tp->dev) &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (! netif_carrier_ok(tp->dev) &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
M
Michael Chan 已提交
8438 8439 8440 8441 8442 8443 8444 8445
				if (!tp->serdes_counter) {
					tw32_f(MAC_MODE,
					     (tp->mac_mode &
					      ~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
L
Linus Torvalds 已提交
8446 8447
				tg3_setup_phy(tp, 0);
			}
M
Michael Chan 已提交
8448 8449
		} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_serdes_parallel_detect(tp);
L
Linus Torvalds 已提交
8450 8451 8452 8453

		tp->timer_counter = tp->timer_multiplier;
	}

M
Michael Chan 已提交
8454 8455 8456 8457 8458 8459 8460 8461 8462 8463 8464 8465 8466 8467 8468 8469 8470
	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
L
Linus Torvalds 已提交
8471
	if (!--tp->asf_counter) {
8472 8473
		if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
		    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
8474 8475
			tg3_wait_for_event_ack(tp);

8476
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
M
Michael Chan 已提交
8477
				      FWCMD_NICDRV_ALIVE3);
8478
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
M
Michael Chan 已提交
8479
			/* 5 seconds timeout */
8480
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
M
Matt Carlson 已提交
8481 8482

			tg3_generate_fw_event(tp);
L
Linus Torvalds 已提交
8483 8484 8485 8486
		}
		tp->asf_counter = tp->asf_multiplier;
	}

8487
	spin_unlock(&tp->lock);
L
Linus Torvalds 已提交
8488

8489
restart_timer:
L
Linus Torvalds 已提交
8490 8491 8492 8493
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}

8494
static int tg3_request_irq(struct tg3 *tp, int irq_num)
8495
{
8496
	irq_handler_t fn;
8497
	unsigned long flags;
8498 8499 8500 8501 8502 8503 8504 8505 8506 8507
	char *name;
	struct tg3_napi *tnapi = &tp->napi[irq_num];

	if (tp->irq_cnt == 1)
		name = tp->dev->name;
	else {
		name = &tnapi->irq_lbl[0];
		snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
		name[IFNAMSIZ-1] = 0;
	}
8508

M
Matt Carlson 已提交
8509
	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
8510 8511 8512
		fn = tg3_msi;
		if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
			fn = tg3_msi_1shot;
8513
		flags = IRQF_SAMPLE_RANDOM;
8514 8515 8516 8517
	} else {
		fn = tg3_interrupt;
		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
			fn = tg3_interrupt_tagged;
8518
		flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
8519
	}
8520 8521

	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
8522 8523
}

M
Michael Chan 已提交
8524 8525
static int tg3_test_interrupt(struct tg3 *tp)
{
8526
	struct tg3_napi *tnapi = &tp->napi[0];
M
Michael Chan 已提交
8527
	struct net_device *dev = tp->dev;
8528
	int err, i, intr_ok = 0;
M
Matt Carlson 已提交
8529
	u32 val;
M
Michael Chan 已提交
8530

M
Michael Chan 已提交
8531 8532 8533
	if (!netif_running(dev))
		return -ENODEV;

M
Michael Chan 已提交
8534 8535
	tg3_disable_ints(tp);

8536
	free_irq(tnapi->irq_vec, tnapi);
M
Michael Chan 已提交
8537

M
Matt Carlson 已提交
8538 8539 8540 8541
	/*
	 * Turn off MSI one shot mode.  Otherwise this test has no
	 * observable way to know whether the interrupt was delivered.
	 */
M
Matt Carlson 已提交
8542 8543
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
M
Matt Carlson 已提交
8544 8545 8546 8547 8548
	    (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

8549
	err = request_irq(tnapi->irq_vec, tg3_test_isr,
8550
			  IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
M
Michael Chan 已提交
8551 8552 8553
	if (err)
		return err;

8554
	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
M
Michael Chan 已提交
8555 8556 8557
	tg3_enable_ints(tp);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8558
	       tnapi->coal_now);
M
Michael Chan 已提交
8559 8560

	for (i = 0; i < 5; i++) {
8561 8562
		u32 int_mbox, misc_host_ctrl;

8563
		int_mbox = tr32_mailbox(tnapi->int_mbox);
8564 8565 8566 8567 8568
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
M
Michael Chan 已提交
8569
			break;
8570 8571
		}

M
Michael Chan 已提交
8572 8573 8574 8575 8576
		msleep(10);
	}

	tg3_disable_ints(tp);

8577
	free_irq(tnapi->irq_vec, tnapi);
8578

8579
	err = tg3_request_irq(tp, 0);
M
Michael Chan 已提交
8580 8581 8582 8583

	if (err)
		return err;

M
Matt Carlson 已提交
8584 8585
	if (intr_ok) {
		/* Reenable MSI one shot mode. */
M
Matt Carlson 已提交
8586 8587
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
M
Matt Carlson 已提交
8588 8589 8590 8591
		    (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
			tw32(MSGINT_MODE, val);
		}
M
Michael Chan 已提交
8592
		return 0;
M
Matt Carlson 已提交
8593
	}
M
Michael Chan 已提交
8594 8595 8596 8597 8598 8599 8600 8601 8602 8603 8604 8605 8606 8607 8608 8609 8610 8611 8612 8613 8614 8615 8616 8617 8618 8619 8620 8621 8622 8623 8624 8625 8626 8627

	return -EIO;
}

/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored
 */
static int tg3_test_msi(struct tg3 *tp)
{
	int err;
	u16 pci_cmd;

	if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
8628 8629
	netdev_warn(tp->dev, "No interrupt was generated using MSI, switching to INTx mode\n"
		    "Please report this failure to the PCI maintainer and include system chipset information\n");
M
Michael Chan 已提交
8630

8631
	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
8632

M
Michael Chan 已提交
8633 8634 8635 8636
	pci_disable_msi(tp->pdev);

	tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;

8637
	err = tg3_request_irq(tp, 0);
M
Michael Chan 已提交
8638 8639 8640 8641 8642 8643
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
8644
	tg3_full_lock(tp, 1);
M
Michael Chan 已提交
8645

M
Michael Chan 已提交
8646
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8647
	err = tg3_init_hw(tp, 1);
M
Michael Chan 已提交
8648

8649
	tg3_full_unlock(tp);
M
Michael Chan 已提交
8650 8651

	if (err)
8652
		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
M
Michael Chan 已提交
8653 8654 8655 8656

	return err;
}

M
Matt Carlson 已提交
8657 8658 8659 8660 8661
static int tg3_request_firmware(struct tg3 *tp)
{
	const __be32 *fw_data;

	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
8662 8663
		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
			   tp->fw_needed);
M
Matt Carlson 已提交
8664 8665 8666 8667 8668 8669 8670 8671 8672 8673 8674 8675
		return -ENOENT;
	}

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	 * start address and _full_ length including BSS sections
	 * (which must be longer than the actual data, of course
	 */

	tp->fw_len = be32_to_cpu(fw_data[2]);	/* includes bss */
	if (tp->fw_len < (tp->fw->size - 12)) {
8676 8677
		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
			   tp->fw_len, tp->fw_needed);
M
Matt Carlson 已提交
8678 8679 8680 8681 8682 8683 8684 8685 8686 8687
		release_firmware(tp->fw);
		tp->fw = NULL;
		return -EINVAL;
	}

	/* We no longer need firmware; we have it. */
	tp->fw_needed = NULL;
	return 0;
}

M
Matt Carlson 已提交
8688 8689 8690 8691 8692 8693 8694 8695 8696 8697 8698 8699 8700 8701 8702 8703 8704 8705 8706 8707 8708 8709 8710 8711 8712 8713 8714
static bool tg3_enable_msix(struct tg3 *tp)
{
	int i, rc, cpus = num_online_cpus();
	struct msix_entry msix_ent[tp->irq_max];

	if (cpus == 1)
		/* Just fallback to the simpler MSI mode. */
		return false;

	/*
	 * We want as many rx rings enabled as there are cpus.
	 * The first MSIX vector only deals with link interrupts, etc,
	 * so we add one to the number of vectors we are requesting.
	 */
	tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);

	for (i = 0; i < tp->irq_max; i++) {
		msix_ent[i].entry  = i;
		msix_ent[i].vector = 0;
	}

	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
	if (rc != 0) {
		if (rc < TG3_RSS_MIN_NUM_MSIX_VECS)
			return false;
		if (pci_enable_msix(tp->pdev, msix_ent, rc))
			return false;
8715 8716
		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
			      tp->irq_cnt, rc);
M
Matt Carlson 已提交
8717 8718 8719
		tp->irq_cnt = rc;
	}

M
Matt Carlson 已提交
8720 8721
	tp->tg3_flags3 |= TG3_FLG3_ENABLE_RSS;

M
Matt Carlson 已提交
8722 8723 8724
	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].irq_vec = msix_ent[i].vector;

8725 8726 8727 8728 8729
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
		tp->tg3_flags3 |= TG3_FLG3_ENABLE_TSS;
		tp->dev->real_num_tx_queues = tp->irq_cnt - 1;
	} else
		tp->dev->real_num_tx_queues = 1;
M
Matt Carlson 已提交
8730

M
Matt Carlson 已提交
8731 8732 8733
	return true;
}

8734 8735
static void tg3_ints_init(struct tg3 *tp)
{
M
Matt Carlson 已提交
8736 8737
	if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI_OR_MSIX) &&
	    !(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
8738 8739 8740
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
8741
		netdev_warn(tp->dev, "MSI without TAGGED? Not using MSI\n");
M
Matt Carlson 已提交
8742
		goto defcfg;
8743
	}
8744

M
Matt Carlson 已提交
8745 8746 8747 8748 8749 8750 8751 8752
	if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) && tg3_enable_msix(tp))
		tp->tg3_flags2 |= TG3_FLG2_USING_MSIX;
	else if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) &&
		 pci_enable_msi(tp->pdev) == 0)
		tp->tg3_flags2 |= TG3_FLG2_USING_MSI;

	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
		u32 msi_mode = tr32(MSGINT_MODE);
M
Matt Carlson 已提交
8753 8754
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
M
Matt Carlson 已提交
8755 8756 8757 8758 8759 8760
		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
	}
defcfg:
	if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSIX)) {
		tp->irq_cnt = 1;
		tp->napi[0].irq_vec = tp->pdev->irq;
M
Matt Carlson 已提交
8761
		tp->dev->real_num_tx_queues = 1;
M
Matt Carlson 已提交
8762
	}
8763 8764 8765 8766
}

static void tg3_ints_fini(struct tg3 *tp)
{
M
Matt Carlson 已提交
8767 8768 8769 8770 8771
	if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
		pci_disable_msix(tp->pdev);
	else if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
		pci_disable_msi(tp->pdev);
	tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI_OR_MSIX;
M
Matt Carlson 已提交
8772
	tp->tg3_flags3 &= ~TG3_FLG3_ENABLE_RSS;
8773 8774
}

L
Linus Torvalds 已提交
8775 8776 8777
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
8778
	int i, err;
L
Linus Torvalds 已提交
8779

M
Matt Carlson 已提交
8780 8781 8782 8783 8784 8785
	if (tp->fw_needed) {
		err = tg3_request_firmware(tp);
		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
			if (err)
				return err;
		} else if (err) {
8786
			netdev_warn(tp->dev, "TSO capability disabled\n");
M
Matt Carlson 已提交
8787 8788
			tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
		} else if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
8789
			netdev_notice(tp->dev, "TSO capability restored\n");
M
Matt Carlson 已提交
8790 8791 8792 8793
			tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
		}
	}

8794 8795
	netif_carrier_off(tp->dev);

M
Michael Chan 已提交
8796
	err = tg3_set_power_state(tp, PCI_D0);
8797
	if (err)
M
Michael Chan 已提交
8798
		return err;
8799 8800

	tg3_full_lock(tp, 0);
M
Michael Chan 已提交
8801

L
Linus Torvalds 已提交
8802 8803 8804
	tg3_disable_ints(tp);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

8805
	tg3_full_unlock(tp);
L
Linus Torvalds 已提交
8806

M
Matt Carlson 已提交
8807 8808 8809 8810 8811 8812
	/*
	 * Setup interrupts first so we know how
	 * many NAPI resources to allocate
	 */
	tg3_ints_init(tp);

L
Linus Torvalds 已提交
8813 8814 8815 8816 8817
	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
M
Matt Carlson 已提交
8818
		goto err_out1;
M
Michael Chan 已提交
8819

8820
	tg3_napi_enable(tp);
L
Linus Torvalds 已提交
8821

8822 8823 8824 8825 8826 8827 8828 8829 8830
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		err = tg3_request_irq(tp, i);
		if (err) {
			for (i--; i >= 0; i--)
				free_irq(tnapi->irq_vec, tnapi);
			break;
		}
	}
L
Linus Torvalds 已提交
8831

8832
	if (err)
M
Matt Carlson 已提交
8833
		goto err_out2;
8834

8835
	tg3_full_lock(tp, 0);
L
Linus Torvalds 已提交
8836

8837
	err = tg3_init_hw(tp, 1);
L
Linus Torvalds 已提交
8838
	if (err) {
M
Michael Chan 已提交
8839
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
L
Linus Torvalds 已提交
8840 8841
		tg3_free_rings(tp);
	} else {
8842 8843 8844 8845 8846 8847 8848 8849 8850
		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
			tp->timer_offset = HZ;
		else
			tp->timer_offset = HZ / 10;

		BUG_ON(tp->timer_offset > HZ);
		tp->timer_counter = tp->timer_multiplier =
			(HZ / tp->timer_offset);
		tp->asf_counter = tp->asf_multiplier =
M
Michael Chan 已提交
8851
			((HZ / tp->timer_offset) * 2);
L
Linus Torvalds 已提交
8852 8853 8854 8855 8856 8857 8858

		init_timer(&tp->timer);
		tp->timer.expires = jiffies + tp->timer_offset;
		tp->timer.data = (unsigned long) tp;
		tp->timer.function = tg3_timer;
	}

8859
	tg3_full_unlock(tp);
L
Linus Torvalds 已提交
8860

8861
	if (err)
M
Matt Carlson 已提交
8862
		goto err_out3;
L
Linus Torvalds 已提交
8863

M
Michael Chan 已提交
8864 8865
	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		err = tg3_test_msi(tp);
8866

M
Michael Chan 已提交
8867
		if (err) {
8868
			tg3_full_lock(tp, 0);
M
Michael Chan 已提交
8869
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
M
Michael Chan 已提交
8870
			tg3_free_rings(tp);
8871
			tg3_full_unlock(tp);
M
Michael Chan 已提交
8872

M
Matt Carlson 已提交
8873
			goto err_out2;
M
Michael Chan 已提交
8874
		}
8875

M
Matt Carlson 已提交
8876
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
M
Matt Carlson 已提交
8877
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
M
Matt Carlson 已提交
8878 8879 8880
		    (tp->tg3_flags2 & TG3_FLG2_USING_MSI) &&
		    (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)) {
			u32 val = tr32(PCIE_TRANSACTION_CFG);
8881

M
Matt Carlson 已提交
8882 8883
			tw32(PCIE_TRANSACTION_CFG,
			     val | PCIE_TRANS_CFG_1SHOT_MSI);
8884
		}
M
Michael Chan 已提交
8885 8886
	}

M
Matt Carlson 已提交
8887 8888
	tg3_phy_start(tp);

8889
	tg3_full_lock(tp, 0);
L
Linus Torvalds 已提交
8890

M
Michael Chan 已提交
8891 8892
	add_timer(&tp->timer);
	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
L
Linus Torvalds 已提交
8893 8894
	tg3_enable_ints(tp);

8895
	tg3_full_unlock(tp);
L
Linus Torvalds 已提交
8896

M
Matt Carlson 已提交
8897
	netif_tx_start_all_queues(dev);
L
Linus Torvalds 已提交
8898 8899

	return 0;
8900

M
Matt Carlson 已提交
8901
err_out3:
8902 8903 8904 8905
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}
8906

M
Matt Carlson 已提交
8907
err_out2:
8908
	tg3_napi_disable(tp);
8909
	tg3_free_consistent(tp);
M
Matt Carlson 已提交
8910 8911 8912

err_out1:
	tg3_ints_fini(tp);
8913
	return err;
L
Linus Torvalds 已提交
8914 8915 8916 8917 8918 8919 8920 8921
}

#if 0
/*static*/ void tg3_dump_state(struct tg3 *tp)
{
	u32 val32, val32_2, val32_3, val32_4, val32_5;
	u16 val16;
	int i;
8922
	struct tg3_hw_status *sblk = tp->napi[0]->hw_status;
L
Linus Torvalds 已提交
8923 8924 8925 8926 8927 8928 8929 8930 8931 8932 8933 8934 8935 8936 8937 8938 8939 8940 8941 8942 8943 8944 8945 8946 8947 8948 8949 8950 8951 8952 8953 8954 8955 8956 8957 8958 8959 8960 8961 8962 8963 8964 8965 8966 8967 8968 8969 8970 8971 8972 8973 8974 8975 8976 8977 8978 8979 8980 8981 8982 8983 8984 8985 8986 8987 8988 8989 8990 8991 8992 8993 8994 8995 8996 8997 8998 8999 9000 9001 9002 9003 9004 9005 9006 9007 9008 9009 9010 9011 9012 9013 9014 9015 9016 9017 9018 9019 9020 9021 9022 9023 9024 9025 9026 9027 9028 9029 9030 9031 9032 9033 9034 9035 9036 9037 9038 9039 9040 9041 9042 9043 9044 9045 9046 9047 9048 9049 9050 9051 9052 9053 9054 9055 9056 9057 9058 9059 9060 9061 9062 9063 9064 9065 9066 9067 9068 9069 9070 9071 9072 9073 9074

	pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
	printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
	       val16, val32);

	/* MAC block */
	printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
	       tr32(MAC_MODE), tr32(MAC_STATUS));
	printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
	       tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
	printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
	       tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
	printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
	       tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));

	/* Send data initiator control block */
	printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
	       tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
	printk("       SNDDATAI_STATSCTRL[%08x]\n",
	       tr32(SNDDATAI_STATSCTRL));

	/* Send data completion control block */
	printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));

	/* Send BD ring selector block */
	printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
	       tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));

	/* Send BD initiator control block */
	printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
	       tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));

	/* Send BD completion control block */
	printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));

	/* Receive list placement control block */
	printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
	       tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
	printk("       RCVLPC_STATSCTRL[%08x]\n",
	       tr32(RCVLPC_STATSCTRL));

	/* Receive data and receive BD initiator control block */
	printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
	       tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));

	/* Receive data completion control block */
	printk("DEBUG: RCVDCC_MODE[%08x]\n",
	       tr32(RCVDCC_MODE));

	/* Receive BD initiator control block */
	printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
	       tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));

	/* Receive BD completion control block */
	printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
	       tr32(RCVCC_MODE), tr32(RCVCC_STATUS));

	/* Receive list selector control block */
	printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
	       tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));

	/* Mbuf cluster free block */
	printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
	       tr32(MBFREE_MODE), tr32(MBFREE_STATUS));

	/* Host coalescing control block */
	printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
	       tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
	printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATS_BLK_NIC_ADDR));
	printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_NIC_ADDR));

	/* Memory arbiter control block */
	printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
	       tr32(MEMARB_MODE), tr32(MEMARB_STATUS));

	/* Buffer manager control block */
	printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
	       tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
	printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
	printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
	       "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_DMA_DESC_POOL_ADDR),
	       tr32(BUFMGR_DMA_DESC_POOL_SIZE));

	/* Read DMA control block */
	printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
	       tr32(RDMAC_MODE), tr32(RDMAC_STATUS));

	/* Write DMA control block */
	printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
	       tr32(WDMAC_MODE), tr32(WDMAC_STATUS));

	/* DMA completion block */
	printk("DEBUG: DMAC_MODE[%08x]\n",
	       tr32(DMAC_MODE));

	/* GRC block */
	printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
	       tr32(GRC_MODE), tr32(GRC_MISC_CFG));
	printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
	       tr32(GRC_LOCAL_CTRL));

	/* TG3_BDINFOs */
	printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_JUMBO_BD + 0x0),
	       tr32(RCVDBDI_JUMBO_BD + 0x4),
	       tr32(RCVDBDI_JUMBO_BD + 0x8),
	       tr32(RCVDBDI_JUMBO_BD + 0xc));
	printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_STD_BD + 0x0),
	       tr32(RCVDBDI_STD_BD + 0x4),
	       tr32(RCVDBDI_STD_BD + 0x8),
	       tr32(RCVDBDI_STD_BD + 0xc));
	printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_MINI_BD + 0x0),
	       tr32(RCVDBDI_MINI_BD + 0x4),
	       tr32(RCVDBDI_MINI_BD + 0x8),
	       tr32(RCVDBDI_MINI_BD + 0xc));

	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
	printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4, val32_5);

	/* SW status block */
9075 9076 9077 9078 9079 9080 9081 9082 9083
	printk(KERN_DEBUG
	 "Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
	       sblk->status,
	       sblk->status_tag,
	       sblk->rx_jumbo_consumer,
	       sblk->rx_consumer,
	       sblk->rx_mini_consumer,
	       sblk->idx[0].rx_producer,
	       sblk->idx[0].tx_consumer);
L
Linus Torvalds 已提交
9084 9085 9086 9087 9088 9089 9090 9091 9092 9093

	/* SW statistics block */
	printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
	       ((u32 *)tp->hw_stats)[0],
	       ((u32 *)tp->hw_stats)[1],
	       ((u32 *)tp->hw_stats)[2],
	       ((u32 *)tp->hw_stats)[3]);

	/* Mailboxes */
	printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
M
Michael Chan 已提交
9094 9095 9096 9097
	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
L
Linus Torvalds 已提交
9098 9099 9100 9101 9102 9103 9104 9105 9106 9107 9108 9109 9110 9111 9112 9113 9114 9115 9116 9117 9118 9119 9120 9121 9122 9123 9124 9125 9126 9127 9128 9129 9130 9131 9132 9133 9134 9135 9136 9137 9138 9139 9140 9141 9142 9143 9144 9145 9146 9147 9148 9149 9150

	/* NIC side send descriptors. */
	for (i = 0; i < 6; i++) {
		unsigned long txd;

		txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
			+ (i * sizeof(struct tg3_tx_buffer_desc));
		printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(txd + 0x0), readl(txd + 0x4),
		       readl(txd + 0x8), readl(txd + 0xc));
	}

	/* NIC side RX descriptors. */
	for (i = 0; i < 6; i++) {
		unsigned long rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}

	for (i = 0; i < 6; i++) {
		unsigned long rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}
}
#endif

static struct net_device_stats *tg3_get_stats(struct net_device *);
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);

static int tg3_close(struct net_device *dev)
{
9151
	int i;
L
Linus Torvalds 已提交
9152 9153
	struct tg3 *tp = netdev_priv(dev);

9154
	tg3_napi_disable(tp);
9155
	cancel_work_sync(&tp->reset_task);
M
Michael Chan 已提交
9156

M
Matt Carlson 已提交
9157
	netif_tx_stop_all_queues(dev);
L
Linus Torvalds 已提交
9158 9159 9160

	del_timer_sync(&tp->timer);

M
Matt Carlson 已提交
9161 9162
	tg3_phy_stop(tp);

9163
	tg3_full_lock(tp, 1);
L
Linus Torvalds 已提交
9164 9165 9166 9167 9168 9169
#if 0
	tg3_dump_state(tp);
#endif

	tg3_disable_ints(tp);

M
Michael Chan 已提交
9170
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
L
Linus Torvalds 已提交
9171
	tg3_free_rings(tp);
9172
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
L
Linus Torvalds 已提交
9173

9174
	tg3_full_unlock(tp);
L
Linus Torvalds 已提交
9175

9176 9177 9178 9179
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}
9180 9181

	tg3_ints_fini(tp);
L
Linus Torvalds 已提交
9182 9183 9184 9185 9186 9187 9188 9189

	memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
	       sizeof(tp->net_stats_prev));
	memcpy(&tp->estats_prev, tg3_get_estats(tp),
	       sizeof(tp->estats_prev));

	tg3_free_consistent(tp);

M
Michael Chan 已提交
9190 9191 9192 9193
	tg3_set_power_state(tp, PCI_D3hot);

	netif_carrier_off(tp->dev);

L
Linus Torvalds 已提交
9194 9195 9196 9197 9198 9199 9200 9201 9202 9203 9204 9205 9206 9207 9208
	return 0;
}

static inline unsigned long get_stat64(tg3_stat64_t *val)
{
	unsigned long ret;

#if (BITS_PER_LONG == 32)
	ret = val->low;
#else
	ret = ((u64)val->high << 32) | ((u64)val->low);
#endif
	return ret;
}

9209 9210 9211 9212 9213
static inline u64 get_estat64(tg3_stat64_t *val)
{
       return ((u64)val->high << 32) | ((u64)val->low);
}

L
Linus Torvalds 已提交
9214 9215 9216 9217 9218 9219 9220 9221 9222
static unsigned long calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

9223
		spin_lock_bh(&tp->lock);
9224 9225 9226
		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
L
Linus Torvalds 已提交
9227 9228 9229
			tg3_readphy(tp, 0x14, &val);
		} else
			val = 0;
9230
		spin_unlock_bh(&tp->lock);
L
Linus Torvalds 已提交
9231 9232 9233 9234 9235 9236 9237 9238 9239 9240 9241

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}

#define ESTAT_ADD(member) \
	estats->member =	old_estats->member + \
9242
				get_estat64(&hw_stats->member)
L
Linus Torvalds 已提交
9243 9244 9245 9246 9247 9248 9249 9250 9251 9252 9253 9254 9255 9256 9257 9258 9259 9260 9261 9262 9263 9264 9265 9266 9267 9268 9269 9270 9271 9272 9273 9274 9275 9276 9277 9278 9279 9280 9281 9282 9283 9284 9285 9286 9287 9288 9289 9290 9291 9292 9293 9294 9295 9296 9297 9298 9299 9300 9301 9302 9303 9304 9305 9306 9307 9308 9309 9310 9311 9312 9313 9314 9315 9316 9317 9318 9319 9320 9321 9322 9323 9324 9325 9326 9327 9328 9329 9330 9331 9332 9333 9334 9335 9336 9337 9338 9339 9340 9341 9342 9343 9344 9345

static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
	struct tg3_ethtool_stats *estats = &tp->estats;
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_estats;

	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	return estats;
}

static struct net_device_stats *tg3_get_stats(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	struct net_device_stats *stats = &tp->net_stats;
	struct net_device_stats *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_stats;

	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);
9346

L
Linus Torvalds 已提交
9347 9348 9349 9350 9351 9352 9353 9354 9355 9356 9357
	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
9358
		get_stat64(&hw_stats->rx_errors);
L
Linus Torvalds 已提交
9359 9360 9361 9362 9363 9364 9365 9366 9367 9368 9369 9370 9371 9372 9373 9374 9375 9376 9377 9378 9379 9380 9381 9382 9383 9384 9385
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	stats->rx_crc_errors = old_stats->rx_crc_errors +
		calc_crc_errors(tp);

9386 9387 9388
	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

L
Linus Torvalds 已提交
9389 9390 9391 9392 9393 9394 9395 9396 9397 9398 9399 9400 9401 9402 9403 9404 9405 9406 9407 9408 9409 9410 9411 9412 9413 9414 9415 9416 9417 9418 9419 9420 9421 9422 9423 9424 9425 9426 9427 9428 9429 9430 9431 9432 9433 9434 9435 9436 9437 9438 9439 9440 9441 9442 9443 9444 9445 9446 9447 9448 9449 9450 9451 9452 9453 9454
	return stats;
}

static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 reg;
	u32 tmp;
	int j, k;

	reg = 0xffffffff;

	for (j = 0; j < len; j++) {
		reg ^= buf[j];

		for (k = 0; k < 8; k++) {
			tmp = reg & 0x01;

			reg >>= 1;

			if (tmp) {
				reg ^= 0xedb88320;
			}
		}
	}

	return ~reg;
}

static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
{
	/* accept or reject all multicast frames */
	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
}

static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
#if TG3_VLAN_TAG_USED
	if (!tp->vlgrp &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#else
	/* By definition, VLAN is disabled always in this
	 * case.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi (tp, 1);
9455
	} else if (netdev_mc_empty(dev)) {
L
Linus Torvalds 已提交
9456 9457 9458 9459 9460 9461 9462 9463 9464 9465
		/* Reject all multicast. */
		tg3_set_multi (tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

9466
		netdev_for_each_mc_addr(mclist, dev) {
L
Linus Torvalds 已提交
9467 9468 9469 9470 9471 9472 9473 9474 9475 9476 9477 9478 9479 9480 9481 9482 9483 9484 9485 9486 9487 9488 9489 9490
			crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}

static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

9491 9492 9493
	if (!netif_running(dev))
		return;

9494
	tg3_full_lock(tp, 0);
L
Linus Torvalds 已提交
9495
	__tg3_set_rx_mode(dev);
9496
	tg3_full_unlock(tp);
L
Linus Torvalds 已提交
9497 9498 9499 9500 9501 9502 9503 9504 9505 9506 9507 9508 9509 9510 9511 9512 9513 9514 9515 9516 9517
}

#define TG3_REGDUMP_LEN		(32 * 1024)

static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REGDUMP_LEN;
}

static void tg3_get_regs(struct net_device *dev,
		struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p;
	struct tg3 *tp = netdev_priv(dev);
	u8 *orig_p = _p;
	int i;

	regs->version = 0;

	memset(p, 0, TG3_REGDUMP_LEN);

M
Michael Chan 已提交
9518 9519 9520
	if (tp->link_config.phy_is_low_power)
		return;

9521
	tg3_full_lock(tp, 0);
L
Linus Torvalds 已提交
9522 9523 9524 9525 9526 9527 9528 9529 9530 9531 9532 9533 9534 9535 9536 9537 9538 9539 9540 9541 9542 9543 9544 9545 9546 9547 9548 9549 9550 9551 9552 9553 9554 9555 9556

#define __GET_REG32(reg)	(*(p)++ = tr32(reg))
#define GET_REG32_LOOP(base,len)		\
do {	p = (u32 *)(orig_p + (base));		\
	for (i = 0; i < len; i += 4)		\
		__GET_REG32((base) + i);	\
} while (0)
#define GET_REG32_1(reg)			\
do {	p = (u32 *)(orig_p + (reg));		\
	__GET_REG32((reg));			\
} while (0)

	GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
	GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
	GET_REG32_LOOP(MAC_MODE, 0x4f0);
	GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
	GET_REG32_1(SNDDATAC_MODE);
	GET_REG32_LOOP(SNDBDS_MODE, 0x80);
	GET_REG32_LOOP(SNDBDI_MODE, 0x48);
	GET_REG32_1(SNDBDC_MODE);
	GET_REG32_LOOP(RCVLPC_MODE, 0x20);
	GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
	GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
	GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
	GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
	GET_REG32_1(RCVDCC_MODE);
	GET_REG32_LOOP(RCVBDI_MODE, 0x20);
	GET_REG32_LOOP(RCVCC_MODE, 0x14);
	GET_REG32_LOOP(RCVLSC_MODE, 0x08);
	GET_REG32_1(MBFREE_MODE);
	GET_REG32_LOOP(HOSTCC_MODE, 0x100);
	GET_REG32_LOOP(MEMARB_MODE, 0x10);
	GET_REG32_LOOP(BUFMGR_MODE, 0x58);
	GET_REG32_LOOP(RDMAC_MODE, 0x08);
	GET_REG32_LOOP(WDMAC_MODE, 0x08);
9557 9558 9559 9560 9561 9562 9563
	GET_REG32_1(RX_CPU_MODE);
	GET_REG32_1(RX_CPU_STATE);
	GET_REG32_1(RX_CPU_PGMCTR);
	GET_REG32_1(RX_CPU_HWBKPT);
	GET_REG32_1(TX_CPU_MODE);
	GET_REG32_1(TX_CPU_STATE);
	GET_REG32_1(TX_CPU_PGMCTR);
L
Linus Torvalds 已提交
9564 9565 9566 9567 9568 9569 9570 9571 9572 9573 9574 9575
	GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
	GET_REG32_LOOP(FTQ_RESET, 0x120);
	GET_REG32_LOOP(MSGINT_MODE, 0x0c);
	GET_REG32_1(DMAC_MODE);
	GET_REG32_LOOP(GRC_MODE, 0x4c);
	if (tp->tg3_flags & TG3_FLAG_NVRAM)
		GET_REG32_LOOP(NVRAM_CMD, 0x24);

#undef __GET_REG32
#undef GET_REG32_LOOP
#undef GET_REG32_1

9576
	tg3_full_unlock(tp);
L
Linus Torvalds 已提交
9577 9578 9579 9580 9581 9582 9583 9584 9585 9586 9587 9588 9589 9590
}

static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}

static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8  *pd;
A
Al Viro 已提交
9591
	u32 i, offset, len, b_offset, b_count;
9592
	__be32 val;
L
Linus Torvalds 已提交
9593

M
Matt Carlson 已提交
9594 9595 9596
	if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM)
		return -EINVAL;

M
Michael Chan 已提交
9597 9598 9599
	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

L
Linus Torvalds 已提交
9600 9601 9602 9603 9604 9605 9606 9607 9608 9609 9610 9611 9612 9613
	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
9614
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
L
Linus Torvalds 已提交
9615 9616 9617 9618 9619 9620 9621 9622 9623 9624 9625
		if (ret)
			return ret;
		memcpy(data, ((char*)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
	        eeprom->len += b_count;
	}

	/* read bytes upto the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
9626
		ret = tg3_nvram_read_be32(tp, offset + i, &val);
L
Linus Torvalds 已提交
9627 9628 9629 9630 9631 9632 9633 9634 9635 9636 9637 9638 9639
		if (ret) {
			eeprom->len += i;
			return ret;
		}
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
9640
		ret = tg3_nvram_read_be32(tp, b_offset, &val);
L
Linus Torvalds 已提交
9641 9642
		if (ret)
			return ret;
A
Al Viro 已提交
9643
		memcpy(pd, &val, b_count);
L
Linus Torvalds 已提交
9644 9645 9646 9647 9648
		eeprom->len += b_count;
	}
	return 0;
}

9649
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
L
Linus Torvalds 已提交
9650 9651 9652 9653 9654

static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
A
Al Viro 已提交
9655
	u32 offset, len, b_offset, odd_len;
L
Linus Torvalds 已提交
9656
	u8 *buf;
9657
	__be32 start, end;
L
Linus Torvalds 已提交
9658

M
Michael Chan 已提交
9659 9660 9661
	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

M
Matt Carlson 已提交
9662 9663
	if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
	    eeprom->magic != TG3_EEPROM_MAGIC)
L
Linus Torvalds 已提交
9664 9665 9666 9667 9668 9669 9670
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
9671
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
L
Linus Torvalds 已提交
9672 9673 9674 9675
		if (ret)
			return ret;
		len += b_offset;
		offset &= ~3;
9676 9677
		if (len < 4)
			len = 4;
L
Linus Torvalds 已提交
9678 9679 9680
	}

	odd_len = 0;
9681
	if (len & 3) {
L
Linus Torvalds 已提交
9682 9683 9684
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
9685
		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
L
Linus Torvalds 已提交
9686 9687 9688 9689 9690 9691 9692
		if (ret)
			return ret;
	}

	buf = data;
	if (b_offset || odd_len) {
		buf = kmalloc(len, GFP_KERNEL);
A
Andy Gospodarek 已提交
9693
		if (!buf)
L
Linus Torvalds 已提交
9694 9695 9696 9697 9698 9699 9700 9701 9702 9703 9704 9705 9706 9707 9708 9709 9710 9711
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}

static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
M
Matt Carlson 已提交
9712 9713 9714
	struct tg3 *tp = netdev_priv(dev);

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9715
		struct phy_device *phydev;
M
Matt Carlson 已提交
9716 9717
		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
			return -EAGAIN;
9718 9719
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_gset(phydev, cmd);
M
Matt Carlson 已提交
9720
	}
9721

L
Linus Torvalds 已提交
9722 9723 9724 9725 9726 9727
	cmd->supported = (SUPPORTED_Autoneg);

	if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

9728
	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
L
Linus Torvalds 已提交
9729 9730 9731 9732
		cmd->supported |= (SUPPORTED_100baseT_Half |
				  SUPPORTED_100baseT_Full |
				  SUPPORTED_10baseT_Half |
				  SUPPORTED_10baseT_Full |
M
Matt Carlson 已提交
9733
				  SUPPORTED_TP);
9734 9735
		cmd->port = PORT_TP;
	} else {
L
Linus Torvalds 已提交
9736
		cmd->supported |= SUPPORTED_FIBRE;
9737 9738
		cmd->port = PORT_FIBRE;
	}
9739

L
Linus Torvalds 已提交
9740 9741 9742 9743 9744
	cmd->advertising = tp->link_config.advertising;
	if (netif_running(dev)) {
		cmd->speed = tp->link_config.active_speed;
		cmd->duplex = tp->link_config.active_duplex;
	}
9745
	cmd->phy_address = tp->phy_addr;
9746
	cmd->transceiver = XCVR_INTERNAL;
L
Linus Torvalds 已提交
9747 9748 9749 9750 9751
	cmd->autoneg = tp->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
9752

L
Linus Torvalds 已提交
9753 9754 9755
static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);
9756

M
Matt Carlson 已提交
9757
	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9758
		struct phy_device *phydev;
M
Matt Carlson 已提交
9759 9760
		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
			return -EAGAIN;
9761 9762
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_sset(phydev, cmd);
M
Matt Carlson 已提交
9763 9764
	}

9765 9766
	if (cmd->autoneg != AUTONEG_ENABLE &&
	    cmd->autoneg != AUTONEG_DISABLE)
9767
		return -EINVAL;
9768 9769 9770 9771

	if (cmd->autoneg == AUTONEG_DISABLE &&
	    cmd->duplex != DUPLEX_FULL &&
	    cmd->duplex != DUPLEX_HALF)
9772
		return -EINVAL;
L
Linus Torvalds 已提交
9773

9774 9775 9776 9777 9778 9779 9780 9781 9782 9783 9784 9785 9786 9787 9788 9789 9790 9791 9792 9793 9794 9795 9796 9797 9798 9799 9800 9801 9802 9803 9804 9805 9806 9807 9808 9809 9810 9811 9812 9813 9814 9815 9816
	if (cmd->autoneg == AUTONEG_ENABLE) {
		u32 mask = ADVERTISED_Autoneg |
			   ADVERTISED_Pause |
			   ADVERTISED_Asym_Pause;

		if (!(tp->tg3_flags2 & TG3_FLAG_10_100_ONLY))
			mask |= ADVERTISED_1000baseT_Half |
				ADVERTISED_1000baseT_Full;

		if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
			mask |= ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_TP;
		else
			mask |= ADVERTISED_FIBRE;

		if (cmd->advertising & ~mask)
			return -EINVAL;

		mask &= (ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_100baseT_Half |
			 ADVERTISED_100baseT_Full |
			 ADVERTISED_10baseT_Half |
			 ADVERTISED_10baseT_Full);

		cmd->advertising &= mask;
	} else {
		if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
			if (cmd->speed != SPEED_1000)
				return -EINVAL;

			if (cmd->duplex != DUPLEX_FULL)
				return -EINVAL;
		} else {
			if (cmd->speed != SPEED_100 &&
			    cmd->speed != SPEED_10)
				return -EINVAL;
		}
	}

9817
	tg3_full_lock(tp, 0);
L
Linus Torvalds 已提交
9818 9819 9820

	tp->link_config.autoneg = cmd->autoneg;
	if (cmd->autoneg == AUTONEG_ENABLE) {
9821 9822
		tp->link_config.advertising = (cmd->advertising |
					      ADVERTISED_Autoneg);
L
Linus Torvalds 已提交
9823 9824 9825 9826 9827 9828
		tp->link_config.speed = SPEED_INVALID;
		tp->link_config.duplex = DUPLEX_INVALID;
	} else {
		tp->link_config.advertising = 0;
		tp->link_config.speed = cmd->speed;
		tp->link_config.duplex = cmd->duplex;
M
Matt Carlson 已提交
9829
	}
9830

9831 9832 9833 9834
	tp->link_config.orig_speed = tp->link_config.speed;
	tp->link_config.orig_duplex = tp->link_config.duplex;
	tp->link_config.orig_autoneg = tp->link_config.autoneg;

L
Linus Torvalds 已提交
9835 9836 9837
	if (netif_running(dev))
		tg3_setup_phy(tp, 1);

9838
	tg3_full_unlock(tp);
9839

L
Linus Torvalds 已提交
9840 9841
	return 0;
}
9842

L
Linus Torvalds 已提交
9843 9844 9845
static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tg3 *tp = netdev_priv(dev);
9846

L
Linus Torvalds 已提交
9847 9848
	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
M
Michael Chan 已提交
9849
	strcpy(info->fw_version, tp->fw_ver);
L
Linus Torvalds 已提交
9850 9851
	strcpy(info->bus_info, pci_name(tp->pdev));
}
9852

L
Linus Torvalds 已提交
9853 9854 9855
static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);
9856

9857 9858
	if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
	    device_can_wakeup(&tp->pdev->dev))
G
Gary Zambrano 已提交
9859 9860 9861
		wol->supported = WAKE_MAGIC;
	else
		wol->supported = 0;
L
Linus Torvalds 已提交
9862
	wol->wolopts = 0;
9863 9864
	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
	    device_can_wakeup(&tp->pdev->dev))
L
Linus Torvalds 已提交
9865 9866 9867
		wol->wolopts = WAKE_MAGIC;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}
9868

L
Linus Torvalds 已提交
9869 9870 9871
static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);
9872
	struct device *dp = &tp->pdev->dev;
9873

L
Linus Torvalds 已提交
9874 9875 9876
	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;
	if ((wol->wolopts & WAKE_MAGIC) &&
9877
	    !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
L
Linus Torvalds 已提交
9878
		return -EINVAL;
9879

9880
	spin_lock_bh(&tp->lock);
9881
	if (wol->wolopts & WAKE_MAGIC) {
L
Linus Torvalds 已提交
9882
		tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
9883 9884
		device_set_wakeup_enable(dp, true);
	} else {
L
Linus Torvalds 已提交
9885
		tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
9886 9887
		device_set_wakeup_enable(dp, false);
	}
9888
	spin_unlock_bh(&tp->lock);
9889

L
Linus Torvalds 已提交
9890 9891
	return 0;
}
9892

L
Linus Torvalds 已提交
9893 9894 9895 9896 9897
static u32 tg3_get_msglevel(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return tp->msg_enable;
}
9898

L
Linus Torvalds 已提交
9899 9900 9901 9902 9903
static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);
	tp->msg_enable = value;
}
9904

L
Linus Torvalds 已提交
9905 9906 9907 9908 9909 9910 9911 9912 9913
static int tg3_set_tso(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
		if (value)
			return -EINVAL;
		return 0;
	}
M
Matt Carlson 已提交
9914
	if ((dev->features & NETIF_F_IPV6_CSUM) &&
M
Matt Carlson 已提交
9915 9916
	    ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) ||
	     (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3))) {
M
Matt Carlson 已提交
9917
		if (value) {
M
Michael Chan 已提交
9918
			dev->features |= NETIF_F_TSO6;
M
Matt Carlson 已提交
9919 9920
			if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
M
Matt Carlson 已提交
9921 9922
			    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
			     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
M
Matt Carlson 已提交
9923
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
M
Matt Carlson 已提交
9924
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
M
Matt Carlson 已提交
9925 9926 9927
				dev->features |= NETIF_F_TSO_ECN;
		} else
			dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
M
Michael Chan 已提交
9928
	}
L
Linus Torvalds 已提交
9929 9930
	return ethtool_op_set_tso(dev, value);
}
9931

L
Linus Torvalds 已提交
9932 9933 9934 9935
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int r;
9936

L
Linus Torvalds 已提交
9937 9938 9939
	if (!netif_running(dev))
		return -EAGAIN;

M
Michael Chan 已提交
9940 9941 9942
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
		return -EINVAL;

M
Matt Carlson 已提交
9943 9944 9945
	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
			return -EAGAIN;
9946
		r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
M
Matt Carlson 已提交
9947 9948 9949 9950 9951 9952 9953 9954 9955 9956 9957 9958 9959 9960
	} else {
		u32 bmcr;

		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
		    ((bmcr & BMCR_ANENABLE) ||
		     (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
						   BMCR_ANENABLE);
			r = 0;
		}
		spin_unlock_bh(&tp->lock);
L
Linus Torvalds 已提交
9961
	}
9962

L
Linus Torvalds 已提交
9963 9964
	return r;
}
9965

L
Linus Torvalds 已提交
9966 9967 9968
static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
9969

L
Linus Torvalds 已提交
9970 9971
	ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
	ering->rx_mini_max_pending = 0;
M
Michael Chan 已提交
9972 9973 9974 9975 9976 9977
	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
		ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
	else
		ering->rx_jumbo_max_pending = 0;

	ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
L
Linus Torvalds 已提交
9978 9979 9980

	ering->rx_pending = tp->rx_pending;
	ering->rx_mini_pending = 0;
M
Michael Chan 已提交
9981 9982 9983 9984 9985
	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
		ering->rx_jumbo_pending = tp->rx_jumbo_pending;
	else
		ering->rx_jumbo_pending = 0;

9986
	ering->tx_pending = tp->napi[0].tx_pending;
L
Linus Torvalds 已提交
9987
}
9988

L
Linus Torvalds 已提交
9989 9990 9991
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
M
Matt Carlson 已提交
9992
	int i, irq_sync = 0, err = 0;
9993

L
Linus Torvalds 已提交
9994 9995
	if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
	    (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
9996 9997
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
M
Michael Chan 已提交
9998
	    ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
9999
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
L
Linus Torvalds 已提交
10000
		return -EINVAL;
10001

10002
	if (netif_running(dev)) {
M
Matt Carlson 已提交
10003
		tg3_phy_stop(tp);
L
Linus Torvalds 已提交
10004
		tg3_netif_stop(tp);
10005 10006
		irq_sync = 1;
	}
L
Linus Torvalds 已提交
10007

10008
	tg3_full_lock(tp, irq_sync);
10009

L
Linus Torvalds 已提交
10010 10011 10012 10013 10014 10015
	tp->rx_pending = ering->rx_pending;

	if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
M
Matt Carlson 已提交
10016 10017 10018

	for (i = 0; i < TG3_IRQ_MAX_VECS; i++)
		tp->napi[i].tx_pending = ering->tx_pending;
L
Linus Torvalds 已提交
10019 10020

	if (netif_running(dev)) {
M
Michael Chan 已提交
10021
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
M
Michael Chan 已提交
10022 10023 10024
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
L
Linus Torvalds 已提交
10025 10026
	}

10027
	tg3_full_unlock(tp);
10028

M
Matt Carlson 已提交
10029 10030 10031
	if (irq_sync && !err)
		tg3_phy_start(tp);

M
Michael Chan 已提交
10032
	return err;
L
Linus Torvalds 已提交
10033
}
10034

L
Linus Torvalds 已提交
10035 10036 10037
static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
10038

L
Linus Torvalds 已提交
10039
	epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
10040

10041
	if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
10042 10043 10044 10045
		epause->rx_pause = 1;
	else
		epause->rx_pause = 0;

10046
	if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
10047 10048 10049
		epause->tx_pause = 1;
	else
		epause->tx_pause = 0;
L
Linus Torvalds 已提交
10050
}
10051

L
Linus Torvalds 已提交
10052 10053 10054
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
M
Matt Carlson 已提交
10055
	int err = 0;
10056

M
Matt Carlson 已提交
10057
	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
10058 10059
		u32 newadv;
		struct phy_device *phydev;
L
Linus Torvalds 已提交
10060

10061
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10062

10063 10064 10065 10066 10067
		if (!(phydev->supported & SUPPORTED_Pause) ||
		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
		     ((epause->rx_pause && !epause->tx_pause) ||
		      (!epause->rx_pause && epause->tx_pause))))
			return -EINVAL;
L
Linus Torvalds 已提交
10068

10069 10070 10071 10072 10073 10074 10075
		tp->link_config.flowctrl = 0;
		if (epause->rx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_RX;

			if (epause->tx_pause) {
				tp->link_config.flowctrl |= FLOW_CTRL_TX;
				newadv = ADVERTISED_Pause;
M
Matt Carlson 已提交
10076
			} else
10077 10078 10079 10080 10081 10082 10083 10084 10085 10086 10087 10088 10089 10090 10091 10092 10093 10094 10095 10096 10097 10098 10099 10100 10101 10102 10103 10104 10105 10106 10107
				newadv = ADVERTISED_Pause |
					 ADVERTISED_Asym_Pause;
		} else if (epause->tx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
			newadv = ADVERTISED_Asym_Pause;
		} else
			newadv = 0;

		if (epause->autoneg)
			tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
		else
			tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;

		if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
			u32 oldadv = phydev->advertising &
				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
			if (oldadv != newadv) {
				phydev->advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
				phydev->advertising |= newadv;
				if (phydev->autoneg) {
					/*
					 * Always renegotiate the link to
					 * inform our link partner of our
					 * flow control settings, even if the
					 * flow control is forced.  Let
					 * tg3_adjust_link() do the final
					 * flow control setup.
					 */
					return phy_start_aneg(phydev);
M
Matt Carlson 已提交
10108 10109 10110
				}
			}

10111
			if (!epause->autoneg)
M
Matt Carlson 已提交
10112
				tg3_setup_flow_control(tp, 0, 0);
10113 10114 10115 10116 10117
		} else {
			tp->link_config.orig_advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
			tp->link_config.orig_advertising |= newadv;
M
Matt Carlson 已提交
10118 10119 10120 10121 10122 10123 10124 10125 10126 10127 10128 10129 10130 10131 10132 10133
		}
	} else {
		int irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
		else
			tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
		if (epause->rx_pause)
10134
			tp->link_config.flowctrl |= FLOW_CTRL_RX;
M
Matt Carlson 已提交
10135
		else
10136
			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
M
Matt Carlson 已提交
10137
		if (epause->tx_pause)
10138
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
M
Matt Carlson 已提交
10139
		else
10140
			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
M
Matt Carlson 已提交
10141 10142 10143 10144 10145 10146 10147 10148 10149 10150

		if (netif_running(dev)) {
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			err = tg3_restart_hw(tp, 1);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}
10151

M
Michael Chan 已提交
10152
	return err;
L
Linus Torvalds 已提交
10153
}
10154

L
Linus Torvalds 已提交
10155 10156 10157 10158 10159
static u32 tg3_get_rx_csum(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
}
10160

L
Linus Torvalds 已提交
10161 10162 10163
static int tg3_set_rx_csum(struct net_device *dev, u32 data)
{
	struct tg3 *tp = netdev_priv(dev);
10164

L
Linus Torvalds 已提交
10165 10166 10167 10168 10169
	if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
		if (data != 0)
			return -EINVAL;
  		return 0;
  	}
10170

10171
	spin_lock_bh(&tp->lock);
L
Linus Torvalds 已提交
10172 10173 10174 10175
	if (data)
		tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
	else
		tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
10176
	spin_unlock_bh(&tp->lock);
10177

L
Linus Torvalds 已提交
10178 10179
	return 0;
}
10180

L
Linus Torvalds 已提交
10181 10182 10183
static int tg3_set_tx_csum(struct net_device *dev, u32 data)
{
	struct tg3 *tp = netdev_priv(dev);
10184

L
Linus Torvalds 已提交
10185 10186 10187 10188 10189
	if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
		if (data != 0)
			return -EINVAL;
  		return 0;
  	}
10190

M
Matt Carlson 已提交
10191
	if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
10192
		ethtool_op_set_tx_ipv6_csum(dev, data);
L
Linus Torvalds 已提交
10193
	else
M
Michael Chan 已提交
10194
		ethtool_op_set_tx_csum(dev, data);
L
Linus Torvalds 已提交
10195 10196 10197 10198

	return 0;
}

10199
static int tg3_get_sset_count (struct net_device *dev, int sset)
L
Linus Torvalds 已提交
10200
{
10201 10202 10203 10204 10205 10206 10207 10208
	switch (sset) {
	case ETH_SS_TEST:
		return TG3_NUM_TEST;
	case ETH_SS_STATS:
		return TG3_NUM_STATS;
	default:
		return -EOPNOTSUPP;
	}
10209 10210
}

L
Linus Torvalds 已提交
10211 10212 10213 10214 10215 10216
static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
		break;
10217 10218 10219
	case ETH_SS_TEST:
		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
		break;
L
Linus Torvalds 已提交
10220 10221 10222 10223 10224 10225
	default:
		WARN_ON(1);	/* we need a WARN() */
		break;
	}
}

M
Michael Chan 已提交
10226 10227 10228 10229 10230 10231 10232 10233 10234
static int tg3_phys_id(struct net_device *dev, u32 data)
{
	struct tg3 *tp = netdev_priv(dev);
	int i;

	if (!netif_running(tp->dev))
		return -EAGAIN;

	if (data == 0)
S
Stephen Hemminger 已提交
10235
		data = UINT_MAX / 2;
M
Michael Chan 已提交
10236 10237 10238 10239 10240 10241 10242 10243 10244 10245

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0)
			tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
					   LED_CTRL_1000MBPS_ON |
					   LED_CTRL_100MBPS_ON |
					   LED_CTRL_10MBPS_ON |
					   LED_CTRL_TRAFFIC_OVERRIDE |
					   LED_CTRL_TRAFFIC_BLINK |
					   LED_CTRL_TRAFFIC_LED);
10246

M
Michael Chan 已提交
10247 10248 10249 10250 10251 10252 10253 10254 10255 10256 10257
		else
			tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
					   LED_CTRL_TRAFFIC_OVERRIDE);

		if (msleep_interruptible(500))
			break;
	}
	tw32(MAC_LED_CTRL, tp->led_ctrl);
	return 0;
}

L
Linus Torvalds 已提交
10258 10259 10260 10261 10262 10263 10264
static void tg3_get_ethtool_stats (struct net_device *dev,
				   struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct tg3 *tp = netdev_priv(dev);
	memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
}

M
Michael Chan 已提交
10265
#define NVRAM_TEST_SIZE 0x100
M
Matt Carlson 已提交
10266 10267 10268
#define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
#define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
#define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
10269 10270
#define NVRAM_SELFBOOT_HW_SIZE 0x20
#define NVRAM_SELFBOOT_DATA_SIZE 0x1c
M
Michael Chan 已提交
10271 10272 10273

static int tg3_test_nvram(struct tg3 *tp)
{
A
Al Viro 已提交
10274
	u32 csum, magic;
10275
	__be32 *buf;
A
Andy Gospodarek 已提交
10276
	int i, j, k, err = 0, size;
M
Michael Chan 已提交
10277

M
Matt Carlson 已提交
10278 10279 10280
	if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM)
		return 0;

10281
	if (tg3_nvram_read(tp, 0, &magic) != 0)
M
Michael Chan 已提交
10282 10283 10284 10285
		return -EIO;

	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
10286
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
M
Matt Carlson 已提交
10287 10288 10289 10290 10291 10292 10293 10294 10295 10296 10297 10298 10299 10300 10301 10302
		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
		    TG3_EEPROM_SB_FORMAT_1) {
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			default:
				return 0;
			}
		} else
M
Michael Chan 已提交
10303
			return 0;
10304 10305 10306
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
M
Michael Chan 已提交
10307 10308 10309
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
M
Michael Chan 已提交
10310 10311 10312
	if (buf == NULL)
		return -ENOMEM;

M
Michael Chan 已提交
10313 10314
	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
10315 10316
		err = tg3_nvram_read_be32(tp, i, &buf[j]);
		if (err)
M
Michael Chan 已提交
10317 10318
			break;
	}
M
Michael Chan 已提交
10319
	if (i < size)
M
Michael Chan 已提交
10320 10321
		goto out;

M
Michael Chan 已提交
10322
	/* Selfboot format */
10323
	magic = be32_to_cpu(buf[0]);
A
Al Viro 已提交
10324
	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
10325
	    TG3_EEPROM_MAGIC_FW) {
M
Michael Chan 已提交
10326 10327
		u8 *buf8 = (u8 *) buf, csum8 = 0;

A
Al Viro 已提交
10328
		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
M
Matt Carlson 已提交
10329 10330 10331 10332 10333 10334 10335 10336 10337 10338
		    TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the csum doesn't include the MBA. */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}
M
Michael Chan 已提交
10339

A
Adrian Bunk 已提交
10340 10341 10342 10343 10344 10345 10346
		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
M
Michael Chan 已提交
10347
	}
M
Michael Chan 已提交
10348

A
Al Viro 已提交
10349
	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
10350 10351
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
10352
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
10353 10354 10355 10356 10357 10358 10359 10360 10361 10362 10363 10364 10365 10366 10367 10368 10369 10370 10371 10372 10373 10374 10375 10376 10377 10378 10379 10380 10381 10382 10383 10384 10385 10386 10387 10388 10389 10390 10391 10392
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes.  */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			else if (i == 16) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;

				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			data[j++] = buf8[i];
		}

		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

M
Michael Chan 已提交
10393 10394
	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
10395
	if (csum != be32_to_cpu(buf[0x10/4]))
M
Michael Chan 已提交
10396 10397 10398 10399
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
10400 10401
	if (csum != be32_to_cpu(buf[0xfc/4]))
		goto out;
M
Michael Chan 已提交
10402 10403 10404 10405 10406 10407 10408 10409

	err = 0;

out:
	kfree(buf);
	return err;
}

M
Michael Chan 已提交
10410 10411 10412 10413 10414 10415 10416 10417 10418 10419
#define TG3_SERDES_TIMEOUT_SEC	2
#define TG3_COPPER_TIMEOUT_SEC	6

static int tg3_test_link(struct tg3 *tp)
{
	int i, max;

	if (!netif_running(tp->dev))
		return -ENODEV;

M
Michael Chan 已提交
10420
	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
M
Michael Chan 已提交
10421 10422 10423 10424 10425 10426 10427 10428 10429 10430 10431 10432 10433 10434 10435
		max = TG3_SERDES_TIMEOUT_SEC;
	else
		max = TG3_COPPER_TIMEOUT_SEC;

	for (i = 0; i < max; i++) {
		if (netif_carrier_ok(tp->dev))
			return 0;

		if (msleep_interruptible(1000))
			break;
	}

	return -EIO;
}

M
Michael Chan 已提交
10436
/* Only test the commonly used registers */
10437
static int tg3_test_registers(struct tg3 *tp)
M
Michael Chan 已提交
10438
{
10439
	int i, is_5705, is_5750;
M
Michael Chan 已提交
10440 10441 10442 10443 10444 10445 10446
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
10447
#define TG3_FL_NOT_5750	0x8
M
Michael Chan 已提交
10448 10449 10450 10451 10452 10453 10454 10455 10456 10457 10458 10459 10460 10461 10462 10463 10464 10465 10466 10467 10468 10469 10470 10471 10472 10473 10474 10475 10476 10477 10478 10479 10480 10481 10482 10483 10484 10485 10486 10487 10488 10489 10490 10491 10492 10493 10494 10495 10496 10497 10498 10499
		u32 read_mask;
		u32 write_mask;
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
		       	0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },
10500

M
Michael Chan 已提交
10501 10502 10503 10504 10505 10506 10507
		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
10508

M
Michael Chan 已提交
10509 10510 10511 10512 10513 10514 10515 10516 10517 10518 10519 10520 10521 10522 10523 10524 10525 10526 10527 10528 10529 10530 10531 10532 10533 10534 10535 10536 10537 10538 10539 10540 10541 10542 10543 10544 10545 10546 10547 10548 10549 10550 10551 10552 10553 10554 10555 10556 10557
		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
10558
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
M
Michael Chan 已提交
10559
			0x00000000, 0x007fff80 },
10560
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
M
Michael Chan 已提交
10561 10562 10563 10564 10565 10566 10567 10568 10569 10570 10571
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
10572

M
Michael Chan 已提交
10573 10574 10575 10576 10577 10578 10579 10580 10581 10582 10583 10584 10585
		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

10586 10587
	is_5705 = is_5750 = 0;
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
M
Michael Chan 已提交
10588
		is_5705 = 1;
10589 10590 10591
		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
			is_5750 = 1;
	}
M
Michael Chan 已提交
10592 10593 10594 10595 10596 10597 10598 10599 10600 10601 10602 10603

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

10604 10605 10606
		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

M
Michael Chan 已提交
10607 10608 10609 10610 10611 10612 10613 10614 10615 10616 10617 10618 10619 10620 10621 10622 10623 10624 10625 10626 10627 10628 10629 10630 10631 10632 10633 10634 10635 10636 10637 10638 10639 10640 10641 10642 10643 10644 10645 10646 10647 10648 10649
		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		tw32(offset, save_val);
	}

	return 0;

out:
M
Michael Chan 已提交
10650
	if (netif_msg_hw(tp))
10651
		pr_err("Register test failed at offset %x\n", offset);
M
Michael Chan 已提交
10652 10653 10654 10655
	tw32(offset, save_val);
	return -EIO;
}

M
Michael Chan 已提交
10656 10657
static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
{
10658
	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
M
Michael Chan 已提交
10659 10660 10661
	int i;
	u32 j;

10662
	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
M
Michael Chan 已提交
10663 10664 10665 10666 10667 10668 10669 10670 10671 10672 10673 10674 10675 10676 10677 10678 10679 10680
		for (j = 0; j < len; j += 4) {
			u32 val;

			tg3_write_mem(tp, offset + j, test_pattern[i]);
			tg3_read_mem(tp, offset + j, &val);
			if (val != test_pattern[i])
				return -EIO;
		}
	}
	return 0;
}

static int tg3_test_memory(struct tg3 *tp)
{
	static struct mem_entry {
		u32 offset;
		u32 len;
	} mem_tbl_570x[] = {
M
Michael Chan 已提交
10681
		{ 0x00000000, 0x00b50},
M
Michael Chan 已提交
10682 10683 10684 10685 10686 10687 10688 10689 10690 10691
		{ 0x00002000, 0x1c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5705[] = {
		{ 0x00000100, 0x0000c},
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x01000},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0e000},
		{ 0xffffffff, 0x00000}
M
Michael Chan 已提交
10692 10693 10694 10695 10696 10697 10698
	}, mem_tbl_5755[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x00800},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0c000},
		{ 0xffffffff, 0x00000}
10699 10700 10701 10702 10703 10704 10705
	}, mem_tbl_5906[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00400},
		{ 0x00006000, 0x00400},
		{ 0x00008000, 0x01000},
		{ 0x00010000, 0x01000},
		{ 0xffffffff, 0x00000}
10706 10707 10708 10709 10710 10711 10712 10713 10714 10715 10716
	}, mem_tbl_5717[] = {
		{ 0x00000200, 0x00008},
		{ 0x00010000, 0x0a000},
		{ 0x00020000, 0x13c00},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_57765[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x09800},
		{ 0x00010000, 0x0a000},
		{ 0xffffffff, 0x00000}
M
Michael Chan 已提交
10717 10718 10719 10720 10721
	};
	struct mem_entry *mem_tbl;
	int err = 0;
	int i;

10722 10723 10724 10725 10726
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
		mem_tbl = mem_tbl_5717;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		mem_tbl = mem_tbl_57765;
	else if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
M
Matt Carlson 已提交
10727 10728 10729 10730 10731 10732
		mem_tbl = mem_tbl_5755;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mem_tbl = mem_tbl_5906;
	else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		mem_tbl = mem_tbl_5705;
	else
M
Michael Chan 已提交
10733 10734 10735 10736 10737 10738 10739
		mem_tbl = mem_tbl_570x;

	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
		    mem_tbl[i].len)) != 0)
			break;
	}
10740

M
Michael Chan 已提交
10741 10742 10743
	return err;
}

M
Michael Chan 已提交
10744 10745 10746 10747
#define TG3_MAC_LOOPBACK	0
#define TG3_PHY_LOOPBACK	1

static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
M
Michael Chan 已提交
10748
{
M
Michael Chan 已提交
10749
	u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
10750
	u32 desc_idx, coal_now;
M
Michael Chan 已提交
10751 10752 10753 10754 10755
	struct sk_buff *skb, *rx_skb;
	u8 *tx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;
10756
	struct tg3_napi *tnapi, *rnapi;
10757
	struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
M
Michael Chan 已提交
10758

10759 10760
	tnapi = &tp->napi[0];
	rnapi = &tp->napi[0];
10761 10762
	if (tp->irq_cnt > 1) {
		rnapi = &tp->napi[1];
10763 10764
		if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
			tnapi = &tp->napi[1];
10765
	}
10766
	coal_now = tnapi->coal_now | rnapi->coal_now;
10767

M
Michael Chan 已提交
10768
	if (loopback_mode == TG3_MAC_LOOPBACK) {
M
Michael Chan 已提交
10769 10770 10771 10772 10773 10774 10775
		/* HW errata - mac loopback fails in some cases on 5780.
		 * Normal traffic and PHY loopback are not affected by
		 * errata.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
			return 0;

M
Michael Chan 已提交
10776
		mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
M
Matt Carlson 已提交
10777 10778 10779
			   MAC_MODE_PORT_INT_LPBACK;
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			mac_mode |= MAC_MODE_LINK_POLARITY;
M
Michael Chan 已提交
10780 10781 10782 10783
		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
M
Michael Chan 已提交
10784 10785
		tw32(MAC_MODE, mac_mode);
	} else if (loopback_mode == TG3_PHY_LOOPBACK) {
M
Michael Chan 已提交
10786 10787
		u32 val;

10788 10789
		if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
			tg3_phy_fet_toggle_apd(tp, false);
M
Michael Chan 已提交
10790 10791 10792
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
		} else
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
M
Michael Chan 已提交
10793

M
Matt Carlson 已提交
10794 10795
		tg3_phy_toggle_automdix(tp, 0);

M
Michael Chan 已提交
10796
		tg3_writephy(tp, MII_BMCR, val);
M
Michael Chan 已提交
10797
		udelay(40);
M
Michael Chan 已提交
10798

M
Matt Carlson 已提交
10799
		mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
10800
		if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
10801 10802 10803 10804 10805 10806
			tg3_writephy(tp, MII_TG3_FET_PTEST,
				     MII_TG3_FET_PTEST_FRC_TX_LINK |
				     MII_TG3_FET_PTEST_FRC_TX_LOCK);
			/* The write needs to be flushed for the AC131 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
				tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
M
Michael Chan 已提交
10807 10808 10809
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		} else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
10810

M
Michael Chan 已提交
10811 10812 10813 10814 10815 10816
		/* reset to prevent losing 1st rx packet intermittently */
		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
			tw32_f(MAC_RX_MODE, RX_MODE_RESET);
			udelay(10);
			tw32_f(MAC_RX_MODE, tp->rx_mode);
		}
M
Matt Carlson 已提交
10817
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
10818 10819
			u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
			if (masked_phy_id == TG3_PHY_ID_BCM5401)
M
Matt Carlson 已提交
10820
				mac_mode &= ~MAC_MODE_LINK_POLARITY;
10821
			else if (masked_phy_id == TG3_PHY_ID_BCM5411)
M
Matt Carlson 已提交
10822
				mac_mode |= MAC_MODE_LINK_POLARITY;
M
Michael Chan 已提交
10823 10824 10825
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		}
M
Michael Chan 已提交
10826 10827 10828 10829
		tw32(MAC_MODE, mac_mode);
	}
	else
		return -EINVAL;
M
Michael Chan 已提交
10830 10831 10832 10833

	err = -EIO;

	tx_len = 1514;
10834
	skb = netdev_alloc_skb(tp->dev, tx_len);
10835 10836 10837
	if (!skb)
		return -ENOMEM;

M
Michael Chan 已提交
10838 10839 10840 10841 10842 10843 10844 10845 10846
	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + 4);

	for (i = 14; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

10847 10848
	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, map)) {
10849 10850 10851
		dev_kfree_skb(skb);
		return -EIO;
	}
M
Michael Chan 已提交
10852 10853

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10854
	       rnapi->coal_now);
M
Michael Chan 已提交
10855 10856 10857

	udelay(10);

10858
	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
M
Michael Chan 已提交
10859 10860 10861

	num_pkts = 0;

10862
	tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len, 0, 1);
M
Michael Chan 已提交
10863

10864
	tnapi->tx_prod++;
M
Michael Chan 已提交
10865 10866
	num_pkts++;

10867 10868
	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
	tr32_mailbox(tnapi->prodmbox);
M
Michael Chan 已提交
10869 10870 10871

	udelay(10);

M
Matt Carlson 已提交
10872 10873
	/* 350 usec to allow enough time on some 10/100 Mbps devices.  */
	for (i = 0; i < 35; i++) {
M
Michael Chan 已提交
10874
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10875
		       coal_now);
M
Michael Chan 已提交
10876 10877 10878

		udelay(10);

10879 10880
		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
		rx_idx = rnapi->hw_status->idx[0].rx_producer;
10881
		if ((tx_idx == tnapi->tx_prod) &&
M
Michael Chan 已提交
10882 10883 10884 10885
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

10886
	pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
M
Michael Chan 已提交
10887 10888
	dev_kfree_skb(skb);

10889
	if (tx_idx != tnapi->tx_prod)
M
Michael Chan 已提交
10890 10891 10892 10893 10894
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

10895
	desc = &rnapi->rx_rcb[rx_start_idx];
M
Michael Chan 已提交
10896 10897 10898 10899 10900 10901 10902 10903 10904 10905 10906 10907 10908
	desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
	opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
	if (opaque_key != RXD_OPAQUE_RING_STD)
		goto out;

	if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
	    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
		goto out;

	rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
	if (rx_len != tx_len)
		goto out;

10909
	rx_skb = tpr->rx_std_buffers[desc_idx].skb;
M
Michael Chan 已提交
10910

10911
	map = pci_unmap_addr(&tpr->rx_std_buffers[desc_idx], mapping);
M
Michael Chan 已提交
10912 10913 10914 10915 10916 10917 10918
	pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);

	for (i = 14; i < tx_len; i++) {
		if (*(rx_skb->data + i) != (u8) (i & 0xff))
			goto out;
	}
	err = 0;
10919

M
Michael Chan 已提交
10920 10921 10922 10923 10924
	/* tg3_free_rings will unmap and free the rx_skb */
out:
	return err;
}

M
Michael Chan 已提交
10925 10926 10927 10928 10929 10930 10931 10932
#define TG3_MAC_LOOPBACK_FAILED		1
#define TG3_PHY_LOOPBACK_FAILED		2
#define TG3_LOOPBACK_FAILED		(TG3_MAC_LOOPBACK_FAILED |	\
					 TG3_PHY_LOOPBACK_FAILED)

static int tg3_test_loopback(struct tg3 *tp)
{
	int err = 0;
M
Matt Carlson 已提交
10933
	u32 cpmuctrl = 0;
M
Michael Chan 已提交
10934 10935 10936 10937

	if (!netif_running(tp->dev))
		return TG3_LOOPBACK_FAILED;

M
Michael Chan 已提交
10938 10939 10940
	err = tg3_reset_hw(tp, 1);
	if (err)
		return TG3_LOOPBACK_FAILED;
M
Michael Chan 已提交
10941

10942 10943 10944 10945
	/* Turn off gphy autopowerdown. */
	if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
		tg3_phy_toggle_apd(tp, false);

M
Matt Carlson 已提交
10946
	if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
M
Matt Carlson 已提交
10947 10948 10949 10950 10951 10952 10953 10954 10955 10956 10957 10958 10959 10960 10961 10962
		int i;
		u32 status;

		tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);

		/* Wait for up to 40 microseconds to acquire lock. */
		for (i = 0; i < 4; i++) {
			status = tr32(TG3_CPMU_MUTEX_GNT);
			if (status == CPMU_MUTEX_GNT_DRIVER)
				break;
			udelay(10);
		}

		if (status != CPMU_MUTEX_GNT_DRIVER)
			return TG3_LOOPBACK_FAILED;

10963
		/* Turn off link-based power management. */
10964
		cpmuctrl = tr32(TG3_CPMU_CTRL);
10965 10966 10967
		tw32(TG3_CPMU_CTRL,
		     cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
				  CPMU_CTRL_LINK_AWARE_MODE));
M
Matt Carlson 已提交
10968 10969
	}

M
Michael Chan 已提交
10970 10971
	if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
		err |= TG3_MAC_LOOPBACK_FAILED;
M
Matt Carlson 已提交
10972

M
Matt Carlson 已提交
10973
	if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
M
Matt Carlson 已提交
10974 10975 10976 10977 10978 10979
		tw32(TG3_CPMU_CTRL, cpmuctrl);

		/* Release the mutex */
		tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
	}

M
Matt Carlson 已提交
10980 10981
	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
	    !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
M
Michael Chan 已提交
10982 10983 10984 10985
		if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
			err |= TG3_PHY_LOOPBACK_FAILED;
	}

10986 10987 10988 10989
	/* Re-enable gphy autopowerdown. */
	if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);

M
Michael Chan 已提交
10990 10991 10992
	return err;
}

10993 10994 10995
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
M
Michael Chan 已提交
10996 10997
	struct tg3 *tp = netdev_priv(dev);

M
Michael Chan 已提交
10998 10999 11000
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D0);

M
Michael Chan 已提交
11001 11002 11003 11004 11005 11006
	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
M
Michael Chan 已提交
11007 11008 11009 11010
	if (tg3_test_link(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
M
Michael Chan 已提交
11011
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
M
Matt Carlson 已提交
11012
		int err, err2 = 0, irq_sync = 0;
11013 11014

		if (netif_running(dev)) {
M
Matt Carlson 已提交
11015
			tg3_phy_stop(tp);
M
Michael Chan 已提交
11016
			tg3_netif_stop(tp);
11017 11018
			irq_sync = 1;
		}
M
Michael Chan 已提交
11019

11020
		tg3_full_lock(tp, irq_sync);
M
Michael Chan 已提交
11021 11022

		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
M
Michael Chan 已提交
11023
		err = tg3_nvram_lock(tp);
M
Michael Chan 已提交
11024 11025 11026
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
M
Michael Chan 已提交
11027 11028
		if (!err)
			tg3_nvram_unlock(tp);
M
Michael Chan 已提交
11029

11030 11031 11032
		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_phy_reset(tp);

M
Michael Chan 已提交
11033 11034 11035 11036
		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}
M
Michael Chan 已提交
11037 11038 11039 11040
		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}
M
Michael Chan 已提交
11041
		if ((data[4] = tg3_test_loopback(tp)) != 0)
M
Michael Chan 已提交
11042
			etest->flags |= ETH_TEST_FL_FAILED;
M
Michael Chan 已提交
11043

11044 11045
		tg3_full_unlock(tp);

M
Michael Chan 已提交
11046 11047 11048 11049
		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[5] = 1;
		}
11050 11051

		tg3_full_lock(tp, 0);
M
Michael Chan 已提交
11052

M
Michael Chan 已提交
11053 11054 11055
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
M
Matt Carlson 已提交
11056 11057
			err2 = tg3_restart_hw(tp, 1);
			if (!err2)
M
Michael Chan 已提交
11058
				tg3_netif_start(tp);
M
Michael Chan 已提交
11059
		}
11060 11061

		tg3_full_unlock(tp);
M
Matt Carlson 已提交
11062 11063 11064

		if (irq_sync && !err2)
			tg3_phy_start(tp);
M
Michael Chan 已提交
11065
	}
M
Michael Chan 已提交
11066 11067 11068
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D3hot);

11069 11070
}

L
Linus Torvalds 已提交
11071 11072 11073 11074 11075 11076
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

M
Matt Carlson 已提交
11077
	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
11078
		struct phy_device *phydev;
M
Matt Carlson 已提交
11079 11080
		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
			return -EAGAIN;
11081 11082
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_mii_ioctl(phydev, data, cmd);
M
Matt Carlson 已提交
11083 11084
	}

L
Linus Torvalds 已提交
11085 11086
	switch(cmd) {
	case SIOCGMIIPHY:
11087
		data->phy_id = tp->phy_addr;
L
Linus Torvalds 已提交
11088 11089 11090 11091 11092 11093 11094 11095

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
			break;			/* We have no PHY */

M
Michael Chan 已提交
11096 11097 11098
		if (tp->link_config.phy_is_low_power)
			return -EAGAIN;

11099
		spin_lock_bh(&tp->lock);
L
Linus Torvalds 已提交
11100
		err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
11101
		spin_unlock_bh(&tp->lock);
L
Linus Torvalds 已提交
11102 11103 11104 11105 11106 11107 11108 11109 11110 11111

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
			break;			/* We have no PHY */

M
Michael Chan 已提交
11112 11113 11114
		if (tp->link_config.phy_is_low_power)
			return -EAGAIN;

11115
		spin_lock_bh(&tp->lock);
L
Linus Torvalds 已提交
11116
		err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
11117
		spin_unlock_bh(&tp->lock);
L
Linus Torvalds 已提交
11118 11119 11120 11121 11122 11123 11124 11125 11126 11127 11128 11129 11130 11131 11132

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}

#if TG3_VLAN_TAG_USED
static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct tg3 *tp = netdev_priv(dev);

11133 11134 11135 11136 11137 11138
	if (!netif_running(dev)) {
		tp->vlgrp = grp;
		return;
	}

	tg3_netif_stop(tp);
11139

11140
	tg3_full_lock(tp, 0);
L
Linus Torvalds 已提交
11141 11142 11143 11144 11145 11146

	tp->vlgrp = grp;

	/* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
	__tg3_set_rx_mode(dev);

11147
	tg3_netif_start(tp);
11148 11149

	tg3_full_unlock(tp);
L
Linus Torvalds 已提交
11150 11151 11152
}
#endif

11153 11154 11155 11156 11157 11158 11159 11160
static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);

	memcpy(ec, &tp->coal, sizeof(*ec));
	return 0;
}

M
Michael Chan 已提交
11161 11162 11163 11164 11165 11166 11167 11168 11169 11170 11171 11172 11173 11174 11175 11176 11177 11178 11179 11180 11181 11182 11183 11184 11185 11186 11187 11188 11189 11190 11191 11192 11193 11194 11195 11196 11197 11198 11199 11200 11201 11202 11203 11204 11205 11206 11207 11208 11209 11210 11211 11212 11213 11214
static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
	}

	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
		return -EINVAL;

	/* No rx interrupts will be generated if both are zero */
	if ((ec->rx_coalesce_usecs == 0) &&
	    (ec->rx_max_coalesced_frames == 0))
		return -EINVAL;

	/* No tx interrupts will be generated if both are zero */
	if ((ec->tx_coalesce_usecs == 0) &&
	    (ec->tx_max_coalesced_frames == 0))
		return -EINVAL;

	/* Only copy relevant parameters, ignore all others. */
	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_coalesce(tp, &tp->coal);
		tg3_full_unlock(tp);
	}
	return 0;
}

11215
static const struct ethtool_ops tg3_ethtool_ops = {
L
Linus Torvalds 已提交
11216 11217 11218 11219 11220 11221 11222 11223 11224 11225 11226 11227 11228 11229 11230 11231 11232 11233 11234 11235 11236 11237 11238
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.get_rx_csum		= tg3_get_rx_csum,
	.set_rx_csum		= tg3_set_rx_csum,
	.set_tx_csum		= tg3_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= tg3_set_tso,
11239
	.self_test		= tg3_self_test,
L
Linus Torvalds 已提交
11240
	.get_strings		= tg3_get_strings,
M
Michael Chan 已提交
11241
	.phys_id		= tg3_phys_id,
L
Linus Torvalds 已提交
11242
	.get_ethtool_stats	= tg3_get_ethtool_stats,
11243
	.get_coalesce		= tg3_get_coalesce,
M
Michael Chan 已提交
11244
	.set_coalesce		= tg3_set_coalesce,
11245
	.get_sset_count		= tg3_get_sset_count,
L
Linus Torvalds 已提交
11246 11247 11248 11249
};

static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
{
M
Michael Chan 已提交
11250
	u32 cursize, val, magic;
L
Linus Torvalds 已提交
11251 11252 11253

	tp->nvram_size = EEPROM_CHIP_SIZE;

11254
	if (tg3_nvram_read(tp, 0, &magic) != 0)
L
Linus Torvalds 已提交
11255 11256
		return;

11257 11258 11259
	if ((magic != TG3_EEPROM_MAGIC) &&
	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
L
Linus Torvalds 已提交
11260 11261 11262 11263 11264 11265 11266
		return;

	/*
	 * Size the chip by reading offsets at increasing powers of two.
	 * When we encounter our validation signature, we know the addressing
	 * has wrapped around, and thus have our chip size.
	 */
M
Michael Chan 已提交
11267
	cursize = 0x10;
L
Linus Torvalds 已提交
11268 11269

	while (cursize < tp->nvram_size) {
11270
		if (tg3_nvram_read(tp, cursize, &val) != 0)
L
Linus Torvalds 已提交
11271 11272
			return;

M
Michael Chan 已提交
11273
		if (val == magic)
L
Linus Torvalds 已提交
11274 11275 11276 11277 11278 11279 11280
			break;

		cursize <<= 1;
	}

	tp->nvram_size = cursize;
}
11281

L
Linus Torvalds 已提交
11282 11283 11284 11285
static void __devinit tg3_get_nvram_size(struct tg3 *tp)
{
	u32 val;

M
Matt Carlson 已提交
11286 11287
	if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
	    tg3_nvram_read(tp, 0, &val) != 0)
M
Michael Chan 已提交
11288 11289 11290
		return;

	/* Selfboot format */
M
Michael Chan 已提交
11291
	if (val != TG3_EEPROM_MAGIC) {
M
Michael Chan 已提交
11292 11293 11294 11295
		tg3_get_eeprom_size(tp);
		return;
	}

11296
	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
L
Linus Torvalds 已提交
11297
		if (val != 0) {
11298 11299 11300 11301 11302 11303 11304 11305 11306 11307 11308 11309
			/* This is confusing.  We want to operate on the
			 * 16-bit value at offset 0xf2.  The tg3_nvram_read()
			 * call will read from NVRAM and byteswap the data
			 * according to the byteswapping settings for all
			 * other register accesses.  This ensures the data we
			 * want will always reside in the lower 16-bits.
			 * However, the data in NVRAM is in LE format, which
			 * means the data from the NVRAM read will always be
			 * opposite the endianness of the CPU.  The 16-bit
			 * byteswap then brings the data to CPU endianness.
			 */
			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
L
Linus Torvalds 已提交
11310 11311 11312
			return;
		}
	}
M
Matt Carlson 已提交
11313
	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
L
Linus Torvalds 已提交
11314 11315 11316 11317 11318 11319 11320 11321 11322
}

static void __devinit tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
11323
	} else {
L
Linus Torvalds 已提交
11324 11325 11326 11327
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

M
Michael Chan 已提交
11328
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
M
Michael Chan 已提交
11329
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
L
Linus Torvalds 已提交
11330
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
11331 11332 11333 11334 11335 11336 11337 11338 11339 11340 11341 11342 11343 11344 11345 11346 11347 11348 11349 11350 11351 11352 11353 11354 11355 11356 11357 11358
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
L
Linus Torvalds 已提交
11359
		}
11360
	} else {
L
Linus Torvalds 已提交
11361 11362 11363 11364 11365 11366
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
	}
}

11367 11368 11369 11370 11371 11372 11373 11374 11375 11376 11377 11378 11379 11380 11381 11382 11383 11384 11385 11386 11387 11388 11389 11390 11391 11392 11393
static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
{
	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
	case FLASH_5752PAGE_SIZE_256:
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5752PAGE_SIZE_512:
		tp->nvram_pagesize = 512;
		break;
	case FLASH_5752PAGE_SIZE_1K:
		tp->nvram_pagesize = 1024;
		break;
	case FLASH_5752PAGE_SIZE_2K:
		tp->nvram_pagesize = 2048;
		break;
	case FLASH_5752PAGE_SIZE_4K:
		tp->nvram_pagesize = 4096;
		break;
	case FLASH_5752PAGE_SIZE_264:
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752PAGE_SIZE_528:
		tp->nvram_pagesize = 528;
		break;
	}
}

11394 11395 11396 11397 11398 11399
static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

11400 11401
	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
11402
		tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
11403

11404
	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11405 11406 11407 11408 11409 11410 11411 11412 11413 11414 11415 11416 11417 11418 11419 11420 11421
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		break;
11422 11423 11424
	}

	if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
11425
		tg3_nvram_get_pagesize(tp, nvcfg1);
11426
	} else {
11427 11428 11429 11430 11431 11432 11433 11434
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}

M
Michael Chan 已提交
11435 11436
static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
{
M
Matt Carlson 已提交
11437
	u32 nvcfg1, protect = 0;
M
Michael Chan 已提交
11438 11439 11440 11441

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
M
Matt Carlson 已提交
11442
	if (nvcfg1 & (1 << 27)) {
11443
		tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
M
Matt Carlson 已提交
11444 11445
		protect = 1;
	}
M
Michael Chan 已提交
11446

M
Matt Carlson 已提交
11447 11448
	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
11449 11450 11451 11452 11453 11454 11455 11456 11457 11458 11459 11460 11461 11462 11463 11464 11465 11466 11467 11468 11469 11470 11471 11472 11473 11474 11475 11476 11477 11478 11479 11480 11481 11482 11483 11484 11485 11486 11487
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		tp->nvram_pagesize = 264;
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		tp->nvram_pagesize = 256;
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
M
Michael Chan 已提交
11488 11489 11490
	}
}

M
Michael Chan 已提交
11491 11492 11493 11494 11495 11496 11497
static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11498 11499 11500 11501 11502 11503 11504
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
M
Michael Chan 已提交
11505

11506 11507 11508 11509 11510 11511 11512 11513 11514 11515 11516 11517 11518 11519 11520 11521 11522 11523 11524 11525
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		tp->nvram_pagesize = 256;
		break;
M
Michael Chan 已提交
11526 11527 11528
	}
}

11529 11530 11531 11532 11533 11534 11535 11536
static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
11537
		tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
11538 11539 11540 11541 11542
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
11543 11544 11545 11546 11547 11548 11549 11550 11551 11552 11553 11554 11555 11556 11557 11558 11559 11560 11561 11562 11563 11564 11565 11566 11567 11568 11569
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		tp->nvram_pagesize = 256;
		break;
11570 11571 11572 11573 11574 11575
	}

	if (protect) {
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		switch (nvcfg1) {
11576 11577 11578 11579 11580 11581 11582 11583 11584 11585 11586 11587 11588 11589 11590 11591 11592 11593 11594 11595 11596 11597 11598 11599
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
11600 11601 11602 11603
		}
	}
}

M
Michael Chan 已提交
11604 11605 11606 11607 11608 11609 11610
static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
{
	tp->nvram_jedecnum = JEDEC_ATMEL;
	tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
}

M
Matt Carlson 已提交
11611 11612 11613 11614 11615 11616 11617 11618 11619 11620 11621 11622 11623 11624 11625 11626 11627 11628 11629 11630 11631 11632 11633 11634 11635 11636 11637 11638 11639 11640 11641 11642 11643 11644 11645 11646 11647 11648 11649 11650 11651 11652 11653 11654 11655 11656 11657 11658 11659 11660 11661 11662 11663 11664 11665 11666 11667 11668 11669 11670 11671 11672 11673
static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
M
Matt Carlson 已提交
11674
		tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM;
M
Matt Carlson 已提交
11675 11676 11677
		return;
	}

11678 11679
	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
M
Matt Carlson 已提交
11680
		tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
11681 11682 11683 11684 11685 11686 11687 11688 11689 11690 11691 11692 11693 11694 11695 11696 11697 11698 11699 11700 11701 11702 11703 11704 11705 11706 11707 11708 11709 11710 11711 11712 11713 11714 11715 11716 11717 11718 11719 11720
}


static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5717VENDOR_ATMEL_EEPROM:
	case FLASH_5717VENDOR_MICRO_EEPROM:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5717VENDOR_ATMEL_MDB011D:
	case FLASH_5717VENDOR_ATMEL_ADB011B:
	case FLASH_5717VENDOR_ATMEL_ADB011D:
	case FLASH_5717VENDOR_ATMEL_MDB021D:
	case FLASH_5717VENDOR_ATMEL_ADB021B:
	case FLASH_5717VENDOR_ATMEL_ADB021D:
	case FLASH_5717VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ATMEL_MDB021D:
		case FLASH_5717VENDOR_ATMEL_ADB021B:
		case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
M
Matt Carlson 已提交
11721
		break;
11722 11723 11724 11725 11726 11727 11728 11729 11730 11731 11732 11733 11734 11735 11736 11737 11738 11739 11740 11741 11742 11743 11744 11745 11746
	case FLASH_5717VENDOR_ST_M_M25PE10:
	case FLASH_5717VENDOR_ST_A_M25PE10:
	case FLASH_5717VENDOR_ST_M_M45PE10:
	case FLASH_5717VENDOR_ST_A_M45PE10:
	case FLASH_5717VENDOR_ST_M_M25PE20:
	case FLASH_5717VENDOR_ST_A_M25PE20:
	case FLASH_5717VENDOR_ST_M_M45PE20:
	case FLASH_5717VENDOR_ST_A_M45PE20:
	case FLASH_5717VENDOR_ST_25USPT:
	case FLASH_5717VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ST_M_M25PE20:
		case FLASH_5717VENDOR_ST_A_M25PE20:
		case FLASH_5717VENDOR_ST_M_M45PE20:
		case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
M
Matt Carlson 已提交
11747
		break;
11748 11749 11750
	default:
		tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM;
		return;
M
Matt Carlson 已提交
11751
	}
11752 11753 11754 11755

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
M
Matt Carlson 已提交
11756 11757
}

L
Linus Torvalds 已提交
11758 11759 11760 11761 11762 11763 11764 11765
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
static void __devinit tg3_nvram_init(struct tg3 *tp)
{
	tw32_f(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
	       EEPROM_ADDR_CLKPERD_SHIFT)));

M
Michael Chan 已提交
11766
	msleep(1);
L
Linus Torvalds 已提交
11767 11768 11769 11770 11771 11772 11773 11774 11775 11776

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
	udelay(100);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
		tp->tg3_flags |= TG3_FLAG_NVRAM;

M
Michael Chan 已提交
11777
		if (tg3_nvram_lock(tp)) {
11778 11779
			netdev_warn(tp->dev, "Cannot get nvram lock, %s failed\n",
				    __func__);
M
Michael Chan 已提交
11780 11781
			return;
		}
11782
		tg3_enable_nvram_access(tp);
L
Linus Torvalds 已提交
11783

M
Matt Carlson 已提交
11784 11785
		tp->nvram_size = 0;

11786 11787
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
M
Michael Chan 已提交
11788 11789
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
M
Matt Carlson 已提交
11790
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
M
Matt Carlson 已提交
11791 11792
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
M
Michael Chan 已提交
11793
			tg3_get_5787_nvram_info(tp);
11794 11795
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
M
Michael Chan 已提交
11796 11797
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
M
Matt Carlson 已提交
11798 11799
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
M
Matt Carlson 已提交
11800
			tg3_get_57780_nvram_info(tp);
11801 11802
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
			tg3_get_5717_nvram_info(tp);
11803 11804 11805
		else
			tg3_get_nvram_info(tp);

M
Matt Carlson 已提交
11806 11807
		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);
L
Linus Torvalds 已提交
11808

11809
		tg3_disable_nvram_access(tp);
M
Michael Chan 已提交
11810
		tg3_nvram_unlock(tp);
L
Linus Torvalds 已提交
11811 11812 11813 11814 11815 11816 11817 11818 11819 11820 11821 11822 11823 11824 11825

	} else {
		tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}

static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
				    u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
A
Al Viro 已提交
11826
		u32 addr;
11827
		__be32 data;
L
Linus Torvalds 已提交
11828 11829 11830 11831 11832

		addr = offset + i;

		memcpy(&data, buf + i, 4);

M
Matt Carlson 已提交
11833 11834 11835 11836 11837 11838 11839
		/*
		 * The SEEPROM interface expects the data to always be opposite
		 * the native endian format.  We accomplish this by reversing
		 * all the operations that would have been performed on the
		 * data from a call to tg3_nvram_read_be32().
		 */
		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
L
Linus Torvalds 已提交
11840 11841 11842 11843 11844 11845 11846 11847 11848 11849 11850

		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);
11851

M
Michael Chan 已提交
11852
		for (j = 0; j < 1000; j++) {
L
Linus Torvalds 已提交
11853 11854 11855 11856
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
M
Michael Chan 已提交
11857
			msleep(1);
L
Linus Torvalds 已提交
11858 11859 11860 11861 11862 11863 11864 11865 11866 11867 11868 11869 11870 11871 11872 11873 11874 11875 11876 11877 11878 11879 11880 11881 11882 11883
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}

/* offset and length are dword aligned */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
11884
		u32 phy_addr, page_off, size;
L
Linus Torvalds 已提交
11885 11886

		phy_addr = offset & ~pagemask;
11887

L
Linus Torvalds 已提交
11888
		for (j = 0; j < pagesize; j += 4) {
11889 11890 11891
			ret = tg3_nvram_read_be32(tp, phy_addr + j,
						  (__be32 *) (tmp + j));
			if (ret)
L
Linus Torvalds 已提交
11892 11893 11894 11895 11896 11897 11898 11899 11900 11901 11902 11903 11904 11905 11906 11907
				break;
		}
		if (ret)
			break;

	        page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		memcpy(tmp + page_off, buf, size);

		offset = offset + (pagesize - page_off);

11908
		tg3_enable_nvram_access(tp);
L
Linus Torvalds 已提交
11909 11910 11911 11912 11913 11914 11915 11916 11917 11918 11919 11920 11921 11922 11923 11924 11925 11926 11927 11928 11929 11930 11931 11932 11933 11934

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

	        if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		for (j = 0; j < pagesize; j += 4) {
A
Al Viro 已提交
11935
			__be32 data;
L
Linus Torvalds 已提交
11936

A
Al Viro 已提交
11937
			data = *((__be32 *) (tmp + j));
11938

A
Al Viro 已提交
11939
			tw32(NVRAM_WRDATA, be32_to_cpu(data));
L
Linus Torvalds 已提交
11940 11941 11942 11943 11944 11945 11946 11947 11948 11949 11950 11951 11952 11953 11954 11955 11956 11957 11958 11959 11960 11961 11962 11963 11964 11965 11966 11967 11968 11969 11970 11971 11972

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
				break;
		}
		if (ret)
			break;
	}

	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}

/* offset and length are dword aligned */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
A
Al Viro 已提交
11973 11974
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;
L
Linus Torvalds 已提交
11975 11976

		memcpy(&data, buf + i, 4);
A
Al Viro 已提交
11977
		tw32(NVRAM_WRDATA, be32_to_cpu(data));
L
Linus Torvalds 已提交
11978 11979 11980

	        page_off = offset % tp->nvram_pagesize;

M
Michael Chan 已提交
11981
		phy_addr = tg3_nvram_phys_addr(tp, offset);
L
Linus Torvalds 已提交
11982 11983 11984 11985 11986 11987 11988

		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

	        if ((page_off == 0) || (i == 0))
			nvram_cmd |= NVRAM_CMD_FIRST;
M
Michael Chan 已提交
11989
		if (page_off == (tp->nvram_pagesize - 4))
L
Linus Torvalds 已提交
11990 11991 11992 11993 11994
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

M
Matt Carlson 已提交
11995 11996
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
		    !(tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
M
Michael Chan 已提交
11997 11998
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {
L
Linus Torvalds 已提交
11999 12000 12001 12002 12003 12004 12005 12006 12007 12008 12009 12010 12011 12012 12013 12014 12015 12016 12017 12018 12019 12020 12021 12022

			if ((ret = tg3_nvram_exec_cmd(tp,
				NVRAM_CMD_WREN | NVRAM_CMD_GO |
				NVRAM_CMD_DONE)))

				break;
		}
		if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
			break;
	}
	return ret;
}

/* offset and length are dword aligned */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
	int ret;

	if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
M
Michael Chan 已提交
12023 12024
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
L
Linus Torvalds 已提交
12025 12026 12027 12028 12029 12030 12031 12032 12033
		udelay(40);
	}

	if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
	}
	else {
		u32 grc_mode;

M
Michael Chan 已提交
12034 12035 12036
		ret = tg3_nvram_lock(tp);
		if (ret)
			return ret;
L
Linus Torvalds 已提交
12037

12038 12039
		tg3_enable_nvram_access(tp);
		if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
12040
		    !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM))
L
Linus Torvalds 已提交
12041 12042 12043 12044 12045 12046 12047 12048 12049 12050 12051 12052 12053 12054 12055 12056 12057 12058 12059
			tw32(NVRAM_WRITE1, 0x406);

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

		if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
			!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {

			ret = tg3_nvram_write_block_buffered(tp, offset, len,
				buf);
		}
		else {
			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
				buf);
		}

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

12060
		tg3_disable_nvram_access(tp);
L
Linus Torvalds 已提交
12061 12062 12063 12064
		tg3_nvram_unlock(tp);
	}

	if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
M
Michael Chan 已提交
12065
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
L
Linus Torvalds 已提交
12066 12067 12068 12069 12070 12071 12072 12073 12074 12075 12076
		udelay(40);
	}

	return ret;
}

struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};

12077
static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
L
Linus Torvalds 已提交
12078
	/* Broadcom boards. */
12079
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
12080
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
12081
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
12082
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
12083
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
12084
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
12085 12086 12087
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
12088
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
12089
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
12090
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
12091 12092 12093
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
12094
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
12095
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
12096
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
12097
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
12098
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
12099
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
12100
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
L
Linus Torvalds 已提交
12101 12102

	/* 3com boards. */
12103
	{ TG3PCI_SUBVENDOR_ID_3COM,
12104
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
12105
	{ TG3PCI_SUBVENDOR_ID_3COM,
12106
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
12107 12108 12109
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
12110
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
12111
	{ TG3PCI_SUBVENDOR_ID_3COM,
12112
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
L
Linus Torvalds 已提交
12113 12114

	/* DELL boards. */
12115
	{ TG3PCI_SUBVENDOR_ID_DELL,
12116
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
12117
	{ TG3PCI_SUBVENDOR_ID_DELL,
12118
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
12119
	{ TG3PCI_SUBVENDOR_ID_DELL,
12120
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
12121
	{ TG3PCI_SUBVENDOR_ID_DELL,
12122
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
L
Linus Torvalds 已提交
12123 12124

	/* Compaq boards. */
12125
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
12126
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
12127
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
12128
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
12129 12130 12131
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
12132
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
12133
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
12134
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
L
Linus Torvalds 已提交
12135 12136

	/* IBM boards. */
12137 12138
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
L
Linus Torvalds 已提交
12139 12140
};

12141
static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
L
Linus Torvalds 已提交
12142 12143 12144 12145 12146 12147 12148 12149 12150 12151 12152 12153 12154
{
	int i;

	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
		if ((subsys_id_to_phy_id[i].subsys_vendor ==
		     tp->pdev->subsystem_vendor) &&
		    (subsys_id_to_phy_id[i].subsys_devid ==
		     tp->pdev->subsystem_device))
			return &subsys_id_to_phy_id[i];
	}
	return NULL;
}

12155
static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
L
Linus Torvalds 已提交
12156 12157
{
	u32 val;
12158 12159 12160 12161 12162 12163 12164 12165 12166
	u16 pmcsr;

	/* On some early chips the SRAM cannot be accessed in D3hot state,
	 * so need make sure we're in D0.
	 */
	pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
	pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
	msleep(1);
12167 12168 12169 12170 12171 12172

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);
L
Linus Torvalds 已提交
12173

12174 12175 12176 12177 12178 12179 12180 12181
	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed.  Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

12182
	tp->phy_id = TG3_PHY_ID_INVALID;
12183 12184
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

G
Gary Zambrano 已提交
12185 12186
	/* Assume an onboard device and WOL capable by default.  */
	tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
12187

M
Michael Chan 已提交
12188
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
M
Michael Chan 已提交
12189
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
M
Michael Chan 已提交
12190
			tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
M
Michael Chan 已提交
12191 12192
			tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
		}
M
Matt Carlson 已提交
12193 12194
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
M
Matt Carlson 已提交
12195
			tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
M
Matt Carlson 已提交
12196
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
M
Matt Carlson 已提交
12197
		    (val & VCPU_CFGSHDW_WOL_MAGPKT))
M
Matt Carlson 已提交
12198
			tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
12199
		goto done;
M
Michael Chan 已提交
12200 12201
	}

L
Linus Torvalds 已提交
12202 12203 12204
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
M
Matt Carlson 已提交
12205
		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
12206
		int eeprom_phy_serdes = 0;
L
Linus Torvalds 已提交
12207 12208 12209 12210 12211 12212 12213 12214 12215 12216 12217 12218

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

M
Matt Carlson 已提交
12219 12220 12221
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

L
Linus Torvalds 已提交
12222 12223 12224 12225 12226 12227 12228 12229 12230 12231 12232 12233 12234 12235 12236
		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
		} else
			eeprom_phy_id = 0;

12237
		tp->phy_id = eeprom_phy_id;
M
Michael Chan 已提交
12238
		if (eeprom_phy_serdes) {
12239 12240
			if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
M
Michael Chan 已提交
12241 12242 12243 12244
				tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
			else
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}
12245

12246
		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
L
Linus Torvalds 已提交
12247 12248
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
12249
		else
L
Linus Torvalds 已提交
12250 12251 12252 12253 12254 12255 12256 12257 12258 12259 12260 12261 12262 12263
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;
M
Michael Chan 已提交
12264 12265 12266 12267 12268 12269 12270 12271 12272 12273

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

L
Linus Torvalds 已提交
12274 12275 12276 12277 12278 12279 12280 12281 12282 12283 12284 12285 12286 12287 12288 12289 12290 12291 12292 12293 12294
			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

12295
		}
L
Linus Torvalds 已提交
12296 12297 12298 12299 12300 12301

		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

12302 12303
		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12304

M
Michael Chan 已提交
12305
		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
L
Linus Torvalds 已提交
12306
			tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
M
Michael Chan 已提交
12307 12308 12309 12310 12311 12312
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
		} else {
12313
			tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
M
Michael Chan 已提交
12314 12315
			tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
		}
L
Linus Torvalds 已提交
12316 12317 12318

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
12319
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
L
Linus Torvalds 已提交
12320 12321
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}
M
Matt Carlson 已提交
12322 12323 12324

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
			(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
M
Matt Carlson 已提交
12325
			tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
M
Matt Carlson 已提交
12326

G
Gary Zambrano 已提交
12327 12328 12329
		if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
L
Linus Torvalds 已提交
12330

12331
		if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
12332
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE))
M
Matt Carlson 已提交
12333 12334
			tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;

L
Linus Torvalds 已提交
12335 12336 12337 12338 12339 12340 12341
		if (cfg2 & (1 << 17))
			tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
M
Matt Carlson 已提交
12342

M
Matt Carlson 已提交
12343 12344
		if (((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
12345 12346 12347
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->tg3_flags3 |= TG3_FLG3_PHY_ENABLE_APD;

M
Matt Carlson 已提交
12348 12349 12350 12351 12352 12353 12354
		if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
				tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
		}
M
Matt Carlson 已提交
12355

12356 12357
		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tp->tg3_flags3 |= TG3_FLG3_RGMII_INBAND_DISABLE;
M
Matt Carlson 已提交
12358 12359 12360 12361
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
L
Linus Torvalds 已提交
12362
	}
12363 12364 12365 12366
done:
	device_init_wakeup(&tp->pdev->dev, tp->tg3_flags & TG3_FLAG_WOL_CAP);
	device_set_wakeup_enable(&tp->pdev->dev,
				 tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
12367 12368
}

12369 12370 12371 12372 12373 12374 12375 12376 12377 12378 12379 12380 12381 12382 12383 12384 12385 12386 12387 12388 12389 12390 12391 12392 12393 12394 12395 12396 12397 12398 12399 12400 12401 12402 12403 12404 12405 12406 12407 12408 12409 12410 12411 12412 12413 12414 12415 12416 12417
static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
{
	int i;
	u32 val;

	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
	tw32(OTP_CTRL, cmd);

	/* Wait for up to 1 ms for command to execute. */
	for (i = 0; i < 100; i++) {
		val = tr32(OTP_STATUS);
		if (val & OTP_STATUS_CMD_DONE)
			break;
		udelay(10);
	}

	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
}

/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 */
static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}

12418 12419 12420 12421 12422
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;
L
Linus Torvalds 已提交
12423

M
Matt Carlson 已提交
12424 12425 12426
	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
		return tg3_phy_init(tp);

L
Linus Torvalds 已提交
12427
	/* Reading the PHY ID register can conflict with ASF
12428
	 * firmware access to the PHY hardware.
L
Linus Torvalds 已提交
12429 12430
	 */
	err = 0;
M
Matt Carlson 已提交
12431 12432
	if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
	    (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
12433
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
L
Linus Torvalds 已提交
12434 12435 12436 12437 12438 12439 12440 12441 12442 12443 12444 12445 12446
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

12447
		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
L
Linus Torvalds 已提交
12448 12449
	}

12450
	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
L
Linus Torvalds 已提交
12451
		tp->phy_id = hw_phy_id;
12452
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
L
Linus Torvalds 已提交
12453
			tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
M
Michael Chan 已提交
12454 12455
		else
			tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
L
Linus Torvalds 已提交
12456
	} else {
12457
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
12458 12459 12460
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
L
Linus Torvalds 已提交
12461 12462 12463 12464 12465 12466
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
12467
			p = tg3_lookup_by_subsys(tp);
L
Linus Torvalds 已提交
12468 12469 12470 12471 12472
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			if (!tp->phy_id ||
12473
			    tp->phy_id == TG3_PHY_ID_BCM8002)
L
Linus Torvalds 已提交
12474 12475 12476 12477
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}
	}

M
Michael Chan 已提交
12478
	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
M
Matt Carlson 已提交
12479
	    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
L
Linus Torvalds 已提交
12480
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
12481
		u32 bmsr, adv_reg, tg3_ctrl, mask;
L
Linus Torvalds 已提交
12482 12483 12484 12485 12486

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;
12487

L
Linus Torvalds 已提交
12488 12489 12490 12491 12492 12493 12494 12495 12496 12497 12498 12499 12500 12501 12502 12503 12504
		err = tg3_phy_reset(tp);
		if (err)
			return err;

		adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_100HALF | ADVERTISE_100FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		tg3_ctrl = 0;
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
			tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
				    MII_TG3_CTRL_ADV_1000_FULL);
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
					     MII_TG3_CTRL_ENABLE_AS_MASTER);
		}

12505 12506 12507 12508
		mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
		if (!tg3_copper_is_advertising_all(tp, mask)) {
L
Linus Torvalds 已提交
12509 12510 12511 12512 12513 12514 12515 12516 12517 12518 12519 12520 12521 12522 12523 12524
			tg3_writephy(tp, MII_ADVERTISE, adv_reg);

			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
				tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
		tg3_phy_set_wirespeed(tp);

		tg3_writephy(tp, MII_ADVERTISE, adv_reg);
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
			tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
	}

skip_phy_reset:
12525
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
L
Linus Torvalds 已提交
12526 12527 12528 12529 12530 12531 12532
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		err = tg3_init_5401phy_dsp(tp);
	}

M
Michael Chan 已提交
12533
	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
L
Linus Torvalds 已提交
12534 12535 12536 12537 12538 12539 12540 12541 12542 12543 12544 12545 12546 12547 12548
		tp->link_config.advertising =
			(ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_Autoneg |
			 ADVERTISED_FIBRE);
	if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
		tp->link_config.advertising &=
			~(ADVERTISED_1000baseT_Half |
			  ADVERTISED_1000baseT_Full);

	return err;
}

static void __devinit tg3_read_partno(struct tg3 *tp)
{
12549
	unsigned char vpd_data[TG3_NVM_VPD_LEN];   /* in little-endian format */
12550
	unsigned int i;
M
Michael Chan 已提交
12551
	u32 magic;
L
Linus Torvalds 已提交
12552

M
Matt Carlson 已提交
12553 12554
	if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
	    tg3_nvram_read(tp, 0x0, &magic))
12555
		goto out_not_found;
L
Linus Torvalds 已提交
12556

M
Michael Chan 已提交
12557
	if (magic == TG3_EEPROM_MAGIC) {
12558
		for (i = 0; i < TG3_NVM_VPD_LEN; i += 4) {
M
Michael Chan 已提交
12559
			u32 tmp;
L
Linus Torvalds 已提交
12560

12561 12562 12563 12564
			/* The data is in little-endian format in NVRAM.
			 * Use the big-endian read routines to preserve
			 * the byte order as it exists in NVRAM.
			 */
12565
			if (tg3_nvram_read_be32(tp, TG3_NVM_VPD_OFF + i, &tmp))
M
Michael Chan 已提交
12566 12567
				goto out_not_found;

12568
			memcpy(&vpd_data[i], &tmp, sizeof(tmp));
M
Michael Chan 已提交
12569 12570
		}
	} else {
12571 12572 12573 12574 12575 12576 12577 12578 12579 12580
		ssize_t cnt;
		unsigned int pos = 0, i = 0;

		for (; pos < TG3_NVM_VPD_LEN && i < 3; i++, pos += cnt) {
			cnt = pci_read_vpd(tp->pdev, pos,
					   TG3_NVM_VPD_LEN - pos,
					   &vpd_data[pos]);
			if (cnt == -ETIMEDOUT || -EINTR)
				cnt = 0;
			else if (cnt < 0)
12581
				goto out_not_found;
M
Michael Chan 已提交
12582
		}
12583 12584
		if (pos != TG3_NVM_VPD_LEN)
			goto out_not_found;
L
Linus Torvalds 已提交
12585 12586 12587
	}

	/* Now parse and find the part number. */
12588
	for (i = 0; i < TG3_NVM_VPD_LEN - 2; ) {
L
Linus Torvalds 已提交
12589
		unsigned char val = vpd_data[i];
12590
		unsigned int block_end;
L
Linus Torvalds 已提交
12591

12592 12593 12594 12595 12596 12597 12598 12599 12600 12601 12602 12603 12604 12605 12606 12607 12608
		if (val & PCI_VPD_LRDT) {
			if (i + PCI_VPD_LRDT_TAG_SIZE > TG3_NVM_VPD_LEN)
				break;

			if (val != PCI_VPD_LRDT_RO_DATA) {
				i += PCI_VPD_LRDT_TAG_SIZE +
				     pci_vpd_lrdt_size(&vpd_data[i]);

				continue;
			}
		} else {
			if ((val & PCI_VPD_SRDT_TIN_MASK) == PCI_VPD_STIN_END)
				break;

			i += PCI_VPD_SRDT_TAG_SIZE +
			     pci_vpd_srdt_size(&vpd_data[i]);

L
Linus Torvalds 已提交
12609 12610 12611
			continue;
		}

12612 12613 12614 12615
		block_end = i + PCI_VPD_LRDT_TAG_SIZE +
			    pci_vpd_lrdt_size(&vpd_data[i]);

		i += PCI_VPD_LRDT_TAG_SIZE;
12616

12617
		if (block_end > TG3_NVM_VPD_LEN)
12618 12619 12620
			goto out_not_found;

		while (i < (block_end - 2)) {
L
Linus Torvalds 已提交
12621 12622 12623 12624
			if (vpd_data[i + 0] == 'P' &&
			    vpd_data[i + 1] == 'N') {
				int partno_len = vpd_data[i + 2];

12625
				i += 3;
12626 12627
				if (partno_len > TG3_BPN_SIZE ||
				    (partno_len + i) > TG3_NVM_VPD_LEN)
L
Linus Torvalds 已提交
12628 12629 12630
					goto out_not_found;

				memcpy(tp->board_part_number,
12631
				       &vpd_data[i], partno_len);
L
Linus Torvalds 已提交
12632 12633 12634 12635

				/* Success. */
				return;
			}
12636
			i += 3 + vpd_data[i + 2];
L
Linus Torvalds 已提交
12637 12638 12639 12640 12641 12642 12643
		}

		/* Part number not found. */
		goto out_not_found;
	}

out_not_found:
M
Michael Chan 已提交
12644 12645
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		strcpy(tp->board_part_number, "BCM95906");
M
Matt Carlson 已提交
12646 12647 12648 12649 12650 12651 12652 12653 12654
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
		 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
		strcpy(tp->board_part_number, "BCM57780");
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
		 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
		strcpy(tp->board_part_number, "BCM57760");
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
		 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
		strcpy(tp->board_part_number, "BCM57790");
M
Matt Carlson 已提交
12655 12656 12657
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
		 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
		strcpy(tp->board_part_number, "BCM57788");
12658 12659 12660 12661 12662
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
		 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
		strcpy(tp->board_part_number, "BCM57761");
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
		 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
M
Matt Carlson 已提交
12663
		strcpy(tp->board_part_number, "BCM57765");
12664 12665 12666 12667 12668 12669 12670 12671 12672 12673 12674 12675
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
		 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
		strcpy(tp->board_part_number, "BCM57781");
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
		 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
		strcpy(tp->board_part_number, "BCM57785");
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
		 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
		strcpy(tp->board_part_number, "BCM57791");
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
		 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
		strcpy(tp->board_part_number, "BCM57795");
M
Michael Chan 已提交
12676 12677
	else
		strcpy(tp->board_part_number, "none");
L
Linus Torvalds 已提交
12678 12679
}

12680 12681 12682 12683
static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
{
	u32 val;

12684
	if (tg3_nvram_read(tp, offset, &val) ||
12685
	    (val & 0xfc000000) != 0x0c000000 ||
12686
	    tg3_nvram_read(tp, offset + 4, &val) ||
12687 12688 12689 12690 12691 12692
	    val != 0)
		return 0;

	return 1;
}

12693 12694
static void __devinit tg3_read_bc_ver(struct tg3 *tp)
{
12695
	u32 val, offset, start, ver_offset;
12696
	int i;
12697
	bool newver = false;
12698 12699 12700 12701 12702 12703 12704

	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

12705
	if (tg3_nvram_read(tp, offset, &val))
12706 12707
		return;

12708 12709
	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
12710 12711
			return;

12712 12713 12714 12715 12716 12717 12718 12719 12720 12721 12722 12723 12724 12725 12726 12727 12728 12729 12730 12731 12732 12733 12734 12735 12736 12737
		if (val == 0)
			newver = true;
	}

	if (newver) {
		if (tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		offset = offset + ver_offset - start;
		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[0], 32, "v%d.%02d", major, minor);
12738 12739 12740
	}
}

12741 12742 12743 12744 12745 12746 12747 12748 12749 12750 12751 12752 12753 12754 12755 12756
static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
{
	u32 val, major, minor;

	/* Use native endian representation */
	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
		return;

	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
		TG3_NVM_HWSB_CFG1_MAJSFT;
	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
		TG3_NVM_HWSB_CFG1_MINSFT;

	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
}

12757 12758 12759 12760 12761 12762 12763 12764 12765 12766 12767 12768 12769 12770 12771 12772 12773 12774 12775 12776 12777
static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	tp->fw_ver[0] = 's';
	tp->fw_ver[1] = 'b';
	tp->fw_ver[2] = '\0';

	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
12778 12779 12780 12781 12782 12783
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
12784 12785 12786 12787
	default:
		return;
	}

12788
	if (tg3_nvram_read(tp, offset, &val))
12789 12790 12791 12792 12793 12794 12795 12796 12797 12798 12799 12800 12801 12802 12803 12804 12805 12806 12807
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;

	if (minor > 99 || build > 26)
		return;

	snprintf(&tp->fw_ver[2], 30, " v%d.%02d", major, minor);

	if (build > 0) {
		tp->fw_ver[8] = 'a' + build - 1;
		tp->fw_ver[9] = '\0';
	}
}

12808
static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
M
Michael Chan 已提交
12809 12810
{
	u32 val, offset, start;
12811
	int i, vlen;
12812 12813 12814 12815

	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
12816
		if (tg3_nvram_read(tp, offset, &val))
M
Michael Chan 已提交
12817 12818
			return;

12819 12820 12821 12822 12823 12824 12825 12826 12827
		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
		start = 0x08000000;
12828
	else if (tg3_nvram_read(tp, offset - 4, &start))
12829 12830
		return;

12831
	if (tg3_nvram_read(tp, offset + 4, &offset) ||
12832
	    !tg3_fw_img_is_valid(tp, offset) ||
12833
	    tg3_nvram_read(tp, offset + 8, &val))
12834 12835 12836 12837
		return;

	offset += val - start;

12838
	vlen = strlen(tp->fw_ver);
12839

12840 12841
	tp->fw_ver[vlen++] = ',';
	tp->fw_ver[vlen++] = ' ';
12842 12843

	for (i = 0; i < 4; i++) {
12844 12845
		__be32 v;
		if (tg3_nvram_read_be32(tp, offset, &v))
M
Michael Chan 已提交
12846 12847
			return;

A
Al Viro 已提交
12848
		offset += sizeof(v);
M
Michael Chan 已提交
12849

12850 12851
		if (vlen > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
12852
			break;
M
Michael Chan 已提交
12853
		}
12854

12855 12856
		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
		vlen += sizeof(v);
M
Michael Chan 已提交
12857
	}
12858 12859
}

12860 12861 12862 12863 12864 12865 12866 12867 12868 12869 12870 12871 12872 12873 12874 12875 12876 12877 12878 12879 12880 12881 12882 12883 12884 12885 12886 12887
static void __devinit tg3_read_dash_ver(struct tg3 *tp)
{
	int vlen;
	u32 apedata;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) ||
	    !(tp->tg3_flags  & TG3_FLAG_ENABLE_ASF))
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);

	vlen = strlen(tp->fw_ver);

	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " DASH v%d.%d.%d.%d",
		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
		 (apedata & APE_FW_VERSION_BLDMSK));
}

12888 12889 12890 12891
static void __devinit tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val;

M
Matt Carlson 已提交
12892 12893 12894 12895 12896 12897 12898 12899
	if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) {
		tp->fw_ver[0] = 's';
		tp->fw_ver[1] = 'b';
		tp->fw_ver[2] = '\0';

		return;
	}

12900 12901 12902 12903 12904 12905 12906
	if (tg3_nvram_read(tp, 0, &val))
		return;

	if (val == TG3_EEPROM_MAGIC)
		tg3_read_bc_ver(tp);
	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
		tg3_read_sb_ver(tp, val);
12907 12908
	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		tg3_read_hwsb_ver(tp);
12909 12910 12911 12912 12913 12914 12915 12916
	else
		return;

	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
	     (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return;

	tg3_read_mgmtfw_ver(tp);
12917 12918

	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
M
Michael Chan 已提交
12919 12920
}

12921 12922
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);

L
Linus Torvalds 已提交
12923 12924 12925 12926 12927
static int __devinit tg3_get_invariants(struct tg3 *tp)
{
	static struct pci_device_id write_reorder_chipsets[] = {
		{ PCI_DEVICE(PCI_VENDOR_ID_AMD,
		             PCI_DEVICE_ID_AMD_FE_GATE_700C) },
12928 12929
		{ PCI_DEVICE(PCI_VENDOR_ID_AMD,
		             PCI_DEVICE_ID_AMD_8131_BRIDGE) },
12930 12931
		{ PCI_DEVICE(PCI_VENDOR_ID_VIA,
			     PCI_DEVICE_ID_VIA_8385_0) },
L
Linus Torvalds 已提交
12932 12933 12934 12935 12936 12937
		{ },
	};
	u32 misc_ctrl_reg;
	u32 pci_state_reg, grc_misc_cfg;
	u32 val;
	u16 pci_cmd;
12938
	int err;
L
Linus Torvalds 已提交
12939 12940 12941 12942 12943 12944 12945 12946 12947 12948 12949 12950 12951 12952 12953 12954 12955 12956 12957 12958 12959 12960 12961

	/* Force memory write invalidate off.  If we leave it on,
	 * then on 5700_BX chips we have to enable a workaround.
	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
	 * to match the cacheline size.  The Broadcom driver have this
	 * workaround but turns MWI off all the times so never uses
	 * it.  This seems to suggest that the workaround is insufficient.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	/* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
	 * has the register indirect write enable bit set before
	 * we try to access any of the MMIO registers.  It is also
	 * critical that the PCI-X hw workaround situation is decided
	 * before that as well.
	 */
	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			      &misc_ctrl_reg);

	tp->pci_chip_rev_id = (misc_ctrl_reg >>
			       MISC_HOST_CTRL_CHIPREV_SHIFT);
12962 12963 12964
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
		u32 prod_id_asic_rev;

M
Matt Carlson 已提交
12965 12966 12967
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5724)
M
Matt Carlson 已提交
12968 12969 12970
			pci_read_config_dword(tp->pdev,
					      TG3PCI_GEN2_PRODID_ASICREV,
					      &prod_id_asic_rev);
M
Matt Carlson 已提交
12971 12972 12973 12974 12975 12976 12977 12978 12979
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			pci_read_config_dword(tp->pdev,
					      TG3PCI_GEN15_PRODID_ASICREV,
					      &prod_id_asic_rev);
M
Matt Carlson 已提交
12980 12981 12982 12983
		else
			pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
					      &prod_id_asic_rev);

M
Matt Carlson 已提交
12984
		tp->pci_chip_rev_id = prod_id_asic_rev;
12985
	}
L
Linus Torvalds 已提交
12986

M
Michael Chan 已提交
12987 12988 12989 12990 12991 12992
	/* Wrong chip ID in 5752 A0. This code can be removed later
	 * as A0 is not in production.
	 */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;

12993 12994 12995 12996 12997 12998 12999 13000 13001 13002 13003 13004 13005 13006 13007 13008 13009 13010 13011 13012 13013 13014 13015 13016 13017 13018 13019 13020 13021 13022 13023 13024 13025 13026 13027 13028 13029 13030 13031 13032 13033 13034 13035 13036 13037
	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
	 * we need to disable memory and use config. cycles
	 * only to access all registers. The 5702/03 chips
	 * can mistakenly decode the special cycles from the
	 * ICH chipsets as memory write cycles, causing corruption
	 * of register and memory space. Only certain ICH bridges
	 * will drive special cycles with non-zero data during the
	 * address phase which can fall within the 5703's address
	 * range. This is not an ICH bug as the PCI spec allows
	 * non-zero address during special cycles. However, only
	 * these ICH bridges are known to drive non-zero addresses
	 * during special cycles.
	 *
	 * Since special cycles do not cross PCI bridges, we only
	 * enable this workaround if the 5703 is on the secondary
	 * bus of these ICH bridges.
	 */
	if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
	    (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
		static struct tg3_dev_id {
			u32	vendor;
			u32	device;
			u32	rev;
		} ich_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
			  0xa },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
			  PCI_ANY_ID },
			{ },
		};
		struct tg3_dev_id *pci_id = &ich_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor, pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (pci_id->rev != PCI_ANY_ID) {
13038
				if (bridge->revision > pci_id->rev)
13039 13040 13041 13042 13043 13044 13045 13046 13047 13048 13049 13050 13051
					continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number ==
			     tp->pdev->bus->number)) {

				tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
				pci_dev_put(bridge);
				break;
			}
		}
	}

M
Matt Carlson 已提交
13052 13053 13054 13055 13056 13057 13058 13059 13060 13061 13062 13063 13064 13065 13066 13067 13068 13069 13070 13071 13072 13073 13074 13075 13076 13077 13078 13079 13080 13081 13082 13083
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		static struct tg3_dev_id {
			u32	vendor;
			u32	device;
		} bridge_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
			{ },
		};
		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor,
						pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->subordinate >=
			     tp->pdev->bus->number)) {
				tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
				pci_dev_put(bridge);
				break;
			}
		}
	}

13084 13085 13086 13087 13088 13089
	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
	 * DMA addresses > 40-bit. This bridge may have other additional
	 * 57xx devices behind it in some 4-port NIC designs for example.
	 * Any tg3 device found behind the bridge will also need the 40-bit
	 * DMA workaround.
	 */
M
Michael Chan 已提交
13090 13091 13092
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
13093
		tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
M
Michael Chan 已提交
13094
		tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
M
Michael Chan 已提交
13095
	}
13096 13097 13098 13099 13100 13101 13102 13103 13104 13105 13106 13107 13108 13109 13110 13111 13112 13113
	else {
		struct pci_dev *bridge = NULL;

		do {
			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
						PCI_DEVICE_ID_SERVERWORKS_EPB,
						bridge);
			if (bridge && bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->subordinate >=
			     tp->pdev->bus->number)) {
				tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
				pci_dev_put(bridge);
				break;
			}
		} while (bridge);
	}
M
Michael Chan 已提交
13114

L
Linus Torvalds 已提交
13115 13116 13117 13118 13119 13120
	/* Initialize misc host control in PCI block. */
	tp->misc_host_ctrl |= (misc_ctrl_reg &
			       MISC_HOST_CTRL_CHIPREV);
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

M
Matt Carlson 已提交
13121 13122 13123
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
13124 13125
		tp->pdev_peer = tg3_find_peer(tp);

M
Matt Carlson 已提交
13126 13127
	/* Intentionally exclude ASIC_REV_5906 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13128
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
M
Matt Carlson 已提交
13129
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
M
Matt Carlson 已提交
13130
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
M
Matt Carlson 已提交
13131
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
M
Matt Carlson 已提交
13132
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
M
Matt Carlson 已提交
13133 13134
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
M
Matt Carlson 已提交
13135 13136 13137 13138
		tp->tg3_flags3 |= TG3_FLG3_5755_PLUS;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
M
Michael Chan 已提交
13139
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
M
Matt Carlson 已提交
13140
	    (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
M
Michael Chan 已提交
13141
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
13142 13143
		tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;

13144 13145 13146 13147
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
	    (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
		tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;

M
Matt Carlson 已提交
13148 13149 13150 13151 13152 13153 13154 13155 13156 13157 13158 13159
	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
		tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
	else {
		tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
		tp->dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
		if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
			tp->dev->features |= NETIF_F_IPV6_CSUM;
	}

M
Matt Carlson 已提交
13160
	/* Determine TSO capabilities */
M
Matt Carlson 已提交
13161 13162
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
M
Matt Carlson 已提交
13163 13164 13165
		tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3;
	else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
M
Matt Carlson 已提交
13166 13167 13168 13169 13170 13171 13172 13173 13174 13175 13176 13177 13178 13179 13180 13181 13182 13183
		tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
	else if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
		tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
		    tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
			tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
		   tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
		tp->tg3_flags2 |= TG3_FLG2_TSO_BUG;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
			tp->fw_needed = FIRMWARE_TG3TSO5;
		else
			tp->fw_needed = FIRMWARE_TG3TSO;
	}

	tp->irq_max = 1;

M
Michael Chan 已提交
13184
	if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
13185 13186 13187 13188 13189 13190 13191 13192
		tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
		    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
		     tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
		     tp->pdev_peer == tp->pdev))
			tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;

M
Matt Carlson 已提交
13193
		if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
M
Michael Chan 已提交
13194
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13195
			tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
13196
		}
13197

M
Matt Carlson 已提交
13198 13199
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
M
Matt Carlson 已提交
13200 13201 13202
			tp->tg3_flags |= TG3_FLAG_SUPPORT_MSIX;
			tp->irq_max = TG3_IRQ_MAX_VECS;
		}
M
Matt Carlson 已提交
13203
	}
13204

13205 13206 13207 13208 13209 13210
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		tp->tg3_flags3 |= TG3_FLG3_SHORT_DMA_BUG;
	else if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS)) {
		tp->tg3_flags3 |= TG3_FLG3_4G_DMA_BNDRY_BUG;
		tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG;
13211
	}
M
Matt Carlson 已提交
13212

M
Matt Carlson 已提交
13213 13214 13215 13216
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG;

M
Matt Carlson 已提交
13217
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
M
Matt Carlson 已提交
13218
	     (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
M
Matt Carlson 已提交
13219
		 (tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG))
13220
		tp->tg3_flags |= TG3_FLAG_JUMBO_CAPABLE;
13221

M
Matt Carlson 已提交
13222 13223 13224
	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);

13225 13226 13227 13228
	tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
	if (tp->pcie_cap != 0) {
		u16 lnkctl;

L
Linus Torvalds 已提交
13229
		tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
M
Matt Carlson 已提交
13230 13231 13232

		pcie_set_readrq(tp->pdev, 4096);

13233 13234 13235 13236 13237
		pci_read_config_word(tp->pdev,
				     tp->pcie_cap + PCI_EXP_LNKCTL,
				     &lnkctl);
		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13238
				tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
13239
			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
M
Matt Carlson 已提交
13240
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13241 13242
			    tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
13243
				tp->tg3_flags3 |= TG3_FLG3_CLKREQ_BUG;
13244 13245
		} else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
			tp->tg3_flags3 |= TG3_FLG3_L1PLLPD_EN;
13246
		}
M
Matt Carlson 已提交
13247
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
M
Matt Carlson 已提交
13248
		tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
M
Matt Carlson 已提交
13249 13250 13251 13252
	} else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
		   (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
		if (!tp->pcix_cap) {
13253
			pr_err("Cannot find PCI-X capability, aborting\n");
M
Matt Carlson 已提交
13254 13255 13256 13257 13258 13259
			return -EIO;
		}

		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
			tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
	}
L
Linus Torvalds 已提交
13260

13261 13262 13263 13264 13265 13266 13267 13268 13269 13270
	/* If we have an AMD 762 or VIA K8T800 chipset, write
	 * reordering to the mailbox registers done by the host
	 * controller can cause major troubles.  We read back from
	 * every mailbox register write to force the writes to be
	 * posted to the chip in order.
	 */
	if (pci_dev_present(write_reorder_chipsets) &&
	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
		tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;

13271 13272 13273 13274
	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
			     &tp->pci_cacheline_sz);
	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
			     &tp->pci_lat_timer);
L
Linus Torvalds 已提交
13275 13276 13277
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
	    tp->pci_lat_timer < 64) {
		tp->pci_lat_timer = 64;
13278 13279
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
L
Linus Torvalds 已提交
13280 13281
	}

M
Matt Carlson 已提交
13282 13283 13284 13285 13286
	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
		/* 5700 BX chips need to have their TX producer index
		 * mailboxes written twice to workaround a bug.
		 */
		tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
L
Linus Torvalds 已提交
13287

M
Matt Carlson 已提交
13288
		/* If we are in PCI-X mode, enable register write workaround.
L
Linus Torvalds 已提交
13289 13290 13291 13292
		 *
		 * The workaround is to use indirect register accesses
		 * for all chip writes not to mailbox registers.
		 */
M
Matt Carlson 已提交
13293
		if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
L
Linus Torvalds 已提交
13294 13295 13296 13297 13298 13299 13300 13301
			u32 pm_reg;

			tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;

			/* The chip can have it's power management PCI config
			 * space registers clobbered due to this bug.
			 * So explicitly force the chip into D0 here.
			 */
M
Matt Carlson 已提交
13302 13303
			pci_read_config_dword(tp->pdev,
					      tp->pm_cap + PCI_PM_CTRL,
L
Linus Torvalds 已提交
13304 13305 13306
					      &pm_reg);
			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
M
Matt Carlson 已提交
13307 13308
			pci_write_config_dword(tp->pdev,
					       tp->pm_cap + PCI_PM_CTRL,
L
Linus Torvalds 已提交
13309 13310 13311 13312 13313 13314 13315 13316 13317 13318 13319 13320 13321 13322 13323 13324 13325 13326 13327 13328 13329
					       pm_reg);

			/* Also, force SERR#/PERR# in PCI command. */
			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
		}
	}

	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
		tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
		tp->tg3_flags |= TG3_FLAG_PCI_32BIT;

	/* Chip-specific fixup from Broadcom driver */
	if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
	}

13330
	/* Default fast path register access methods */
13331
	tp->read32 = tg3_read32;
13332
	tp->write32 = tg3_write32;
M
Michael Chan 已提交
13333
	tp->read32_mbox = tg3_read32;
13334
	tp->write32_mbox = tg3_write32;
13335 13336 13337 13338 13339 13340
	tp->write32_tx_mbox = tg3_write32;
	tp->write32_rx_mbox = tg3_write32;

	/* Various workaround register access methods */
	if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
		tp->write32 = tg3_write_indirect_reg32;
13341 13342 13343 13344 13345 13346 13347 13348 13349 13350
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
		 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
		  tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
		/*
		 * Back to back register writes can cause problems on these
		 * chips, the workaround is to read back all reg writes
		 * except those to mailbox regs.
		 *
		 * See tg3_write_indirect_reg32().
		 */
13351
		tp->write32 = tg3_write_flush_reg32;
13352 13353
	}

13354 13355 13356 13357 13358 13359
	if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
	    (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
			tp->write32_rx_mbox = tg3_write_flush_reg32;
	}
13360

13361 13362 13363 13364 13365 13366 13367 13368 13369
	if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
		tp->read32 = tg3_read_indirect_reg32;
		tp->write32 = tg3_write_indirect_reg32;
		tp->read32_mbox = tg3_read_indirect_mbox;
		tp->write32_mbox = tg3_write_indirect_mbox;
		tp->write32_tx_mbox = tg3_write_indirect_mbox;
		tp->write32_rx_mbox = tg3_write_indirect_mbox;

		iounmap(tp->regs);
P
Peter Hagervall 已提交
13370
		tp->regs = NULL;
13371 13372 13373 13374 13375

		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
		pci_cmd &= ~PCI_COMMAND_MEMORY;
		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
	}
M
Michael Chan 已提交
13376 13377 13378 13379 13380 13381
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tp->read32_mbox = tg3_read32_mbox_5906;
		tp->write32_mbox = tg3_write32_mbox_5906;
		tp->write32_tx_mbox = tg3_write32_mbox_5906;
		tp->write32_rx_mbox = tg3_write32_mbox_5906;
	}
13382

13383 13384 13385
	if (tp->write32 == tg3_write_indirect_reg32 ||
	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13386
	      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
13387 13388
		tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;

13389
	/* Get eeprom hw config before calling tg3_set_power_state().
M
Michael Chan 已提交
13390
	 * In particular, the TG3_FLG2_IS_NIC flag must be
13391 13392 13393 13394 13395
	 * determined before calling tg3_set_power_state() so that
	 * we know whether or not to switch out of Vaux power.
	 * When the flag is set, it means that GPIO1 is used for eeprom
	 * write protect and also implies that it is a LOM where GPIOs
	 * are not used to switch power.
13396
	 */
13397 13398
	tg3_get_eeprom_hw_cfg(tp);

M
Matt Carlson 已提交
13399 13400 13401 13402 13403 13404 13405 13406 13407 13408
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
				 PCISTATE_ALLOW_APE_SHMEM_WR;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
				       pci_state_reg);
	}

M
Matt Carlson 已提交
13409
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
M
Matt Carlson 已提交
13410
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
M
Matt Carlson 已提交
13411
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
M
Matt Carlson 已提交
13412
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
M
Matt Carlson 已提交
13413 13414
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
M
Matt Carlson 已提交
13415 13416
		tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;

M
Michael Chan 已提交
13417 13418 13419 13420 13421 13422 13423 13424 13425
	/* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
	 * GPIO1 driven high will bring 5700's external PHY out of reset.
	 * It is also used as eeprom write protect on LOMs.
	 */
	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
	    (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
				       GRC_LCLCTRL_GPIO_OUTPUT1);
M
Michael Chan 已提交
13426 13427 13428 13429 13430
	/* Unused GPIO3 must be driven as output on 5752 because there
	 * are no pull-up resistors on unused GPIO pins.
	 */
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
M
Michael Chan 已提交
13431

M
Matt Carlson 已提交
13432
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13433 13434
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
M
Michael Chan 已提交
13435 13436
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;

13437 13438
	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
M
Matt Carlson 已提交
13439 13440 13441 13442 13443 13444 13445 13446
		/* Turn off the debug UART. */
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
		if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
			/* Keep VMain power. */
			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					      GRC_LCLCTRL_GPIO_OUTPUT0;
	}

L
Linus Torvalds 已提交
13447
	/* Force the chip into D0. */
M
Michael Chan 已提交
13448
	err = tg3_set_power_state(tp, PCI_D0);
L
Linus Torvalds 已提交
13449
	if (err) {
13450
		pr_err("(%s) transition to D0 failed\n", pci_name(tp->pdev));
L
Linus Torvalds 已提交
13451 13452 13453 13454 13455 13456
		return err;
	}

	/* Derive initial jumbo mode from MTU assigned in
	 * ether_setup() via the alloc_etherdev() call
	 */
13457
	if (tp->dev->mtu > ETH_DATA_LEN &&
M
Michael Chan 已提交
13458
	    !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
13459
		tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
L
Linus Torvalds 已提交
13460 13461 13462 13463 13464 13465 13466 13467 13468 13469 13470

	/* Determine WakeOnLan speed to use. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
		tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
	} else {
		tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
	}

13471 13472 13473
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET;

L
Linus Torvalds 已提交
13474 13475 13476 13477
	/* A few boards don't want Ethernet@WireSpeed phy feature */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
	    ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
M
Michael Chan 已提交
13478
	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
13479
	    (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) ||
M
Michael Chan 已提交
13480
	    (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
L
Linus Torvalds 已提交
13481 13482 13483 13484 13485 13486 13487 13488
		tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
		tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
		tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;

M
Matt Carlson 已提交
13489
	if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
13490
	    !(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) &&
M
Matt Carlson 已提交
13491
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
M
Matt Carlson 已提交
13492
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
M
Matt Carlson 已提交
13493 13494
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765) {
M
Michael Chan 已提交
13495
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
M
Matt Carlson 已提交
13496
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
M
Matt Carlson 已提交
13497 13498
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
13499 13500 13501
			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
				tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
13502 13503
			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
				tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
M
Matt Carlson 已提交
13504
		} else
M
Michael Chan 已提交
13505 13506
			tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
	}
L
Linus Torvalds 已提交
13507

13508 13509 13510 13511 13512 13513 13514
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		tp->phy_otp = tg3_read_otp_phycfg(tp);
		if (tp->phy_otp == 0)
			tp->phy_otp = TG3_OTP_DEFAULT;
	}

M
Matt Carlson 已提交
13515
	if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
13516 13517 13518 13519
		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
	else
		tp->mi_mode = MAC_MI_MODE_BASE;

L
Linus Torvalds 已提交
13520 13521 13522 13523 13524
	tp->coalesce_mode = 0;
	if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;

M
Matt Carlson 已提交
13525 13526
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
M
Matt Carlson 已提交
13527 13528
		tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;

M
Matt Carlson 已提交
13529 13530 13531
	err = tg3_mdio_init(tp);
	if (err)
		return err;
L
Linus Torvalds 已提交
13532

13533 13534 13535 13536 13537
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
	    (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0 ||
		 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
		return -ENOTSUPP;

L
Linus Torvalds 已提交
13538 13539 13540 13541 13542 13543 13544 13545 13546 13547 13548 13549 13550 13551 13552 13553 13554 13555 13556 13557 13558 13559 13560 13561 13562 13563 13564 13565 13566 13567 13568 13569 13570 13571 13572 13573 13574 13575 13576 13577 13578 13579 13580 13581 13582 13583 13584
	/* Initialize data/descriptor byte/word swapping. */
	val = tr32(GRC_MODE);
	val &= GRC_MODE_HOST_STACKUP;
	tw32(GRC_MODE, val | tp->grc_mode);

	tg3_switch_clocks(tp);

	/* Clear this out for sanity. */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);
	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
		u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);

		if (chiprevid == CHIPREV_ID_5701_A0 ||
		    chiprevid == CHIPREV_ID_5701_B0 ||
		    chiprevid == CHIPREV_ID_5701_B2 ||
		    chiprevid == CHIPREV_ID_5701_B5) {
			void __iomem *sram_base;

			/* Write some dummy words into the SRAM status block
			 * area, see if it reads back correctly.  If the return
			 * value is bad, force enable the PCIX workaround.
			 */
			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;

			writel(0x00000000, sram_base);
			writel(0x00000000, sram_base + 4);
			writel(0xffffffff, sram_base + 4);
			if (readl(sram_base) != 0x00000000)
				tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
		}
	}

	udelay(50);
	tg3_nvram_init(tp);

	grc_misc_cfg = tr32(GRC_MISC_CFG);
	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
		tp->tg3_flags2 |= TG3_FLG2_IS_5788;

13585 13586 13587 13588 13589 13590 13591 13592 13593 13594 13595 13596
	if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
		tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
	if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
				      HOSTCC_MODE_CLRTICK_TXBD);

		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
				       tp->misc_host_ctrl);
	}

13597 13598 13599 13600 13601 13602 13603
	/* Preserve the APE MAC_MODE bits */
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
		tp->mac_mode = tr32(MAC_MODE) |
			       MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	else
		tp->mac_mode = TG3_DEF_MAC_MODE;

L
Linus Torvalds 已提交
13604 13605 13606 13607 13608 13609 13610 13611 13612 13613
	/* these are limited to 10/100 only */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	     tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
	    (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
M
Michael Chan 已提交
13614 13615
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
M
Matt Carlson 已提交
13616
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
13617 13618
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
13619
	    (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET))
L
Linus Torvalds 已提交
13620 13621 13622 13623
		tp->tg3_flags |= TG3_FLAG_10_100_ONLY;

	err = tg3_phy_probe(tp);
	if (err) {
13624
		pr_err("(%s) phy probe failed, err %d\n",
L
Linus Torvalds 已提交
13625 13626
		       pci_name(tp->pdev), err);
		/* ... but do not return immediately ... */
M
Matt Carlson 已提交
13627
		tg3_mdio_fini(tp);
L
Linus Torvalds 已提交
13628 13629 13630
	}

	tg3_read_partno(tp);
M
Michael Chan 已提交
13631
	tg3_read_fw_ver(tp);
L
Linus Torvalds 已提交
13632 13633 13634 13635 13636 13637 13638 13639 13640 13641 13642 13643 13644 13645 13646 13647 13648 13649 13650 13651 13652 13653 13654 13655

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
			tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
		else
			tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
	}

	/* 5700 {AX,BX} chips have a broken status block link
	 * change bit implementation, so we must use the
	 * status register in those cases.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
		tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
	else
		tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;

	/* The led_ctrl is set during tg3_phy_probe, here we might
	 * have to force the link status polling mechanism based
	 * upon subsystem IDs.
	 */
	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
13656
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
L
Linus Torvalds 已提交
13657 13658 13659 13660 13661 13662 13663 13664 13665 13666 13667
	    !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
		tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
				  TG3_FLAG_USE_LINKCHG_REG);
	}

	/* For all SERDES we poll the MAC status register. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
		tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
	else
		tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;

M
Matt Carlson 已提交
13668
	tp->rx_offset = NET_IP_ALIGN;
L
Linus Torvalds 已提交
13669 13670 13671 13672
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
		tp->rx_offset = 0;

M
Michael Chan 已提交
13673 13674 13675 13676 13677 13678 13679 13680 13681 13682
	tp->rx_std_max_post = TG3_RX_RING_SIZE;

	/* Increment the rx prod index on the rx std ring by at most
	 * 8 for these chips to workaround hw errata.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
		tp->rx_std_max_post = 8;

M
Matt Carlson 已提交
13683 13684 13685 13686
	if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
				     PCIE_PWR_MGMT_L1_THRESH_MSK;

L
Linus Torvalds 已提交
13687 13688 13689
	return err;
}

13690
#ifdef CONFIG_SPARC
L
Linus Torvalds 已提交
13691 13692 13693 13694
static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	struct pci_dev *pdev = tp->pdev;
13695
	struct device_node *dp = pci_device_to_OF_node(pdev);
13696
	const unsigned char *addr;
13697 13698 13699 13700 13701 13702 13703
	int len;

	addr = of_get_property(dp, "local-mac-address", &len);
	if (addr && len == 6) {
		memcpy(dev->dev_addr, addr, 6);
		memcpy(dev->perm_addr, dev->dev_addr, 6);
		return 0;
L
Linus Torvalds 已提交
13704 13705 13706 13707 13708 13709 13710 13711 13712
	}
	return -ENODEV;
}

static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;

	memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
13713
	memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
L
Linus Torvalds 已提交
13714 13715 13716 13717 13718 13719 13720 13721
	return 0;
}
#endif

static int __devinit tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
13722
	int addr_ok = 0;
L
Linus Torvalds 已提交
13723

13724
#ifdef CONFIG_SPARC
L
Linus Torvalds 已提交
13725 13726 13727 13728 13729
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	mac_offset = 0x7c;
13730
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
M
Michael Chan 已提交
13731
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
L
Linus Torvalds 已提交
13732 13733 13734 13735 13736 13737
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
13738 13739 13740 13741
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
		if (tr32(TG3_CPMU_STATUS) & TG3_CPMU_STATUS_PCIE_FUNC)
			mac_offset = 0xcc;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
M
Michael Chan 已提交
13742
		mac_offset = 0x10;
L
Linus Torvalds 已提交
13743 13744 13745 13746 13747 13748 13749 13750 13751 13752 13753 13754 13755

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

13756 13757 13758 13759 13760
		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
M
Matt Carlson 已提交
13761 13762
		if (!(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
13763
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
M
Matt Carlson 已提交
13764 13765
			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
13766 13767 13768 13769 13770 13771 13772 13773 13774 13775 13776 13777 13778
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
L
Linus Torvalds 已提交
13779 13780 13781
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
13782
#ifdef CONFIG_SPARC
L
Linus Torvalds 已提交
13783 13784 13785 13786 13787
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
13788
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
L
Linus Torvalds 已提交
13789 13790 13791
	return 0;
}

13792 13793 13794 13795 13796 13797 13798 13799 13800 13801 13802 13803 13804 13805 13806 13807 13808 13809 13810 13811 13812 13813 13814 13815 13816 13817 13818 13819 13820 13821 13822 13823 13824
#define BOUNDARY_SINGLE_CACHELINE	1
#define BOUNDARY_MULTI_CACHELINE	2

static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
		goto out;

#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

M
Matt Carlson 已提交
13825 13826
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13827 13828 13829 13830
		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

13831 13832 13833 13834 13835 13836 13837 13838 13839 13840 13841 13842 13843 13844 13845 13846 13847 13848 13849 13850 13851 13852 13853 13854 13855 13856 13857 13858 13859 13860 13861 13862 13863 13864 13865 13866 13867 13868 13869
	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
13870
		}
13871 13872 13873 13874 13875 13876 13877 13878 13879 13880 13881 13882 13883 13884 13885 13886
	} else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
13887
		}
13888 13889 13890 13891 13892 13893 13894 13895 13896 13897 13898 13899 13900 13901 13902 13903 13904 13905 13906 13907 13908 13909 13910 13911 13912 13913 13914 13915 13916 13917 13918 13919 13920 13921 13922 13923 13924 13925 13926 13927 13928 13929 13930
	} else {
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
13931
		}
13932 13933 13934 13935 13936 13937
	}

out:
	return val;
}

L
Linus Torvalds 已提交
13938 13939 13940 13941 13942 13943 13944 13945 13946 13947 13948 13949 13950 13951 13952 13953 13954 13955 13956 13957 13958 13959 13960 13961 13962 13963 13964 13965 13966 13967 13968 13969 13970 13971 13972 13973 13974 13975 13976 13977 13978 13979 13980 13981 13982 13983 13984 13985 13986 13987 13988 13989 13990 13991 13992 13993 13994 13995 13996 13997 13998 13999 14000 14001 14002 14003 14004 14005 14006 14007 14008 14009 14010 14011 14012 14013 14014 14015 14016 14017 14018
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	if (to_device) {
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	} else {
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
	}

	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}

14019
#define TEST_BUFFER_SIZE	0x2000
L
Linus Torvalds 已提交
14020 14021 14022 14023

static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
14024
	u32 *buf, saved_dma_rwctrl;
14025
	int ret = 0;
L
Linus Torvalds 已提交
14026 14027 14028 14029 14030 14031 14032 14033 14034 14035

	buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

14036
	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
L
Linus Torvalds 已提交
14037

M
Matt Carlson 已提交
14038 14039
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
14040 14041
		goto out;

L
Linus Torvalds 已提交
14042 14043 14044 14045
	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
M
Michael Chan 已提交
14046 14047
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
L
Linus Torvalds 已提交
14048 14049 14050 14051 14052 14053 14054
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
14055
			u32 read_water = 0x7;
L
Linus Torvalds 已提交
14056

14057 14058 14059 14060 14061 14062 14063 14064
			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
L
Linus Torvalds 已提交
14065 14066
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

14067 14068
			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
				read_water = 4;
14069
			/* Set bit 23 to enable PCIX hw bug fix */
14070 14071 14072 14073
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
M
Michael Chan 已提交
14074 14075 14076
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
M
Michael Chan 已提交
14077 14078 14079
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
L
Linus Torvalds 已提交
14080 14081 14082 14083 14084 14085 14086 14087 14088 14089 14090 14091 14092 14093 14094 14095 14096 14097 14098 14099 14100 14101 14102 14103 14104 14105 14106 14107 14108 14109 14110 14111 14112 14113 14114 14115 14116 14117
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants.  */
	tg3_switch_clocks(tp);
#endif

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

14118 14119 14120 14121 14122 14123 14124
	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

L
Linus Torvalds 已提交
14125 14126 14127 14128 14129 14130 14131 14132 14133
	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
14134 14135
			pr_err("tg3_test_dma() Write the buffer failed %d\n",
			       ret);
L
Linus Torvalds 已提交
14136 14137 14138 14139 14140 14141 14142 14143 14144
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
14145 14146
				pr_err("  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n",
				       val, i);
L
Linus Torvalds 已提交
14147 14148 14149 14150 14151 14152 14153 14154
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
14155 14156
			pr_err("tg3_test_dma() Read the buffer failed %d\n",
			       ret);
L
Linus Torvalds 已提交
14157 14158 14159 14160 14161 14162 14163 14164 14165

			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

14166 14167 14168
			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
L
Linus Torvalds 已提交
14169 14170 14171 14172
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
14173 14174
				pr_err("tg3_test_dma() buffer corrupted on read back! (%d != %d)\n",
				       p[i], i);
L
Linus Torvalds 已提交
14175 14176 14177 14178 14179 14180 14181 14182 14183 14184 14185
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
14186 14187
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
14188 14189 14190 14191 14192 14193
		static struct pci_device_id dma_wait_state_chipsets[] = {
			{ PCI_DEVICE(PCI_VENDOR_ID_APPLE,
				     PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
			{ },
		};

14194
		/* DMA test passed without adjusting DMA boundary,
14195 14196
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
14197
		 */
14198 14199 14200 14201 14202 14203 14204 14205
		if (pci_dev_present(dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		}
		else
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;

14206 14207
		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}
L
Linus Torvalds 已提交
14208 14209 14210 14211 14212 14213 14214 14215 14216 14217 14218 14219 14220 14221 14222 14223 14224 14225 14226 14227 14228 14229 14230 14231 14232 14233 14234

out:
	pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}

static void __devinit tg3_init_link_config(struct tg3 *tp)
{
	tp->link_config.advertising =
		(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
		 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
		 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
		 ADVERTISED_Autoneg | ADVERTISED_MII);
	tp->link_config.speed = SPEED_INVALID;
	tp->link_config.duplex = DUPLEX_INVALID;
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.active_speed = SPEED_INVALID;
	tp->link_config.active_duplex = DUPLEX_INVALID;
	tp->link_config.phy_is_low_power = 0;
	tp->link_config.orig_speed = SPEED_INVALID;
	tp->link_config.orig_duplex = DUPLEX_INVALID;
	tp->link_config.orig_autoneg = AUTONEG_INVALID;
}

static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
{
14235 14236 14237 14238 14239 14240 14241 14242 14243 14244 14245 14246 14247 14248 14249 14250
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_57765;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_57765;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
	} else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
14251 14252 14253 14254 14255 14256
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_5705;
M
Michael Chan 已提交
14257 14258 14259 14260 14261 14262
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tp->bufmgr_config.mbuf_mac_rx_low_water =
				DEFAULT_MB_MACRX_LOW_WATER_5906;
			tp->bufmgr_config.mbuf_high_water =
				DEFAULT_MB_HIGH_WATER_5906;
		}
14263 14264 14265 14266 14267 14268 14269 14270 14271 14272 14273 14274 14275 14276 14277 14278 14279 14280 14281 14282 14283 14284

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
	} else {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO;
	}
L
Linus Torvalds 已提交
14285 14286 14287 14288 14289 14290 14291

	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}

static char * __devinit tg3_phy_string(struct tg3 *tp)
{
14292 14293 14294 14295 14296 14297 14298 14299 14300 14301 14302 14303 14304 14305 14306 14307 14308 14309 14310 14311 14312 14313
	switch (tp->phy_id & TG3_PHY_ID_MASK) {
	case TG3_PHY_ID_BCM5400:	return "5400";
	case TG3_PHY_ID_BCM5401:	return "5401";
	case TG3_PHY_ID_BCM5411:	return "5411";
	case TG3_PHY_ID_BCM5701:	return "5701";
	case TG3_PHY_ID_BCM5703:	return "5703";
	case TG3_PHY_ID_BCM5704:	return "5704";
	case TG3_PHY_ID_BCM5705:	return "5705";
	case TG3_PHY_ID_BCM5750:	return "5750";
	case TG3_PHY_ID_BCM5752:	return "5752";
	case TG3_PHY_ID_BCM5714:	return "5714";
	case TG3_PHY_ID_BCM5780:	return "5780";
	case TG3_PHY_ID_BCM5755:	return "5755";
	case TG3_PHY_ID_BCM5787:	return "5787";
	case TG3_PHY_ID_BCM5784:	return "5784";
	case TG3_PHY_ID_BCM5756:	return "5722/5756";
	case TG3_PHY_ID_BCM5906:	return "5906";
	case TG3_PHY_ID_BCM5761:	return "5761";
	case TG3_PHY_ID_BCM5718C:	return "5718C";
	case TG3_PHY_ID_BCM5718S:	return "5718S";
	case TG3_PHY_ID_BCM57765:	return "57765";
	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
L
Linus Torvalds 已提交
14314 14315
	case 0:			return "serdes";
	default:		return "unknown";
14316
	}
L
Linus Torvalds 已提交
14317 14318
}

M
Michael Chan 已提交
14319 14320 14321 14322 14323 14324 14325 14326 14327 14328 14329 14330 14331 14332 14333 14334 14335 14336 14337 14338 14339 14340 14341 14342 14343 14344 14345 14346 14347 14348 14349 14350 14351 14352 14353 14354
static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
{
	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		strcpy(str, "PCI Express");
		return str;
	} else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;

		strcpy(str, "PCIX:");

		if ((clock_ctrl == 7) ||
		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
			strcat(str, "133MHz");
		else if (clock_ctrl == 0)
			strcat(str, "33MHz");
		else if (clock_ctrl == 2)
			strcat(str, "50MHz");
		else if (clock_ctrl == 4)
			strcat(str, "66MHz");
		else if (clock_ctrl == 6)
			strcat(str, "100MHz");
	} else {
		strcpy(str, "PCI:");
		if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
			strcat(str, "66MHz");
		else
			strcat(str, "33MHz");
	}
	if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
		strcat(str, ":32-bit");
	else
		strcat(str, ":64-bit");
	return str;
}

M
Michael Chan 已提交
14355
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
L
Linus Torvalds 已提交
14356 14357 14358 14359 14360 14361 14362 14363 14364 14365
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
M
Michael Chan 已提交
14366 14367 14368 14369 14370 14371 14372
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}
L
Linus Torvalds 已提交
14373 14374 14375 14376 14377 14378 14379 14380 14381 14382

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}

14383 14384 14385 14386 14387 14388 14389 14390 14391 14392 14393 14394 14395 14396 14397 14398 14399 14400 14401 14402 14403 14404 14405
static void __devinit tg3_init_coal(struct tg3 *tp)
{
	struct ethtool_coalesce *ec = &tp->coal;

	memset(ec, 0, sizeof(*ec));
	ec->cmd = ETHTOOL_GCOALESCE;
	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
				 HOSTCC_MODE_CLRTICK_TXBD)) {
		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
	}
M
Michael Chan 已提交
14406 14407 14408 14409 14410 14411

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		ec->rx_coalesce_usecs_irq = 0;
		ec->tx_coalesce_usecs_irq = 0;
		ec->stats_block_coalesce_usecs = 0;
	}
14412 14413
}

S
Stephen Hemminger 已提交
14414 14415 14416
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
14417 14418 14419 14420 14421 14422 14423 14424 14425 14426 14427 14428 14429 14430 14431 14432 14433 14434 14435 14436
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats		= tg3_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
#if TG3_VLAN_TAG_USED
	.ndo_vlan_rx_register	= tg3_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};

static const struct net_device_ops tg3_netdev_ops_dma_bug = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit_dma_bug,
S
Stephen Hemminger 已提交
14437 14438 14439 14440 14441 14442 14443 14444 14445 14446 14447 14448 14449 14450 14451
	.ndo_get_stats		= tg3_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
#if TG3_VLAN_TAG_USED
	.ndo_vlan_rx_register	= tg3_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};

L
Linus Torvalds 已提交
14452 14453 14454 14455 14456
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct tg3 *tp;
M
Matt Carlson 已提交
14457 14458
	int i, err, pm_cap;
	u32 sndmbx, rcvmbx, intmbx;
M
Michael Chan 已提交
14459
	char str[40];
M
Michael Chan 已提交
14460
	u64 dma_mask, persist_dma_mask;
L
Linus Torvalds 已提交
14461

14462
	printk_once(KERN_INFO "%s\n", version);
L
Linus Torvalds 已提交
14463 14464 14465

	err = pci_enable_device(pdev);
	if (err) {
14466
		pr_err("Cannot enable PCI device, aborting\n");
L
Linus Torvalds 已提交
14467 14468 14469 14470 14471
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
14472
		pr_err("Cannot obtain PCI resources, aborting\n");
L
Linus Torvalds 已提交
14473 14474 14475 14476 14477 14478 14479 14480
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
14481
		pr_err("Cannot find PowerManagement capability, aborting\n");
L
Linus Torvalds 已提交
14482 14483 14484 14485
		err = -EIO;
		goto err_out_free_res;
	}

M
Matt Carlson 已提交
14486
	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
L
Linus Torvalds 已提交
14487
	if (!dev) {
14488
		pr_err("Etherdev alloc failed, aborting\n");
L
Linus Torvalds 已提交
14489 14490 14491 14492 14493 14494 14495 14496 14497 14498 14499 14500 14501 14502 14503 14504
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

#if TG3_VLAN_TAG_USED
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;
14505

L
Linus Torvalds 已提交
14506 14507 14508 14509 14510 14511 14512 14513 14514 14515 14516 14517 14518 14519 14520 14521 14522 14523 14524 14525 14526 14527 14528 14529 14530 14531 14532 14533
	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
D
David Howells 已提交
14534
	INIT_WORK(&tp->reset_task, tg3_reset_task);
L
Linus Torvalds 已提交
14535

M
Matt Carlson 已提交
14536
	tp->regs = pci_ioremap_bar(pdev, BAR_0);
A
Andy Gospodarek 已提交
14537
	if (!tp->regs) {
14538
		netdev_err(dev, "Cannot map device registers, aborting\n");
L
Linus Torvalds 已提交
14539 14540 14541 14542 14543 14544 14545 14546 14547 14548 14549 14550 14551 14552 14553
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	tg3_init_link_config(tp);

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->irq = pdev->irq;

	err = tg3_get_invariants(tp);
	if (err) {
14554
		netdev_err(dev, "Problem fetching invariants of chip, aborting\n");
L
Linus Torvalds 已提交
14555 14556 14557
		goto err_out_iounmap;
	}

14558 14559
	if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
14560 14561 14562 14563 14564
		dev->netdev_ops = &tg3_netdev_ops;
	else
		dev->netdev_ops = &tg3_netdev_ops_dma_bug;


14565 14566
	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
M
Michael Chan 已提交
14567 14568 14569 14570
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
14571
	if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
14572
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
14573
	else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
14574
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
M
Michael Chan 已提交
14575
#ifdef CONFIG_HIGHMEM
14576
		dma_mask = DMA_BIT_MASK(64);
M
Michael Chan 已提交
14577
#endif
14578
	} else
14579
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
M
Michael Chan 已提交
14580 14581

	/* Configure DMA attributes. */
14582
	if (dma_mask > DMA_BIT_MASK(32)) {
M
Michael Chan 已提交
14583 14584 14585 14586 14587 14588
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			dev->features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
14589
				netdev_err(dev, "Unable to obtain 64 bit DMA for consistent allocations\n");
M
Michael Chan 已提交
14590 14591 14592 14593
				goto err_out_iounmap;
			}
		}
	}
14594 14595
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
M
Michael Chan 已提交
14596
		if (err) {
14597
			netdev_err(dev, "No usable DMA configuration, aborting\n");
M
Michael Chan 已提交
14598 14599 14600 14601
			goto err_out_iounmap;
		}
	}

14602
	tg3_init_bufmgr_config(tp);
L
Linus Torvalds 已提交
14603

M
Matt Carlson 已提交
14604 14605 14606
	/* Selectively allow TSO based on operating conditions */
	if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
	    (tp->fw_needed && !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)))
L
Linus Torvalds 已提交
14607
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
M
Matt Carlson 已提交
14608 14609 14610
	else {
		tp->tg3_flags2 &= ~(TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG);
		tp->fw_needed = NULL;
L
Linus Torvalds 已提交
14611
	}
M
Matt Carlson 已提交
14612 14613 14614

	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
		tp->fw_needed = FIRMWARE_TG3;
L
Linus Torvalds 已提交
14615

M
Michael Chan 已提交
14616 14617 14618 14619
	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
M
Matt Carlson 已提交
14620 14621 14622 14623 14624 14625 14626
	if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) &&
	    (dev->features & NETIF_F_IP_CSUM))
		dev->features |= NETIF_F_TSO;

	if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) ||
	    (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3)) {
		if (dev->features & NETIF_F_IPV6_CSUM)
M
Michael Chan 已提交
14627
			dev->features |= NETIF_F_TSO6;
M
Matt Carlson 已提交
14628 14629
		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
M
Matt Carlson 已提交
14630 14631
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
M
Matt Carlson 已提交
14632
			GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
M
Matt Carlson 已提交
14633
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
M
Matt Carlson 已提交
14634
			dev->features |= NETIF_F_TSO_ECN;
M
Michael Chan 已提交
14635
	}
L
Linus Torvalds 已提交
14636 14637 14638 14639 14640 14641 14642 14643 14644 14645

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
14646
		netdev_err(dev, "Could not obtain valid ethernet address, aborting\n");
M
Matt Carlson 已提交
14647
		goto err_out_iounmap;
L
Linus Torvalds 已提交
14648 14649
	}

M
Matt Carlson 已提交
14650
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
M
Matt Carlson 已提交
14651
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
A
Al Viro 已提交
14652
		if (!tp->aperegs) {
14653
			netdev_err(dev, "Cannot map APE registers, aborting\n");
M
Matt Carlson 已提交
14654
			err = -ENOMEM;
M
Matt Carlson 已提交
14655
			goto err_out_iounmap;
M
Matt Carlson 已提交
14656 14657 14658
		}

		tg3_ape_lock_init(tp);
14659 14660 14661

		if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
			tg3_read_dash_ver(tp);
M
Matt Carlson 已提交
14662 14663
	}

L
Linus Torvalds 已提交
14664 14665 14666 14667 14668 14669 14670 14671
	/*
	 * Reset chip in case UNDI or EFI driver did not shutdown
	 * DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
M
Michael Chan 已提交
14672
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
L
Linus Torvalds 已提交
14673 14674 14675 14676
	}

	err = tg3_test_dma(tp);
	if (err) {
14677
		netdev_err(dev, "DMA engine test failed, aborting\n");
M
Matt Carlson 已提交
14678
		goto err_out_apeunmap;
L
Linus Torvalds 已提交
14679 14680 14681 14682
	}

	/* flow control autonegotiation is default behavior */
	tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
14683
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
L
Linus Torvalds 已提交
14684

14685 14686 14687 14688 14689 14690 14691 14692 14693 14694 14695 14696 14697 14698 14699 14700 14701 14702 14703 14704 14705 14706 14707 14708 14709 14710 14711 14712 14713 14714 14715 14716 14717 14718 14719 14720 14721 14722 14723 14724 14725 14726 14727 14728 14729 14730 14731
	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < TG3_IRQ_MAX_VECS; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		if (i < 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i) {
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
			netif_napi_add(dev, &tnapi->napi, tg3_poll_msix, 64);
		} else {
			tnapi->coal_now = HOSTCC_MODE_NOW;
			netif_napi_add(dev, &tnapi->napi, tg3_poll, 64);
		}

		if (!(tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we setup
		 * above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}

14732 14733
	tg3_init_coal(tp);

14734 14735
	pci_set_drvdata(pdev, dev);

L
Linus Torvalds 已提交
14736 14737
	err = register_netdev(dev);
	if (err) {
14738
		netdev_err(dev, "Cannot register net device, aborting\n");
M
Matt Carlson 已提交
14739
		goto err_out_apeunmap;
L
Linus Torvalds 已提交
14740 14741
	}

14742 14743 14744 14745 14746
	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tp->pci_chip_rev_id,
		    tg3_bus_string(tp, str),
		    dev->dev_addr);
L
Linus Torvalds 已提交
14747

14748 14749 14750
	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
		struct phy_device *phydev;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
14751 14752
		netdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
			    phydev->drv->name, dev_name(&phydev->dev));
14753
	} else
14754 14755 14756 14757 14758 14759 14760 14761 14762 14763 14764 14765 14766 14767 14768 14769 14770
		netdev_info(dev, "attached PHY is %s (%s Ethernet) (WireSpeed[%d])\n",
			    tg3_phy_string(tp),
			    ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
			     ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
			      "10/100/1000Base-T")),
			    (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0);

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
		    (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
		    (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
		    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
		    (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
L
Linus Torvalds 已提交
14771 14772 14773

	return 0;

M
Matt Carlson 已提交
14774 14775 14776 14777 14778 14779
err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

L
Linus Torvalds 已提交
14780
err_out_iounmap:
14781 14782
	if (tp->regs) {
		iounmap(tp->regs);
P
Peter Hagervall 已提交
14783
		tp->regs = NULL;
14784
	}
L
Linus Torvalds 已提交
14785 14786 14787 14788 14789 14790 14791 14792 14793 14794 14795 14796 14797 14798 14799 14800 14801 14802 14803 14804

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void __devexit tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

14805 14806 14807
		if (tp->fw)
			release_firmware(tp->fw);

M
Michael Chan 已提交
14808
		flush_scheduled_work();
M
Matt Carlson 已提交
14809

M
Matt Carlson 已提交
14810 14811
		if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
			tg3_phy_fini(tp);
M
Matt Carlson 已提交
14812
			tg3_mdio_fini(tp);
M
Matt Carlson 已提交
14813
		}
M
Matt Carlson 已提交
14814

L
Linus Torvalds 已提交
14815
		unregister_netdev(dev);
M
Matt Carlson 已提交
14816 14817 14818 14819
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
14820 14821
		if (tp->regs) {
			iounmap(tp->regs);
P
Peter Hagervall 已提交
14822
			tp->regs = NULL;
14823
		}
L
Linus Torvalds 已提交
14824 14825 14826 14827 14828 14829 14830 14831 14832 14833 14834
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}

static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
14835
	pci_power_t target_state;
L
Linus Torvalds 已提交
14836 14837
	int err;

14838 14839 14840 14841 14842 14843
	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);

L
Linus Torvalds 已提交
14844 14845 14846
	if (!netif_running(dev))
		return 0;

M
Michael Chan 已提交
14847
	flush_scheduled_work();
M
Matt Carlson 已提交
14848
	tg3_phy_stop(tp);
L
Linus Torvalds 已提交
14849 14850 14851 14852
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

14853
	tg3_full_lock(tp, 1);
L
Linus Torvalds 已提交
14854
	tg3_disable_ints(tp);
14855
	tg3_full_unlock(tp);
L
Linus Torvalds 已提交
14856 14857 14858

	netif_device_detach(dev);

14859
	tg3_full_lock(tp, 0);
M
Michael Chan 已提交
14860
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
M
Michael Chan 已提交
14861
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
14862
	tg3_full_unlock(tp);
L
Linus Torvalds 已提交
14863

14864 14865 14866
	target_state = pdev->pm_cap ? pci_target_state(pdev) : PCI_D3hot;

	err = tg3_set_power_state(tp, target_state);
L
Linus Torvalds 已提交
14867
	if (err) {
M
Matt Carlson 已提交
14868 14869
		int err2;

14870
		tg3_full_lock(tp, 0);
L
Linus Torvalds 已提交
14871

M
Michael Chan 已提交
14872
		tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
M
Matt Carlson 已提交
14873 14874
		err2 = tg3_restart_hw(tp, 1);
		if (err2)
M
Michael Chan 已提交
14875
			goto out;
L
Linus Torvalds 已提交
14876 14877 14878 14879 14880 14881 14882

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

M
Michael Chan 已提交
14883
out:
14884
		tg3_full_unlock(tp);
M
Matt Carlson 已提交
14885 14886 14887

		if (!err2)
			tg3_phy_start(tp);
L
Linus Torvalds 已提交
14888 14889 14890 14891 14892 14893 14894 14895 14896 14897 14898
	}

	return err;
}

static int tg3_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

14899 14900
	pci_restore_state(tp->pdev);

L
Linus Torvalds 已提交
14901 14902 14903
	if (!netif_running(dev))
		return 0;

M
Michael Chan 已提交
14904
	err = tg3_set_power_state(tp, PCI_D0);
L
Linus Torvalds 已提交
14905 14906 14907 14908 14909
	if (err)
		return err;

	netif_device_attach(dev);

14910
	tg3_full_lock(tp, 0);
L
Linus Torvalds 已提交
14911

M
Michael Chan 已提交
14912
	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
M
Michael Chan 已提交
14913 14914 14915
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;
L
Linus Torvalds 已提交
14916 14917 14918 14919 14920 14921

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

M
Michael Chan 已提交
14922
out:
14923
	tg3_full_unlock(tp);
L
Linus Torvalds 已提交
14924

M
Matt Carlson 已提交
14925 14926 14927
	if (!err)
		tg3_phy_start(tp);

M
Michael Chan 已提交
14928
	return err;
L
Linus Torvalds 已提交
14929 14930 14931 14932 14933 14934 14935 14936 14937 14938 14939 14940 14941
}

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.suspend	= tg3_suspend,
	.resume		= tg3_resume
};

static int __init tg3_init(void)
{
14942
	return pci_register_driver(&tg3_driver);
L
Linus Torvalds 已提交
14943 14944 14945 14946 14947 14948 14949 14950 14951
}

static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);