/*
 * New driver for Marvell Yukon 2 chipset.
 * Based on earlier sk98lin, and skge driver.
 *
 * This driver intentionally does not support all the features
 * of the original driver such as link fail-over and link management because
 * those should be done at higher levels.
 *
 * Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/crc32.h>
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/pci.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/in.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>
#include <linux/mii.h>

#include <asm/irq.h>

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define SKY2_VLAN_TAG_USED 1
#endif

#include "sky2.h"

#define DRV_NAME		"sky2"
#define DRV_VERSION		"1.11.1"
#define PFX			DRV_NAME " "

/*
 * The Yukon II chipset takes 64 bit command blocks (called list elements)
 * that are organized into three (receive, transmit, status) different rings
 * similar to Tigon3.
 */

#define RX_LE_SIZE	    	1024
#define RX_LE_BYTES		(RX_LE_SIZE*sizeof(struct sky2_rx_le))
#define RX_MAX_PENDING		(RX_LE_SIZE/6 - 2)
#define RX_DEF_PENDING		RX_MAX_PENDING
#define RX_SKB_ALIGN		8
#define RX_BUF_WRITE		16

#define TX_RING_SIZE		512
#define TX_DEF_PENDING		(TX_RING_SIZE - 1)
#define TX_MIN_PENDING		64
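/* Worst-case number of transmit list elements one packet can consume:
 * a few extra elements (64-bit address, TSO, checksum, VLAN) plus the
 * address words needed for every possible fragment.
 */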
#define MAX_SKB_TX_LE		(4 + (sizeof(dma_addr_t)/sizeof(u32))*MAX_SKB_FRAGS)

#define STATUS_RING_SIZE	2048	/* 2 ports * (TX + 2*RX) */
#define STATUS_LE_BYTES		(STATUS_RING_SIZE*sizeof(struct sky2_status_le))
#define TX_WATCHDOG		(5 * HZ)
#define NAPI_WEIGHT		64
#define PHY_RETRIES		1000

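/* Advance a ring index with wrap-around; assumes the ring size s is a power of 2 */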
#define RING_NEXT(x,s)	(((x)+1) & ((s)-1))

static const u32 default_msg =
    NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
    | NETIF_MSG_TIMER | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR
    | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;

static int debug = -1;		/* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static int copybreak __read_mostly = 128;
module_param(copybreak, int, 0);
MODULE_PARM_DESC(copybreak, "Receive copy threshold");

static int disable_msi = 0;
module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");

static int idle_timeout = 0;
module_param(idle_timeout, int, 0);
MODULE_PARM_DESC(idle_timeout, "Watchdog timer for lost interrupts (ms)");
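/* Example module load (parameter values are illustrative only):
 *   modprobe sky2 copybreak=256 disable_msi=1 idle_timeout=100
 */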

static const struct pci_device_id sky2_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) }, /* SK-9Sxx */
	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) }, /* SK-9Exx */
	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) },	/* DGE-560T */
	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4001) }, 	/* DGE-550SX */
	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4B02) },	/* DGE-560SX */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4340) }, /* 88E8021 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4341) }, /* 88E8022 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4342) }, /* 88E8061 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4343) }, /* 88E8062 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4344) }, /* 88E8021 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4345) }, /* 88E8022 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4346) }, /* 88E8061 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4347) }, /* 88E8062 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4350) }, /* 88E8035 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4351) }, /* 88E8036 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4352) }, /* 88E8038 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4353) }, /* 88E8039 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4356) }, /* 88EC033 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4360) }, /* 88E8052 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4361) }, /* 88E8050 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4362) }, /* 88E8053 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4363) }, /* 88E8055 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4364) }, /* 88E8056 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4366) }, /* 88EC036 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4367) }, /* 88EC032 */
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4368) }, /* 88EC034 */
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, sky2_id_table);

/* Avoid conditionals by using array */
static const unsigned txqaddr[] = { Q_XA1, Q_XA2 };
static const unsigned rxqaddr[] = { Q_R1, Q_R2 };
static const u32 portirq_msk[] = { Y2_IS_PORT_1, Y2_IS_PORT_2 };

/* This driver supports yukon2 chipset only */
static const char *yukon2_name[] = {
	"XL",		/* 0xb3 */
	"EC Ultra", 	/* 0xb4 */
	"UNKNOWN",	/* 0xb5 */
	"EC",		/* 0xb6 */
	"FE",		/* 0xb7 */
};

/* Access to external PHY */
static int gm_phy_write(struct sky2_hw *hw, unsigned port, u16 reg, u16 val)
{
	int i;

	gma_write16(hw, port, GM_SMI_DATA, val);
	gma_write16(hw, port, GM_SMI_CTRL,
		    GM_SMI_CT_PHY_AD(PHY_ADDR_MARV) | GM_SMI_CT_REG_AD(reg));

	for (i = 0; i < PHY_RETRIES; i++) {
		if (!(gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_BUSY))
			return 0;
		udelay(1);
	}

	printk(KERN_WARNING PFX "%s: phy write timeout\n", hw->dev[port]->name);
	return -ETIMEDOUT;
}

static int __gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg, u16 *val)
{
	int i;

	gma_write16(hw, port, GM_SMI_CTRL, GM_SMI_CT_PHY_AD(PHY_ADDR_MARV)
		    | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);

	for (i = 0; i < PHY_RETRIES; i++) {
		if (gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_RD_VAL) {
			*val = gma_read16(hw, port, GM_SMI_DATA);
			return 0;
		}

		udelay(1);
	}

	return -ETIMEDOUT;
}

static u16 gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg)
{
	u16 v;

	if (__gm_phy_read(hw, port, reg, &v) != 0)
		printk(KERN_WARNING PFX "%s: phy read timeout\n", hw->dev[port]->name);
	return v;
}


static void sky2_power_on(struct sky2_hw *hw)
{
	/* switch power to VCC (WA for VAUX problem) */
	sky2_write8(hw, B0_POWER_CTRL,
		    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);

	/* disable Core Clock Division, */
	sky2_write32(hw, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);

	if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
		/* enable bits are inverted */
		sky2_write8(hw, B2_Y2_CLK_GATE,
			    Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
			    Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
			    Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS);
	else
		sky2_write8(hw, B2_Y2_CLK_GATE, 0);

	if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
		u32 reg1;

		sky2_pci_write32(hw, PCI_DEV_REG3, 0);
		reg1 = sky2_pci_read32(hw, PCI_DEV_REG4);
		reg1 &= P_ASPM_CONTROL_MSK;
		sky2_pci_write32(hw, PCI_DEV_REG4, reg1);
		sky2_pci_write32(hw, PCI_DEV_REG5, 0);
	}
}

static void sky2_power_aux(struct sky2_hw *hw)
{
	if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
		sky2_write8(hw, B2_Y2_CLK_GATE, 0);
	else
		/* enable bits are inverted */
		sky2_write8(hw, B2_Y2_CLK_GATE,
			    Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
			    Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
			    Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS);

	/* switch power to VAUX */
	if (sky2_read16(hw, B0_CTST) & Y2_VAUX_AVAIL)
		sky2_write8(hw, B0_POWER_CTRL,
			    (PC_VAUX_ENA | PC_VCC_ENA |
			     PC_VAUX_ON | PC_VCC_OFF));
}

static void sky2_gmac_reset(struct sky2_hw *hw, unsigned port)
{
	u16 reg;

	/* disable all GMAC IRQ's */
	sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0);
	/* disable PHY IRQs */
	gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);

	gma_write16(hw, port, GM_MC_ADDR_H1, 0);	/* clear MC hash */
	gma_write16(hw, port, GM_MC_ADDR_H2, 0);
	gma_write16(hw, port, GM_MC_ADDR_H3, 0);
	gma_write16(hw, port, GM_MC_ADDR_H4, 0);

	reg = gma_read16(hw, port, GM_RX_CTRL);
	reg |= GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA;
	gma_write16(hw, port, GM_RX_CTRL, reg);
}

/* flow control to advertise bits */
static const u16 copper_fc_adv[] = {
	[FC_NONE]	= 0,
	[FC_TX]		= PHY_M_AN_ASP,
	[FC_RX]		= PHY_M_AN_PC,
	[FC_BOTH]	= PHY_M_AN_PC | PHY_M_AN_ASP,
};

/* flow control to advertise bits when using 1000BaseX */
static const u16 fiber_fc_adv[] = {
	[FC_BOTH] = PHY_M_P_BOTH_MD_X,
	[FC_TX]   = PHY_M_P_ASYM_MD_X,
	[FC_RX]	  = PHY_M_P_SYM_MD_X,
	[FC_NONE] = PHY_M_P_NO_PAUSE_X,
};

/* flow control to GMA disable bits */
static const u16 gm_fc_disable[] = {
	[FC_NONE] = GM_GPCR_FC_RX_DIS | GM_GPCR_FC_TX_DIS,
	[FC_TX]	  = GM_GPCR_FC_RX_DIS,
	[FC_RX]	  = GM_GPCR_FC_TX_DIS,
	[FC_BOTH] = 0,
};


static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
{
	struct sky2_port *sky2 = netdev_priv(hw->dev[port]);
	u16 ctrl, ct1000, adv, pg, ledctrl, ledover, reg;

	if (sky2->autoneg == AUTONEG_ENABLE &&
	    !(hw->chip_id == CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U)) {
		u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);

		ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK |
			   PHY_M_EC_MAC_S_MSK);
		ectrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ);

		if (hw->chip_id == CHIP_ID_YUKON_EC)
			ectrl |= PHY_M_EC_DSC_2(2) | PHY_M_EC_DOWN_S_ENA;
		else
			ectrl |= PHY_M_EC_M_DSC(2) | PHY_M_EC_S_DSC(3);

		gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, ectrl);
	}

	ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
	if (sky2_is_copper(hw)) {
		if (hw->chip_id == CHIP_ID_YUKON_FE) {
			/* enable automatic crossover */
			ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO) >> 1;
		} else {
			/* disable energy detect */
			ctrl &= ~PHY_M_PC_EN_DET_MSK;

			/* enable automatic crossover */
			ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO);

			if (sky2->autoneg == AUTONEG_ENABLE &&
			    (hw->chip_id == CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U)) {
				ctrl &= ~PHY_M_PC_DSC_MSK;
				ctrl |= PHY_M_PC_DSC(2) | PHY_M_PC_DOWN_S_ENA;
			}
		}
	} else {
		/* workaround for deviation #4.88 (CRC errors) */
		/* disable Automatic Crossover */

		ctrl &= ~PHY_M_PC_MDIX_MSK;
	}

	gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);

	/* special setup for PHY 88E1112 Fiber */
	if (hw->chip_id == CHIP_ID_YUKON_XL && !sky2_is_copper(hw)) {
		pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);

		/* Fiber: select 1000BASE-X only mode MAC Specific Ctrl Reg. */
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2);
		ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
		ctrl &= ~PHY_M_MAC_MD_MSK;
		ctrl |= PHY_M_MAC_MODE_SEL(PHY_M_MAC_MD_1000BX);
		gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);

		if (hw->pmd_type  == 'P') {
			/* select page 1 to access Fiber registers */
			gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 1);

			/* for SFP-module set SIGDET polarity to low */
			ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
			ctrl |= PHY_M_FIB_SIGD_POL;
			gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
		}

		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
	}

	ctrl = PHY_CT_RESET;
	ct1000 = 0;
	adv = PHY_AN_CSMA;
	reg = 0;

	if (sky2->autoneg == AUTONEG_ENABLE) {
		if (sky2_is_copper(hw)) {
			if (sky2->advertising & ADVERTISED_1000baseT_Full)
				ct1000 |= PHY_M_1000C_AFD;
			if (sky2->advertising & ADVERTISED_1000baseT_Half)
				ct1000 |= PHY_M_1000C_AHD;
			if (sky2->advertising & ADVERTISED_100baseT_Full)
				adv |= PHY_M_AN_100_FD;
			if (sky2->advertising & ADVERTISED_100baseT_Half)
				adv |= PHY_M_AN_100_HD;
			if (sky2->advertising & ADVERTISED_10baseT_Full)
				adv |= PHY_M_AN_10_FD;
			if (sky2->advertising & ADVERTISED_10baseT_Half)
				adv |= PHY_M_AN_10_HD;

			adv |= copper_fc_adv[sky2->flow_mode];
		} else {	/* special defines for FIBER (88E1040S only) */
			if (sky2->advertising & ADVERTISED_1000baseT_Full)
				adv |= PHY_M_AN_1000X_AFD;
			if (sky2->advertising & ADVERTISED_1000baseT_Half)
				adv |= PHY_M_AN_1000X_AHD;

			adv |= fiber_fc_adv[sky2->flow_mode];
		}

		/* Restart Auto-negotiation */
		ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG;
	} else {
		/* forced speed/duplex settings */
		ct1000 = PHY_M_1000C_MSE;

		/* Disable auto update for duplex flow control and speed */
		reg |= GM_GPCR_AU_ALL_DIS;

		switch (sky2->speed) {
		case SPEED_1000:
			ctrl |= PHY_CT_SP1000;
			reg |= GM_GPCR_SPEED_1000;
			break;
		case SPEED_100:
			ctrl |= PHY_CT_SP100;
			reg |= GM_GPCR_SPEED_100;
			break;
		}

		if (sky2->duplex == DUPLEX_FULL) {
			reg |= GM_GPCR_DUP_FULL;
			ctrl |= PHY_CT_DUP_MD;
		} else if (sky2->speed < SPEED_1000)
			sky2->flow_mode = FC_NONE;


 		reg |= gm_fc_disable[sky2->flow_mode];

		/* Forward pause packets to GMAC? */
		if (sky2->flow_mode & FC_RX)
			sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON);
		else
			sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
	}

	gma_write16(hw, port, GM_GP_CTRL, reg);

	if (hw->chip_id != CHIP_ID_YUKON_FE)
		gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000);

	gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, adv);
	gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);

	/* Setup Phy LED's */
	ledctrl = PHY_M_LED_PULS_DUR(PULS_170MS);
	ledover = 0;

	switch (hw->chip_id) {
	case CHIP_ID_YUKON_FE:
		/* on 88E3082 these bits are at 11..9 (shifted left) */
		ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) << 1;

		ctrl = gm_phy_read(hw, port, PHY_MARV_FE_LED_PAR);

		/* delete ACT LED control bits */
		ctrl &= ~PHY_M_FELP_LED1_MSK;
		/* change ACT LED control to blink mode */
		ctrl |= PHY_M_FELP_LED1_CTRL(LED_PAR_CTRL_ACT_BL);
		gm_phy_write(hw, port, PHY_MARV_FE_LED_PAR, ctrl);
		break;

	case CHIP_ID_YUKON_XL:
		pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);

		/* select page 3 to access LED control register */
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);

		/* set LED Function Control register */
		gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
			     (PHY_M_LEDC_LOS_CTRL(1) |	/* LINK/ACT */
			      PHY_M_LEDC_INIT_CTRL(7) |	/* 10 Mbps */
			      PHY_M_LEDC_STA1_CTRL(7) |	/* 100 Mbps */
			      PHY_M_LEDC_STA0_CTRL(7)));	/* 1000 Mbps */

		/* set Polarity Control register */
		gm_phy_write(hw, port, PHY_MARV_PHY_STAT,
			     (PHY_M_POLC_LS1_P_MIX(4) |
			      PHY_M_POLC_IS0_P_MIX(4) |
			      PHY_M_POLC_LOS_CTRL(2) |
			      PHY_M_POLC_INIT_CTRL(2) |
			      PHY_M_POLC_STA1_CTRL(2) |
			      PHY_M_POLC_STA0_CTRL(2)));

		/* restore page register */
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
		break;
	case CHIP_ID_YUKON_EC_U:
		pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);

		/* select page 3 to access LED control register */
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);

		/* set LED Function Control register */
		gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
			     (PHY_M_LEDC_LOS_CTRL(1) |	/* LINK/ACT */
			      PHY_M_LEDC_INIT_CTRL(8) |	/* 10 Mbps */
			      PHY_M_LEDC_STA1_CTRL(7) |	/* 100 Mbps */
			      PHY_M_LEDC_STA0_CTRL(7)));/* 1000 Mbps */

		/* set Blink Rate in LED Timer Control Register */
		gm_phy_write(hw, port, PHY_MARV_INT_MASK,
			     ledctrl | PHY_M_LED_BLINK_RT(BLINK_84MS));
		/* restore page register */
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
		break;

	default:
		/* set Tx LED (LED_TX) to blink mode on Rx OR Tx activity */
		ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) | PHY_M_LEDC_TX_CTRL;
		/* turn off the Rx LED (LED_RX) */
		ledover &= ~PHY_M_LED_MO_RX;
	}

	if (hw->chip_id == CHIP_ID_YUKON_EC_U && hw->chip_rev == CHIP_REV_YU_EC_A1) {
		/* apply fixes in PHY AFE */
		pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 255);

		/* increase differential signal amplitude in 10BASE-T */
		gm_phy_write(hw, port, 0x18, 0xaa99);
		gm_phy_write(hw, port, 0x17, 0x2011);

		/* fix for IEEE A/B Symmetry failure in 1000BASE-T */
		gm_phy_write(hw, port, 0x18, 0xa204);
		gm_phy_write(hw, port, 0x17, 0x2002);

		/* set page register to 0 */
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
	} else {
		gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);

		if (sky2->autoneg == AUTONEG_DISABLE || sky2->speed == SPEED_100) {
			/* turn on 100 Mbps LED (LED_LINK100) */
			ledover |= PHY_M_LED_MO_100;
		}

		if (ledover)
			gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover);

	}

	/* Enable phy interrupt on auto-negotiation complete (or link up) */
	if (sky2->autoneg == AUTONEG_ENABLE)
		gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_COMPL);
	else
		gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);
}

static void sky2_phy_power(struct sky2_hw *hw, unsigned port, int onoff)
{
	u32 reg1;
	static const u32 phy_power[]
		= { PCI_Y2_PHY1_POWD, PCI_Y2_PHY2_POWD };

	/* looks like this XL is back asswards .. */
	if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
		onoff = !onoff;

	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
	reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
	if (onoff)
		/* Turn off phy power saving */
		reg1 &= ~phy_power[port];
	else
		reg1 |= phy_power[port];

	sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
	sky2_pci_read32(hw, PCI_DEV_REG1);
	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
	udelay(100);
}

/* Force a renegotiation */
static void sky2_phy_reinit(struct sky2_port *sky2)
{
	spin_lock_bh(&sky2->phy_lock);
	sky2_phy_init(sky2->hw, sky2->port);
	spin_unlock_bh(&sky2->phy_lock);
}

/* Put device in state to listen for Wake On Lan */
static void sky2_wol_init(struct sky2_port *sky2)
{
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	enum flow_control save_mode;
	u16 ctrl;
	u32 reg1;

	/* Bring hardware out of reset */
	sky2_write16(hw, B0_CTST, CS_RST_CLR);
	sky2_write16(hw, SK_REG(port, GMAC_LINK_CTRL), GMLC_RST_CLR);

	sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);
	sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);

	/* Force to 10/100
	 * sky2_reset will re-enable on resume
	 */
	save_mode = sky2->flow_mode;
	ctrl = sky2->advertising;

	sky2->advertising &= ~(ADVERTISED_1000baseT_Half|ADVERTISED_1000baseT_Full);
	sky2->flow_mode = FC_NONE;
	sky2_phy_power(hw, port, 1);
	sky2_phy_reinit(sky2);

	sky2->flow_mode = save_mode;
	sky2->advertising = ctrl;

	/* Set GMAC to no flow control and auto update for speed/duplex */
	gma_write16(hw, port, GM_GP_CTRL,
		    GM_GPCR_FC_TX_DIS|GM_GPCR_TX_ENA|GM_GPCR_RX_ENA|
		    GM_GPCR_DUP_FULL|GM_GPCR_FC_RX_DIS|GM_GPCR_AU_FCT_DIS);

	/* Set WOL address */
	memcpy_toio(hw->regs + WOL_REGS(port, WOL_MAC_ADDR),
		    sky2->netdev->dev_addr, ETH_ALEN);

	/* Turn on appropriate WOL control bits */
	sky2_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), WOL_CTL_CLEAR_RESULT);
	ctrl = 0;
	if (sky2->wol & WAKE_PHY)
		ctrl |= WOL_CTL_ENA_PME_ON_LINK_CHG|WOL_CTL_ENA_LINK_CHG_UNIT;
	else
		ctrl |= WOL_CTL_DIS_PME_ON_LINK_CHG|WOL_CTL_DIS_LINK_CHG_UNIT;

	if (sky2->wol & WAKE_MAGIC)
		ctrl |= WOL_CTL_ENA_PME_ON_MAGIC_PKT|WOL_CTL_ENA_MAGIC_PKT_UNIT;
	else
		ctrl |= WOL_CTL_DIS_PME_ON_MAGIC_PKT|WOL_CTL_DIS_MAGIC_PKT_UNIT;

	ctrl |= WOL_CTL_DIS_PME_ON_PATTERN|WOL_CTL_DIS_PATTERN_UNIT;
	sky2_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), ctrl);

	/* Turn on legacy PCI-Express PME mode */
	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
	reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
	reg1 |= PCI_Y2_PME_LEGACY;
	sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);

	/* block receiver */
	sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);

}

static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
{
	struct sky2_port *sky2 = netdev_priv(hw->dev[port]);
	u16 reg;
	int i;
	const u8 *addr = hw->dev[port]->dev_addr;

	sky2_write32(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
	sky2_write32(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR|GPC_ENA_PAUSE);

	sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);

	if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0 && port == 1) {
		/* WA DEV_472 -- looks like crossed wires on port 2 */
		/* clear GMAC 1 Control reset */
		sky2_write8(hw, SK_REG(0, GMAC_CTRL), GMC_RST_CLR);
		do {
			sky2_write8(hw, SK_REG(1, GMAC_CTRL), GMC_RST_SET);
			sky2_write8(hw, SK_REG(1, GMAC_CTRL), GMC_RST_CLR);
		} while (gm_phy_read(hw, 1, PHY_MARV_ID0) != PHY_MARV_ID0_VAL ||
			 gm_phy_read(hw, 1, PHY_MARV_ID1) != PHY_MARV_ID1_Y2 ||
			 gm_phy_read(hw, 1, PHY_MARV_INT_MASK) != 0);
	}

	sky2_read16(hw, SK_REG(port, GMAC_IRQ_SRC));

	/* Enable Transmit FIFO Underrun */
	sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK);

	spin_lock_bh(&sky2->phy_lock);
	sky2_phy_init(hw, port);
	spin_unlock_bh(&sky2->phy_lock);

	/* MIB clear */
	reg = gma_read16(hw, port, GM_PHY_ADDR);
	gma_write16(hw, port, GM_PHY_ADDR, reg | GM_PAR_MIB_CLR);

	for (i = GM_MIB_CNT_BASE; i <= GM_MIB_CNT_END; i += 4)
		gma_read16(hw, port, i);
	gma_write16(hw, port, GM_PHY_ADDR, reg);

	/* transmit control */
	gma_write16(hw, port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));

	/* receive control reg: unicast + multicast + no FCS  */
	gma_write16(hw, port, GM_RX_CTRL,
		    GM_RXCR_UCF_ENA | GM_RXCR_CRC_DIS | GM_RXCR_MCF_ENA);

	/* transmit flow control */
	gma_write16(hw, port, GM_TX_FLOW_CTRL, 0xffff);

	/* transmit parameter */
	gma_write16(hw, port, GM_TX_PARAM,
		    TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) |
		    TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
		    TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) |
		    TX_BACK_OFF_LIM(TX_BOF_LIM_DEF));

	/* serial mode register */
	reg = DATA_BLIND_VAL(DATA_BLIND_DEF) |
		GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);

	if (hw->dev[port]->mtu > ETH_DATA_LEN)
		reg |= GM_SMOD_JUMBO_ENA;

	gma_write16(hw, port, GM_SERIAL_MODE, reg);

	/* virtual address for data */
	gma_set_addr(hw, port, GM_SRC_ADDR_2L, addr);

	/* physical address: used for pause frames */
	gma_set_addr(hw, port, GM_SRC_ADDR_1L, addr);

	/* ignore counter overflows */
	gma_write16(hw, port, GM_TX_IRQ_MSK, 0);
	gma_write16(hw, port, GM_RX_IRQ_MSK, 0);
	gma_write16(hw, port, GM_TR_IRQ_MSK, 0);

	/* Configure Rx MAC FIFO */
	sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
	sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
		     GMF_OPER_ON | GMF_RX_F_FL_ON);

	/* Flush Rx MAC FIFO on any flow control or error */
	sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), GMR_FS_ANY_ERR);

	/* Set threshold to 0xa (64 bytes) + 1 to workaround pause bug  */
	sky2_write16(hw, SK_REG(port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF+1);

	/* Configure Tx MAC FIFO */
	sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR);
	sky2_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON);

	if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
		sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 768/8);
		sky2_write8(hw, SK_REG(port, RX_GMF_UP_THR), 1024/8);
		if (hw->dev[port]->mtu > ETH_DATA_LEN) {
			/* set Tx GMAC FIFO Almost Empty Threshold */
			sky2_write32(hw, SK_REG(port, TX_GMF_AE_THR), 0x180);
			/* Disable Store & Forward mode for TX */
			sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_DIS);
		}
	}

}

/* Assign Ram Buffer allocation to queue */
static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, u32 space)
{
	u32 end;

	/* convert from K bytes to qwords used for hw register */
	start *= 1024/8;
	space *= 1024/8;
	end = start + space - 1;

	sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR);
	sky2_write32(hw, RB_ADDR(q, RB_START), start);
	sky2_write32(hw, RB_ADDR(q, RB_END), end);
	sky2_write32(hw, RB_ADDR(q, RB_WP), start);
	sky2_write32(hw, RB_ADDR(q, RB_RP), start);

	if (q == Q_R1 || q == Q_R2) {
		u32 tp = space - space/4;

		/* On receive queues set the thresholds
		 * give receiver priority when > 3/4 full
		 * send pause when down to 2K
		 */
		sky2_write32(hw, RB_ADDR(q, RB_RX_UTHP), tp);
		sky2_write32(hw, RB_ADDR(q, RB_RX_LTHP), space/2);

		tp = space - 2048/8;
		sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), tp);
		sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), space/4);
	} else {
		/* Enable store & forward on Tx queues because
		 * Tx FIFO is only 1K on Yukon
		 */
		sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_STFWD);
	}

	sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_OP_MD);
	sky2_read8(hw, RB_ADDR(q, RB_CTRL));
}

/* Setup Bus Memory Interface */
static void sky2_qset(struct sky2_hw *hw, u16 q)
{
	sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_CLR_RESET);
	sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_OPER_INIT);
	sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_FIFO_OP_ON);
	sky2_write32(hw, Q_ADDR(q, Q_WM),  BMU_WM_DEFAULT);
}

/* Setup prefetch unit registers. This is the interface between
 * hardware and driver list elements
 */
static void sky2_prefetch_init(struct sky2_hw *hw, u32 qaddr,
				      u64 addr, u32 last)
{
	sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
	sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_RST_CLR);
	sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_ADDR_HI), addr >> 32);
	sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_ADDR_LO), (u32) addr);
	sky2_write16(hw, Y2_QADDR(qaddr, PREF_UNIT_LAST_IDX), last);
	sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_OP_ON);

	sky2_read32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL));
}

static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2)
{
	struct sky2_tx_le *le = sky2->tx_le + sky2->tx_prod;

	sky2->tx_prod = RING_NEXT(sky2->tx_prod, TX_RING_SIZE);
	le->ctrl = 0;
	return le;
}

static inline struct tx_ring_info *tx_le_re(struct sky2_port *sky2,
					    struct sky2_tx_le *le)
{
	return sky2->tx_ring + (le - sky2->tx_le);
}

/* Update chip's next pointer */
static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q, u16 idx)
{
	q = Y2_QADDR(q, PREF_UNIT_PUT_IDX);
	wmb();
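	/* Write the new put index, then read it back to flush the posted PCI write */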
	sky2_write16(hw, q, idx);
	sky2_read16(hw, q);
}


static inline struct sky2_rx_le *sky2_next_rx(struct sky2_port *sky2)
{
	struct sky2_rx_le *le = sky2->rx_le + sky2->rx_put;
	sky2->rx_put = RING_NEXT(sky2->rx_put, RX_LE_SIZE);
	le->ctrl = 0;
	return le;
}

/* Return high part of DMA address (could be 32 or 64 bit) */
static inline u32 high32(dma_addr_t a)
{
	return sizeof(a) > sizeof(u32) ? (a >> 16) >> 16 : 0;
}

/* Build description to hardware for one receive segment */
static void sky2_rx_add(struct sky2_port *sky2,  u8 op,
			dma_addr_t map, unsigned len)
{
	struct sky2_rx_le *le;
	u32 hi = high32(map);

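	/* Emit an extra ADDR64 element only when the upper 32 bits of the DMA address change */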
	if (sky2->rx_addr64 != hi) {
		le = sky2_next_rx(sky2);
		le->addr = cpu_to_le32(hi);
		le->opcode = OP_ADDR64 | HW_OWNER;
		sky2->rx_addr64 = high32(map + len);
	}

	le = sky2_next_rx(sky2);
	le->addr = cpu_to_le32((u32) map);
	le->length = cpu_to_le16(len);
	le->opcode = op | HW_OWNER;
}

/* Build description to hardware for one possibly fragmented skb */
static void sky2_rx_submit(struct sky2_port *sky2,
			   const struct rx_ring_info *re)
{
	int i;

	sky2_rx_add(sky2, OP_PACKET, re->data_addr, sky2->rx_data_size);

	for (i = 0; i < skb_shinfo(re->skb)->nr_frags; i++)
		sky2_rx_add(sky2, OP_BUFFER, re->frag_addr[i], PAGE_SIZE);
}


static void sky2_rx_map_skb(struct pci_dev *pdev, struct rx_ring_info *re,
			    unsigned size)
{
	struct sk_buff *skb = re->skb;
	int i;

	re->data_addr = pci_map_single(pdev, skb->data, size, PCI_DMA_FROMDEVICE);
	pci_unmap_len_set(re, data_size, size);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		re->frag_addr[i] = pci_map_page(pdev,
						skb_shinfo(skb)->frags[i].page,
						skb_shinfo(skb)->frags[i].page_offset,
						skb_shinfo(skb)->frags[i].size,
						PCI_DMA_FROMDEVICE);
}

static void sky2_rx_unmap_skb(struct pci_dev *pdev, struct rx_ring_info *re)
{
	struct sk_buff *skb = re->skb;
	int i;

	pci_unmap_single(pdev, re->data_addr, pci_unmap_len(re, data_size),
			 PCI_DMA_FROMDEVICE);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		pci_unmap_page(pdev, re->frag_addr[i],
			       skb_shinfo(skb)->frags[i].size,
			       PCI_DMA_FROMDEVICE);
}

/* Tell chip where to start receive checksum.
 * Actually has two checksums, but set both same to avoid possible byte
 * order problems.
 */
static void rx_set_checksum(struct sky2_port *sky2)
{
	struct sky2_rx_le *le;

	le = sky2_next_rx(sky2);
	le->addr = cpu_to_le32((ETH_HLEN << 16) | ETH_HLEN);
	le->ctrl = 0;
	le->opcode = OP_TCPSTART | HW_OWNER;

	sky2_write32(sky2->hw,
		     Q_ADDR(rxqaddr[sky2->port], Q_CSR),
		     sky2->rx_csum ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);

}

/*
 * The RX Stop command will not work for Yukon-2 if the BMU does not
 * reach the end of packet and since we can't make sure that we have
 * incoming data, we must reset the BMU while it is not doing a DMA
 * transfer. Since it is possible that the RX path is still active,
 * the RX RAM buffer will be stopped first, so any possible incoming
 * data will not trigger a DMA. After the RAM buffer is stopped, the
 * BMU is polled until any DMA in progress is ended and only then it
 * will be reset.
 */
static void sky2_rx_stop(struct sky2_port *sky2)
{
	struct sky2_hw *hw = sky2->hw;
	unsigned rxq = rxqaddr[sky2->port];
	int i;

	/* disable the RAM Buffer receive queue */
	sky2_write8(hw, RB_ADDR(rxq, RB_CTRL), RB_DIS_OP_MD);

	for (i = 0; i < 0xffff; i++)
		if (sky2_read8(hw, RB_ADDR(rxq, Q_RSL))
		    == sky2_read8(hw, RB_ADDR(rxq, Q_RL)))
			goto stopped;

	printk(KERN_WARNING PFX "%s: receiver stop failed\n",
	       sky2->netdev->name);
stopped:
	sky2_write32(hw, Q_ADDR(rxq, Q_CSR), BMU_RST_SET | BMU_FIFO_RST);

	/* reset the Rx prefetch unit */
	sky2_write32(hw, Y2_QADDR(rxq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
}

/* Clean out receive buffer area, assumes receiver hardware stopped */
static void sky2_rx_clean(struct sky2_port *sky2)
{
	unsigned i;

	memset(sky2->rx_le, 0, RX_LE_BYTES);
	for (i = 0; i < sky2->rx_pending; i++) {
		struct rx_ring_info *re = sky2->rx_ring + i;

		if (re->skb) {
			sky2_rx_unmap_skb(sky2->hw->pdev, re);
			kfree_skb(re->skb);
			re->skb = NULL;
		}
	}
}

/* Basic MII support */
static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	int err = -EOPNOTSUPP;

	if (!netif_running(dev))
		return -ENODEV;	/* Phy still in reset */

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = PHY_ADDR_MARV;

		/* fallthru */
	case SIOCGMIIREG: {
		u16 val = 0;

		spin_lock_bh(&sky2->phy_lock);
		err = __gm_phy_read(hw, sky2->port, data->reg_num & 0x1f, &val);
		spin_unlock_bh(&sky2->phy_lock);

		data->val_out = val;
		break;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		spin_lock_bh(&sky2->phy_lock);
		err = gm_phy_write(hw, sky2->port, data->reg_num & 0x1f,
				   data->val_in);
		spin_unlock_bh(&sky2->phy_lock);
		break;
	}
	return err;
}

#ifdef SKY2_VLAN_TAG_USED
static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	u16 port = sky2->port;

	netif_tx_lock_bh(dev);

	sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), RX_VLAN_STRIP_ON);
	sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_VLAN_TAG_ON);
	sky2->vlgrp = grp;

	netif_tx_unlock_bh(dev);
}

static void sky2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	u16 port = sky2->port;

	netif_tx_lock_bh(dev);

	sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), RX_VLAN_STRIP_OFF);
	sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_VLAN_TAG_OFF);
	if (sky2->vlgrp)
		sky2->vlgrp->vlan_devices[vid] = NULL;

	netif_tx_unlock_bh(dev);
}
#endif

/*
 * Allocate an skb for receiving. If the MTU is large enough
 * make the skb non-linear with a fragment list of pages.
 *
 * It appears the hardware has a bug in the FIFO logic that
 * causes it to hang if the FIFO gets overrun and the receive buffer
 * is not 64 byte aligned. The buffer returned from netdev_alloc_skb is
 * aligned except if slab debugging is enabled.
 */
static struct sk_buff *sky2_rx_alloc(struct sky2_port *sky2)
{
	struct sk_buff *skb;
	unsigned long p;
	int i;

	skb = netdev_alloc_skb(sky2->netdev, sky2->rx_data_size + RX_SKB_ALIGN);
	if (!skb)
		goto nomem;

	p = (unsigned long) skb->data;
	skb_reserve(skb, ALIGN(p, RX_SKB_ALIGN) - p);

	for (i = 0; i < sky2->rx_nfrags; i++) {
		struct page *page = alloc_page(GFP_ATOMIC);

		if (!page)
			goto free_partial;
		skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE);
	}

	return skb;
free_partial:
	kfree_skb(skb);
nomem:
	return NULL;
}

/*
 * Allocate and setup receiver buffer pool.
 * Normal case this ends up creating one list element for skb
 * in the receive ring. Worst case if using large MTU and each
 * allocation falls on a different 64 bit region, that results
 * in 6 list elements per ring entry.
 * One element is used for checksum enable/disable, and one
 * extra to avoid wrap.
 */
static int sky2_rx_start(struct sky2_port *sky2)
{
	struct sky2_hw *hw = sky2->hw;
	struct rx_ring_info *re;
	unsigned rxq = rxqaddr[sky2->port];
	unsigned i, size, space, thresh;

	sky2->rx_put = sky2->rx_next = 0;
	sky2_qset(hw, rxq);

	/* On PCI express lowering the watermark gives better performance */
	if (pci_find_capability(hw->pdev, PCI_CAP_ID_EXP))
		sky2_write32(hw, Q_ADDR(rxq, Q_WM), BMU_WM_PEX);

	/* These chips have no ram buffer?
	 * MAC Rx RAM Read is controlled by hardware */
	if (hw->chip_id == CHIP_ID_YUKON_EC_U &&
	    (hw->chip_rev == CHIP_REV_YU_EC_U_A1
	     || hw->chip_rev == CHIP_REV_YU_EC_U_B0))
		sky2_write32(hw, Q_ADDR(rxq, Q_F), F_M_RX_RAM_DIS);

	sky2_prefetch_init(hw, rxq, sky2->rx_le_map, RX_LE_SIZE - 1);

	rx_set_checksum(sky2);

	/* Space needed for frame data + headers rounded up */
	size = ALIGN(sky2->netdev->mtu + ETH_HLEN + VLAN_HLEN, 8)
		+ 8;

	/* Stopping point for hardware truncation */
	thresh = (size - 8) / sizeof(u32);

	/* Account for overhead of skb - to avoid order > 0 allocation */
	space = SKB_DATA_ALIGN(size) + NET_SKB_PAD
		+ sizeof(struct skb_shared_info);

	sky2->rx_nfrags = space >> PAGE_SHIFT;
	BUG_ON(sky2->rx_nfrags > ARRAY_SIZE(re->frag_addr));

	if (sky2->rx_nfrags != 0) {
		/* Compute residue after pages */
		space = sky2->rx_nfrags << PAGE_SHIFT;

		if (space < size)
			size -= space;
		else
			size = 0;

		/* Optimize to handle small packets and headers */
		if (size < copybreak)
			size = copybreak;
		if (size < ETH_HLEN)
			size = ETH_HLEN;
	}
	sky2->rx_data_size = size;

	/* Fill Rx ring */
	for (i = 0; i < sky2->rx_pending; i++) {
		re = sky2->rx_ring + i;

		re->skb = sky2_rx_alloc(sky2);
		if (!re->skb)
			goto nomem;

		sky2_rx_map_skb(hw->pdev, re, sky2->rx_data_size);
		sky2_rx_submit(sky2, re);
	}

	/*
	 * The receiver hangs if it receives frames larger than the
	 * packet buffer. As a workaround, truncate oversize frames, but
	 * the register is limited to 9 bits, so if you do frames > 2052
	 * you better get the MTU right!
	 */
	if (thresh > 0x1ff)
		sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_TRUNC_OFF);
	else {
		sky2_write16(hw, SK_REG(sky2->port, RX_GMF_TR_THR), thresh);
		sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_TRUNC_ON);
	}

	/* Tell chip about available buffers */
	sky2_write16(hw, Y2_QADDR(rxq, PREF_UNIT_PUT_IDX), sky2->rx_put);
	return 0;
nomem:
	sky2_rx_clean(sky2);
	return -ENOMEM;
}

/* Bring up network interface. */
static int sky2_up(struct net_device *dev)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	u32 ramsize, imask;
	int cap, err = -ENOMEM;
	struct net_device *otherdev = hw->dev[sky2->port^1];

	/*
	 * On dual port PCI-X cards, there is a problem where status
	 * can be received out of order due to split transactions
	 */
	if (otherdev && netif_running(otherdev) &&
 	    (cap = pci_find_capability(hw->pdev, PCI_CAP_ID_PCIX))) {
 		struct sky2_port *osky2 = netdev_priv(otherdev);
 		u16 cmd;

 		cmd = sky2_pci_read16(hw, cap + PCI_X_CMD);
 		cmd &= ~PCI_X_CMD_MAX_SPLIT;
 		sky2_pci_write16(hw, cap + PCI_X_CMD, cmd);

 		sky2->rx_csum = 0;
 		osky2->rx_csum = 0;
 	}

	if (netif_msg_ifup(sky2))
		printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);

	/* must be power of 2 */
	sky2->tx_le = pci_alloc_consistent(hw->pdev,
					   TX_RING_SIZE *
					   sizeof(struct sky2_tx_le),
					   &sky2->tx_le_map);
	if (!sky2->tx_le)
		goto err_out;

	sky2->tx_ring = kcalloc(TX_RING_SIZE, sizeof(struct tx_ring_info),
				GFP_KERNEL);
	if (!sky2->tx_ring)
		goto err_out;
	sky2->tx_prod = sky2->tx_cons = 0;

	sky2->rx_le = pci_alloc_consistent(hw->pdev, RX_LE_BYTES,
					   &sky2->rx_le_map);
	if (!sky2->rx_le)
		goto err_out;
	memset(sky2->rx_le, 0, RX_LE_BYTES);

	sky2->rx_ring = kcalloc(sky2->rx_pending, sizeof(struct rx_ring_info),
				GFP_KERNEL);
	if (!sky2->rx_ring)
		goto err_out;

	sky2_phy_power(hw, port, 1);

	sky2_mac_init(hw, port);

	/* Register is number of 4K blocks on internal RAM buffer. */
	ramsize = sky2_read8(hw, B2_E_0) * 4;
	printk(KERN_INFO PFX "%s: ram buffer %dK\n", dev->name, ramsize);

	if (ramsize > 0) {
		u32 rxspace;

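		/* Split the RAM buffer between receive and transmit: half and
		 * half on small buffers; on larger ones receive gets roughly
		 * two thirds of the space beyond the first 16K.
		 */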
		if (ramsize < 16)
			rxspace = ramsize / 2;
		else
			rxspace = 8 + (2*(ramsize - 16))/3;

		sky2_ramset(hw, rxqaddr[port], 0, rxspace);
		sky2_ramset(hw, txqaddr[port], rxspace, ramsize - rxspace);

		/* Make sure SyncQ is disabled */
		sky2_write8(hw, RB_ADDR(port == 0 ? Q_XS1 : Q_XS2, RB_CTRL),
			    RB_RST_SET);
	}

	sky2_qset(hw, txqaddr[port]);

	/* Set almost empty threshold */
	if (hw->chip_id == CHIP_ID_YUKON_EC_U
	    && hw->chip_rev == CHIP_REV_YU_EC_U_A0)
		sky2_write16(hw, Q_ADDR(txqaddr[port], Q_AL), 0x1a0);

	sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map,
			   TX_RING_SIZE - 1);

	err = sky2_rx_start(sky2);
	if (err)
		goto err_out;

	/* Enable interrupts from phy/mac for port */
	imask = sky2_read32(hw, B0_IMSK);
	imask |= portirq_msk[port];
	sky2_write32(hw, B0_IMSK, imask);

	return 0;

err_out:
	if (sky2->rx_le) {
		pci_free_consistent(hw->pdev, RX_LE_BYTES,
				    sky2->rx_le, sky2->rx_le_map);
		sky2->rx_le = NULL;
	}
	if (sky2->tx_le) {
		pci_free_consistent(hw->pdev,
				    TX_RING_SIZE * sizeof(struct sky2_tx_le),
				    sky2->tx_le, sky2->tx_le_map);
		sky2->tx_le = NULL;
	}
	kfree(sky2->tx_ring);
	kfree(sky2->rx_ring);

	sky2->tx_ring = NULL;
	sky2->rx_ring = NULL;
	return err;
}

/* Modular subtraction in ring */
static inline int tx_dist(unsigned tail, unsigned head)
{
	return (head - tail) & (TX_RING_SIZE - 1);
}

/* Number of list elements available for next tx */
static inline int tx_avail(const struct sky2_port *sky2)
{
	return sky2->tx_pending - tx_dist(sky2->tx_cons, sky2->tx_prod);
}

/* Estimate of number of transmit list elements required */
static unsigned tx_le_req(const struct sk_buff *skb)
{
	unsigned count;

	count = sizeof(dma_addr_t) / sizeof(u32);
	count += skb_shinfo(skb)->nr_frags * count;

	if (skb_is_gso(skb))
		++count;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		++count;

	return count;
}

/*
 * Put one packet in ring for transmit.
 * A single packet can generate multiple list elements, and
 * the number of ring elements will probably be less than the number
 * of list elements used.
 */
static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	struct sky2_tx_le *le = NULL;
	struct tx_ring_info *re;
	unsigned i, len;
	dma_addr_t mapping;
	u32 addr64;
	u16 mss;
	u8 ctrl;

 	if (unlikely(tx_avail(sky2) < tx_le_req(skb)))
  		return NETDEV_TX_BUSY;

	if (unlikely(netif_msg_tx_queued(sky2)))
		printk(KERN_DEBUG "%s: tx queued, slot %u, len %d\n",
		       dev->name, sky2->tx_prod, skb->len);

	len = skb_headlen(skb);
	mapping = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
	addr64 = high32(mapping);

	/* Send high bits if changed or crosses boundary */
	if (addr64 != sky2->tx_addr64 || high32(mapping + len) != sky2->tx_addr64) {
		le = get_tx_le(sky2);
		le->addr = cpu_to_le32(addr64);
		le->opcode = OP_ADDR64 | HW_OWNER;
		sky2->tx_addr64 = high32(mapping + len);
	}

	/* Check for TCP Segmentation Offload */
	mss = skb_shinfo(skb)->gso_size;
	if (mss != 0) {
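		/* Pass the full per-segment frame length to the chip: the MSS
		 * plus TCP options, IP/TCP headers and the Ethernet header.
		 */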
		mss += ((skb->h.th->doff - 5) * 4);	/* TCP options */
		mss += (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
		mss += ETH_HLEN;

		if (mss != sky2->tx_last_mss) {
			le = get_tx_le(sky2);
			le->addr = cpu_to_le32(mss);
			le->opcode = OP_LRGLEN | HW_OWNER;
			sky2->tx_last_mss = mss;
		}
	}

	ctrl = 0;
#ifdef SKY2_VLAN_TAG_USED
	/* Add VLAN tag, can piggyback on LRGLEN or ADDR64 */
	if (sky2->vlgrp && vlan_tx_tag_present(skb)) {
		if (!le) {
			le = get_tx_le(sky2);
			le->addr = 0;
			le->opcode = OP_VLAN|HW_OWNER;
		} else
			le->opcode |= OP_VLAN;
		le->length = cpu_to_be16(vlan_tx_tag_get(skb));
		ctrl |= INS_VLAN;
	}
#endif

	/* Handle TCP checksum offload */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		unsigned offset = skb->h.raw - skb->data;
		u32 tcpsum;

		tcpsum = offset << 16;		/* sum start */
		tcpsum |= offset + skb->csum_offset;	/* sum write */

		ctrl = CALSUM | WR_SUM | INIT_SUM | LOCK_SUM;
		if (skb->nh.iph->protocol == IPPROTO_UDP)
			ctrl |= UDPTCP;

		if (tcpsum != sky2->tx_tcpsum) {
			sky2->tx_tcpsum = tcpsum;

			le = get_tx_le(sky2);
			le->addr = cpu_to_le32(tcpsum);
			le->length = 0;	/* initial checksum value */
			le->ctrl = 1;	/* one packet */
			le->opcode = OP_TCPLISW | HW_OWNER;
		}
	}

	le = get_tx_le(sky2);
	le->addr = cpu_to_le32((u32) mapping);
	le->length = cpu_to_le16(len);
	le->ctrl = ctrl;
	le->opcode = mss ? (OP_LARGESEND | HW_OWNER) : (OP_PACKET | HW_OWNER);

	re = tx_le_re(sky2, le);
	re->skb = skb;
	pci_unmap_addr_set(re, mapaddr, mapping);
	pci_unmap_len_set(re, maplen, len);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		mapping = pci_map_page(hw->pdev, frag->page, frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);
		addr64 = high32(mapping);
		if (addr64 != sky2->tx_addr64) {
			le = get_tx_le(sky2);
			le->addr = cpu_to_le32(addr64);
			le->ctrl = 0;
			le->opcode = OP_ADDR64 | HW_OWNER;
			sky2->tx_addr64 = addr64;
		}

		le = get_tx_le(sky2);
		le->addr = cpu_to_le32((u32) mapping);
		le->length = cpu_to_le16(frag->size);
		le->ctrl = ctrl;
		le->opcode = OP_BUFFER | HW_OWNER;

		re = tx_le_re(sky2, le);
		re->skb = skb;
		pci_unmap_addr_set(re, mapaddr, mapping);
		pci_unmap_len_set(re, maplen, frag->size);
	}

	le->ctrl |= EOP;

	if (tx_avail(sky2) <= MAX_SKB_TX_LE)
		netif_stop_queue(dev);

	sky2_put_idx(hw, txqaddr[sky2->port], sky2->tx_prod);

	dev->trans_start = jiffies;
	return NETDEV_TX_OK;
}

/*
 * Free ring elements starting at tx_cons until "done"
 *
 * NB: the hardware will tell us about partial completion of multi-part
 *     buffers so make sure not to free skb too early.
 */
static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
{
	struct net_device *dev = sky2->netdev;
	struct pci_dev *pdev = sky2->hw->pdev;
	unsigned idx;

	BUG_ON(done >= TX_RING_SIZE);

	for (idx = sky2->tx_cons; idx != done;
	     idx = RING_NEXT(idx, TX_RING_SIZE)) {
		struct sky2_tx_le *le = sky2->tx_le + idx;
		struct tx_ring_info *re = sky2->tx_ring + idx;

		switch(le->opcode & ~HW_OWNER) {
		case OP_LARGESEND:
		case OP_PACKET:
			pci_unmap_single(pdev,
					 pci_unmap_addr(re, mapaddr),
					 pci_unmap_len(re, maplen),
					 PCI_DMA_TODEVICE);
			break;
		case OP_BUFFER:
			pci_unmap_page(pdev, pci_unmap_addr(re, mapaddr),
				       pci_unmap_len(re, maplen),
				       PCI_DMA_TODEVICE);
			break;
		}

		if (le->ctrl & EOP) {
			if (unlikely(netif_msg_tx_done(sky2)))
				printk(KERN_DEBUG "%s: tx done %u\n",
				       dev->name, idx);
			sky2->net_stats.tx_packets++;
			sky2->net_stats.tx_bytes += re->skb->len;

			dev_kfree_skb_any(re->skb);
		}

		le->opcode = 0;	/* paranoia */
	}

	sky2->tx_cons = idx;
	if (tx_avail(sky2) > MAX_SKB_TX_LE + 4)
		netif_wake_queue(dev);
}

/* Cleanup all untransmitted buffers, assume transmitter not running */
static void sky2_tx_clean(struct net_device *dev)
{
	struct sky2_port *sky2 = netdev_priv(dev);

	netif_tx_lock_bh(dev);
	sky2_tx_complete(sky2, sky2->tx_prod);
	netif_tx_unlock_bh(dev);
}

/* Network shutdown */
static int sky2_down(struct net_device *dev)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	u16 ctrl;
	u32 imask;

	/* Never really got started! */
	if (!sky2->tx_le)
		return 0;

	if (netif_msg_ifdown(sky2))
		printk(KERN_INFO PFX "%s: disabling interface\n", dev->name);

	/* Stop more packets from being queued */
	netif_stop_queue(dev);

	/* Disable port IRQ */
	imask = sky2_read32(hw, B0_IMSK);
	imask &= ~portirq_msk[port];
	sky2_write32(hw, B0_IMSK, imask);

	/*
	 * Both ports share the NAPI poll on port 0, so if necessary undo the
	 * disable that is done in dev_close.
	 */
	if (sky2->port == 0 && hw->ports > 1)
		netif_poll_enable(dev);

	sky2_gmac_reset(hw, port);

	/* Stop transmitter */
	sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_STOP);
	sky2_read32(hw, Q_ADDR(txqaddr[port], Q_CSR));

	sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL),
		     RB_RST_SET | RB_DIS_OP_MD);

	/* WA for dev. #4.209 */
	if (hw->chip_id == CHIP_ID_YUKON_EC_U
	    && (hw->chip_rev == CHIP_REV_YU_EC_U_A1 || hw->chip_rev == CHIP_REV_YU_EC_U_B0))
		sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
			     sky2->speed != SPEED_1000 ?
			     TX_STFW_ENA : TX_STFW_DIS);

	ctrl = gma_read16(hw, port, GM_GP_CTRL);
	ctrl &= ~(GM_GPCR_TX_ENA | GM_GPCR_RX_ENA);
	gma_write16(hw, port, GM_GP_CTRL, ctrl);

	sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);

	/* Workaround shared GMAC reset */
	if (!(hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0
	      && port == 0 && hw->dev[1] && netif_running(hw->dev[1])))
		sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET);

	/* Disable Force Sync bit and Enable Alloc bit */
	sky2_write8(hw, SK_REG(port, TXA_CTRL),
		    TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);

	/* Stop Interval Timer and Limit Counter of Tx Arbiter */
	sky2_write32(hw, SK_REG(port, TXA_ITI_INI), 0L);
	sky2_write32(hw, SK_REG(port, TXA_LIM_INI), 0L);

	/* Reset the PCI FIFO of the async Tx queue */
	sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR),
		     BMU_RST_SET | BMU_FIFO_RST);

	/* Reset the Tx prefetch units */
	sky2_write32(hw, Y2_QADDR(txqaddr[port], PREF_UNIT_CTRL),
		     PREF_UNIT_RST_SET);

	sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET);

	sky2_rx_stop(sky2);

	sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
	sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET);

	sky2_phy_power(hw, port, 0);

	/* turn off LED's */
	sky2_write16(hw, B0_Y2LED, LED_STAT_OFF);

	synchronize_irq(hw->pdev->irq);

	sky2_tx_clean(dev);
	sky2_rx_clean(sky2);

	pci_free_consistent(hw->pdev, RX_LE_BYTES,
			    sky2->rx_le, sky2->rx_le_map);
	kfree(sky2->rx_ring);

	pci_free_consistent(hw->pdev,
			    TX_RING_SIZE * sizeof(struct sky2_tx_le),
			    sky2->tx_le, sky2->tx_le_map);
	kfree(sky2->tx_ring);

	sky2->tx_le = NULL;
	sky2->rx_le = NULL;

	sky2->rx_ring = NULL;
	sky2->tx_ring = NULL;

	return 0;
}

static u16 sky2_phy_speed(const struct sky2_hw *hw, u16 aux)
{
	if (!sky2_is_copper(hw))
		return SPEED_1000;

	if (hw->chip_id == CHIP_ID_YUKON_FE)
		return (aux & PHY_M_PS_SPEED_100) ? SPEED_100 : SPEED_10;

	switch (aux & PHY_M_PS_SPEED_MSK) {
	case PHY_M_PS_SPEED_1000:
		return SPEED_1000;
	case PHY_M_PS_SPEED_100:
		return SPEED_100;
	default:
		return SPEED_10;
	}
}

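/* Link came up: enable the GMAC receiver/transmitter, wake the transmit queue and set the link LED */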
static void sky2_link_up(struct sky2_port *sky2)
{
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	u16 reg;
	static const char *fc_name[] = {
		[FC_NONE]	= "none",
		[FC_TX]		= "tx",
		[FC_RX]		= "rx",
		[FC_BOTH]	= "both",
	};

	/* enable Rx/Tx */
	reg = gma_read16(hw, port, GM_GP_CTRL);
	reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
	gma_write16(hw, port, GM_GP_CTRL, reg);

	gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);

	netif_carrier_on(sky2->netdev);
	netif_wake_queue(sky2->netdev);

	/* Turn on link LED */
	sky2_write8(hw, SK_REG(port, LNK_LED_REG),
		    LINKLED_ON | LINKLED_BLINK_OFF | LINKLED_LINKSYNC_OFF);

	if (hw->chip_id == CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U) {
		u16 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
		u16 led = PHY_M_LEDC_LOS_CTRL(1);	/* link active */

		switch(sky2->speed) {
		case SPEED_10:
			led |= PHY_M_LEDC_INIT_CTRL(7);
			break;

		case SPEED_100:
			led |= PHY_M_LEDC_STA1_CTRL(7);
			break;

		case SPEED_1000:
			led |= PHY_M_LEDC_STA0_CTRL(7);
			break;
		}

		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
		gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, led);
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
	}

	if (netif_msg_link(sky2))
		printk(KERN_INFO PFX
		       "%s: Link is up at %d Mbps, %s duplex, flow control %s\n",
		       sky2->netdev->name, sky2->speed,
		       sky2->duplex == DUPLEX_FULL ? "full" : "half",
		       fc_name[sky2->flow_status]);
}

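/* Link went down: disable the GMAC, stop the queue and re-initialize the PHY */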
static void sky2_link_down(struct sky2_port *sky2)
{
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	u16 reg;

	gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);

	reg = gma_read16(hw, port, GM_GP_CTRL);
	reg &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
	gma_write16(hw, port, GM_GP_CTRL, reg);

	if (sky2->flow_status == FC_RX) {
		/* restore Asymmetric Pause bit */
		gm_phy_write(hw, port, PHY_MARV_AUNE_ADV,
			     gm_phy_read(hw, port, PHY_MARV_AUNE_ADV)
			     | PHY_M_AN_ASP);
	}

	netif_carrier_off(sky2->netdev);
	netif_stop_queue(sky2->netdev);

	/* Turn off link LED */
	sky2_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_OFF);

	if (netif_msg_link(sky2))
		printk(KERN_INFO PFX "%s: Link is down.\n", sky2->netdev->name);

	sky2_phy_init(hw, port);
}

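/* Map rx/tx pause enables onto the flow_control enumeration */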
static enum flow_control sky2_flow(int rx, int tx)
{
	if (rx)
		return tx ? FC_BOTH : FC_RX;
	else
		return tx ? FC_TX : FC_NONE;
}

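/* Autonegotiation finished: record the negotiated speed, duplex and flow control */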
static int sky2_autoneg_done(struct sky2_port *sky2, u16 aux)
{
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	u16 lpa;

	lpa = gm_phy_read(hw, port, PHY_MARV_AUNE_LP);

	if (lpa & PHY_M_AN_RF) {
		printk(KERN_ERR PFX "%s: remote fault\n", sky2->netdev->name);
		return -1;
	}

	if (!(aux & PHY_M_PS_SPDUP_RES)) {
		printk(KERN_ERR PFX "%s: speed/duplex mismatch\n",
		       sky2->netdev->name);
		return -1;
	}

	sky2->speed = sky2_phy_speed(hw, aux);
	sky2->duplex = (aux & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;

	/* Pause bits are offset (9..8) */
	if (hw->chip_id == CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U)
		aux >>= 6;

	sky2->flow_status = sky2_flow(aux & PHY_M_PS_RX_P_EN,
				      aux & PHY_M_PS_TX_P_EN);

	if (sky2->duplex == DUPLEX_HALF && sky2->speed < SPEED_1000
	    && hw->chip_id != CHIP_ID_YUKON_EC_U)
		sky2->flow_status = FC_NONE;

	if (aux & PHY_M_PS_RX_P_EN)
		sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON);
	else
		sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);

	return 0;
}

/* Interrupt from PHY */
static void sky2_phy_intr(struct sky2_hw *hw, unsigned port)
{
	struct net_device *dev = hw->dev[port];
	struct sky2_port *sky2 = netdev_priv(dev);
	u16 istatus, phystat;

	if (!netif_running(dev))
		return;

	spin_lock(&sky2->phy_lock);
	istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT);
	phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT);

	if (netif_msg_intr(sky2))
		printk(KERN_INFO PFX "%s: phy interrupt status 0x%x 0x%x\n",
		       sky2->netdev->name, istatus, phystat);

1819
	if (sky2->autoneg == AUTONEG_ENABLE && (istatus & PHY_M_IS_AN_COMPL)) {
S
Stephen Hemminger 已提交
1820 1821 1822 1823
		if (sky2_autoneg_done(sky2, phystat) == 0)
			sky2_link_up(sky2);
		goto out;
	}
1824

S
Stephen Hemminger 已提交
1825 1826
	if (istatus & PHY_M_IS_LSP_CHANGE)
		sky2->speed = sky2_phy_speed(hw, phystat);
1827

S
Stephen Hemminger 已提交
1828 1829 1830
	if (istatus & PHY_M_IS_DUP_CHANGE)
		sky2->duplex =
		    (phystat & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;
1831

S
Stephen Hemminger 已提交
1832 1833
	if (istatus & PHY_M_IS_LST_CHANGE) {
		if (phystat & PHY_M_PS_LINK_UP)
1834
			sky2_link_up(sky2);
S
Stephen Hemminger 已提交
1835 1836
		else
			sky2_link_down(sky2);
1837
	}
S
Stephen Hemminger 已提交
1838
out:
1839
	spin_unlock(&sky2->phy_lock);
1840 1841
}

1842

S
Stephen Hemminger 已提交
1843
/* Transmit timeout is only called if we are running, carrier is up
1844
 * and tx queue is full (stopped).
S
Stephen Hemminger 已提交
1845
 * Called with netif_tx_lock held.
1846
 */
1847 1848 1849
static void sky2_tx_timeout(struct net_device *dev)
{
	struct sky2_port *sky2 = netdev_priv(dev);
1850
	struct sky2_hw *hw = sky2->hw;
S
Stephen Hemminger 已提交
1851
	u32 imask;
1852 1853 1854 1855

	if (netif_msg_timer(sky2))
		printk(KERN_ERR PFX "%s: tx timeout\n", dev->name);

1856
	printk(KERN_DEBUG PFX "%s: transmit ring %u .. %u report=%u done=%u\n",
S
Stephen Hemminger 已提交
1857 1858 1859
	       dev->name, sky2->tx_cons, sky2->tx_prod,
	       sky2_read16(hw, sky2->port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX),
	       sky2_read16(hw, Q_ADDR(txqaddr[sky2->port], Q_DONE)));
1860

S
Stephen Hemminger 已提交
1861 1862 1863
	imask = sky2_read32(hw, B0_IMSK);	/* block IRQ in hw */
	sky2_write32(hw, B0_IMSK, 0);
	sky2_read32(hw, B0_IMSK);
1864

S
Stephen Hemminger 已提交
1865 1866
	netif_poll_disable(hw->dev[0]);		/* stop NAPI poll */
	synchronize_irq(hw->pdev->irq);
1867

S
Stephen Hemminger 已提交
1868 1869
	netif_start_queue(dev);			/* don't wakeup during flush */
	sky2_tx_complete(sky2, sky2->tx_prod);	/* Flush transmit queue */
1870

S
Stephen Hemminger 已提交
1871
	sky2_write32(hw, B0_IMSK, imask);
1872

S
Stephen Hemminger 已提交
1873
	sky2_phy_reinit(sky2);			/* this clears flow control etc */
1874 1875 1876 1877
}

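/* Change MTU: the receiver is stopped, buffers are resized and the port is restarted */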
static int sky2_change_mtu(struct net_device *dev, int new_mtu)
{
1878 1879 1880 1881
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	int err;
	u16 ctl, mode;
1882
	u32 imask;
1883 1884 1885 1886

	if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
		return -EINVAL;

S
Stephen Hemminger 已提交
1887
	/* TSO on Yukon Ultra and MTU > 1500 not supported */
1888
	if (hw->chip_id == CHIP_ID_YUKON_EC_U && new_mtu > ETH_DATA_LEN)
S
Stephen Hemminger 已提交
1889
		dev->features &= ~NETIF_F_TSO;
1890

1891 1892 1893 1894 1895
	if (!netif_running(dev)) {
		dev->mtu = new_mtu;
		return 0;
	}

1896
	imask = sky2_read32(hw, B0_IMSK);
1897 1898
	sky2_write32(hw, B0_IMSK, 0);

1899 1900 1901 1902
	dev->trans_start = jiffies;	/* prevent tx timeout */
	netif_stop_queue(dev);
	netif_poll_disable(hw->dev[0]);

1903 1904
	synchronize_irq(hw->pdev->irq);

1905 1906 1907 1908
	ctl = gma_read16(hw, sky2->port, GM_GP_CTRL);
	gma_write16(hw, sky2->port, GM_GP_CTRL, ctl & ~GM_GPCR_RX_ENA);
	sky2_rx_stop(sky2);
	sky2_rx_clean(sky2);
1909 1910

	dev->mtu = new_mtu;
1911

1912 1913 1914 1915 1916 1917 1918
	mode = DATA_BLIND_VAL(DATA_BLIND_DEF) |
		GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);

	if (dev->mtu > ETH_DATA_LEN)
		mode |= GM_SMOD_JUMBO_ENA;

	gma_write16(hw, sky2->port, GM_SERIAL_MODE, mode);
1919

1920
	sky2_write8(hw, RB_ADDR(rxqaddr[sky2->port], RB_CTRL), RB_ENA_OP_MD);
1921

1922
	err = sky2_rx_start(sky2);
1923
	sky2_write32(hw, B0_IMSK, imask);
1924

1925 1926 1927 1928 1929 1930 1931 1932 1933
	if (err)
		dev_close(dev);
	else {
		gma_write16(hw, sky2->port, GM_GP_CTRL, ctl);

		netif_poll_enable(hw->dev[0]);
		netif_wake_queue(dev);
	}

	return err;
}

/* For small packets, copy the data into a new skb and reuse the existing skb for the next receive */
static struct sk_buff *receive_copy(struct sky2_port *sky2,
				    const struct rx_ring_info *re,
				    unsigned length)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb(sky2->netdev, length + 2);
	if (likely(skb)) {
		skb_reserve(skb, 2);
		pci_dma_sync_single_for_cpu(sky2->hw->pdev, re->data_addr,
					    length, PCI_DMA_FROMDEVICE);
		memcpy(skb->data, re->skb->data, length);
		skb->ip_summed = re->skb->ip_summed;
		skb->csum = re->skb->csum;
		pci_dma_sync_single_for_device(sky2->hw->pdev, re->data_addr,
					       length, PCI_DMA_FROMDEVICE);
		re->skb->ip_summed = CHECKSUM_NONE;
		skb_put(skb, length);
	}
	return skb;
}

/* Adjust length of skb with fragments to match received data */
static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
			  unsigned int length)
{
	int i, num_frags;
	unsigned int size;

	/* put header into skb */
	size = min(length, hdr_space);
	skb->tail += size;
	skb->len += size;
	length -= size;

	num_frags = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < num_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		if (length == 0) {
			/* don't need this page */
			__free_page(frag->page);
			--skb_shinfo(skb)->nr_frags;
		} else {
			size = min(length, (unsigned) PAGE_SIZE);

			frag->size = size;
			skb->data_len += size;
			skb->truesize += size;
			skb->len += size;
			length -= size;
		}
	}
}

/* Normal packet - take skb from ring element and put in a new one  */
static struct sk_buff *receive_new(struct sky2_port *sky2,
				   struct rx_ring_info *re,
				   unsigned int length)
{
	struct sk_buff *skb, *nskb;
	unsigned hdr_space = sky2->rx_data_size;

	pr_debug(PFX "receive new length=%d\n", length);

	/* Don't be tricky about reusing pages (yet) */
	nskb = sky2_rx_alloc(sky2);
	if (unlikely(!nskb))
		return NULL;

	skb = re->skb;
	sky2_rx_unmap_skb(sky2->hw->pdev, re);

	prefetch(skb->data);
	re->skb = nskb;
	sky2_rx_map_skb(sky2->hw->pdev, re, hdr_space);

	if (skb_shinfo(skb)->nr_frags)
		skb_put_frags(skb, hdr_space, length);
	else
		skb_put(skb, length);
	return skb;
}

/*
 * Receive one packet.
 * For small packets, copy the data into a new skb;
 * for larger packets, get a new buffer and hand off the old one.
 */
static struct sk_buff *sky2_receive(struct net_device *dev,
				    u16 length, u32 status)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct rx_ring_info *re = sky2->rx_ring + sky2->rx_next;
	struct sk_buff *skb = NULL;

	if (unlikely(netif_msg_rx_status(sky2)))
		printk(KERN_DEBUG PFX "%s: rx slot %u status 0x%x len %d\n",
2035
		       dev->name, sky2->rx_next, status, length);
2036

S
Stephen Hemminger 已提交
2037
	sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending;
S
Stephen Hemminger 已提交
2038
	prefetch(sky2->rx_ring + sky2->rx_next);
2039

2040
	if (status & GMR_FS_ANY_ERR)
2041 2042
		goto error;

2043 2044 2045
	if (!(status & GMR_FS_RX_OK))
		goto resubmit;

2046
	if (length > dev->mtu + ETH_HLEN)
2047 2048
		goto oversize;

2049 2050 2051 2052
	if (length < copybreak)
		skb = receive_copy(sky2, re, length);
	else
		skb = receive_new(sky2, re, length);
S
Stephen Hemminger 已提交
2053
resubmit:
2054
	sky2_rx_submit(sky2, re);
2055

2056 2057
	return skb;

2058 2059 2060 2061
oversize:
	++sky2->net_stats.rx_over_errors;
	goto resubmit;

2062
error:
2063
	++sky2->net_stats.rx_errors;
2064 2065 2066 2067
	if (status & GMR_FS_RX_FF_OV) {
		sky2->net_stats.rx_fifo_errors++;
		goto resubmit;
	}
2068

2069
	if (netif_msg_rx_err(sky2) && net_ratelimit())
2070
		printk(KERN_INFO PFX "%s: rx error, status 0x%x length %d\n",
2071
		       dev->name, status, length);
S
Stephen Hemminger 已提交
2072 2073

	if (status & (GMR_FS_LONG_ERR | GMR_FS_UN_SIZE))
2074 2075 2076 2077 2078
		sky2->net_stats.rx_length_errors++;
	if (status & GMR_FS_FRAGMENT)
		sky2->net_stats.rx_frame_errors++;
	if (status & GMR_FS_CRC_ERR)
		sky2->net_stats.rx_crc_errors++;
2079

S
Stephen Hemminger 已提交
2080
	goto resubmit;
2081 2082
}

2083 2084
/* Transmit complete */
static inline void sky2_tx_done(struct net_device *dev, u16 last)
2085
{
2086
	struct sky2_port *sky2 = netdev_priv(dev);
2087

2088
	if (netif_running(dev)) {
2089
		netif_tx_lock(dev);
2090
		sky2_tx_complete(sky2, last);
2091
		netif_tx_unlock(dev);
2092
	}
2093 2094
}

2095 2096
/* Process status response ring */
static int sky2_status_intr(struct sky2_hw *hw, int to_do)
2097
{
2098
	struct sky2_port *sky2;
2099
	int work_done = 0;
2100
	unsigned buf_write[2] = { 0, 0 };
S
Stephen Hemminger 已提交
2101
	u16 hwidx = sky2_read16(hw, STAT_PUT_IDX);
2102

2103
	rmb();
2104

S
Stephen Hemminger 已提交
2105
	while (hw->st_idx != hwidx) {
2106 2107
		struct sky2_status_le *le  = hw->st_le + hw->st_idx;
		struct net_device *dev;
2108 2109 2110 2111
		struct sk_buff *skb;
		u32 status;
		u16 length;

2112
		hw->st_idx = RING_NEXT(hw->st_idx, STATUS_RING_SIZE);
2113

S
Stephen Hemminger 已提交
2114 2115
		BUG_ON(le->link >= 2);
		dev = hw->dev[le->link];
2116 2117

		sky2 = netdev_priv(dev);
S
Stephen Hemminger 已提交
2118 2119
		length = le16_to_cpu(le->length);
		status = le32_to_cpu(le->status);
2120

S
Stephen Hemminger 已提交
2121
		switch (le->opcode & ~HW_OWNER) {
2122
		case OP_RXSTAT:
2123
			skb = sky2_receive(dev, length, status);
2124
			if (!skb)
2125
				goto force_update;
2126 2127

			skb->protocol = eth_type_trans(skb, dev);
2128 2129
			sky2->net_stats.rx_packets++;
			sky2->net_stats.rx_bytes += skb->len;
2130 2131
			dev->last_rx = jiffies;

2132 2133 2134 2135 2136 2137 2138
#ifdef SKY2_VLAN_TAG_USED
			if (sky2->vlgrp && (status & GMR_FS_VLAN)) {
				vlan_hwaccel_receive_skb(skb,
							 sky2->vlgrp,
							 be16_to_cpu(sky2->rx_tag));
			} else
#endif
2139
				netif_receive_skb(skb);
2140

2141 2142
			/* Update receiver after 16 frames */
			if (++buf_write[le->link] == RX_BUF_WRITE) {
2143 2144
force_update:
				sky2_put_idx(hw, rxqaddr[le->link], sky2->rx_put);
2145 2146 2147 2148
				buf_write[le->link] = 0;
			}

			/* Stop after net poll weight */
2149 2150
			if (++work_done >= to_do)
				goto exit_loop;
2151 2152
			break;

2153 2154 2155 2156 2157 2158 2159 2160 2161
#ifdef SKY2_VLAN_TAG_USED
		case OP_RXVLAN:
			sky2->rx_tag = length;
			break;

		case OP_RXCHKSVLAN:
			sky2->rx_tag = length;
			/* fall through */
#endif
2162
		case OP_RXCHKS:
2163
			skb = sky2->rx_ring[sky2->rx_next].skb;
2164
			skb->ip_summed = CHECKSUM_COMPLETE;
S
Stephen Hemminger 已提交
2165
			skb->csum = status & 0xffff;
2166 2167 2168
			break;

		case OP_TXINDEXLE:
2169
			/* TX index reports status for both ports */
S
Stephen Hemminger 已提交
2170 2171
			BUILD_BUG_ON(TX_RING_SIZE > 0x1000);
			sky2_tx_done(hw->dev[0], status & 0xfff);
2172 2173 2174 2175
			if (hw->dev[1])
				sky2_tx_done(hw->dev[1],
				     ((status >> 24) & 0xff)
					     | (u16)(length & 0xf) << 8);
2176 2177 2178 2179
			break;

		default:
			if (net_ratelimit())
S
Stephen Hemminger 已提交
2180
				printk(KERN_WARNING PFX
S
Stephen Hemminger 已提交
2181 2182
				       "unknown status opcode 0x%x\n", le->opcode);
			goto exit_loop;
2183
		}
2184
	}
2185

2186 2187 2188
	/* Fully processed status ring so clear irq */
	sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);

2189
exit_loop:
2190 2191 2192 2193 2194 2195 2196 2197 2198 2199
	if (buf_write[0]) {
		sky2 = netdev_priv(hw->dev[0]);
		sky2_put_idx(hw, Q_R1, sky2->rx_put);
	}

	if (buf_write[1]) {
		sky2 = netdev_priv(hw->dev[1]);
		sky2_put_idx(hw, Q_R2, sky2->rx_put);
	}

2200
	return work_done;
2201 2202 2203 2204 2205 2206
}

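/* Per-port hardware error interrupt: RAM parity, MAC parity and TCP segmentation errors */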
static void sky2_hw_error(struct sky2_hw *hw, unsigned port, u32 status)
{
	struct net_device *dev = hw->dev[port];

2207 2208 2209
	if (net_ratelimit())
		printk(KERN_INFO PFX "%s: hw error interrupt status 0x%x\n",
		       dev->name, status);
2210 2211

	if (status & Y2_IS_PAR_RD1) {
2212 2213 2214
		if (net_ratelimit())
			printk(KERN_ERR PFX "%s: ram data read parity error\n",
			       dev->name);
2215 2216 2217 2218 2219
		/* Clear IRQ */
		sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_RD_PERR);
	}

	if (status & Y2_IS_PAR_WR1) {
2220 2221 2222
		if (net_ratelimit())
			printk(KERN_ERR PFX "%s: ram data write parity error\n",
			       dev->name);
2223 2224 2225 2226 2227

		sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_WR_PERR);
	}

	if (status & Y2_IS_PAR_MAC1) {
2228 2229
		if (net_ratelimit())
			printk(KERN_ERR PFX "%s: MAC parity error\n", dev->name);
2230 2231 2232 2233
		sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_PE);
	}

	if (status & Y2_IS_PAR_RX1) {
2234 2235
		if (net_ratelimit())
			printk(KERN_ERR PFX "%s: RX parity error\n", dev->name);
2236 2237 2238 2239
		sky2_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), BMU_CLR_IRQ_PAR);
	}

	if (status & Y2_IS_TCP_TXA1) {
2240 2241 2242
		if (net_ratelimit())
			printk(KERN_ERR PFX "%s: TCP segmentation error\n",
			       dev->name);
2243 2244 2245 2246 2247 2248 2249 2250
		sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_CLR_IRQ_TCP);
	}
}

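/* Chip-level hardware error interrupt: PCI/PCI-Express errors plus per-port errors */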
static void sky2_hw_intr(struct sky2_hw *hw)
{
	u32 status = sky2_read32(hw, B0_HWE_ISRC);

S
Stephen Hemminger 已提交
2251
	if (status & Y2_IS_TIST_OV)
2252 2253 2254
		sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);

	if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) {
S
Stephen Hemminger 已提交
2255 2256
		u16 pci_err;

2257
		pci_err = sky2_pci_read16(hw, PCI_STATUS);
2258
		if (net_ratelimit())
2259 2260
			dev_err(&hw->pdev->dev, "PCI hardware error (0x%x)\n",
			        pci_err);
2261 2262

		sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
2263
		sky2_pci_write16(hw, PCI_STATUS,
2264
				 pci_err | PCI_STATUS_ERROR_BITS);
2265 2266 2267 2268
		sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
	}

	if (status & Y2_IS_PCI_EXP) {
S
shemminger@osdl.org 已提交
2269
		/* PCI-Express uncorrectable Error occurred */
S
Stephen Hemminger 已提交
2270 2271
		u32 pex_err;

2272
		pex_err = sky2_pci_read32(hw, PEX_UNC_ERR_STAT);
2273

2274
		if (net_ratelimit())
2275 2276
			dev_err(&hw->pdev->dev, "PCI Express error (0x%x)\n",
				pex_err);
2277 2278 2279

		/* clear the interrupt */
		sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
2280 2281
		sky2_pci_write32(hw, PEX_UNC_ERR_STAT,
				       0xffffffffUL);
2282 2283
		sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);

2284
		if (pex_err & PEX_FATAL_ERRORS) {
2285 2286 2287 2288 2289 2290 2291 2292 2293 2294 2295 2296 2297 2298 2299 2300 2301 2302 2303 2304 2305 2306 2307 2308 2309 2310 2311 2312 2313 2314 2315 2316 2317 2318
			u32 hwmsk = sky2_read32(hw, B0_HWE_IMSK);
			hwmsk &= ~Y2_IS_PCI_EXP;
			sky2_write32(hw, B0_HWE_IMSK, hwmsk);
		}
	}

	if (status & Y2_HWE_L1_MASK)
		sky2_hw_error(hw, 0, status);
	status >>= 8;
	if (status & Y2_HWE_L1_MASK)
		sky2_hw_error(hw, 1, status);
}

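/* GMAC interrupt: receive FIFO overrun or transmit FIFO underrun */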
static void sky2_mac_intr(struct sky2_hw *hw, unsigned port)
{
	struct net_device *dev = hw->dev[port];
	struct sky2_port *sky2 = netdev_priv(dev);
	u8 status = sky2_read8(hw, SK_REG(port, GMAC_IRQ_SRC));

	if (netif_msg_intr(sky2))
		printk(KERN_INFO PFX "%s: mac interrupt status 0x%x\n",
		       dev->name, status);

	if (status & GM_IS_RX_FF_OR) {
		++sky2->net_stats.rx_fifo_errors;
		sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_CLI_RX_FO);
	}

	if (status & GM_IS_TX_FF_UR) {
		++sky2->net_stats.tx_fifo_errors;
		sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_FU);
	}
}

/* This should never happen; it indicates a fatal hardware problem */
static void sky2_descriptor_error(struct sky2_hw *hw, unsigned port,
				  const char *rxtx, u32 mask)
{
	struct net_device *dev = hw->dev[port];
	struct sky2_port *sky2 = netdev_priv(dev);
	u32 imask;

	printk(KERN_ERR PFX "%s: %s descriptor error (hardware problem)\n",
	       dev ? dev->name : "<not registered>", rxtx);

	imask = sky2_read32(hw, B0_IMSK);
	imask &= ~mask;
	sky2_write32(hw, B0_IMSK, imask);

	if (dev) {
		spin_lock(&sky2->phy_lock);
		sky2_link_down(sky2);
		spin_unlock(&sky2->phy_lock);
	}
}

/* If idle then force a fake soft NAPI poll once a second
 * to work around cases where the edge-triggered interrupt is shared.
 */
static inline void sky2_idle_start(struct sky2_hw *hw)
{
	if (idle_timeout > 0)
		mod_timer(&hw->idle_timer,
			  jiffies + msecs_to_jiffies(idle_timeout));
}

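/* Idle timer callback: schedule a NAPI poll to pick up anything a shared IRQ may have missed */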
static void sky2_idle(unsigned long arg)
{
2353 2354
	struct sky2_hw *hw = (struct sky2_hw *) arg;
	struct net_device *dev = hw->dev[0];
2355 2356 2357

	if (__netif_rx_schedule_prep(dev))
		__netif_rx_schedule(dev);
2358 2359

	mod_timer(&hw->idle_timer, jiffies + msecs_to_jiffies(idle_timeout));
2360 2361 2362
}


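/* NAPI poll: handle PHY/MAC/error sources and drain the status ring for both ports */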
static int sky2_poll(struct net_device *dev0, int *budget)
2364
{
2365 2366 2367
	struct sky2_hw *hw = ((struct sky2_port *) netdev_priv(dev0))->hw;
	int work_limit = min(dev0->quota, *budget);
	int work_done = 0;
2368
	u32 status = sky2_read32(hw, B0_Y2_SP_EISR);
2369

S
Stephen Hemminger 已提交
2370 2371
	if (status & Y2_IS_HW_ERR)
		sky2_hw_intr(hw);
2372

S
Stephen Hemminger 已提交
2373 2374
	if (status & Y2_IS_IRQ_PHY1)
		sky2_phy_intr(hw, 0);
2375

S
Stephen Hemminger 已提交
2376 2377
	if (status & Y2_IS_IRQ_PHY2)
		sky2_phy_intr(hw, 1);
2378

S
Stephen Hemminger 已提交
2379 2380
	if (status & Y2_IS_IRQ_MAC1)
		sky2_mac_intr(hw, 0);
2381

S
Stephen Hemminger 已提交
2382 2383
	if (status & Y2_IS_IRQ_MAC2)
		sky2_mac_intr(hw, 1);
2384

S
Stephen Hemminger 已提交
2385 2386
	if (status & Y2_IS_CHK_RX1)
		sky2_descriptor_error(hw, 0, "receive", Y2_IS_CHK_RX1);
2387

S
Stephen Hemminger 已提交
2388 2389
	if (status & Y2_IS_CHK_RX2)
		sky2_descriptor_error(hw, 1, "receive", Y2_IS_CHK_RX2);
2390

S
Stephen Hemminger 已提交
2391 2392
	if (status & Y2_IS_CHK_TXA1)
		sky2_descriptor_error(hw, 0, "transmit", Y2_IS_CHK_TXA1);
2393

S
Stephen Hemminger 已提交
2394 2395
	if (status & Y2_IS_CHK_TXA2)
		sky2_descriptor_error(hw, 1, "transmit", Y2_IS_CHK_TXA2);
2396

S
Stephen Hemminger 已提交
2397
	work_done = sky2_status_intr(hw, work_limit);
2398 2399
	if (work_done < work_limit) {
		netif_rx_complete(dev0);
2400

2401 2402 2403 2404 2405
		sky2_read32(hw, B0_Y2_SP_LISR);
		return 0;
	} else {
		*budget -= work_done;
		dev0->quota -= work_done;
S
Stephen Hemminger 已提交
2406
		return 1;
2407
	}
2408 2409
}

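/* Hard interrupt handler: reading the ISR masks further interrupts, then NAPI takes over */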
static irqreturn_t sky2_intr(int irq, void *dev_id)
{
	struct sky2_hw *hw = dev_id;
	struct net_device *dev0 = hw->dev[0];
	u32 status;

	/* Reading this mask interrupts as side effect */
	status = sky2_read32(hw, B0_Y2_SP_ISRC2);
	if (status == 0 || status == ~0)
		return IRQ_NONE;
S
Stephen Hemminger 已提交
2420

2421 2422 2423
	prefetch(&hw->st_le[hw->st_idx]);
	if (likely(__netif_rx_schedule_prep(dev0)))
		__netif_rx_schedule(dev0);
S
Stephen Hemminger 已提交
2424

2425 2426 2427 2428 2429 2430 2431
	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void sky2_netpoll(struct net_device *dev)
{
	struct sky2_port *sky2 = netdev_priv(dev);
2432
	struct net_device *dev0 = sky2->hw->dev[0];
2433

2434 2435
	if (netif_running(dev) && __netif_rx_schedule_prep(dev0))
		__netif_rx_schedule(dev0);
2436 2437 2438 2439
}
#endif

/* Chip internal frequency for clock calculations */
static inline u32 sky2_mhz(const struct sky2_hw *hw)
{
	switch (hw->chip_id) {
	case CHIP_ID_YUKON_EC:
	case CHIP_ID_YUKON_EC_U:
		return 125;	/* 125 MHz */
	case CHIP_ID_YUKON_FE:
		return 100;	/* 100 MHz */
	default:		/* YUKON_XL */
		return 156;	/* 156 MHz */
	}
}

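/* Convert microseconds to chip clock ticks */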
static inline u32 sky2_us2clk(const struct sky2_hw *hw, u32 us)
{
	return sky2_mhz(hw) * us;
}

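/* Convert chip clock ticks to microseconds */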
static inline u32 sky2_clk2us(const struct sky2_hw *hw, u32 clk)
{
	return clk / sky2_mhz(hw);
}


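/* Identify the chip, reject unsupported variants and count the ports */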
static int __devinit sky2_init(struct sky2_hw *hw)
{
	u8 t8;
2467 2468

	sky2_write8(hw, B0_CTST, CS_RST_CLR);
2469

2470 2471
	hw->chip_id = sky2_read8(hw, B2_CHIP_ID);
	if (hw->chip_id < CHIP_ID_YUKON_XL || hw->chip_id > CHIP_ID_YUKON_FE) {
2472 2473
		dev_err(&hw->pdev->dev, "unsupported chip type 0x%x\n",
			hw->chip_id);
2474 2475 2476
		return -EOPNOTSUPP;
	}

2477 2478 2479 2480
	hw->chip_rev = (sky2_read8(hw, B2_MAC_CFG) & CFG_CHIP_R_MSK) >> 4;

	/* This rev is really old, and requires untested workarounds */
	if (hw->chip_id == CHIP_ID_YUKON_EC && hw->chip_rev == CHIP_REV_YU_EC_A1) {
2481 2482 2483
		dev_err(&hw->pdev->dev, "unsupported revision Yukon-%s (0x%x) rev %d\n",
			yukon2_name[hw->chip_id - CHIP_ID_YUKON_XL],
			hw->chip_id, hw->chip_rev);
2484 2485 2486
		return -EOPNOTSUPP;
	}

	hw->pmd_type = sky2_read8(hw, B2_PMD_TYP);
	hw->ports = 1;
	t8 = sky2_read8(hw, B2_Y2_HW_RES);
	if ((t8 & CFG_DUAL_MAC_MSK) == CFG_DUAL_MAC_MSK) {
		if (!(sky2_read8(hw, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
			++hw->ports;
	}

	return 0;
}

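/* Reset the chip and set up the shared status unit, RAM interface and timers */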
static void sky2_reset(struct sky2_hw *hw)
{
	u16 status;
	int i;

	/* disable ASF */
	if (hw->chip_id <= CHIP_ID_YUKON_EC) {
		sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
		sky2_write16(hw, B0_CTST, Y2_ASF_DISABLE);
	}

	/* do a SW reset */
	sky2_write8(hw, B0_CTST, CS_RST_SET);
	sky2_write8(hw, B0_CTST, CS_RST_CLR);

	/* clear PCI errors, if any */
2514
	status = sky2_pci_read16(hw, PCI_STATUS);
2515

2516
	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
2517 2518
	sky2_pci_write16(hw, PCI_STATUS, status | PCI_STATUS_ERROR_BITS);

2519 2520 2521 2522

	sky2_write8(hw, B0_CTST, CS_MRST_CLR);

	/* clear any PEX errors */
2523 2524 2525
	if (pci_find_capability(hw->pdev, PCI_CAP_ID_EXP))
		sky2_pci_write32(hw, PEX_UNC_ERR_STAT, 0xffffffffUL);

2526

2527
	sky2_power_on(hw);
2528 2529 2530 2531 2532 2533 2534 2535

	for (i = 0; i < hw->ports; i++) {
		sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
		sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR);
	}

	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);

S
Stephen Hemminger 已提交
2536 2537
	/* Clear I2C IRQ noise */
	sky2_write32(hw, B2_I2C_IRQ, 1);
2538 2539 2540 2541

	/* turn off hardware timer (unused) */
	sky2_write8(hw, B2_TI_CTRL, TIM_STOP);
	sky2_write8(hw, B2_TI_CTRL, TIM_CLR_IRQ);
S
Stephen Hemminger 已提交
2542

2543 2544
	sky2_write8(hw, B0_Y2LED, LED_STAT_ON);

2545 2546
	/* Turn off descriptor polling */
	sky2_write32(hw, B28_DPT_CTRL, DPT_STOP);
2547 2548 2549

	/* Turn off receive timestamp */
	sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_STOP);
S
Stephen Hemminger 已提交
2550
	sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
2551 2552 2553 2554 2555 2556 2557

	/* enable the Tx Arbiters */
	for (i = 0; i < hw->ports; i++)
		sky2_write8(hw, SK_REG(i, TXA_CTRL), TXA_ENA_ARB);

	/* Initialize ram interface */
	for (i = 0; i < hw->ports; i++) {
S
Stephen Hemminger 已提交
2558
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR);
2559 2560 2561 2562 2563 2564 2565 2566 2567 2568 2569 2570 2571 2572 2573

		sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_R1), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XA1), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XS1), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_R1), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XA1), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XS1), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_R2), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XA2), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XS2), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_R2), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XA2), SK_RI_TO_53);
		sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XS2), SK_RI_TO_53);
	}

2574
	sky2_write32(hw, B0_HWE_IMSK, Y2_HWE_ALL_MASK);
2575 2576

	for (i = 0; i < hw->ports; i++)
2577
		sky2_gmac_reset(hw, i);
2578 2579 2580 2581 2582 2583 2584 2585

	memset(hw->st_le, 0, STATUS_LE_BYTES);
	hw->st_idx = 0;

	sky2_write32(hw, STAT_CTRL, SC_STAT_RST_SET);
	sky2_write32(hw, STAT_CTRL, SC_STAT_RST_CLR);

	sky2_write32(hw, STAT_LIST_ADDR_LO, hw->st_dma);
S
Stephen Hemminger 已提交
2586
	sky2_write32(hw, STAT_LIST_ADDR_HI, (u64) hw->st_dma >> 32);
2587 2588

	/* Set the list last index */
S
Stephen Hemminger 已提交
2589
	sky2_write16(hw, STAT_LAST_IDX, STATUS_RING_SIZE - 1);
2590

2591 2592
	sky2_write16(hw, STAT_TX_IDX_TH, 10);
	sky2_write8(hw, STAT_FIFO_WM, 16);
2593

2594 2595 2596 2597 2598
	/* set Status-FIFO ISR watermark */
	if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0)
		sky2_write8(hw, STAT_FIFO_ISR_WM, 4);
	else
		sky2_write8(hw, STAT_FIFO_ISR_WM, 16);
2599

2600
	sky2_write32(hw, STAT_TX_TIMER_INI, sky2_us2clk(hw, 1000));
2601 2602
	sky2_write32(hw, STAT_ISR_TIMER_INI, sky2_us2clk(hw, 20));
	sky2_write32(hw, STAT_LEV_TIMER_INI, sky2_us2clk(hw, 100));
2603

S
Stephen Hemminger 已提交
2604
	/* enable status unit */
2605 2606 2607 2608 2609
	sky2_write32(hw, STAT_CTRL, SC_STAT_OP_ON);

	sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
	sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START);
	sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_START);
2610 2611 2612 2613 2614 2615 2616 2617 2618 2619 2620 2621 2622 2623 2624 2625 2626 2627 2628
}

static inline u8 sky2_wol_supported(const struct sky2_hw *hw)
{
	return sky2_is_copper(hw) ? (WAKE_PHY | WAKE_MAGIC) : 0;
}

static void sky2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	const struct sky2_port *sky2 = netdev_priv(dev);

	wol->supported = sky2_wol_supported(sky2->hw);
	wol->wolopts = sky2->wol;
}

static int sky2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
2629

2630 2631 2632 2633 2634 2635 2636 2637 2638 2639 2640
	if (wol->wolopts & ~sky2_wol_supported(sky2->hw))
		return -EOPNOTSUPP;

	sky2->wol = wol->wolopts;

	if (hw->chip_id == CHIP_ID_YUKON_EC_U)
		sky2_write32(hw, B0_CTST, sky2->wol
			     ? Y2_HW_WOL_ON : Y2_HW_WOL_OFF);

	if (!netif_running(dev))
		sky2_wol_init(sky2);
2641 2642 2643
	return 0;
}

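/* Build the ethtool supported-modes mask from the media type and chip variant */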
static u32 sky2_supported_modes(const struct sky2_hw *hw)
{
	if (sky2_is_copper(hw)) {
		u32 modes = SUPPORTED_10baseT_Half
			| SUPPORTED_10baseT_Full
			| SUPPORTED_100baseT_Half
			| SUPPORTED_100baseT_Full
			| SUPPORTED_Autoneg | SUPPORTED_TP;
2652 2653 2654

		if (hw->chip_id != CHIP_ID_YUKON_FE)
			modes |= SUPPORTED_1000baseT_Half
S
Stephen Hemminger 已提交
2655 2656
				| SUPPORTED_1000baseT_Full;
		return modes;
2657
	} else
S
Stephen Hemminger 已提交
2658 2659 2660 2661
		return  SUPPORTED_1000baseT_Half
			| SUPPORTED_1000baseT_Full
			| SUPPORTED_Autoneg
			| SUPPORTED_FIBRE;
2662 2663
}

S
Stephen Hemminger 已提交
2664
static int sky2_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
2665 2666 2667 2668 2669 2670 2671
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;

	ecmd->transceiver = XCVR_INTERNAL;
	ecmd->supported = sky2_supported_modes(hw);
	ecmd->phy_address = PHY_ADDR_MARV;
S
Stephen Hemminger 已提交
2672
	if (sky2_is_copper(hw)) {
2673
		ecmd->supported = SUPPORTED_10baseT_Half
S
Stephen Hemminger 已提交
2674 2675 2676 2677 2678 2679
		    | SUPPORTED_10baseT_Full
		    | SUPPORTED_100baseT_Half
		    | SUPPORTED_100baseT_Full
		    | SUPPORTED_1000baseT_Half
		    | SUPPORTED_1000baseT_Full
		    | SUPPORTED_Autoneg | SUPPORTED_TP;
2680
		ecmd->port = PORT_TP;
S
Stephen Hemminger 已提交
2681 2682 2683
		ecmd->speed = sky2->speed;
	} else {
		ecmd->speed = SPEED_1000;
2684
		ecmd->port = PORT_FIBRE;
S
Stephen Hemminger 已提交
2685
	}
2686 2687 2688 2689 2690 2691 2692 2693 2694 2695 2696 2697 2698 2699 2700 2701 2702 2703 2704 2705

	ecmd->advertising = sky2->advertising;
	ecmd->autoneg = sky2->autoneg;
	ecmd->duplex = sky2->duplex;
	return 0;
}

static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	const struct sky2_hw *hw = sky2->hw;
	u32 supported = sky2_supported_modes(hw);

	if (ecmd->autoneg == AUTONEG_ENABLE) {
		ecmd->advertising = supported;
		sky2->duplex = -1;
		sky2->speed = -1;
	} else {
		u32 setting;

S
Stephen Hemminger 已提交
2706
		switch (ecmd->speed) {
2707 2708 2709 2710 2711 2712 2713 2714 2715 2716 2717 2718 2719 2720 2721 2722 2723 2724 2725 2726 2727 2728 2729 2730 2731 2732 2733 2734 2735 2736 2737 2738 2739 2740 2741 2742 2743 2744 2745
		case SPEED_1000:
			if (ecmd->duplex == DUPLEX_FULL)
				setting = SUPPORTED_1000baseT_Full;
			else if (ecmd->duplex == DUPLEX_HALF)
				setting = SUPPORTED_1000baseT_Half;
			else
				return -EINVAL;
			break;
		case SPEED_100:
			if (ecmd->duplex == DUPLEX_FULL)
				setting = SUPPORTED_100baseT_Full;
			else if (ecmd->duplex == DUPLEX_HALF)
				setting = SUPPORTED_100baseT_Half;
			else
				return -EINVAL;
			break;

		case SPEED_10:
			if (ecmd->duplex == DUPLEX_FULL)
				setting = SUPPORTED_10baseT_Full;
			else if (ecmd->duplex == DUPLEX_HALF)
				setting = SUPPORTED_10baseT_Half;
			else
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}

		if ((setting & supported) == 0)
			return -EINVAL;

		sky2->speed = ecmd->speed;
		sky2->duplex = ecmd->duplex;
	}

	sky2->autoneg = ecmd->autoneg;
	sky2->advertising = ecmd->advertising;

2746 2747
	if (netif_running(dev))
		sky2_phy_reinit(sky2);
2748 2749 2750 2751 2752 2753 2754 2755 2756 2757 2758 2759 2760 2761 2762 2763

	return 0;
}

static void sky2_get_drvinfo(struct net_device *dev,
			     struct ethtool_drvinfo *info)
{
	struct sky2_port *sky2 = netdev_priv(dev);

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->fw_version, "N/A");
	strcpy(info->bus_info, pci_name(sky2->hw->pdev));
}

static const struct sky2_stat {
S
Stephen Hemminger 已提交
2764 2765
	char name[ETH_GSTRING_LEN];
	u16 offset;
2766 2767 2768 2769 2770 2771 2772 2773 2774 2775 2776
} sky2_stats[] = {
	{ "tx_bytes",	   GM_TXO_OK_HI },
	{ "rx_bytes",	   GM_RXO_OK_HI },
	{ "tx_broadcast",  GM_TXF_BC_OK },
	{ "rx_broadcast",  GM_RXF_BC_OK },
	{ "tx_multicast",  GM_TXF_MC_OK },
	{ "rx_multicast",  GM_RXF_MC_OK },
	{ "tx_unicast",    GM_TXF_UC_OK },
	{ "rx_unicast",    GM_RXF_UC_OK },
	{ "tx_mac_pause",  GM_TXF_MPAUSE },
	{ "rx_mac_pause",  GM_RXF_MPAUSE },
2777
	{ "collisions",    GM_TXF_COL },
2778 2779
	{ "late_collision",GM_TXF_LAT_COL },
	{ "aborted", 	   GM_TXF_ABO_COL },
2780
	{ "single_collisions", GM_TXF_SNG_COL },
2781
	{ "multi_collisions", GM_TXF_MUL_COL },
2782

2783
	{ "rx_short",      GM_RXF_SHT },
2784
	{ "rx_runt", 	   GM_RXE_FRAG },
2785 2786 2787 2788 2789 2790 2791
	{ "rx_64_byte_packets", GM_RXF_64B },
	{ "rx_65_to_127_byte_packets", GM_RXF_127B },
	{ "rx_128_to_255_byte_packets", GM_RXF_255B },
	{ "rx_256_to_511_byte_packets", GM_RXF_511B },
	{ "rx_512_to_1023_byte_packets", GM_RXF_1023B },
	{ "rx_1024_to_1518_byte_packets", GM_RXF_1518B },
	{ "rx_1518_to_max_byte_packets", GM_RXF_MAX_SZ },
2792
	{ "rx_too_long",   GM_RXF_LNG_ERR },
2793 2794
	{ "rx_fifo_overflow", GM_RXE_FIFO_OV },
	{ "rx_jabber",     GM_RXF_JAB_PKT },
2795
	{ "rx_fcs_error",   GM_RXF_FCS_ERR },
2796 2797 2798 2799 2800 2801 2802 2803 2804

	{ "tx_64_byte_packets", GM_TXF_64B },
	{ "tx_65_to_127_byte_packets", GM_TXF_127B },
	{ "tx_128_to_255_byte_packets", GM_TXF_255B },
	{ "tx_256_to_511_byte_packets", GM_TXF_511B },
	{ "tx_512_to_1023_byte_packets", GM_TXF_1023B },
	{ "tx_1024_to_1518_byte_packets", GM_TXF_1518B },
	{ "tx_1519_to_max_byte_packets", GM_TXF_MAX_SZ },
	{ "tx_fifo_underrun", GM_TXE_FIFO_UR },
2805 2806 2807 2808 2809 2810 2811 2812 2813 2814 2815 2816 2817 2818
};

static u32 sky2_get_rx_csum(struct net_device *dev)
{
	struct sky2_port *sky2 = netdev_priv(dev);

	return sky2->rx_csum;
}

static int sky2_set_rx_csum(struct net_device *dev, u32 data)
{
	struct sky2_port *sky2 = netdev_priv(dev);

	sky2->rx_csum = data;
S
Stephen Hemminger 已提交
2819

2820 2821 2822 2823 2824 2825 2826 2827 2828 2829 2830 2831
	sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
		     data ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);

	return 0;
}

static u32 sky2_get_msglevel(struct net_device *netdev)
{
	struct sky2_port *sky2 = netdev_priv(netdev);
	return sky2->msg_enable;
}

2832 2833 2834 2835
static int sky2_nway_reset(struct net_device *dev)
{
	struct sky2_port *sky2 = netdev_priv(dev);

2836
	if (!netif_running(dev) || sky2->autoneg != AUTONEG_ENABLE)
2837 2838
		return -EINVAL;

2839
	sky2_phy_reinit(sky2);
2840 2841 2842 2843

	return 0;
}

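/* Read the GMAC hardware MIB counters into the ethtool statistics array */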
static void sky2_phy_stats(struct sky2_port *sky2, u64 * data, unsigned count)
2845 2846 2847 2848 2849 2850
{
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	int i;

	data[0] = (u64) gma_read32(hw, port, GM_TXO_OK_HI) << 32
S
Stephen Hemminger 已提交
2851
	    | (u64) gma_read32(hw, port, GM_TXO_OK_LO);
2852
	data[1] = (u64) gma_read32(hw, port, GM_RXO_OK_HI) << 32
S
Stephen Hemminger 已提交
2853
	    | (u64) gma_read32(hw, port, GM_RXO_OK_LO);
2854

S
Stephen Hemminger 已提交
2855
	for (i = 2; i < count; i++)
2856 2857 2858 2859 2860 2861 2862 2863 2864 2865 2866 2867 2868 2869 2870
		data[i] = (u64) gma_read32(hw, port, sky2_stats[i].offset);
}

static void sky2_set_msglevel(struct net_device *netdev, u32 value)
{
	struct sky2_port *sky2 = netdev_priv(netdev);
	sky2->msg_enable = value;
}

static int sky2_get_stats_count(struct net_device *dev)
{
	return ARRAY_SIZE(sky2_stats);
}

static void sky2_get_ethtool_stats(struct net_device *dev,
S
Stephen Hemminger 已提交
2871
				   struct ethtool_stats *stats, u64 * data)
2872 2873 2874
{
	struct sky2_port *sky2 = netdev_priv(dev);

S
Stephen Hemminger 已提交
2875
	sky2_phy_stats(sky2, data, ARRAY_SIZE(sky2_stats));
2876 2877
}

S
Stephen Hemminger 已提交
2878
static void sky2_get_strings(struct net_device *dev, u32 stringset, u8 * data)
2879 2880 2881 2882 2883 2884 2885 2886 2887 2888 2889 2890 2891 2892 2893 2894 2895 2896 2897 2898 2899
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(sky2_stats); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       sky2_stats[i].name, ETH_GSTRING_LEN);
		break;
	}
}

static struct net_device_stats *sky2_get_stats(struct net_device *dev)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	return &sky2->net_stats;
}

static int sky2_set_mac_address(struct net_device *dev, void *p)
{
	struct sky2_port *sky2 = netdev_priv(dev);
2900 2901 2902
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	const struct sockaddr *addr = p;
2903 2904 2905 2906 2907

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
2908
	memcpy_toio(hw->regs + B2_MAC_1 + port * 8,
2909
		    dev->dev_addr, ETH_ALEN);
2910
	memcpy_toio(hw->regs + B2_MAC_2 + port * 8,
2911
		    dev->dev_addr, ETH_ALEN);
2912

2913 2914 2915 2916 2917
	/* virtual address for data */
	gma_set_addr(hw, port, GM_SRC_ADDR_2L, dev->dev_addr);

	/* physical address: used for pause frames */
	gma_set_addr(hw, port, GM_SRC_ADDR_1L, dev->dev_addr);
2918 2919

	return 0;
2920 2921
}

static inline void sky2_add_filter(u8 filter[8], const u8 *addr)
{
	u32 bit;

	bit = ether_crc(ETH_ALEN, addr) & 63;
	filter[bit >> 3] |= 1 << (bit & 7);
}

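/* Program the GMAC receive filter from the promiscuous/allmulti flags and the multicast list */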
static void sky2_set_multicast(struct net_device *dev)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
	struct dev_mc_list *list = dev->mc_list;
	u16 reg;
	u8 filter[8];
2938 2939
	int rx_pause;
	static const u8 pause_mc_addr[ETH_ALEN] = { 0x1, 0x80, 0xc2, 0x0, 0x0, 0x1 };
2940

2941
	rx_pause = (sky2->flow_status == FC_RX || sky2->flow_status == FC_BOTH);
2942 2943 2944 2945 2946
	memset(filter, 0, sizeof(filter));

	reg = gma_read16(hw, port, GM_RX_CTRL);
	reg |= GM_RXCR_UCF_ENA;

S
shemminger@osdl.org 已提交
2947
	if (dev->flags & IFF_PROMISC)	/* promiscuous */
2948
		reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
2949
	else if (dev->flags & IFF_ALLMULTI)
2950
		memset(filter, 0xff, sizeof(filter));
2951
	else if (dev->mc_count == 0 && !rx_pause)
2952 2953 2954 2955 2956
		reg &= ~GM_RXCR_MCF_ENA;
	else {
		int i;
		reg |= GM_RXCR_MCF_ENA;

2957 2958 2959 2960 2961
		if (rx_pause)
			sky2_add_filter(filter, pause_mc_addr);

		for (i = 0; list && i < dev->mc_count; i++, list = list->next)
			sky2_add_filter(filter, list->dmi_addr);
2962 2963 2964
	}

	gma_write16(hw, port, GM_MC_ADDR_H1,
S
Stephen Hemminger 已提交
2965
		    (u16) filter[0] | ((u16) filter[1] << 8));
2966
	gma_write16(hw, port, GM_MC_ADDR_H2,
S
Stephen Hemminger 已提交
2967
		    (u16) filter[2] | ((u16) filter[3] << 8));
2968
	gma_write16(hw, port, GM_MC_ADDR_H3,
S
Stephen Hemminger 已提交
2969
		    (u16) filter[4] | ((u16) filter[5] << 8));
2970
	gma_write16(hw, port, GM_MC_ADDR_H4,
S
Stephen Hemminger 已提交
2971
		    (u16) filter[6] | ((u16) filter[7] << 8));
2972 2973 2974 2975 2976 2977 2978

	gma_write16(hw, port, GM_RX_CTRL, reg);
}

/* Can have one global because blinking is controlled by
 * ethtool and that is always under RTNL mutex
 */
2979
static void sky2_led(struct sky2_hw *hw, unsigned port, int on)
2980
{
S
Stephen Hemminger 已提交
2981 2982 2983 2984 2985 2986 2987 2988 2989 2990 2991 2992 2993 2994 2995 2996 2997 2998
	u16 pg;

	switch (hw->chip_id) {
	case CHIP_ID_YUKON_XL:
		pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
		gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
			     on ? (PHY_M_LEDC_LOS_CTRL(1) |
				   PHY_M_LEDC_INIT_CTRL(7) |
				   PHY_M_LEDC_STA1_CTRL(7) |
				   PHY_M_LEDC_STA0_CTRL(7))
			     : 0);

		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
		break;

	default:
		gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0);
S
Stephen Hemminger 已提交
2999 3000
		gm_phy_write(hw, port, PHY_MARV_LED_OVER, 
			     on ? PHY_M_LED_ALL : 0);
S
Stephen Hemminger 已提交
3001
	}
}

/* blink LEDs to locate the board */
static int sky2_phys_id(struct net_device *dev, u32 data)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
	unsigned port = sky2->port;
S
Stephen Hemminger 已提交
3010
	u16 ledctrl, ledover = 0;
3011
	long ms;
3012
	int interrupted;
3013 3014
	int onoff = 1;

S
Stephen Hemminger 已提交
3015
	if (!data || data > (u32) (MAX_SCHEDULE_TIMEOUT / HZ))
3016 3017 3018 3019 3020
		ms = jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT);
	else
		ms = data * 1000;

	/* save initial values */
3021
	spin_lock_bh(&sky2->phy_lock);
S
Stephen Hemminger 已提交
3022 3023 3024 3025 3026 3027 3028 3029 3030
	if (hw->chip_id == CHIP_ID_YUKON_XL) {
		u16 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
		ledctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
	} else {
		ledctrl = gm_phy_read(hw, port, PHY_MARV_LED_CTRL);
		ledover = gm_phy_read(hw, port, PHY_MARV_LED_OVER);
	}
3031

3032 3033
	interrupted = 0;
	while (!interrupted && ms > 0) {
3034 3035 3036
		sky2_led(hw, port, onoff);
		onoff = !onoff;

3037
		spin_unlock_bh(&sky2->phy_lock);
3038
		interrupted = msleep_interruptible(250);
3039
		spin_lock_bh(&sky2->phy_lock);
3040

3041 3042 3043 3044
		ms -= 250;
	}

	/* resume regularly scheduled programming */
S
Stephen Hemminger 已提交
3045 3046 3047 3048 3049 3050 3051 3052 3053
	if (hw->chip_id == CHIP_ID_YUKON_XL) {
		u16 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
		gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ledctrl);
		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
	} else {
		gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);
		gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover);
	}
3054
	spin_unlock_bh(&sky2->phy_lock);
3055 3056 3057 3058 3059 3060 3061 3062 3063

	return 0;
}

static void sky2_get_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *ecmd)
{
	struct sky2_port *sky2 = netdev_priv(dev);

3064 3065 3066 3067 3068 3069 3070 3071 3072 3073 3074 3075 3076 3077
	switch (sky2->flow_mode) {
	case FC_NONE:
		ecmd->tx_pause = ecmd->rx_pause = 0;
		break;
	case FC_TX:
		ecmd->tx_pause = 1, ecmd->rx_pause = 0;
		break;
	case FC_RX:
		ecmd->tx_pause = 0, ecmd->rx_pause = 1;
		break;
	case FC_BOTH:
		ecmd->tx_pause = ecmd->rx_pause = 1;
	}

3078 3079 3080 3081 3082 3083 3084 3085 3086
	ecmd->autoneg = sky2->autoneg;
}

static int sky2_set_pauseparam(struct net_device *dev,
			       struct ethtool_pauseparam *ecmd)
{
	struct sky2_port *sky2 = netdev_priv(dev);

	sky2->autoneg = ecmd->autoneg;
3087
	sky2->flow_mode = sky2_flow(ecmd->rx_pause, ecmd->tx_pause);
3088

3089 3090
	if (netif_running(dev))
		sky2_phy_reinit(sky2);
3091

3092
	return 0;
3093 3094
}

static int sky2_get_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *ecmd)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;

	if (sky2_read8(hw, STAT_TX_TIMER_CTRL) == TIM_STOP)
		ecmd->tx_coalesce_usecs = 0;
	else {
		u32 clks = sky2_read32(hw, STAT_TX_TIMER_INI);
		ecmd->tx_coalesce_usecs = sky2_clk2us(hw, clks);
	}
	ecmd->tx_max_coalesced_frames = sky2_read16(hw, STAT_TX_IDX_TH);

	if (sky2_read8(hw, STAT_LEV_TIMER_CTRL) == TIM_STOP)
		ecmd->rx_coalesce_usecs = 0;
	else {
		u32 clks = sky2_read32(hw, STAT_LEV_TIMER_INI);
		ecmd->rx_coalesce_usecs = sky2_clk2us(hw, clks);
	}
	ecmd->rx_max_coalesced_frames = sky2_read8(hw, STAT_FIFO_WM);

	if (sky2_read8(hw, STAT_ISR_TIMER_CTRL) == TIM_STOP)
		ecmd->rx_coalesce_usecs_irq = 0;
	else {
		u32 clks = sky2_read32(hw, STAT_ISR_TIMER_INI);
		ecmd->rx_coalesce_usecs_irq = sky2_clk2us(hw, clks);
	}

	ecmd->rx_max_coalesced_frames_irq = sky2_read8(hw, STAT_FIFO_ISR_WM);

	return 0;
}

/* Note: this affects both ports */
static int sky2_set_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *ecmd)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	struct sky2_hw *hw = sky2->hw;
3135
	const u32 tmax = sky2_clk2us(hw, 0x0ffffff);
3136

3137 3138 3139
	if (ecmd->tx_coalesce_usecs > tmax ||
	    ecmd->rx_coalesce_usecs > tmax ||
	    ecmd->rx_coalesce_usecs_irq > tmax)
3140 3141
		return -EINVAL;

3142
	if (ecmd->tx_max_coalesced_frames >= TX_RING_SIZE-1)
3143
		return -EINVAL;
3144
	if (ecmd->rx_max_coalesced_frames > RX_MAX_PENDING)
3145
		return -EINVAL;
	if (ecmd->rx_max_coalesced_frames_irq > RX_MAX_PENDING)
		return -EINVAL;

	if (ecmd->tx_coalesce_usecs == 0)
		sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP);
	else {
		sky2_write32(hw, STAT_TX_TIMER_INI,
			     sky2_us2clk(hw, ecmd->tx_coalesce_usecs));
		sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
	}
	sky2_write16(hw, STAT_TX_IDX_TH, ecmd->tx_max_coalesced_frames);

	if (ecmd->rx_coalesce_usecs == 0)
		sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_STOP);
	else {
		sky2_write32(hw, STAT_LEV_TIMER_INI,
			     sky2_us2clk(hw, ecmd->rx_coalesce_usecs));
		sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START);
	}
	sky2_write8(hw, STAT_FIFO_WM, ecmd->rx_max_coalesced_frames);

	if (ecmd->rx_coalesce_usecs_irq == 0)
		sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_STOP);
	else {
3170
		sky2_write32(hw, STAT_ISR_TIMER_INI,
3171 3172 3173 3174 3175 3176 3177
			     sky2_us2clk(hw, ecmd->rx_coalesce_usecs_irq));
		sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_START);
	}
	sky2_write8(hw, STAT_FIFO_ISR_WM, ecmd->rx_max_coalesced_frames_irq);
	return 0;
}

S
Stephen Hemminger 已提交
3178 3179 3180 3181 3182 3183 3184 3185 3186 3187 3188 3189 3190 3191 3192 3193 3194 3195 3196 3197 3198 3199 3200 3201 3202 3203 3204 3205 3206 3207 3208 3209 3210 3211
static void sky2_get_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering)
{
	struct sky2_port *sky2 = netdev_priv(dev);

	ering->rx_max_pending = RX_MAX_PENDING;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;
	ering->tx_max_pending = TX_RING_SIZE - 1;

	ering->rx_pending = sky2->rx_pending;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;
	ering->tx_pending = sky2->tx_pending;
}

static int sky2_set_ringparam(struct net_device *dev,
			      struct ethtool_ringparam *ering)
{
	struct sky2_port *sky2 = netdev_priv(dev);
	int err = 0;

	if (ering->rx_pending > RX_MAX_PENDING ||
	    ering->rx_pending < 8 ||
	    ering->tx_pending < MAX_SKB_TX_LE ||
	    ering->tx_pending > TX_RING_SIZE - 1)
		return -EINVAL;

	if (netif_running(dev))
		sky2_down(dev);

	sky2->rx_pending = ering->rx_pending;
	sky2->tx_pending = ering->tx_pending;

3212
	if (netif_running(dev)) {
S
Stephen Hemminger 已提交
3213
		err = sky2_up(dev);
3214 3215
		if (err)
			dev_close(dev);
3216 3217
		else
			sky2_set_multicast(dev);
3218
	}
S
Stephen Hemminger 已提交
3219 3220 3221 3222 3223 3224

	return err;
}

static int sky2_get_regs_len(struct net_device *dev)
{
3225
	return 0x4000;
S
Stephen Hemminger 已提交
3226 3227 3228 3229
}

/*
 * Returns copy of control register region
3230
 * Note: access to the RAM address register set will cause timeouts.
S
Stephen Hemminger 已提交
3231 3232 3233 3234 3235 3236 3237
 */
static void sky2_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			  void *p)
{
	const struct sky2_port *sky2 = netdev_priv(dev);
	const void __iomem *io = sky2->hw->regs;

3238
	BUG_ON(regs->len < B3_RI_WTO_R1);
S
Stephen Hemminger 已提交
3239
	regs->version = 1;
3240
	memset(p, 0, regs->len);
S
Stephen Hemminger 已提交
3241

3242 3243 3244 3245 3246
	memcpy_fromio(p, io, B3_RAM_ADDR);

	memcpy_fromio(p + B3_RI_WTO_R1,
		      io + B3_RI_WTO_R1,
		      regs->len - B3_RI_WTO_R1);
S
Stephen Hemminger 已提交
3247
}
3248

3249
static const struct ethtool_ops sky2_ethtool_ops = {
S
Stephen Hemminger 已提交
3250 3251
	.get_settings = sky2_get_settings,
	.set_settings = sky2_set_settings,
3252 3253 3254
	.get_drvinfo  = sky2_get_drvinfo,
	.get_wol      = sky2_get_wol,
	.set_wol      = sky2_set_wol,
S
Stephen Hemminger 已提交
3255 3256
	.get_msglevel = sky2_get_msglevel,
	.set_msglevel = sky2_set_msglevel,
3257
	.nway_reset   = sky2_nway_reset,
S
Stephen Hemminger 已提交
3258 3259 3260 3261 3262 3263 3264 3265 3266 3267 3268 3269
	.get_regs_len = sky2_get_regs_len,
	.get_regs = sky2_get_regs,
	.get_link = ethtool_op_get_link,
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = ethtool_op_set_tx_csum,
	.get_tso = ethtool_op_get_tso,
	.set_tso = ethtool_op_set_tso,
	.get_rx_csum = sky2_get_rx_csum,
	.set_rx_csum = sky2_set_rx_csum,
	.get_strings = sky2_get_strings,
3270 3271
	.get_coalesce = sky2_get_coalesce,
	.set_coalesce = sky2_set_coalesce,
S
Stephen Hemminger 已提交
3272 3273
	.get_ringparam = sky2_get_ringparam,
	.set_ringparam = sky2_set_ringparam,
3274 3275
	.get_pauseparam = sky2_get_pauseparam,
	.set_pauseparam = sky2_set_pauseparam,
S
Stephen Hemminger 已提交
3276
	.phys_id = sky2_phys_id,
3277 3278
	.get_stats_count = sky2_get_stats_count,
	.get_ethtool_stats = sky2_get_ethtool_stats,
3279
	.get_perm_addr	= ethtool_op_get_perm_addr,
3280 3281 3282 3283
};

/* Initialize network device */
static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
3284 3285
						     unsigned port,
						     int highmem, int wol)
3286 3287 3288 3289 3290
{
	struct sky2_port *sky2;
	struct net_device *dev = alloc_etherdev(sizeof(*sky2));

	if (!dev) {
3291
		dev_err(&hw->pdev->dev, "etherdev alloc failed");
3292 3293 3294 3295 3296
		return NULL;
	}

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &hw->pdev->dev);
3297
	dev->irq = hw->pdev->irq;
3298 3299
	dev->open = sky2_up;
	dev->stop = sky2_down;
3300
	dev->do_ioctl = sky2_ioctl;
3301 3302 3303 3304 3305 3306 3307 3308 3309 3310 3311 3312
	dev->hard_start_xmit = sky2_xmit_frame;
	dev->get_stats = sky2_get_stats;
	dev->set_multicast_list = sky2_set_multicast;
	dev->set_mac_address = sky2_set_mac_address;
	dev->change_mtu = sky2_change_mtu;
	SET_ETHTOOL_OPS(dev, &sky2_ethtool_ops);
	dev->tx_timeout = sky2_tx_timeout;
	dev->watchdog_timeo = TX_WATCHDOG;
	if (port == 0)
		dev->poll = sky2_poll;
	dev->weight = NAPI_WEIGHT;
#ifdef CONFIG_NET_POLL_CONTROLLER
3313 3314 3315 3316 3317
	/* Network console (only works on port 0)
	 * because netpoll makes assumptions about NAPI
	 */
	if (port == 0)
		dev->poll_controller = sky2_netpoll;
3318 3319 3320 3321 3322 3323 3324 3325 3326
#endif

	sky2 = netdev_priv(dev);
	sky2->netdev = dev;
	sky2->hw = hw;
	sky2->msg_enable = netif_msg_init(debug, default_msg);

	/* Auto speed and flow control */
	sky2->autoneg = AUTONEG_ENABLE;
3327 3328
	sky2->flow_mode = FC_BOTH;

3329 3330 3331
	sky2->duplex = -1;
	sky2->speed = -1;
	sky2->advertising = sky2_supported_modes(hw);
3332
	sky2->rx_csum = 1;
3333
	sky2->wol = wol;
3334

3335
	spin_lock_init(&sky2->phy_lock);
S
Stephen Hemminger 已提交
3336
	sky2->tx_pending = TX_DEF_PENDING;
3337
	sky2->rx_pending = RX_DEF_PENDING;
3338 3339 3340 3341 3342

	hw->dev[port] = dev;

	sky2->port = port;

S
Stephen Hemminger 已提交
3343
	dev->features |= NETIF_F_TSO | NETIF_F_IP_CSUM | NETIF_F_SG;
3344 3345 3346
	if (highmem)
		dev->features |= NETIF_F_HIGHDMA;

3347 3348 3349 3350 3351 3352
#ifdef SKY2_VLAN_TAG_USED
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_rx_register = sky2_vlan_rx_register;
	dev->vlan_rx_kill_vid = sky2_vlan_rx_kill_vid;
#endif

3353
	/* read the mac address */
S
Stephen Hemminger 已提交
3354
	memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8, ETH_ALEN);
3355
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
3356 3357 3358 3359 3360 3361 3362 3363

	/* device is off until link detection */
	netif_carrier_off(dev);
	netif_stop_queue(dev);

	return dev;
}

3364
static void __devinit sky2_show_addr(struct net_device *dev)
3365 3366 3367 3368 3369 3370 3371 3372 3373 3374
{
	const struct sky2_port *sky2 = netdev_priv(dev);

	if (netif_msg_probe(sky2))
		printk(KERN_INFO PFX "%s: addr %02x:%02x:%02x:%02x:%02x:%02x\n",
		       dev->name,
		       dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
		       dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
}

3375
/* Handle software interrupt used during MSI test */
3376
static irqreturn_t __devinit sky2_test_intr(int irq, void *dev_id)
3377 3378 3379 3380 3381 3382 3383 3384
{
	struct sky2_hw *hw = dev_id;
	u32 status = sky2_read32(hw, B0_Y2_SP_ISRC2);

	if (status == 0)
		return IRQ_NONE;

	if (status & Y2_IS_IRQ_SW) {
3385
		hw->msi = 1;
3386 3387 3388 3389 3390 3391 3392 3393 3394 3395 3396 3397 3398 3399
		wake_up(&hw->msi_wait);
		sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ);
	}
	sky2_write32(hw, B0_Y2_SP_ICR, 2);

	return IRQ_HANDLED;
}

/* Test interrupt path by forcing a software IRQ */
static int __devinit sky2_test_msi(struct sky2_hw *hw)
{
	struct pci_dev *pdev = hw->pdev;
	int err;

3400 3401
	init_waitqueue_head (&hw->msi_wait);

3402 3403
	sky2_write32(hw, B0_IMSK, Y2_IS_IRQ_SW);

3404
	err = request_irq(pdev->irq, sky2_test_intr, 0, DRV_NAME, hw);
3405
	if (err) {
3406
		dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq);
3407 3408 3409 3410
		return err;
	}

	sky2_write8(hw, B0_CTST, CS_ST_SW_IRQ);
3411
	sky2_read8(hw, B0_CTST);
3412

3413
	wait_event_timeout(hw->msi_wait, hw->msi, HZ/10);
3414

3415
	if (!hw->msi) {
3416
		/* MSI test failed, go back to INTx mode */
3417 3418
		dev_info(&pdev->dev, "No interrupt generated using MSI, "
			 "switching to INTx mode.\n");
3419 3420 3421 3422 3423 3424

		err = -EOPNOTSUPP;
		sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ);
	}

	sky2_write32(hw, B0_IMSK, 0);
3425
	sky2_read32(hw, B0_IMSK);
3426 3427 3428 3429 3430 3431

	free_irq(pdev->irq, hw);

	return err;
}

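/* Check whether PME (wake) is already enabled in the PCI power-management
 * capability; the result selects the default Wake-on-LAN setting at probe.
 */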
static int __devinit pci_wake_enabled(struct pci_dev *dev)
{
	int pm = pci_find_capability(dev, PCI_CAP_ID_PM);
	u16 value;

	if (!pm)
		return 0;
	if (pci_read_config_word(dev, pm + PCI_PM_CTRL, &value))
		return 0;
	return value & PCI_PM_CTRL_PME_ENABLE;
}

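/* Probe one adapter: set up PCI and DMA, map the chip registers, allocate
 * the status ring, reset the hardware, create a netdev for each port and
 * attach the interrupt handler (MSI if the self-test passes, else INTx).
 */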
static int __devinit sky2_probe(struct pci_dev *pdev,
				const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct sky2_hw *hw;
	int err, using_dac = 0, wol_default;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto err_out;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "cannot obtain PCI resources\n");
		goto err_out;
	}

	pci_set_master(pdev);

	if (sizeof(dma_addr_t) > sizeof(u32) &&
	    !(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
		using_dac = 1;
		err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (err < 0) {
			dev_err(&pdev->dev, "unable to obtain 64 bit DMA "
				"for consistent allocations\n");
			goto err_out_free_regions;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			dev_err(&pdev->dev, "no usable DMA configuration\n");
			goto err_out_free_regions;
		}
	}

	wol_default = pci_wake_enabled(pdev) ? WAKE_MAGIC : 0;

	err = -ENOMEM;
	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
	if (!hw) {
		dev_err(&pdev->dev, "cannot allocate hardware struct\n");
		goto err_out_free_regions;
	}

	hw->pdev = pdev;

	hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
	if (!hw->regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		goto err_out_free_hw;
	}

#ifdef __BIG_ENDIAN
	/* The sk98lin vendor driver uses hardware byte swapping but
	 * this driver uses software swapping.
	 */
	{
		u32 reg;
		reg = sky2_pci_read32(hw, PCI_DEV_REG2);
		reg &= ~PCI_REV_DESC;
		sky2_pci_write32(hw, PCI_DEV_REG2, reg);
	}
#endif

	/* ring for status responses */
	hw->st_le = pci_alloc_consistent(hw->pdev, STATUS_LE_BYTES,
					 &hw->st_dma);
	if (!hw->st_le)
		goto err_out_iounmap;

	err = sky2_init(hw);
	if (err)
		goto err_out_iounmap;

	dev_info(&pdev->dev, "v%s addr 0x%llx irq %d Yukon-%s (0x%x) rev %d\n",
	       DRV_VERSION, (unsigned long long)pci_resource_start(pdev, 0),
	       pdev->irq, yukon2_name[hw->chip_id - CHIP_ID_YUKON_XL],
	       hw->chip_id, hw->chip_rev);

	sky2_reset(hw);

	dev = sky2_init_netdev(hw, 0, using_dac, wol_default);
	if (!dev) {
		err = -ENOMEM;
		goto err_out_free_pci;
	}

	if (!disable_msi && pci_enable_msi(pdev) == 0) {
		err = sky2_test_msi(hw);
		if (err == -EOPNOTSUPP)
			pci_disable_msi(pdev);
		else if (err)
			goto err_out_free_netdev;
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "cannot register net device\n");
		goto err_out_free_netdev;
	}

	err = request_irq(pdev->irq, sky2_intr, hw->msi ? 0 : IRQF_SHARED,
			  dev->name, hw);
	if (err) {
		dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq);
		goto err_out_unregister;
	}
	sky2_write32(hw, B0_IMSK, Y2_IS_BASE);

	sky2_show_addr(dev);

	if (hw->ports > 1) {
		struct net_device *dev1;

		dev1 = sky2_init_netdev(hw, 1, using_dac, wol_default);
		if (!dev1)
			dev_warn(&pdev->dev, "allocation for second device failed\n");
		else if ((err = register_netdev(dev1))) {
			dev_warn(&pdev->dev,
				 "register of second port failed (%d)\n", err);
			hw->dev[1] = NULL;
			free_netdev(dev1);
		} else
			sky2_show_addr(dev1);
	}

	setup_timer(&hw->idle_timer, sky2_idle, (unsigned long) hw);
	sky2_idle_start(hw);

	pci_set_drvdata(pdev, hw);

	return 0;

err_out_unregister:
	if (hw->msi)
		pci_disable_msi(pdev);
	unregister_netdev(dev);
err_out_free_netdev:
	free_netdev(dev);
err_out_free_pci:
	sky2_write8(hw, B0_CTST, CS_RST_SET);
	pci_free_consistent(hw->pdev, STATUS_LE_BYTES, hw->st_le, hw->st_dma);
err_out_iounmap:
	iounmap(hw->regs);
err_out_free_hw:
	kfree(hw);
err_out_free_regions:
	pci_release_regions(pdev);
	pci_disable_device(pdev);
err_out:
	return err;
}

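/* Undo sky2_probe: stop the idle timer, mask interrupts, unregister both
 * ports, put the chip back into reset and release all acquired resources.
 */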
static void __devexit sky2_remove(struct pci_dev *pdev)
{
	struct sky2_hw *hw = pci_get_drvdata(pdev);
	struct net_device *dev0, *dev1;

	if (!hw)
		return;

	del_timer_sync(&hw->idle_timer);

	sky2_write32(hw, B0_IMSK, 0);
	synchronize_irq(hw->pdev->irq);

	dev0 = hw->dev[0];
	dev1 = hw->dev[1];
	if (dev1)
		unregister_netdev(dev1);
	unregister_netdev(dev0);

	sky2_power_aux(hw);

	sky2_write16(hw, B0_Y2LED, LED_STAT_OFF);
	sky2_write8(hw, B0_CTST, CS_RST_SET);
	sky2_read8(hw, B0_CTST);

	free_irq(pdev->irq, hw);
	if (hw->msi)
		pci_disable_msi(pdev);
	pci_free_consistent(pdev, STATUS_LE_BYTES, hw->st_le, hw->st_dma);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	if (dev1)
		free_netdev(dev1);
	free_netdev(dev0);
	iounmap(hw->regs);
	kfree(hw);

	pci_set_drvdata(pdev, NULL);
}

#ifdef CONFIG_PM
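/* Suspend: take both ports down, arm Wake-on-LAN where configured and put
 * the device into the requested low-power state.
 */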
static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct sky2_hw *hw = pci_get_drvdata(pdev);
	int i, wol = 0;

	del_timer_sync(&hw->idle_timer);
	netif_poll_disable(hw->dev[0]);

	for (i = 0; i < hw->ports; i++) {
		struct net_device *dev = hw->dev[i];
		struct sky2_port *sky2 = netdev_priv(dev);

		if (netif_running(dev))
			sky2_down(dev);

		if (sky2->wol)
			sky2_wol_init(sky2);

		wol |= sky2->wol;
	}

	sky2_write32(hw, B0_IMSK, 0);
	sky2_power_aux(hw);

	pci_save_state(pdev);
	pci_enable_wake(pdev, pci_choose_state(pdev, state), wol);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

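/* Resume: restore PCI state, reset the chip and bring any ports that were
 * running before suspend back up.
 */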
static int sky2_resume(struct pci_dev *pdev)
{
	struct sky2_hw *hw = pci_get_drvdata(pdev);
	int i, err;

	err = pci_set_power_state(pdev, PCI_D0);
	if (err)
		goto out;

	err = pci_restore_state(pdev);
	if (err)
		goto out;

	pci_enable_wake(pdev, PCI_D0, 0);
	sky2_reset(hw);

	sky2_write32(hw, B0_IMSK, Y2_IS_BASE);

	for (i = 0; i < hw->ports; i++) {
		struct net_device *dev = hw->dev[i];
		if (netif_running(dev)) {
			err = sky2_up(dev);
			if (err) {
				printk(KERN_ERR PFX "%s: could not up: %d\n",
				       dev->name, err);
				dev_close(dev);
				goto out;
			}
		}
	}

	netif_poll_enable(hw->dev[0]);
	sky2_idle_start(hw);
	return 0;
out:
	dev_err(&pdev->dev, "resume failed (%d)\n", err);
	pci_disable_device(pdev);
	return err;
}
#endif

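/* System shutdown/reboot: arm Wake-on-LAN on ports that requested it, then
 * power the chip down to D3hot.
 */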
static void sky2_shutdown(struct pci_dev *pdev)
{
	struct sky2_hw *hw = pci_get_drvdata(pdev);
	int i, wol = 0;

	del_timer_sync(&hw->idle_timer);
	netif_poll_disable(hw->dev[0]);

	for (i = 0; i < hw->ports; i++) {
		struct net_device *dev = hw->dev[i];
		struct sky2_port *sky2 = netdev_priv(dev);

		if (sky2->wol) {
			wol = 1;
			sky2_wol_init(sky2);
		}
	}

	if (wol)
		sky2_power_aux(hw);

	pci_enable_wake(pdev, PCI_D3hot, wol);
	pci_enable_wake(pdev, PCI_D3cold, wol);

	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
}

static struct pci_driver sky2_driver = {
	.name = DRV_NAME,
	.id_table = sky2_id_table,
	.probe = sky2_probe,
	.remove = __devexit_p(sky2_remove),
#ifdef CONFIG_PM
	.suspend = sky2_suspend,
	.resume = sky2_resume,
#endif
	.shutdown = sky2_shutdown,
};

static int __init sky2_init_module(void)
{
	return pci_register_driver(&sky2_driver);
}

static void __exit sky2_cleanup_module(void)
{
	pci_unregister_driver(&sky2_driver);
}

module_init(sky2_init_module);
module_exit(sky2_cleanup_module);

MODULE_DESCRIPTION("Marvell Yukon 2 Gigabit Ethernet driver");
MODULE_AUTHOR("Stephen Hemminger <shemminger@linux-foundation.org>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);