ich8lan.c 140.8 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
/* Intel PRO/1000 Linux driver
 * Copyright(c) 1999 - 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * Linux NICS <linux.nics@intel.com>
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 */
/* 82562G 10/100 Network Connection
 * 82562G-2 10/100 Network Connection
 * 82562GT 10/100 Network Connection
 * 82562GT-2 10/100 Network Connection
 * 82562V 10/100 Network Connection
 * 82562V-2 10/100 Network Connection
 * 82566DC-2 Gigabit Network Connection
 * 82566DC Gigabit Network Connection
 * 82566DM-2 Gigabit Network Connection
 * 82566DM Gigabit Network Connection
 * 82566MC Gigabit Network Connection
 * 82566MM Gigabit Network Connection
 * 82567LM Gigabit Network Connection
 * 82567LF Gigabit Network Connection
 * 82567V Gigabit Network Connection
 * 82567LM-2 Gigabit Network Connection
 * 82567LF-2 Gigabit Network Connection
 * 82567V-2 Gigabit Network Connection
 * 82567LF-3 Gigabit Network Connection
 * 82567LM-3 Gigabit Network Connection
 * 82567LM-4 Gigabit Network Connection
 * 82577LM Gigabit Network Connection
 * 82577LC Gigabit Network Connection
 * 82578DM Gigabit Network Connection
 * 82578DC Gigabit Network Connection
 * 82579LM Gigabit Network Connection
 * 82579V Gigabit Network Connection
 * Ethernet Connection I217-LM
 * Ethernet Connection I217-V
 * Ethernet Connection I218-V
 * Ethernet Connection I218-LM
 * Ethernet Connection (2) I218-LM
 * Ethernet Connection (2) I218-V
 * Ethernet Connection (3) I218-LM
 * Ethernet Connection (3) I218-V
 */

#include "e1000.h"

/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
/* Offset 04h HSFSTS */
union ich8_hws_flash_status {
	struct ich8_hsfsts {
65 66 67 68 69 70 71 72 73
		u16 flcdone:1;	/* bit 0 Flash Cycle Done */
		u16 flcerr:1;	/* bit 1 Flash Cycle Error */
		u16 dael:1;	/* bit 2 Direct Access error Log */
		u16 berasesz:2;	/* bit 4:3 Sector Erase Size */
		u16 flcinprog:1;	/* bit 5 flash cycle in Progress */
		u16 reserved1:2;	/* bit 13:6 Reserved */
		u16 reserved2:6;	/* bit 13:6 Reserved */
		u16 fldesvalid:1;	/* bit 14 Flash Descriptor Valid */
		u16 flockdn:1;	/* bit 15 Flash Config Lock-Down */
74 75 76 77 78 79 80 81
	} hsf_status;
	u16 regval;
};

/* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
/* Offset 06h FLCTL */
union ich8_hws_flash_ctrl {
	struct ich8_hsflctl {
		u16 flcgo:1;	/* 0 Flash Cycle Go */
		u16 flcycle:2;	/* 2:1 Flash Cycle */
		u16 reserved:5;	/* 7:3 Reserved  */
		u16 fldbcount:2;	/* 9:8 Flash Data Byte Count */
		/* NOTE(review): field is named flockdn but documented as
		 * reserved bits 15:10 — confirm against the chipset spec.
		 */
		u16 flockdn:6;	/* 15:10 Reserved */
	} hsf_ctrl;
	u16 regval;	/* raw 16-bit image of the FLCTL register */
};

/* ICH Flash Region Access Permissions */
union ich8_hws_flash_regacc {
	struct ich8_flracc {
94 95 96 97
		u32 grra:8;	/* 0:7 GbE region Read Access */
		u32 grwa:8;	/* 8:15 GbE region Write Access */
		u32 gmrag:8;	/* 23:16 GbE Master Read Access Grant */
		u32 gmwag:8;	/* 31:24 GbE Master Write Access Grant */
98 99 100 101
	} hsf_flregacc;
	u16 regval;
};

102 103 104
/* ICH Flash Protected Region */
union ich8_flash_protected_range {
	struct ich8_pr {
B
Bruce Allan 已提交
105 106 107 108 109 110
		u32 base:13;	/* 0:12 Protected Range Base */
		u32 reserved1:2;	/* 13:14 Reserved */
		u32 rpe:1;	/* 15 Read Protection Enable */
		u32 limit:13;	/* 16:28 Protected Range Limit */
		u32 reserved2:2;	/* 29:30 Reserved */
		u32 wpe:1;	/* 31 Write Protection Enable */
111 112 113 114
	} range;
	u32 regval;
};

/* Forward declarations of the family-specific helpers defined later in this
 * file and installed into the hw function-pointer tables below.
 */
static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
						u32 offset, u8 byte);
static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
					 u8 *data);
static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
					 u16 *data);
static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
					 u8 size, u16 *data);
static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
static s32 e1000_led_on_ich8lan(struct e1000_hw *hw);
static s32 e1000_led_off_ich8lan(struct e1000_hw *hw);
static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
static s32 e1000_setup_led_pchlan(struct e1000_hw *hw);
static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw);
static s32 e1000_led_on_pchlan(struct e1000_hw *hw);
static s32 e1000_led_off_pchlan(struct e1000_hw *hw);
static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw);
static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
static u32 e1000_rar_get_count_pch_lpt(struct e1000_hw *hw);
static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force);
static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw);
static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state);

/* Raw MMIO accessors for the GbE flash register window mapped at
 * hw->flash_address.  The er*flash/ew*flash macros below mirror the er32/ew32
 * MAC register accessor style used throughout the driver.
 */
static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
{
	return readw(hw->flash_address + reg);
}

static inline u32 __er32flash(struct e1000_hw *hw, unsigned long reg)
{
	return readl(hw->flash_address + reg);
}

static inline void __ew16flash(struct e1000_hw *hw, unsigned long reg, u16 val)
{
	writew(val, hw->flash_address + reg);
}

static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val)
{
	writel(val, hw->flash_address + reg);
}

#define er16flash(reg)		__er16flash(hw, (reg))
#define er32flash(reg)		__er32flash(hw, (reg))
#define ew16flash(reg, val)	__ew16flash(hw, (reg), (val))
#define ew32flash(reg, val)	__ew32flash(hw, (reg), (val))

176 177 178 179 180 181 182 183 184 185 186
/**
 *  e1000_phy_is_accessible_pchlan - Check if able to access PHY registers
 *  @hw: pointer to the HW structure
 *
 *  Test access to the PHY registers by reading the PHY ID registers.  If
 *  the PHY ID is already known (e.g. resume path) compare it with known ID,
 *  otherwise assume the read PHY ID is correct if it is valid.
 *
 *  Assumes the sw/fw/hw semaphore is already acquired.
 **/
static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
187
{
188 189
	u16 phy_reg = 0;
	u32 phy_id = 0;
190
	s32 ret_val = 0;
191
	u16 retry_count;
192
	u32 mac_reg = 0;
193 194

	for (retry_count = 0; retry_count < 2; retry_count++) {
195
		ret_val = e1e_rphy_locked(hw, MII_PHYSID1, &phy_reg);
196 197 198 199
		if (ret_val || (phy_reg == 0xFFFF))
			continue;
		phy_id = (u32)(phy_reg << 16);

200
		ret_val = e1e_rphy_locked(hw, MII_PHYSID2, &phy_reg);
201 202 203 204 205 206 207
		if (ret_val || (phy_reg == 0xFFFF)) {
			phy_id = 0;
			continue;
		}
		phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
		break;
	}
208 209 210

	if (hw->phy.id) {
		if (hw->phy.id == phy_id)
211
			goto out;
212 213 214
	} else if (phy_id) {
		hw->phy.id = phy_id;
		hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
215
		goto out;
216 217
	}

B
Bruce Allan 已提交
218
	/* In case the PHY needs to be in mdio slow mode,
219 220
	 * set slow mode and try to get the PHY id again.
	 */
221 222 223 224 225 226 227
	if (hw->mac.type < e1000_pch_lpt) {
		hw->phy.ops.release(hw);
		ret_val = e1000_set_mdio_slow_mode_hv(hw);
		if (!ret_val)
			ret_val = e1000e_get_phy_id(hw);
		hw->phy.ops.acquire(hw);
	}
228

229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244
	if (ret_val)
		return false;
out:
	if (hw->mac.type == e1000_pch_lpt) {
		/* Unforce SMBus mode in PHY */
		e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg);
		phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
		e1e_wphy_locked(hw, CV_SMB_CTRL, phy_reg);

		/* Unforce SMBus mode in MAC */
		mac_reg = er32(CTRL_EXT);
		mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
		ew32(CTRL_EXT, mac_reg);
	}

	return true;
245 246
}

247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287
/**
 *  e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value
 *  @hw: pointer to the HW structure
 *
 *  Toggling the LANPHYPC pin value fully power-cycles the PHY and is
 *  used to reset the PHY to a quiescent state when necessary.
 **/
static void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
{
	u32 mac_reg;

	/* Set Phy Config Counter to 50msec */
	mac_reg = er32(FEXTNVM3);
	mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
	mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
	ew32(FEXTNVM3, mac_reg);

	/* Toggle LANPHYPC Value bit */
	mac_reg = er32(CTRL);
	mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
	mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
	ew32(CTRL, mac_reg);
	e1e_flush();
	/* Hold the value low briefly before releasing the override */
	usleep_range(10, 20);
	mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
	ew32(CTRL, mac_reg);
	e1e_flush();

	if (hw->mac.type < e1000_pch_lpt) {
		/* Pre-LPT parts: fixed 50 ms wait after the toggle */
		msleep(50);
	} else {
		/* LPT and newer: poll CTRL_EXT.LPCD (presumably the
		 * LANPHYPC-cycle-done indication — confirm against spec)
		 * for up to 20 iterations, then allow extra settle time.
		 */
		u16 count = 20;

		do {
			usleep_range(5000, 10000);
		} while (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LPCD) && count--);

		msleep(30);
	}
}

288 289 290 291 292 293 294 295 296
/**
 *  e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
 *  @hw: pointer to the HW structure
 *
 *  Workarounds/flow necessary for PHY initialization during driver load
 *  and resume paths.
 **/
static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
{
297
	struct e1000_adapter *adapter = hw->adapter;
298 299 300
	u32 mac_reg, fwsm = er32(FWSM);
	s32 ret_val;

301 302 303 304 305
	/* Gate automatic PHY configuration by hardware on managed and
	 * non-managed 82579 and newer adapters.
	 */
	e1000_gate_hw_phy_config_ich8lan(hw, true);

306 307 308 309 310 311
	/* It is not possible to be certain of the current state of ULP
	 * so forcibly disable it.
	 */
	hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;
	e1000_disable_ulp_lpt_lp(hw, true);

312 313 314
	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val) {
		e_dbg("Failed to initialize PHY flow\n");
315
		goto out;
316 317
	}

B
Bruce Allan 已提交
318
	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
319 320 321 322
	 * inaccessible and resetting the PHY is not blocked, toggle the
	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
	 */
	switch (hw->mac.type) {
B
Bruce Allan 已提交
323 324 325 326
	case e1000_pch_lpt:
		if (e1000_phy_is_accessible_pchlan(hw))
			break;

B
Bruce Allan 已提交
327
		/* Before toggling LANPHYPC, see if PHY is accessible by
B
Bruce Allan 已提交
328 329 330 331 332 333
		 * forcing MAC to SMBus mode first.
		 */
		mac_reg = er32(CTRL_EXT);
		mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
		ew32(CTRL_EXT, mac_reg);

334 335 336 337 338 339
		/* Wait 50 milliseconds for MAC to finish any retries
		 * that it might be trying to perform from previous
		 * attempts to acknowledge any phy read requests.
		 */
		msleep(50);

B
Bruce Allan 已提交
340
		/* fall-through */
341
	case e1000_pch2lan:
342
		if (e1000_phy_is_accessible_pchlan(hw))
343 344 345 346 347 348 349 350 351 352
			break;

		/* fall-through */
	case e1000_pchlan:
		if ((hw->mac.type == e1000_pchlan) &&
		    (fwsm & E1000_ICH_FWSM_FW_VALID))
			break;

		if (hw->phy.ops.check_reset_block(hw)) {
			e_dbg("Required LANPHYPC toggle blocked by ME\n");
353
			ret_val = -E1000_ERR_PHY;
354 355 356 357
			break;
		}

		/* Toggle LANPHYPC Value bit */
358 359
		e1000_toggle_lanphypc_pch_lpt(hw);
		if (hw->mac.type >= e1000_pch_lpt) {
360 361 362 363 364 365 366 367 368 369 370 371 372 373
			if (e1000_phy_is_accessible_pchlan(hw))
				break;

			/* Toggling LANPHYPC brings the PHY out of SMBus mode
			 * so ensure that the MAC is also out of SMBus mode
			 */
			mac_reg = er32(CTRL_EXT);
			mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
			ew32(CTRL_EXT, mac_reg);

			if (e1000_phy_is_accessible_pchlan(hw))
				break;

			ret_val = -E1000_ERR_PHY;
B
Bruce Allan 已提交
374
		}
375 376 377 378 379 380
		break;
	default:
		break;
	}

	hw->phy.ops.release(hw);
381
	if (!ret_val) {
382 383 384 385 386 387 388

		/* Check to see if able to reset PHY.  Print error if not */
		if (hw->phy.ops.check_reset_block(hw)) {
			e_err("Reset blocked by ME\n");
			goto out;
		}

389 390 391 392 393 394
		/* Reset the PHY before any access to it.  Doing so, ensures
		 * that the PHY is in a known good state before we read/write
		 * PHY registers.  The generic reset is sufficient here,
		 * because we haven't determined the PHY type yet.
		 */
		ret_val = e1000e_phy_hw_reset_generic(hw);
395 396 397 398 399 400 401 402 403 404 405 406
		if (ret_val)
			goto out;

		/* On a successful reset, possibly need to wait for the PHY
		 * to quiesce to an accessible state before returning control
		 * to the calling function.  If the PHY does not quiesce, then
		 * return E1000E_BLK_PHY_RESET, as this is the condition that
		 *  the PHY is in.
		 */
		ret_val = hw->phy.ops.check_reset_block(hw);
		if (ret_val)
			e_err("ME blocked access to PHY after reset\n");
407
	}
408

409
out:
410 411 412 413 414 415 416 417
	/* Ungate automatic PHY configuration on non-managed 82579 */
	if ((hw->mac.type == e1000_pch2lan) &&
	    !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
		usleep_range(10000, 20000);
		e1000_gate_hw_phy_config_ich8lan(hw, false);
	}

	return ret_val;
418 419
}

420 421 422 423 424 425 426 427 428
/**
 *  e1000_init_phy_params_pchlan - Initialize PHY function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific PHY parameters and function pointers.
 **/
static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
429
	s32 ret_val;
430

B
Bruce Allan 已提交
431 432 433 434 435 436 437 438 439 440 441 442 443 444 445
	phy->addr = 1;
	phy->reset_delay_us = 100;

	phy->ops.set_page = e1000_set_page_igp;
	phy->ops.read_reg = e1000_read_phy_reg_hv;
	phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
	phy->ops.read_reg_page = e1000_read_phy_reg_page_hv;
	phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
	phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
	phy->ops.write_reg = e1000_write_phy_reg_hv;
	phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
	phy->ops.write_reg_page = e1000_write_phy_reg_page_hv;
	phy->ops.power_up = e1000_power_up_phy_copper;
	phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
	phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
446

447
	phy->id = e1000_phy_unknown;
448

449 450 451
	ret_val = e1000_init_phy_workarounds_pchlan(hw);
	if (ret_val)
		return ret_val;
452

453 454 455 456 457 458 459 460 461 462
	if (phy->id == e1000_phy_unknown)
		switch (hw->mac.type) {
		default:
			ret_val = e1000e_get_phy_id(hw);
			if (ret_val)
				return ret_val;
			if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
				break;
			/* fall-through */
		case e1000_pch2lan:
B
Bruce Allan 已提交
463
		case e1000_pch_lpt:
B
Bruce Allan 已提交
464
			/* In case the PHY needs to be in mdio slow mode,
465 466 467 468 469 470 471 472
			 * set slow mode and try to get the PHY id again.
			 */
			ret_val = e1000_set_mdio_slow_mode_hv(hw);
			if (ret_val)
				return ret_val;
			ret_val = e1000e_get_phy_id(hw);
			if (ret_val)
				return ret_val;
473
			break;
474
		}
475 476
	phy->type = e1000e_get_phy_type_from_id(phy->id);

477 478
	switch (phy->type) {
	case e1000_phy_82577:
479
	case e1000_phy_82579:
B
Bruce Allan 已提交
480
	case e1000_phy_i217:
481 482
		phy->ops.check_polarity = e1000_check_polarity_82577;
		phy->ops.force_speed_duplex =
483
		    e1000_phy_force_speed_duplex_82577;
484
		phy->ops.get_cable_length = e1000_get_cable_length_82577;
485 486
		phy->ops.get_info = e1000_get_phy_info_82577;
		phy->ops.commit = e1000e_phy_sw_reset;
487
		break;
488 489 490 491 492 493 494 495 496
	case e1000_phy_82578:
		phy->ops.check_polarity = e1000_check_polarity_m88;
		phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88;
		phy->ops.get_cable_length = e1000e_get_cable_length_m88;
		phy->ops.get_info = e1000e_get_phy_info_m88;
		break;
	default:
		ret_val = -E1000_ERR_PHY;
		break;
497 498 499 500 501
	}

	return ret_val;
}

502 503 504 505 506 507 508 509 510 511 512 513
/**
 *  e1000_init_phy_params_ich8lan - Initialize PHY function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific PHY parameters and function pointers.
 **/
static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 i = 0;

B
Bruce Allan 已提交
514 515
	phy->addr = 1;
	phy->reset_delay_us = 100;
516

B
Bruce Allan 已提交
517 518
	phy->ops.power_up = e1000_power_up_phy_copper;
	phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
519

B
Bruce Allan 已提交
520
	/* We may need to do this twice - once for IGP and if that fails,
521 522 523 524
	 * we'll set BM func pointers and try again
	 */
	ret_val = e1000e_determine_phy_address(hw);
	if (ret_val) {
525
		phy->ops.write_reg = e1000e_write_phy_reg_bm;
B
Bruce Allan 已提交
526
		phy->ops.read_reg = e1000e_read_phy_reg_bm;
527
		ret_val = e1000e_determine_phy_address(hw);
B
Bruce Allan 已提交
528 529
		if (ret_val) {
			e_dbg("Cannot determine PHY addr. Erroring out\n");
530
			return ret_val;
B
Bruce Allan 已提交
531
		}
532 533
	}

534 535 536
	phy->id = 0;
	while ((e1000_phy_unknown == e1000e_get_phy_type_from_id(phy->id)) &&
	       (i++ < 100)) {
537
		usleep_range(1000, 2000);
538 539 540 541 542 543 544 545 546 547
		ret_val = e1000e_get_phy_id(hw);
		if (ret_val)
			return ret_val;
	}

	/* Verify phy id */
	switch (phy->id) {
	case IGP03E1000_E_PHY_ID:
		phy->type = e1000_phy_igp_3;
		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
548 549
		phy->ops.read_reg_locked = e1000e_read_phy_reg_igp_locked;
		phy->ops.write_reg_locked = e1000e_write_phy_reg_igp_locked;
550 551 552
		phy->ops.get_info = e1000e_get_phy_info_igp;
		phy->ops.check_polarity = e1000_check_polarity_igp;
		phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_igp;
553 554 555 556 557 558
		break;
	case IFE_E_PHY_ID:
	case IFE_PLUS_E_PHY_ID:
	case IFE_C_E_PHY_ID:
		phy->type = e1000_phy_ife;
		phy->autoneg_mask = E1000_ALL_NOT_GIG;
559 560 561
		phy->ops.get_info = e1000_get_phy_info_ife;
		phy->ops.check_polarity = e1000_check_polarity_ife;
		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
562
		break;
563 564 565
	case BME1000_E_PHY_ID:
		phy->type = e1000_phy_bm;
		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
566 567 568
		phy->ops.read_reg = e1000e_read_phy_reg_bm;
		phy->ops.write_reg = e1000e_write_phy_reg_bm;
		phy->ops.commit = e1000e_phy_sw_reset;
569 570 571
		phy->ops.get_info = e1000e_get_phy_info_m88;
		phy->ops.check_polarity = e1000_check_polarity_m88;
		phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88;
572
		break;
573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591
	default:
		return -E1000_ERR_PHY;
		break;
	}

	return 0;
}

/**
 *  e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific NVM parameters and function
 *  pointers.
 **/
static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
592
	u32 gfpreg, sector_base_addr, sector_end_addr;
593 594
	u16 i;

595
	/* Can't read flash registers if the register set isn't mapped. */
596
	if (!hw->flash_address) {
597
		e_dbg("ERROR: Flash registers not mapped\n");
598 599 600 601 602 603 604
		return -E1000_ERR_CONFIG;
	}

	nvm->type = e1000_nvm_flash_sw;

	gfpreg = er32flash(ICH_FLASH_GFPREG);

B
Bruce Allan 已提交
605
	/* sector_X_addr is a "sector"-aligned address (4096 bytes)
606
	 * Add 1 to sector_end_addr since this sector is included in
607 608
	 * the overall size.
	 */
609 610 611 612 613 614
	sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
	sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;

	/* flash_base_addr is byte-aligned */
	nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;

B
Bruce Allan 已提交
615
	/* find total size of the NVM, then cut in half since the total
616 617
	 * size represents two separate NVM banks.
	 */
618 619
	nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
				<< FLASH_SECTOR_ADDR_SHIFT);
620 621 622 623 624 625 626 627
	nvm->flash_bank_size /= 2;
	/* Adjust to word count */
	nvm->flash_bank_size /= sizeof(u16);

	nvm->word_size = E1000_ICH8_SHADOW_RAM_WORDS;

	/* Clear shadow ram */
	for (i = 0; i < nvm->word_size; i++) {
628
		dev_spec->shadow_ram[i].modified = false;
B
Bruce Allan 已提交
629
		dev_spec->shadow_ram[i].value = 0xFFFF;
630 631 632 633 634 635 636 637 638 639 640 641
	}

	return 0;
}

/**
 *  e1000_init_mac_params_ich8lan - Initialize MAC function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific MAC parameters and function
 *  pointers.
 **/
642
static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
643 644 645 646
{
	struct e1000_mac_info *mac = &hw->mac;

	/* Set media type function pointer */
647
	hw->phy.media_type = e1000_media_type_copper;
648 649 650 651 652 653 654

	/* Set mta register count */
	mac->mta_reg_count = 32;
	/* Set rar entry count */
	mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
	if (mac->type == e1000_ich8lan)
		mac->rar_entry_count--;
655 656 657 658
	/* FWSM register */
	mac->has_fwsm = true;
	/* ARC subsystem not supported */
	mac->arc_subsystem_valid = false;
659 660
	/* Adaptive IFS supported */
	mac->adaptive_ifs = true;
661

B
Bruce Allan 已提交
662
	/* LED and other operations */
663 664 665 666
	switch (mac->type) {
	case e1000_ich8lan:
	case e1000_ich9lan:
	case e1000_ich10lan:
667 668
		/* check management mode */
		mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
669
		/* ID LED init */
670
		mac->ops.id_led_init = e1000e_id_led_init_generic;
671 672
		/* blink LED */
		mac->ops.blink_led = e1000e_blink_led_generic;
673 674 675 676 677 678 679 680
		/* setup LED */
		mac->ops.setup_led = e1000e_setup_led_generic;
		/* cleanup LED */
		mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
		/* turn on/off LED */
		mac->ops.led_on = e1000_led_on_ich8lan;
		mac->ops.led_off = e1000_led_off_ich8lan;
		break;
681
	case e1000_pch2lan:
682 683 684
		mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
		mac->ops.rar_set = e1000_rar_set_pch2lan;
		/* fall-through */
B
Bruce Allan 已提交
685
	case e1000_pch_lpt:
686
	case e1000_pchlan:
687 688
		/* check management mode */
		mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
689 690 691 692 693 694 695 696 697 698 699 700 701 702
		/* ID LED init */
		mac->ops.id_led_init = e1000_id_led_init_pchlan;
		/* setup LED */
		mac->ops.setup_led = e1000_setup_led_pchlan;
		/* cleanup LED */
		mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
		/* turn on/off LED */
		mac->ops.led_on = e1000_led_on_pchlan;
		mac->ops.led_off = e1000_led_off_pchlan;
		break;
	default:
		break;
	}

B
Bruce Allan 已提交
703 704 705
	if (mac->type == e1000_pch_lpt) {
		mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
		mac->ops.rar_set = e1000_rar_set_pch_lpt;
706 707
		mac->ops.setup_physical_interface =
		    e1000_setup_copper_link_pch_lpt;
708
		mac->ops.rar_get_count = e1000_rar_get_count_pch_lpt;
B
Bruce Allan 已提交
709 710
	}

711 712
	/* Enable PCS Lock-loss workaround for ICH8 */
	if (mac->type == e1000_ich8lan)
713
		e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
714 715 716 717

	return 0;
}

718 719 720 721 722 723 724 725 726 727 728 729
/**
 *  __e1000_access_emi_reg_locked - Read/write EMI register
 *  @hw: pointer to the HW structure
 *  @addr: EMI address to program
 *  @data: pointer to value to read/write from/to the EMI address
 *  @read: boolean flag to indicate read or write
 *
 *  This helper function assumes the SW/FW/HW Semaphore is already acquired.
 **/
static s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address,
					 u16 *data, bool read)
{
730
	s32 ret_val;
731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751

	ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR, address);
	if (ret_val)
		return ret_val;

	if (read)
		ret_val = e1e_rphy_locked(hw, I82579_EMI_DATA, data);
	else
		ret_val = e1e_wphy_locked(hw, I82579_EMI_DATA, *data);

	return ret_val;
}

/**
 *  e1000_read_emi_reg_locked - Read Extended Management Interface register
 *  @hw: pointer to the HW structure
 *  @addr: EMI address to program
 *  @data: value to be read from the EMI address
 *
 *  Assumes the SW/FW/HW Semaphore is already acquired.
 **/
752
s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data)
753 754 755 756 757 758 759 760 761 762 763 764
{
	return __e1000_access_emi_reg_locked(hw, addr, data, true);
}

/**
 *  e1000_write_emi_reg_locked - Write Extended Management Interface register
 *  @hw: pointer to the HW structure
 *  @addr: EMI address to program
 *  @data: value to be written to the EMI address
 *
 *  Assumes the SW/FW/HW Semaphore is already acquired.
 **/
765
s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
766 767 768 769
{
	return __e1000_access_emi_reg_locked(hw, addr, &data, false);
}

770 771 772 773
/**
 *  e1000_set_eee_pchlan - Enable/disable EEE support
 *  @hw: pointer to the HW structure
 *
774 775 776
 *  Enable/disable EEE based on setting in dev_spec structure, the duplex of
 *  the link and the EEE capabilities of the link partner.  The LPI Control
 *  register bits will remain set only if/when link is up.
777 778 779 780 781 782
 *
 *  EEE LPI must not be asserted earlier than one second after link is up.
 *  On 82579, EEE LPI should not be enabled until such time otherwise there
 *  can be link issues with some switches.  Other devices can have EEE LPI
 *  enabled immediately upon link up since they have a timer in hardware which
 *  prevents LPI from being asserted too early.
783
 **/
784
s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
785
{
B
Bruce Allan 已提交
786
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
787
	s32 ret_val;
788
	u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data;
789

790 791 792 793 794 795 796 797 798 799 800 801
	switch (hw->phy.type) {
	case e1000_phy_82579:
		lpa = I82579_EEE_LP_ABILITY;
		pcs_status = I82579_EEE_PCS_STATUS;
		adv_addr = I82579_EEE_ADVERTISEMENT;
		break;
	case e1000_phy_i217:
		lpa = I217_EEE_LP_ABILITY;
		pcs_status = I217_EEE_PCS_STATUS;
		adv_addr = I217_EEE_ADVERTISEMENT;
		break;
	default:
802
		return 0;
803
	}
804

805
	ret_val = hw->phy.ops.acquire(hw);
806
	if (ret_val)
807
		return ret_val;
808

809
	ret_val = e1e_rphy_locked(hw, I82579_LPI_CTRL, &lpi_ctrl);
B
Bruce Allan 已提交
810
	if (ret_val)
811 812 813 814 815 816 817
		goto release;

	/* Clear bits that enable EEE in various speeds */
	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK;

	/* Enable EEE if not disabled by user */
	if (!dev_spec->eee_disable) {
B
Bruce Allan 已提交
818
		/* Save off link partner's EEE ability */
819
		ret_val = e1000_read_emi_reg_locked(hw, lpa,
820
						    &dev_spec->eee_lp_ability);
B
Bruce Allan 已提交
821 822 823
		if (ret_val)
			goto release;

824 825 826 827 828
		/* Read EEE advertisement */
		ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv);
		if (ret_val)
			goto release;

829
		/* Enable EEE only for speeds in which the link partner is
830
		 * EEE capable and for which we advertise EEE.
B
Bruce Allan 已提交
831
		 */
832
		if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
833 834
			lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;

835
		if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
836 837
			e1e_rphy_locked(hw, MII_LPA, &data);
			if (data & LPA_100FULL)
838 839 840 841 842 843 844 845 846
				lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
			else
				/* EEE is not supported in 100Half, so ignore
				 * partner's EEE in 100 ability if full-duplex
				 * is not advertised.
				 */
				dev_spec->eee_lp_ability &=
				    ~I82579_EEE_100_SUPPORTED;
		}
B
Bruce Allan 已提交
847 848
	}

849 850 851 852 853 854 855 856 857 858 859
	if (hw->phy.type == e1000_phy_82579) {
		ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
						    &data);
		if (ret_val)
			goto release;

		data &= ~I82579_LPI_100_PLL_SHUT;
		ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
						     data);
	}

860 861 862 863 864
	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
	ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
	if (ret_val)
		goto release;

865 866 867 868 869
	ret_val = e1e_wphy_locked(hw, I82579_LPI_CTRL, lpi_ctrl);
release:
	hw->phy.ops.release(hw);

	return ret_val;
870 871
}

872 873 874 875 876 877 878 879
/**
 *  e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
 *  @hw:   pointer to the HW structure
 *  @link: link up bool flag
 *
 *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
 *  preventing further DMA write requests.  Workaround the issue by disabling
 *  the de-assertion of the clock request when in 1Gpbs mode.
 *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
 *  speeds in order to avoid Tx hangs.
 **/
static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
{
	u32 fextnvm6 = er32(FEXTNVM6);
	u32 status = er32(STATUS);
	s32 ret_val = 0;
	u16 reg;

	if (link && (status & E1000_STATUS_SPEED_1000)) {
		/* 1Gbps link up: temporarily disable K1 while setting the
		 * PLL-clock-request bit, then restore the K1 configuration.
		 */
		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			return ret_val;

		ret_val =
		    e1000e_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
						&reg);
		if (ret_val)
			goto release;

		ret_val =
		    e1000e_write_kmrn_reg_locked(hw,
						 E1000_KMRNCTRLSTA_K1_CONFIG,
						 reg &
						 ~E1000_KMRNCTRLSTA_K1_ENABLE);
		if (ret_val)
			goto release;

		usleep_range(10, 20);

		/* Keep the PLL clock requested while K1 is re-enabled */
		ew32(FEXTNVM6, fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK);

		/* Restore the saved K1 configuration */
		ret_val =
		    e1000e_write_kmrn_reg_locked(hw,
						 E1000_KMRNCTRLSTA_K1_CONFIG,
						 reg);
release:
		hw->phy.ops.release(hw);
	} else {
		/* clear FEXTNVM6 bit 8 on link down or 10/100 */
		fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK;

		/* No Tx-timeout tuning needed when link is down or when
		 * running 100Full.
		 */
		if (!link || ((status & E1000_STATUS_SPEED_100) &&
			      (status & E1000_STATUS_FD)))
			goto update_fextnvm6;

		ret_val = e1e_rphy(hw, I217_INBAND_CTRL, &reg);
		if (ret_val)
			return ret_val;

		/* Clear link status transmit timeout */
		reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;

		if (status & E1000_STATUS_SPEED_100) {
			/* Set inband Tx timeout to 5x10us for 100Half */
			reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;

			/* Do not extend the K1 entry latency for 100Half */
			fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
		} else {
			/* Set inband Tx timeout to 50x10us for 10Full/Half */
			reg |= 50 <<
			    I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;

			/* Extend the K1 entry latency for 10 Mbps */
			fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
		}

		ret_val = e1e_wphy(hw, I217_INBAND_CTRL, reg);
		if (ret_val)
			return ret_val;

update_fextnvm6:
		ew32(FEXTNVM6, fextnvm6);
	}

	return ret_val;
}

960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047
/**
 *  e1000_platform_pm_pch_lpt - Set platform power management values
 *  @hw: pointer to the HW structure
 *  @link: bool indicating link status
 *
 *  Set the Latency Tolerance Reporting (LTR) values for the "PCIe-like"
 *  GbE MAC in the Lynx Point PCH based on Rx buffer size and link speed
 *  when link is up (which must not exceed the maximum latency supported
 *  by the platform), otherwise specify there is no LTR requirement.
 *  Unlike true-PCIe devices which set the LTR maximum snoop/no-snoop
 *  latencies in the LTR Extended Capability Structure in the PCIe Extended
 *  Capability register set, on this device LTR is set by writing the
 *  equivalent snoop/no-snoop latencies in the LTRV register in the MAC and
 *  set the SEND bit to send an Intel On-chip System Fabric sideband (IOSF-SB)
 *  message to the PMC.
 **/
static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
{
	u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
	    link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
	u16 lat_enc = 0;	/* latency encoded */

	if (link) {
		u16 speed, duplex, scale = 0;
		u16 max_snoop, max_nosnoop;
		u16 max_ltr_enc;	/* max LTR latency encoded */
		s64 lat_ns;	/* latency (ns) */
		s64 value;
		u32 rxa;

		if (!hw->adapter->max_frame_size) {
			e_dbg("max_frame_size not set.\n");
			return -E1000_ERR_CONFIG;
		}

		hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
		if (!speed) {
			e_dbg("Speed not set.\n");
			return -E1000_ERR_CONFIG;
		}

		/* Rx Packet Buffer Allocation size (KB) */
		rxa = er32(PBA) & E1000_PBA_RXA_MASK;

		/* Determine the maximum latency tolerated by the device.
		 *
		 * Per the PCIe spec, the tolerated latencies are encoded as
		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
		 * a 10-bit value (0-1023) to provide a range from 1 ns to
		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
		 */
		lat_ns = ((s64)rxa * 1024 -
			  (2 * (s64)hw->adapter->max_frame_size)) * 8 * 1000;
		if (lat_ns < 0)
			lat_ns = 0;
		else
			do_div(lat_ns, speed);

		value = lat_ns;
		while (value > PCI_LTR_VALUE_MASK) {
			scale++;
			value = DIV_ROUND_UP(value, (1 << 5));
		}
		if (scale > E1000_LTRV_SCALE_MAX) {
			e_dbg("Invalid LTR latency scale %d\n", scale);
			return -E1000_ERR_CONFIG;
		}
		lat_enc = (u16)((scale << PCI_LTR_SCALE_SHIFT) | value);

		/* Determine the maximum latency tolerated by the platform */
		pci_read_config_word(hw->adapter->pdev, E1000_PCI_LTR_CAP_LPT,
				     &max_snoop);
		pci_read_config_word(hw->adapter->pdev,
				     E1000_PCI_LTR_CAP_LPT + 2, &max_nosnoop);
		max_ltr_enc = max_t(u16, max_snoop, max_nosnoop);

		if (lat_enc > max_ltr_enc)
			lat_enc = max_ltr_enc;
	}

	/* Set Snoop and No-Snoop latencies the same */
	reg |= lat_enc | (lat_enc << E1000_LTRV_NOSNOOP_SHIFT);
	ew32(LTRV, reg);

	return 0;
}

/**
 *  e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP
 *  @hw: pointer to the HW structure
 *  @to_sx: boolean indicating a system power state transition to Sx
 *
 *  When link is down, configure ULP mode to significantly reduce the power
 *  to the PHY.  If on a Manageability Engine (ME) enabled system, tell the
 *  ME firmware to start the ULP configuration.  If not on an ME enabled
 *  system, configure the ULP mode by software.
 */
s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
{
	u32 mac_reg;
	s32 ret_val = 0;
	u16 phy_reg;

	/* Nothing to do on pre-LPT MACs, on the listed I217/I218 devices,
	 * or when ULP is already enabled.
	 */
	if ((hw->mac.type < e1000_pch_lpt) ||
	    (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_LM) ||
	    (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_V) ||
	    (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM2) ||
	    (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V2) ||
	    (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_on))
		return 0;

	if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
		/* Request ME configure ULP mode in the PHY */
		mac_reg = er32(H2ME);
		mac_reg |= E1000_H2ME_ULP | E1000_H2ME_ENFORCE_SETTINGS;
		ew32(H2ME, mac_reg);

		goto out;
	}

	if (!to_sx) {
		int i = 0;

		/* Poll up to 5 seconds for Cable Disconnected indication */
		while (!(er32(FEXT) & E1000_FEXT_PHY_CABLE_DISCONNECTED)) {
			/* Bail if link is re-acquired */
			if (er32(STATUS) & E1000_STATUS_LU)
				return -E1000_ERR_PHY;

			if (i++ == 100)
				break;

			msleep(50);
		}
		e_dbg("CABLE_DISCONNECTED %s set after %dmsec\n",
		      (er32(FEXT) &
		       E1000_FEXT_PHY_CABLE_DISCONNECTED) ? "" : "not", i * 50);
	}

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		goto out;

	/* Force SMBus mode in PHY */
	ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
	if (ret_val)
		goto release;
	phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
	/* NOTE(review): the write status is intentionally not checked in
	 * this best-effort configuration sequence - confirm against HW spec.
	 */
	e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);

	/* Force SMBus mode in MAC */
	mac_reg = er32(CTRL_EXT);
	mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
	ew32(CTRL_EXT, mac_reg);

	/* Set Inband ULP Exit, Reset to SMBus mode and
	 * Disable SMBus Release on PERST# in PHY
	 */
	ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
	if (ret_val)
		goto release;
	phy_reg |= (I218_ULP_CONFIG1_RESET_TO_SMBUS |
		    I218_ULP_CONFIG1_DISABLE_SMB_PERST);
	if (to_sx) {
		/* Wake on link-change only if WoL on link change is armed */
		if (er32(WUFC) & E1000_WUFC_LNKC)
			phy_reg |= I218_ULP_CONFIG1_WOL_HOST;

		phy_reg |= I218_ULP_CONFIG1_STICKY_ULP;
	} else {
		phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT;
	}
	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);

	/* Set Disable SMBus Release on PERST# in MAC */
	mac_reg = er32(FEXTNVM7);
	mac_reg |= E1000_FEXTNVM7_DISABLE_SMB_PERST;
	ew32(FEXTNVM7, mac_reg);

	/* Commit ULP changes in PHY by starting auto ULP configuration */
	phy_reg |= I218_ULP_CONFIG1_START;
	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
release:
	hw->phy.ops.release(hw);
out:
	if (ret_val)
		e_dbg("Error in ULP enable flow: %d\n", ret_val);
	else
		hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on;

	return ret_val;
}

/**
 *  e1000_disable_ulp_lpt_lp - unconfigure Ultra Low Power mode for LynxPoint-LP
 *  @hw: pointer to the HW structure
 *  @force: boolean indicating whether or not to force disabling ULP
 *
 *  Un-configure ULP mode when link is up, the system is transitioned from
 *  Sx or the driver is unloaded.  If on a Manageability Engine (ME) enabled
 *  system, poll for an indication from ME that ULP has been un-configured.
 *  If not on an ME enabled system, un-configure the ULP mode by software.
 *
 *  During nominal operation, this function is called when link is acquired
 *  to disable ULP mode (force=false); otherwise, for example when unloading
 *  the driver or during Sx->S0 transitions, this is called with force=true
 *  to forcibly disable ULP.
 */
static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
{
	s32 ret_val = 0;
	u32 mac_reg;
	u16 phy_reg;
	int i = 0;

	/* Nothing to do on pre-LPT MACs, on the listed I217/I218 devices,
	 * or when ULP is already disabled.
	 */
	if ((hw->mac.type < e1000_pch_lpt) ||
	    (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_LM) ||
	    (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_V) ||
	    (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM2) ||
	    (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V2) ||
	    (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_off))
		return 0;

	if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
		if (force) {
			/* Request ME un-configure ULP mode in the PHY */
			mac_reg = er32(H2ME);
			mac_reg &= ~E1000_H2ME_ULP;
			mac_reg |= E1000_H2ME_ENFORCE_SETTINGS;
			ew32(H2ME, mac_reg);
		}

		/* Poll up to 100msec for ME to clear ULP_CFG_DONE */
		while (er32(FWSM) & E1000_FWSM_ULP_CFG_DONE) {
			if (i++ == 10) {
				ret_val = -E1000_ERR_PHY;
				goto out;
			}

			usleep_range(10000, 20000);
		}
		e_dbg("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);

		if (force) {
			mac_reg = er32(H2ME);
			mac_reg &= ~E1000_H2ME_ENFORCE_SETTINGS;
			ew32(H2ME, mac_reg);
		} else {
			/* Clear H2ME.ULP after ME ULP configuration */
			mac_reg = er32(H2ME);
			mac_reg &= ~E1000_H2ME_ULP;
			ew32(H2ME, mac_reg);
		}

		goto out;
	}

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		goto out;

	if (force)
		/* Toggle LANPHYPC Value bit */
		e1000_toggle_lanphypc_pch_lpt(hw);

	/* Unforce SMBus mode in PHY */
	ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
	if (ret_val) {
		/* The MAC might be in PCIe mode, so temporarily force to
		 * SMBus mode in order to access the PHY.
		 */
		mac_reg = er32(CTRL_EXT);
		mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
		ew32(CTRL_EXT, mac_reg);

		msleep(50);

		/* Retry the PHY read now that SMBus mode is forced */
		ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL,
						       &phy_reg);
		if (ret_val)
			goto release;
	}
	phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
	e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);

	/* Unforce SMBus mode in MAC */
	mac_reg = er32(CTRL_EXT);
	mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
	ew32(CTRL_EXT, mac_reg);

	/* When ULP mode was previously entered, K1 was disabled by the
	 * hardware.  Re-Enable K1 in the PHY when exiting ULP.
	 */
	ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg);
	if (ret_val)
		goto release;
	phy_reg |= HV_PM_CTRL_K1_ENABLE;
	e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg);

	/* Clear ULP enabled configuration */
	ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
	if (ret_val)
		goto release;
	phy_reg &= ~(I218_ULP_CONFIG1_IND |
		     I218_ULP_CONFIG1_STICKY_ULP |
		     I218_ULP_CONFIG1_RESET_TO_SMBUS |
		     I218_ULP_CONFIG1_WOL_HOST |
		     I218_ULP_CONFIG1_INBAND_EXIT |
		     I218_ULP_CONFIG1_DISABLE_SMB_PERST);
	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);

	/* Commit ULP changes by starting auto ULP configuration */
	phy_reg |= I218_ULP_CONFIG1_START;
	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);

	/* Clear Disable SMBus Release on PERST# in MAC */
	mac_reg = er32(FEXTNVM7);
	mac_reg &= ~E1000_FEXTNVM7_DISABLE_SMB_PERST;
	ew32(FEXTNVM7, mac_reg);

release:
	hw->phy.ops.release(hw);
	if (force) {
		e1000_phy_hw_reset(hw);
		msleep(50);
	}
out:
	if (ret_val)
		e_dbg("Error in ULP disable flow: %d\n", ret_val);
	else
		hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off;

	return ret_val;
}

/**
 *  e1000_check_for_copper_link_ich8lan - Check for link (Copper)
 *  @hw: pointer to the HW structure
 *
 *  Checks to see of the link status of the hardware has changed.  If a
 *  change in link status has been detected, then we read the PHY registers
 *  to get the current speed/duplex if link exists.
 **/
static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	bool link;
	u16 phy_reg;

	/* We only want to go out to the PHY registers to see if Auto-Neg
	 * has completed and/or if our link status has changed.  The
	 * get_link_status flag is set upon receiving a Link Status
	 * Change or Rx Sequence Error interrupt.
	 */
	if (!mac->get_link_status)
		return 0;

	/* First we want to see if the MII Status Register reports
	 * link.  If so, then we want to get the current speed/duplex
	 * of the PHY.
	 */
	ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
	if (ret_val)
		return ret_val;

	if (hw->mac.type == e1000_pchlan) {
		ret_val = e1000_k1_gig_workaround_hv(hw, link);
		if (ret_val)
			return ret_val;
	}

	/* When connected at 10Mbps half-duplex, some parts are excessively
	 * aggressive resulting in many collisions. To avoid this, increase
	 * the IPG and reduce Rx latency in the PHY.
	 */
	if (((hw->mac.type == e1000_pch2lan) ||
	     (hw->mac.type == e1000_pch_lpt)) && link) {
		u32 reg;

		reg = er32(STATUS);
		/* Neither FD nor a speed bit set => 10Mbps half-duplex */
		if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) {
			u16 emi_addr;

			reg = er32(TIPG);
			reg &= ~E1000_TIPG_IPGT_MASK;
			reg |= 0xFF;
			ew32(TIPG, reg);

			/* Reduce Rx latency in analog PHY */
			ret_val = hw->phy.ops.acquire(hw);
			if (ret_val)
				return ret_val;

			if (hw->mac.type == e1000_pch2lan)
				emi_addr = I82579_RX_CONFIG;
			else
				emi_addr = I217_RX_CONFIG;

			ret_val = e1000_write_emi_reg_locked(hw, emi_addr, 0);

			hw->phy.ops.release(hw);

			if (ret_val)
				return ret_val;
		}
	}

	/* Work-around I218 hang issue */
	if ((hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
	    (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
	    (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM3) ||
	    (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V3)) {
		ret_val = e1000_k1_workaround_lpt_lp(hw, link);
		if (ret_val)
			return ret_val;
	}

	if (hw->mac.type == e1000_pch_lpt) {
		/* Set platform power management values for
		 * Latency Tolerance Reporting (LTR)
		 */
		ret_val = e1000_platform_pm_pch_lpt(hw, link);
		if (ret_val)
			return ret_val;
	}

	/* Clear link partner's EEE ability */
	hw->dev_spec.ich8lan.eee_lp_ability = 0;

	if (!link)
		return 0;	/* No link detected */

	mac->get_link_status = false;

	switch (hw->mac.type) {
	case e1000_pch2lan:
		ret_val = e1000_k1_workaround_lv(hw);
		if (ret_val)
			return ret_val;
		/* fall-thru */
	case e1000_pchlan:
		if (hw->phy.type == e1000_phy_82578) {
			ret_val = e1000_link_stall_workaround_hv(hw);
			if (ret_val)
				return ret_val;
		}

		/* Workaround for PCHx parts in half-duplex:
		 * Set the number of preambles removed from the packet
		 * when it is passed from the PHY to the MAC to prevent
		 * the MAC from misinterpreting the packet type.
		 */
		e1e_rphy(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
		phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;

		if ((er32(STATUS) & E1000_STATUS_FD) != E1000_STATUS_FD)
			phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);

		e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
		break;
	default:
		break;
	}

	/* Check if there was DownShift, must be checked
	 * immediately after link-up
	 */
	e1000e_check_downshift(hw);

	/* Enable/Disable EEE after link up */
	if (hw->phy.type > e1000_phy_82579) {
		ret_val = e1000_set_eee_pchlan(hw);
		if (ret_val)
			return ret_val;
	}

	/* If we are forcing speed/duplex, then we simply return since
	 * we have already determined whether we have link or not.
	 */
	if (!mac->autoneg)
		return -E1000_ERR_CONFIG;

	/* Auto-Neg is enabled.  Auto Speed Detection takes care
	 * of MAC speed/duplex configuration.  So we only need to
	 * configure Collision Distance in the MAC.
	 */
	mac->ops.config_collision_dist(hw);

	/* Configure Flow Control now that Auto-Neg has completed.
	 * First, we need to restore the desired flow control
	 * settings because we may have had to re-autoneg with a
	 * different link partner.
	 */
	ret_val = e1000e_config_fc_after_link_up(hw);
	if (ret_val)
		e_dbg("Error configuring flow control\n");

	return ret_val;
}

/**
 *  e1000_get_variants_ich8lan - initialize part-specific parameters and flags
 *  @adapter: board private structure
 *
 *  Initialize the MAC, NVM and PHY parameters for the detected MAC type
 *  and apply the part-specific quirks visible below (jumbo-frame and LED
 *  restrictions, gig-speed-drop and PCIm2PCI arbiter workarounds).
 */
static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	s32 rc;

	rc = e1000_init_mac_params_ich8lan(hw);
	if (rc)
		return rc;

	rc = e1000_init_nvm_params_ich8lan(hw);
	if (rc)
		return rc;

	switch (hw->mac.type) {
	case e1000_ich8lan:
	case e1000_ich9lan:
	case e1000_ich10lan:
		rc = e1000_init_phy_params_ich8lan(hw);
		break;
	case e1000_pchlan:
	case e1000_pch2lan:
	case e1000_pch_lpt:
		rc = e1000_init_phy_params_pchlan(hw);
		break;
	default:
		break;
	}
	if (rc)
		return rc;

	/* Disable Jumbo Frame support on parts with Intel 10/100 PHY or
	 * on parts with MACsec enabled in NVM (reflected in CTRL_EXT).
	 */
	if ((adapter->hw.phy.type == e1000_phy_ife) ||
	    ((adapter->hw.mac.type >= e1000_pch2lan) &&
	     (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LSECCK)))) {
		adapter->flags &= ~FLAG_HAS_JUMBO_FRAMES;
		adapter->max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN;

		/* LED blinking is not supported on these parts */
		hw->mac.ops.blink_led = NULL;
	}

	if ((adapter->hw.mac.type == e1000_ich8lan) &&
	    (adapter->hw.phy.type != e1000_phy_ife))
		adapter->flags |= FLAG_LSC_GIG_SPEED_DROP;

	/* Enable workaround for 82579 w/ ME enabled */
	if ((adapter->hw.mac.type == e1000_pch2lan) &&
	    (er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
		adapter->flags2 |= FLAG2_PCIM2PCI_ARBITER_WA;

	return 0;
}

/* Serializes NVM accesses across all e1000e devices in the system */
static DEFINE_MUTEX(nvm_mutex);

/**
 *  e1000_acquire_nvm_ich8lan - Acquire NVM mutex
 *  @hw: pointer to the HW structure
 *
 *  Acquires the mutex for performing NVM operations.  Always returns 0.
 **/
static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw __always_unused *hw)
{
	mutex_lock(&nvm_mutex);

	return 0;
}

/**
 *  e1000_release_nvm_ich8lan - Release NVM mutex
 *  @hw: pointer to the HW structure
 *
 *  Releases the mutex used while performing NVM operations.
 **/
static void e1000_release_nvm_ich8lan(struct e1000_hw __always_unused *hw)
{
	mutex_unlock(&nvm_mutex);
}

/**
 *  e1000_acquire_swflag_ich8lan - Acquire software control flag
 *  @hw: pointer to the HW structure
 *
 *  Acquires the software control flag for performing PHY and select
 *  MAC CSR accesses.
 **/
static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
{
	u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
	s32 ret_val = 0;

	/* Guard against concurrent software users of the shared resource */
	if (test_and_set_bit(__E1000_ACCESS_SHARED_RESOURCE,
			     &hw->adapter->state)) {
		e_dbg("contention for Phy access\n");
		return -E1000_ERR_PHY;
	}

	/* Phase 1: wait for any current SWFLAG owner to release it */
	while (timeout) {
		extcnf_ctrl = er32(EXTCNF_CTRL);
		if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
			break;

		mdelay(1);
		timeout--;
	}

	if (!timeout) {
		e_dbg("SW has already locked the resource.\n");
		ret_val = -E1000_ERR_CONFIG;
		goto out;
	}

	timeout = SW_FLAG_TIMEOUT;

	/* Phase 2: request the flag and wait for hardware to grant it */
	extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
	ew32(EXTCNF_CTRL, extcnf_ctrl);

	while (timeout) {
		extcnf_ctrl = er32(EXTCNF_CTRL);
		if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
			break;

		mdelay(1);
		timeout--;
	}

	if (!timeout) {
		e_dbg("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x)\n",
		      er32(FWSM), extcnf_ctrl);
		/* Undo our request so FW/HW ownership is left consistent */
		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
		ew32(EXTCNF_CTRL, extcnf_ctrl);
		ret_val = -E1000_ERR_CONFIG;
		goto out;
	}

out:
	if (ret_val)
		clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state);

	return ret_val;
}

/**
 *  e1000_release_swflag_ich8lan - Release software control flag
 *  @hw: pointer to the HW structure
 *
 *  Releases the software control flag for performing PHY and select
 *  MAC CSR accesses.
 **/
static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
{
	u32 extcnf_ctrl;

	extcnf_ctrl = er32(EXTCNF_CTRL);

	if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
		ew32(EXTCNF_CTRL, extcnf_ctrl);
	} else {
		/* Flag already gone: some other agent dropped our lock */
		e_dbg("Semaphore unexpectedly released by sw/fw/hw\n");
	}

	clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state);
}

1627 1628 1629 1630
/**
 *  e1000_check_mng_mode_ich8lan - Checks management mode
 *  @hw: pointer to the HW structure
 *
1631
 *  This checks if the adapter has any manageability enabled.
1632 1633 1634 1635 1636
 *  This is a function pointer entry point only called by read/write
 *  routines for the PHY and NVM parts.
 **/
static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
{
1637 1638 1639
	u32 fwsm;

	fwsm = er32(FWSM);
1640 1641 1642
	return ((fwsm & E1000_ICH_FWSM_FW_VALID) &&
		((fwsm & E1000_FWSM_MODE_MASK) ==
		 (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)));
1643
}
1644

1645 1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658
/**
 *  e1000_check_mng_mode_pchlan - Checks management mode
 *  @hw: pointer to the HW structure
 *
 *  This checks if the adapter has iAMT enabled.
 *  This is a function pointer entry point only called by read/write
 *  routines for the PHY and NVM parts.
 **/
static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
{
	u32 fwsm;

	fwsm = er32(FWSM);
	return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1659
	    (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1660 1661
}

/**
 *  e1000_rar_set_pch2lan - Set receive address register
 *  @hw: pointer to the HW structure
 *  @addr: pointer to the receive address
 *  @index: receive address array register
 *
 *  Sets the receive address array register at index to the address passed
 *  in by addr.  For 82579, RAR[0] is the base address register that is to
 *  contain the MAC address but RAR[1-6] are reserved for manageability (ME).
 *  Use SHRA[0-3] in place of those reserved for ME.
 **/
static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
{
	u32 rar_low, rar_high;

	/* HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	rar_low = ((u32)addr[0] |
		   ((u32)addr[1] << 8) |
		   ((u32)addr[2] << 16) | ((u32)addr[3] << 24));

	rar_high = ((u32)addr[4] | ((u32)addr[5] << 8));

	/* If MAC address zero, no need to set the AV bit */
	if (rar_low || rar_high)
		rar_high |= E1000_RAH_AV;

	if (index == 0) {
		/* RAR[0] is always host-owned - program it directly */
		ew32(RAL(index), rar_low);
		e1e_flush();
		ew32(RAH(index), rar_high);
		e1e_flush();
		return 0;
	}

	/* RAR[1-6] are owned by manageability.  Skip those and program the
	 * next address into the SHRA register array.
	 */
	if (index < (u32)(hw->mac.rar_entry_count)) {
		s32 ret_val;

		ret_val = e1000_acquire_swflag_ich8lan(hw);
		if (ret_val)
			goto out;

		ew32(SHRAL(index - 1), rar_low);
		e1e_flush();
		ew32(SHRAH(index - 1), rar_high);
		e1e_flush();

		e1000_release_swflag_ich8lan(hw);

		/* verify the register updates */
		if ((er32(SHRAL(index - 1)) == rar_low) &&
		    (er32(SHRAH(index - 1)) == rar_high))
			return 0;

		e_dbg("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
		      (index - 1), er32(FWSM));
	}

out:
	e_dbg("Failed to write receive address at index %d\n", index);
	return -E1000_ERR_CONFIG;
}

/**
 *  e1000_rar_get_count_pch_lpt - Get the number of available SHRA
 *  @hw: pointer to the HW structure
 *
 *  Get the number of available receive registers that the Host can
 *  program. SHRA[0-10] are the shared receive address registers
 *  that are shared between the Host and manageability engine (ME).
 *  ME can reserve any number of addresses and the host needs to be
 *  able to tell how many available registers it has access to.
 **/
static u32 e1000_rar_get_count_pch_lpt(struct e1000_hw *hw)
{
	u32 locked;

	/* Extract the WLOCK_MAC field: how many SHRA regs ME reserved */
	locked = (er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK) >>
	    E1000_FWSM_WLOCK_MAC_SHIFT;

	if (locked == 0)
		return hw->mac.rar_entry_count;	/* all SHRA + RAR[0] */

	if (locked == 1)
		return 1;	/* only RAR[0] is available */

	/* SHRA[0..(locked - 1)] available + RAR[0] */
	return locked + 1;
}

/**
 *  e1000_rar_set_pch_lpt - Set receive address registers
 *  @hw: pointer to the HW structure
 *  @addr: pointer to the receive address
 *  @index: receive address array register
 *
 *  Sets the receive address register array at index to the address passed
 *  in by addr. For LPT, RAR[0] is the base address register that is to
 *  contain the MAC address. SHRA[0-10] are the shared receive address
 *  registers that are shared between the Host and manageability engine (ME).
 **/
static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
{
	u32 rar_low, rar_high;
	u32 wlock_mac;

	/* HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) |
		   ((u32)addr[2] << 16) | ((u32)addr[3] << 24));

	rar_high = ((u32)addr[4] | ((u32)addr[5] << 8));

	/* If MAC address zero, no need to set the AV bit */
	if (rar_low || rar_high)
		rar_high |= E1000_RAH_AV;

	if (index == 0) {
		/* RAR[0] is always host-owned - program it directly */
		ew32(RAL(index), rar_low);
		e1e_flush();
		ew32(RAH(index), rar_high);
		e1e_flush();
		return 0;
	}

	/* The manageability engine (ME) can lock certain SHRAR registers that
	 * it is using - those registers are unavailable for use.
	 */
	if (index < hw->mac.rar_entry_count) {
		wlock_mac = er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK;
		wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;

		/* Check if all SHRAR registers are locked */
		if (wlock_mac == 1)
			goto out;

		if ((wlock_mac == 0) || (index <= wlock_mac)) {
			s32 ret_val;

			ret_val = e1000_acquire_swflag_ich8lan(hw);

			if (ret_val)
				goto out;

			ew32(SHRAL_PCH_LPT(index - 1), rar_low);
			e1e_flush();
			ew32(SHRAH_PCH_LPT(index - 1), rar_high);
			e1e_flush();

			e1000_release_swflag_ich8lan(hw);

			/* verify the register updates */
			if ((er32(SHRAL_PCH_LPT(index - 1)) == rar_low) &&
			    (er32(SHRAH_PCH_LPT(index - 1)) == rar_high))
				return 0;
		}
	}

out:
	e_dbg("Failed to write receive address at index %d\n", index);
	return -E1000_ERR_CONFIG;
}

1839 1840 1841 1842 1843 1844 1845 1846 1847 1848
/**
 *  e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
 *  @hw: pointer to the HW structure
 *
 *  Checks if firmware is blocking the reset of the PHY.
 *  This is a function pointer entry point only called by
 *  reset routines.
 **/
static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
{
1849 1850
	bool blocked = false;
	int i = 0;
1851

1852 1853 1854 1855
	while ((blocked = !(er32(FWSM) & E1000_ICH_FWSM_RSPCIPHY)) &&
	       (i++ < 10))
		usleep_range(10000, 20000);
	return blocked ? E1000_BLK_PHY_RESET : 0;
1856 1857
}

/**
 *  e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
 *  @hw: pointer to the HW structure
 *
 *  Assumes semaphore already acquired.
 *
 **/
static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
{
	u16 phy_data;
	u32 strap = er32(STRAP);
	/* SMBus frequency strap value (only meaningful on i217 PHYs) */
	u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
	    E1000_STRAP_SMT_FREQ_SHIFT;
	s32 ret_val;

	strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;

	ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
	if (ret_val)
		return ret_val;

	/* Program the strapped SMBus address into the PHY and mark it valid */
	phy_data &= ~HV_SMB_ADDR_MASK;
	phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;

	if (hw->phy.type == e1000_phy_i217) {
		/* Restore SMBus frequency */
		if (freq--) {
			phy_data &= ~HV_SMB_ADDR_FREQ_MASK;
			phy_data |= (freq & (1 << 0)) <<
			    HV_SMB_ADDR_FREQ_LOW_SHIFT;
			phy_data |= (freq & (1 << 1)) <<
			    (HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
		} else {
			e_dbg("Unsupported SMB frequency in PHY\n");
		}
	}

	return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
}

1899 1900 1901 1902 1903 1904 1905 1906 1907 1908 1909
/**
 *  e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
 *  @hw:   pointer to the HW structure
 *
 *  SW should configure the LCD from the NVM extended configuration region
 *  as a workaround for certain parts.
 *
 *  Returns 0 on success (including the "nothing to do" cases), otherwise
 *  the error from the failing NVM/PHY access.
 **/
static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
	s32 ret_val = 0;
	u16 word_addr, reg_data, reg_addr, phy_page = 0;

	/* Initialize the PHY from the NVM on ICH platforms.  This
	 * is needed due to an issue where the NVM configuration is
	 * not properly autoloaded after power transitions.
	 * Therefore, after each PHY reset, we will load the
	 * configuration data out of the NVM manually.
	 */
	switch (hw->mac.type) {
	case e1000_ich8lan:
		if (phy->type != e1000_phy_igp_3)
			return ret_val;

		/* AMT/C ICH8 variants use a different SW-config mask */
		if ((hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_AMT) ||
		    (hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_C)) {
			sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
			break;
		}
		/* Fall-thru */
	case e1000_pchlan:
	case e1000_pch2lan:
	case e1000_pch_lpt:
		sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
		break;
	default:
		return ret_val;
	}

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	/* NVM did not request SW configuration - nothing to do */
	data = er32(FEXTNVM);
	if (!(data & sw_cfg_mask))
		goto release;

	/* Make sure HW does not configure LCD from PHY
	 * extended configuration before SW configuration
	 */
	data = er32(EXTCNF_CTRL);
	if ((hw->mac.type < e1000_pch2lan) &&
	    (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE))
		goto release;

	cnf_size = er32(EXTCNF_SIZE);
	cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
	cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
	if (!cnf_size)
		goto release;

	cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
	cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;

	if (((hw->mac.type == e1000_pchlan) &&
	     !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) ||
	    (hw->mac.type > e1000_pchlan)) {
		/* HW configures the SMBus address and LEDs when the
		 * OEM and LCD Write Enable bits are set in the NVM.
		 * When both NVM bits are cleared, SW will configure
		 * them instead.
		 */
		ret_val = e1000_write_smbus_addr(hw);
		if (ret_val)
			goto release;

		data = er32(LEDCTL);
		ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
							(u16)data);
		if (ret_val)
			goto release;
	}

	/* Configure LCD from extended configuration region. */

	/* cnf_base_addr is in DWORD */
	word_addr = (u16)(cnf_base_addr << 1);

	/* Each entry is a (data, address) word pair in NVM */
	for (i = 0; i < cnf_size; i++) {
		ret_val = e1000_read_nvm(hw, (word_addr + i * 2), 1, &reg_data);
		if (ret_val)
			goto release;

		ret_val = e1000_read_nvm(hw, (word_addr + i * 2 + 1),
					 1, &reg_addr);
		if (ret_val)
			goto release;

		/* Save off the PHY page for future writes. */
		if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
			phy_page = reg_data;
			continue;
		}

		reg_addr &= PHY_REG_MASK;
		reg_addr |= phy_page;

		ret_val = e1e_wphy_locked(hw, (u32)reg_addr, reg_data);
		if (ret_val)
			goto release;
	}

release:
	hw->phy.ops.release(hw);
	return ret_val;
}

2017 2018 2019 2020 2021 2022 2023 2024 2025 2026 2027 2028 2029 2030 2031 2032 2033
/**
 *  e1000_k1_gig_workaround_hv - K1 Si workaround
 *  @hw:   pointer to the HW structure
 *  @link: link up bool flag
 *
 *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
 *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig
 *  If link is down, the function will restore the default K1 setting located
 *  in the NVM.
 **/
static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
{
	s32 ret_val = 0;
	u16 status_reg = 0;
	/* Default to the NVM's K1 setting; cleared below if link is 1Gbps */
	bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;

	if (hw->mac.type != e1000_pchlan)
		return 0;

	/* Wrap the whole flow with the sw flag */
	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	/* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
	if (link) {
		if (hw->phy.type == e1000_phy_82578) {
			ret_val = e1e_rphy_locked(hw, BM_CS_STATUS,
						  &status_reg);
			if (ret_val)
				goto release;

			/* Only the link/resolved/speed bits matter for the
			 * 1Gbps comparison below.
			 */
			status_reg &= (BM_CS_STATUS_LINK_UP |
				       BM_CS_STATUS_RESOLVED |
				       BM_CS_STATUS_SPEED_MASK);

			if (status_reg == (BM_CS_STATUS_LINK_UP |
					   BM_CS_STATUS_RESOLVED |
					   BM_CS_STATUS_SPEED_1000))
				k1_enable = false;
		}

		if (hw->phy.type == e1000_phy_82577) {
			ret_val = e1e_rphy_locked(hw, HV_M_STATUS, &status_reg);
			if (ret_val)
				goto release;

			status_reg &= (HV_M_STATUS_LINK_UP |
				       HV_M_STATUS_AUTONEG_COMPLETE |
				       HV_M_STATUS_SPEED_MASK);

			if (status_reg == (HV_M_STATUS_LINK_UP |
					   HV_M_STATUS_AUTONEG_COMPLETE |
					   HV_M_STATUS_SPEED_1000))
				k1_enable = false;
		}

		/* Link stall fix for link up */
		ret_val = e1e_wphy_locked(hw, PHY_REG(770, 19), 0x0100);
		if (ret_val)
			goto release;

	} else {
		/* Link stall fix for link down */
		ret_val = e1e_wphy_locked(hw, PHY_REG(770, 19), 0x4100);
		if (ret_val)
			goto release;
	}

	ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);

release:
	hw->phy.ops.release(hw);

	return ret_val;
}

/**
 *  e1000_configure_k1_ich8lan - Configure K1 power state
 *  @hw: pointer to the HW structure
 *  @k1_enable: K1 state to configure
 *
 *  Configure the K1 power state based on the provided parameter.
 *  Assumes semaphore already acquired.
 *
 *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
 **/
s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
{
	s32 ret_val;
	u32 ctrl_reg = 0;
	u32 ctrl_ext = 0;
	u32 reg = 0;
	u16 kmrn_reg = 0;

	/* Read-modify-write the K1 enable bit in the KMRN K1 config */
	ret_val = e1000e_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
					      &kmrn_reg);
	if (ret_val)
		return ret_val;

	if (k1_enable)
		kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
	else
		kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;

	ret_val = e1000e_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
					       kmrn_reg);
	if (ret_val)
		return ret_val;

	usleep_range(20, 40);

	/* Temporarily force the MAC speed (with speed-bypass) so the K1
	 * change takes effect, then restore the original CTRL/CTRL_EXT
	 * values.  The flushes and delays between steps are required;
	 * do not reorder this sequence.
	 */
	ctrl_ext = er32(CTRL_EXT);
	ctrl_reg = er32(CTRL);

	reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
	reg |= E1000_CTRL_FRCSPD;
	ew32(CTRL, reg);

	ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
	e1e_flush();
	usleep_range(20, 40);
	ew32(CTRL, ctrl_reg);
	ew32(CTRL_EXT, ctrl_ext);
	e1e_flush();
	usleep_range(20, 40);

	return 0;
}

2146 2147 2148 2149 2150 2151 2152 2153 2154 2155 2156 2157 2158 2159 2160
/**
 *  e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
 *  @hw:       pointer to the HW structure
 *  @d0_state: boolean if entering d0 or d3 device state
 *
 *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
 *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
 *  in NVM determines whether HW should configure LPLU and Gbe Disable.
 **/
static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
{
	s32 ret_val = 0;
	u32 mac_reg;
	u16 oem_reg;

	if (hw->mac.type < e1000_pchlan)
		return ret_val;

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	/* On pchlan, HW owns the OEM bits when OEM write-enable is set */
	if (hw->mac.type == e1000_pchlan) {
		mac_reg = er32(EXTCNF_CTRL);
		if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
			goto release;
	}

	/* SW configuration was not requested by the NVM */
	mac_reg = er32(FEXTNVM);
	if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
		goto release;

	mac_reg = er32(PHY_CTRL);

	ret_val = e1e_rphy_locked(hw, HV_OEM_BITS, &oem_reg);
	if (ret_val)
		goto release;

	oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);

	/* Mirror the MAC's PHY_CTRL Gbe-disable/LPLU policy into the PHY's
	 * OEM bits; D3 additionally honors the non-D0a variants.
	 */
	if (d0_state) {
		if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
			oem_reg |= HV_OEM_BITS_GBE_DIS;

		if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
			oem_reg |= HV_OEM_BITS_LPLU;
	} else {
		if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
			       E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
			oem_reg |= HV_OEM_BITS_GBE_DIS;

		if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU |
			       E1000_PHY_CTRL_NOND0A_LPLU))
			oem_reg |= HV_OEM_BITS_LPLU;
	}

	/* Set Restart auto-neg to activate the bits */
	if ((d0_state || (hw->mac.type != e1000_pchlan)) &&
	    !hw->phy.ops.check_reset_block(hw))
		oem_reg |= HV_OEM_BITS_RESTART_AN;

	ret_val = e1e_wphy_locked(hw, HV_OEM_BITS, oem_reg);

release:
	hw->phy.ops.release(hw);

	return ret_val;
}

2215 2216 2217 2218 2219 2220 2221 2222 2223 2224 2225 2226 2227 2228 2229 2230 2231 2232 2233 2234
/**
 *  e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
 *  @hw:   pointer to the HW structure
 *
 *  Read-modify-write of HV_KMRN_MODE_CTRL to turn on the MDIO slow
 *  mode bit.  Returns 0 on success, otherwise the PHY access error.
 **/
static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
{
	u16 mode_reg;
	s32 err;

	err = e1e_rphy(hw, HV_KMRN_MODE_CTRL, &mode_reg);
	if (err)
		return err;

	return e1e_wphy(hw, HV_KMRN_MODE_CTRL, mode_reg | HV_KMRN_MDIO_SLOW);
}

2235 2236 2237 2238 2239 2240 2241
/**
 *  e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be
 *  done after every PHY reset.
 *  @hw: pointer to the HW structure
 *
 *  Only applies to pchlan (82577/82578 PHYs); a no-op on other MAC types.
 **/
static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u16 phy_data;

	if (hw->mac.type != e1000_pchlan)
		return 0;

	/* Set MDIO slow mode before any other MDIO access */
	if (hw->phy.type == e1000_phy_82577) {
		ret_val = e1000_set_mdio_slow_mode_hv(hw);
		if (ret_val)
			return ret_val;
	}

	/* Early PHY revisions need preamble tuning */
	if (((hw->phy.type == e1000_phy_82577) &&
	     ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
	    ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
		/* Disable generation of early preamble */
		ret_val = e1e_wphy(hw, PHY_REG(769, 25), 0x4431);
		if (ret_val)
			return ret_val;

		/* Preamble tuning for SSC */
		ret_val = e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, 0xA204);
		if (ret_val)
			return ret_val;
	}

	if (hw->phy.type == e1000_phy_82578) {
		/* Return registers to default by doing a soft reset then
		 * writing 0x3140 to the control register.
		 */
		if (hw->phy.revision < 2) {
			e1000e_phy_sw_reset(hw);
			ret_val = e1e_wphy(hw, MII_BMCR, 0x3140);
		}
	}

	/* Select page 0 */
	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	hw->phy.addr = 1;
	ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
	hw->phy.ops.release(hw);
	if (ret_val)
		return ret_val;

	/* Configure the K1 Si workaround during phy reset assuming there is
	 * link so that it disables K1 if link is in 1Gbps.
	 */
	ret_val = e1000_k1_gig_workaround_hv(hw, true);
	if (ret_val)
		return ret_val;

	/* Workaround for link disconnects on a busy hub in half duplex */
	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;
	ret_val = e1e_rphy_locked(hw, BM_PORT_GEN_CFG, &phy_data);
	if (ret_val)
		goto release;
	/* Clear the upper byte of the port general config */
	ret_val = e1e_wphy_locked(hw, BM_PORT_GEN_CFG, phy_data & 0x00FF);
	if (ret_val)
		goto release;

	/* set MSE higher to enable link to stay up when noise is high */
	ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034);
release:
	hw->phy.ops.release(hw);

	return ret_val;
}

2315 2316 2317 2318 2319 2320 2321
/**
 *  e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
 *  @hw:   pointer to the HW structure
 *
 *  Mirrors every MAC receive-address register (RAL/RAH) into the PHY's
 *  BM_RAR_* wakeup registers.  Errors are not propagated; on failure the
 *  function simply returns after releasing whatever it acquired.
 **/
void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
{
	u32 mac_reg;
	u16 i, phy_reg = 0;
	s32 ret_val;

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return;
	/* phy_reg saves wakeup-register access state for the disable call */
	ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
	if (ret_val)
		goto release;

	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
	for (i = 0; i < (hw->mac.rar_entry_count); i++) {
		mac_reg = er32(RAL(i));
		hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
					   (u16)(mac_reg & 0xFFFF));
		hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
					   (u16)((mac_reg >> 16) & 0xFFFF));

		mac_reg = er32(RAH(i));
		hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
					   (u16)(mac_reg & 0xFFFF));
		/* Only the Address-Valid bit is forwarded to BM_RAR_CTRL */
		hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
					   (u16)((mac_reg & E1000_RAH_AV)
						 >> 16));
	}

	e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);

release:
	hw->phy.ops.release(hw);
}

/**
 *  e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
 *  with 82579 PHY
 *  @hw: pointer to the HW structure
 *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
 *
 *  Applies (or reverts) a set of MAC, KMRN and PHY register changes needed
 *  for jumbo frames on pch2lan and later.  The Rx path is disabled for the
 *  duration of the reconfiguration and re-enabled at the end.
 **/
s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
{
	s32 ret_val = 0;
	u16 phy_reg, data;
	u32 mac_reg;
	u16 i;

	if (hw->mac.type < e1000_pch2lan)
		return 0;

	/* disable Rx path while enabling/disabling workaround */
	e1e_rphy(hw, PHY_REG(769, 20), &phy_reg);
	ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg | (1 << 14));
	if (ret_val)
		return ret_val;

	if (enable) {
		/* Write Rx addresses (rar_entry_count for RAL/H, and
		 * SHRAL/H) and initial CRC values to the MAC
		 */
		for (i = 0; i < hw->mac.rar_entry_count; i++) {
			u8 mac_addr[ETH_ALEN] = { 0 };
			u32 addr_high, addr_low;

			addr_high = er32(RAH(i));
			/* Skip entries not marked Address-Valid */
			if (!(addr_high & E1000_RAH_AV))
				continue;
			addr_low = er32(RAL(i));
			mac_addr[0] = (addr_low & 0xFF);
			mac_addr[1] = ((addr_low >> 8) & 0xFF);
			mac_addr[2] = ((addr_low >> 16) & 0xFF);
			mac_addr[3] = ((addr_low >> 24) & 0xFF);
			mac_addr[4] = (addr_high & 0xFF);
			mac_addr[5] = ((addr_high >> 8) & 0xFF);

			/* Seed the per-address CRC used by the workaround */
			ew32(PCH_RAICC(i), ~ether_crc_le(ETH_ALEN, mac_addr));
		}

		/* Write Rx addresses to the PHY */
		e1000_copy_rx_addrs_to_phy_ich8lan(hw);

		/* Enable jumbo frame workaround in the MAC */
		mac_reg = er32(FFLT_DBG);
		mac_reg &= ~(1 << 14);
		mac_reg |= (7 << 15);
		ew32(FFLT_DBG, mac_reg);

		/* MAC strips the CRC so the PHY-side CRC check matches */
		mac_reg = er32(RCTL);
		mac_reg |= E1000_RCTL_SECRC;
		ew32(RCTL, mac_reg);

		ret_val = e1000e_read_kmrn_reg(hw,
					       E1000_KMRNCTRLSTA_CTRL_OFFSET,
					       &data);
		if (ret_val)
			return ret_val;
		ret_val = e1000e_write_kmrn_reg(hw,
						E1000_KMRNCTRLSTA_CTRL_OFFSET,
						data | (1 << 0));
		if (ret_val)
			return ret_val;
		ret_val = e1000e_read_kmrn_reg(hw,
					       E1000_KMRNCTRLSTA_HD_CTRL,
					       &data);
		if (ret_val)
			return ret_val;
		data &= ~(0xF << 8);
		data |= (0xB << 8);
		ret_val = e1000e_write_kmrn_reg(hw,
						E1000_KMRNCTRLSTA_HD_CTRL,
						data);
		if (ret_val)
			return ret_val;

		/* Enable jumbo frame workaround in the PHY */
		e1e_rphy(hw, PHY_REG(769, 23), &data);
		data &= ~(0x7F << 5);
		data |= (0x37 << 5);
		ret_val = e1e_wphy(hw, PHY_REG(769, 23), data);
		if (ret_val)
			return ret_val;
		e1e_rphy(hw, PHY_REG(769, 16), &data);
		data &= ~(1 << 13);
		ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
		if (ret_val)
			return ret_val;
		e1e_rphy(hw, PHY_REG(776, 20), &data);
		data &= ~(0x3FF << 2);
		data |= (0x1A << 2);
		ret_val = e1e_wphy(hw, PHY_REG(776, 20), data);
		if (ret_val)
			return ret_val;
		ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0xF100);
		if (ret_val)
			return ret_val;
		e1e_rphy(hw, HV_PM_CTRL, &data);
		ret_val = e1e_wphy(hw, HV_PM_CTRL, data | (1 << 10));
		if (ret_val)
			return ret_val;
	} else {
		/* Write MAC register values back to h/w defaults */
		mac_reg = er32(FFLT_DBG);
		mac_reg &= ~(0xF << 14);
		ew32(FFLT_DBG, mac_reg);

		mac_reg = er32(RCTL);
		mac_reg &= ~E1000_RCTL_SECRC;
		ew32(RCTL, mac_reg);

		ret_val = e1000e_read_kmrn_reg(hw,
					       E1000_KMRNCTRLSTA_CTRL_OFFSET,
					       &data);
		if (ret_val)
			return ret_val;
		ret_val = e1000e_write_kmrn_reg(hw,
						E1000_KMRNCTRLSTA_CTRL_OFFSET,
						data & ~(1 << 0));
		if (ret_val)
			return ret_val;
		ret_val = e1000e_read_kmrn_reg(hw,
					       E1000_KMRNCTRLSTA_HD_CTRL,
					       &data);
		if (ret_val)
			return ret_val;
		data &= ~(0xF << 8);
		data |= (0xB << 8);
		ret_val = e1000e_write_kmrn_reg(hw,
						E1000_KMRNCTRLSTA_HD_CTRL,
						data);
		if (ret_val)
			return ret_val;

		/* Write PHY register values back to h/w defaults */
		e1e_rphy(hw, PHY_REG(769, 23), &data);
		data &= ~(0x7F << 5);
		ret_val = e1e_wphy(hw, PHY_REG(769, 23), data);
		if (ret_val)
			return ret_val;
		e1e_rphy(hw, PHY_REG(769, 16), &data);
		data |= (1 << 13);
		ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
		if (ret_val)
			return ret_val;
		e1e_rphy(hw, PHY_REG(776, 20), &data);
		data &= ~(0x3FF << 2);
		data |= (0x8 << 2);
		ret_val = e1e_wphy(hw, PHY_REG(776, 20), data);
		if (ret_val)
			return ret_val;
		ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0x7E00);
		if (ret_val)
			return ret_val;
		e1e_rphy(hw, HV_PM_CTRL, &data);
		ret_val = e1e_wphy(hw, HV_PM_CTRL, data & ~(1 << 10));
		if (ret_val)
			return ret_val;
	}

	/* re-enable Rx path after enabling/disabling workaround */
	return e1e_wphy(hw, PHY_REG(769, 20), phy_reg & ~(1 << 14));
}

/**
 *  e1000_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
 *  done after every PHY reset.
 **/
static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val = 0;

	if (hw->mac.type != e1000_pch2lan)
2531
		return 0;
2532 2533 2534

	/* Set MDIO slow mode before any other MDIO access */
	ret_val = e1000_set_mdio_slow_mode_hv(hw);
2535 2536
	if (ret_val)
		return ret_val;
2537

2538 2539
	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
2540
		return ret_val;
2541
	/* set MSE higher to enable link to stay up when noise is high */
2542
	ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034);
2543 2544 2545
	if (ret_val)
		goto release;
	/* drop link after 5 times MSE threshold was reached */
2546
	ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005);
2547 2548 2549
release:
	hw->phy.ops.release(hw);

2550 2551 2552
	return ret_val;
}

2553 2554 2555 2556
/**
 *  e1000_k1_workaround_lv - K1 Si workaround
 *  @hw:   pointer to the HW structure
 *
 *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps
 *  Disable K1 in 1000Mbps and 100Mbps
 **/
static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u16 status_reg = 0;

	if (hw->mac.type != e1000_pch2lan)
		return 0;

	/* Set K1 beacon duration based on 10Mbs speed */
	ret_val = e1e_rphy(hw, HV_M_STATUS, &status_reg);
	if (ret_val)
		return ret_val;

	/* Only act once link is up and auto-negotiation has completed */
	if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
		if (status_reg &
		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
			u16 pm_phy_reg;

			/* LV 1G/100 Packet drop issue wa  */
			ret_val = e1e_rphy(hw, HV_PM_CTRL, &pm_phy_reg);
			if (ret_val)
				return ret_val;
			pm_phy_reg &= ~HV_PM_CTRL_K1_ENABLE;
			ret_val = e1e_wphy(hw, HV_PM_CTRL, pm_phy_reg);
			if (ret_val)
				return ret_val;
		} else {
			/* 10Mbps: lengthen the K1 beacon duration instead */
			u32 mac_reg;

			mac_reg = er32(FEXTNVM4);
			mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
			mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
			ew32(FEXTNVM4, mac_reg);
		}
	}

	return ret_val;
}

2600 2601 2602 2603 2604 2605 2606 2607 2608 2609 2610 2611
/**
 *  e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
 *  @hw:   pointer to the HW structure
 *  @gate: boolean set to true to gate, false to ungate
 *
 *  Gate/ungate the automatic PHY configuration via hardware; perform
 *  the configuration via software instead.
 **/
static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
{
	u32 reg;

	/* The gate bit only exists on 82579 (pch2lan) and newer parts */
	if (hw->mac.type < e1000_pch2lan)
		return;

	/* Clear-then-set: read with the gate bit masked off, set it back
	 * only when gating is requested.
	 */
	reg = er32(EXTCNF_CTRL) & ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
	if (gate)
		reg |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;

	ew32(EXTCNF_CTRL, reg);
}

2625 2626 2627 2628 2629 2630 2631 2632 2633 2634 2635 2636 2637 2638 2639
/**
 *  e1000_lan_init_done_ich8lan - Check for PHY config completion
 *  @hw: pointer to the HW structure
 *
 *  Check the appropriate indication the MAC has finished configuring the
 *  PHY after a software reset.
 **/
static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
{
	u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;

	/* Wait for basic configuration completes before proceeding */
	do {
		data = er32(STATUS);
		data &= E1000_STATUS_LAN_INIT_DONE;
		usleep_range(100, 200);
	} while ((!data) && --loop);

	/* If basic configuration is incomplete before the above loop
	 * count reaches 0, loading the configuration from NVM will
	 * leave the PHY in a bad state possibly resulting in no link.
	 */
	if (loop == 0)
		e_dbg("LAN_INIT_DONE not set, increase timeout\n");

	/* Clear the Init Done bit for the next init event */
	data = er32(STATUS);
	data &= ~E1000_STATUS_LAN_INIT_DONE;
	ew32(STATUS, data);
}

2656
/**
 *  e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
 *  @hw: pointer to the HW structure
 *
 *  Runs the MAC-type-specific PHY workarounds, clears the host wakeup bit,
 *  reloads the LCD configuration from NVM, and on 82579 ungates automatic
 *  PHY configuration and programs the EEE LPI update timer.
 **/
static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u16 reg;

	/* Nothing to do while firmware is blocking PHY resets */
	if (hw->phy.ops.check_reset_block(hw))
		return 0;

	/* Allow time for h/w to get to quiescent state after reset */
	usleep_range(10000, 20000);

	/* Perform any necessary post-reset workarounds */
	switch (hw->mac.type) {
	case e1000_pchlan:
		ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
		if (ret_val)
			return ret_val;
		break;
	case e1000_pch2lan:
		ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
		if (ret_val)
			return ret_val;
		break;
	default:
		break;
	}

	/* Clear the host wakeup bit after lcd reset */
	if (hw->mac.type >= e1000_pchlan) {
		e1e_rphy(hw, BM_PORT_GEN_CFG, &reg);
		reg &= ~BM_WUC_HOST_WU_BIT;
		e1e_wphy(hw, BM_PORT_GEN_CFG, reg);
	}

	/* Configure the LCD with the extended configuration region in NVM */
	ret_val = e1000_sw_lcd_config_ich8lan(hw);
	if (ret_val)
		return ret_val;

	/* Configure the LCD with the OEM bits in NVM */
	ret_val = e1000_oem_bits_config_ich8lan(hw, true);

	if (hw->mac.type == e1000_pch2lan) {
		/* Ungate automatic PHY configuration on non-managed 82579 */
		if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
			usleep_range(10000, 20000);
			e1000_gate_hw_phy_config_ich8lan(hw, false);
		}

		/* Set EEE LPI Update Timer to 200usec */
		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			return ret_val;
		ret_val = e1000_write_emi_reg_locked(hw,
						     I82579_LPI_UPDATE_TIMER,
						     0x1387);
		hw->phy.ops.release(hw);
	}

	return ret_val;
}

/**
 *  e1000_phy_hw_reset_ich8lan - Performs a PHY reset
 *  @hw: pointer to the HW structure
 *
 *  Resets the PHY
 *  This is a function pointer entry point called by drivers
 *  or other shared routines.
 **/
static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val = 0;

2734 2735 2736 2737 2738
	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
	if ((hw->mac.type == e1000_pch2lan) &&
	    !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
		e1000_gate_hw_phy_config_ich8lan(hw, true);

2739 2740
	ret_val = e1000e_phy_hw_reset_generic(hw);
	if (ret_val)
2741
		return ret_val;
2742

2743
	return e1000_post_phy_reset_ich8lan(hw);
2744 2745
}

2746 2747 2748 2749 2750 2751 2752 2753 2754 2755 2756 2757 2758
/**
 *  e1000_set_lplu_state_pchlan - Set Low Power Link Up state
 *  @hw: pointer to the HW structure
 *  @active: true to enable LPLU, false to disable
 *
 *  Sets the LPLU state according to the active flag.  For PCH, if OEM write
 *  bit are disabled in the NVM, writing the LPLU bits in the MAC will not set
 *  the phy speed. This function will manually set the LPLU bit and restart
 *  auto-neg as hw would do. D3 and D0 LPLU will call the same function
 *  since it configures the same bit.
 **/
static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
{
	u16 bits;
	s32 err;

	err = e1e_rphy(hw, HV_OEM_BITS, &bits);
	if (err)
		return err;

	/* Clear-then-set the LPLU bit according to the requested state */
	bits &= ~HV_OEM_BITS_LPLU;
	if (active)
		bits |= HV_OEM_BITS_LPLU;

	/* Restart auto-neg so the new setting takes effect, unless
	 * firmware is currently blocking PHY resets.
	 */
	if (!hw->phy.ops.check_reset_block(hw))
		bits |= HV_OEM_BITS_RESTART_AN;

	return e1e_wphy(hw, HV_OEM_BITS, bits);
}

2777 2778 2779
/**
 *  e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
 *  @hw: pointer to the HW structure
 *  @active: true to enable LPLU, false to disable
 *
 *  Sets the LPLU D0 state according to the active flag.  When
 *  activating LPLU this function also disables smart speed
 *  and vice versa.  LPLU will not be activated unless the
 *  device autonegotiation advertisement meets standards of
 *  either 10 or 10/100 or 10/100/1000 at all duplexes.
 *  This is a function pointer entry point only called by
 *  PHY setup routines.
 **/
static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
{
	struct e1000_phy_info *phy = &hw->phy;
	u32 phy_ctrl;
	s32 ret_val = 0;
	u16 data;

	/* ife PHYs have no D0 LPLU handling here */
	if (phy->type == e1000_phy_ife)
		return 0;

	phy_ctrl = er32(PHY_CTRL);

	if (active) {
		phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
		ew32(PHY_CTRL, phy_ctrl);

		/* SmartSpeed adjustment below applies only to igp_3 PHYs */
		if (phy->type != e1000_phy_igp_3)
			return 0;

		/* Call gig speed drop workaround on LPLU before accessing
		 * any PHY registers
		 */
		if (hw->mac.type == e1000_ich8lan)
			e1000e_gig_downshift_workaround_ich8lan(hw);

		/* When LPLU is enabled, we should disable SmartSpeed */
		ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
		if (ret_val)
			return ret_val;
		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
		ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
		if (ret_val)
			return ret_val;
	} else {
		phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
		ew32(PHY_CTRL, phy_ctrl);

		if (phy->type != e1000_phy_igp_3)
			return 0;

		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
		 * during Dx states where the power conservation is most
		 * important.  During driver activity we should enable
		 * SmartSpeed, so performance is maintained.
		 */
		if (phy->smart_speed == e1000_smart_speed_on) {
			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
					   &data);
			if (ret_val)
				return ret_val;

			data |= IGP01E1000_PSCFR_SMART_SPEED;
			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
					   data);
			if (ret_val)
				return ret_val;
		} else if (phy->smart_speed == e1000_smart_speed_off) {
			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
					   &data);
			if (ret_val)
				return ret_val;

			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
					   data);
			if (ret_val)
				return ret_val;
		}
		/* e1000_smart_speed_default: leave the PHY setting as-is */
	}

	return 0;
}

/**
 *  e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
 *  @hw: pointer to the HW structure
 *  @active: true to enable LPLU, false to disable
 *
 *  Sets the LPLU D3 state according to the active flag.  When
 *  activating LPLU this function also disables smart speed
 *  and vice versa.  LPLU will not be activated unless the
 *  device autonegotiation advertisement meets standards of
 *  either 10 or 10/100 or 10/100/1000 at all duplexes.
 *  This is a function pointer entry point only called by
 *  PHY setup routines.
 **/
static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
{
	struct e1000_phy_info *phy = &hw->phy;
	u32 phy_ctrl;
	s32 ret_val = 0;
	u16 data;

	phy_ctrl = er32(PHY_CTRL);

	if (!active) {
		phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
		ew32(PHY_CTRL, phy_ctrl);

		/* SmartSpeed handling below only applies to the IGP3 PHY */
		if (phy->type != e1000_phy_igp_3)
			return 0;

		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
		 * during Dx states where the power conservation is most
		 * important.  During driver activity we should enable
		 * SmartSpeed, so performance is maintained.
		 */
		if (phy->smart_speed == e1000_smart_speed_on) {
			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
					   &data);
			if (ret_val)
				return ret_val;

			data |= IGP01E1000_PSCFR_SMART_SPEED;
			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
					   data);
			if (ret_val)
				return ret_val;
		} else if (phy->smart_speed == e1000_smart_speed_off) {
			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
					   &data);
			if (ret_val)
				return ret_val;

			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
					   data);
			if (ret_val)
				return ret_val;
		}
	} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
		   (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
		   (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
		phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
		ew32(PHY_CTRL, phy_ctrl);

		if (phy->type != e1000_phy_igp_3)
			return 0;

		/* Call gig speed drop workaround on LPLU before accessing
		 * any PHY registers
		 */
		if (hw->mac.type == e1000_ich8lan)
			e1000e_gig_downshift_workaround_ich8lan(hw);

		/* When LPLU is enabled, we should disable SmartSpeed */
		ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
		if (ret_val)
			return ret_val;

		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
		ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
	}

	return ret_val;
}

2947 2948 2949 2950 2951 2952
/**
 *  e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
 *  @hw: pointer to the HW structure
 *  @bank:  pointer to the variable that returns the active bank
 *
 *  Reads signature byte from the NVM using the flash access registers.
2953
 *  Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
2954 2955 2956
 **/
static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
{
2957
	u32 eecd;
2958 2959 2960
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
	u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
2961
	u8 sig_byte = 0;
2962
	s32 ret_val;
2963

2964 2965 2966 2967 2968 2969 2970 2971 2972 2973 2974 2975 2976
	switch (hw->mac.type) {
	case e1000_ich8lan:
	case e1000_ich9lan:
		eecd = er32(EECD);
		if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
		    E1000_EECD_SEC1VAL_VALID_MASK) {
			if (eecd & E1000_EECD_SEC1VAL)
				*bank = 1;
			else
				*bank = 0;

			return 0;
		}
2977
		e_dbg("Unable to determine valid NVM bank via EEC - reading flash signature\n");
2978 2979 2980 2981 2982 2983 2984
		/* fall-thru */
	default:
		/* set bank to 0 in case flash read fails */
		*bank = 0;

		/* Check bank 0 */
		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
2985
							&sig_byte);
2986 2987 2988 2989
		if (ret_val)
			return ret_val;
		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
		    E1000_ICH_NVM_SIG_VALUE) {
2990
			*bank = 0;
2991 2992
			return 0;
		}
2993

2994 2995
		/* Check bank 1 */
		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
2996 2997
							bank1_offset,
							&sig_byte);
2998 2999 3000 3001 3002 3003
		if (ret_val)
			return ret_val;
		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
		    E1000_ICH_NVM_SIG_VALUE) {
			*bank = 1;
			return 0;
3004
		}
3005

3006
		e_dbg("ERROR: No valid NVM bank present\n");
3007
		return -E1000_ERR_NVM;
3008 3009 3010
	}
}

3011 3012 3013 3014 3015 3016 3017 3018 3019 3020 3021 3022 3023 3024 3025
/**
 *  e1000_read_nvm_ich8lan - Read word(s) from the NVM
 *  @hw: pointer to the HW structure
 *  @offset: The offset (in bytes) of the word(s) to read.
 *  @words: Size of data to read in words
 *  @data: Pointer to the word(s) to read at offset.
 *
 *  Reads a word(s) from the NVM using the flash access registers.
 **/
static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
				  u16 *data)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u32 act_offset;
3026
	s32 ret_val = 0;
3027
	u32 bank = 0;
3028 3029 3030 3031
	u16 i, word;

	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
	    (words == 0)) {
3032
		e_dbg("nvm parameter(s) out of bounds\n");
3033 3034
		ret_val = -E1000_ERR_NVM;
		goto out;
3035 3036
	}

3037
	nvm->ops.acquire(hw);
3038

3039
	ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3040
	if (ret_val) {
3041
		e_dbg("Could not detect valid bank, assuming bank 0\n");
3042 3043
		bank = 0;
	}
3044 3045

	act_offset = (bank) ? nvm->flash_bank_size : 0;
3046 3047
	act_offset += offset;

3048
	ret_val = 0;
3049
	for (i = 0; i < words; i++) {
3050 3051
		if (dev_spec->shadow_ram[offset + i].modified) {
			data[i] = dev_spec->shadow_ram[offset + i].value;
3052 3053 3054 3055 3056 3057 3058 3059 3060 3061
		} else {
			ret_val = e1000_read_flash_word_ich8lan(hw,
								act_offset + i,
								&word);
			if (ret_val)
				break;
			data[i] = word;
		}
	}

3062
	nvm->ops.release(hw);
3063

3064 3065
out:
	if (ret_val)
3066
		e_dbg("NVM read error: %d\n", ret_val);
3067

3068 3069 3070 3071 3072 3073 3074 3075 3076 3077 3078 3079 3080 3081 3082 3083 3084 3085
	return ret_val;
}

/**
 *  e1000_flash_cycle_init_ich8lan - Initialize flash
 *  @hw: pointer to the HW structure
 *
 *  This function does initial flash setup so that a new read/write/erase cycle
 *  can be started.
 **/
static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
{
	union ich8_hws_flash_status hsfsts;
	s32 ret_val = -E1000_ERR_NVM;

	hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);

	/* Check if the flash descriptor is valid */
B
Bruce Allan 已提交
3086
	if (!hsfsts.hsf_status.fldesvalid) {
3087
		e_dbg("Flash descriptor invalid.  SW Sequencing must be used.\n");
3088 3089 3090 3091 3092 3093 3094 3095 3096
		return -E1000_ERR_NVM;
	}

	/* Clear FCERR and DAEL in hw status by writing 1 */
	hsfsts.hsf_status.flcerr = 1;
	hsfsts.hsf_status.dael = 1;

	ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);

B
Bruce Allan 已提交
3097
	/* Either we should have a hardware SPI cycle in progress
3098 3099
	 * bit to check against, in order to start a new cycle or
	 * FDONE bit should be changed in the hardware so that it
3100
	 * is 1 after hardware reset, which can then be used as an
3101 3102 3103 3104
	 * indication whether a cycle is in progress or has been
	 * completed.
	 */

B
Bruce Allan 已提交
3105
	if (!hsfsts.hsf_status.flcinprog) {
B
Bruce Allan 已提交
3106
		/* There is no cycle running at present,
B
Bruce Allan 已提交
3107
		 * so we can start a cycle.
3108 3109
		 * Begin by setting Flash Cycle Done.
		 */
3110 3111 3112 3113
		hsfsts.hsf_status.flcdone = 1;
		ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
		ret_val = 0;
	} else {
3114
		s32 i;
3115

B
Bruce Allan 已提交
3116
		/* Otherwise poll for sometime so the current
3117 3118
		 * cycle has a chance to end before giving up.
		 */
3119
		for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
3120
			hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
B
Bruce Allan 已提交
3121
			if (!hsfsts.hsf_status.flcinprog) {
3122 3123 3124 3125 3126
				ret_val = 0;
				break;
			}
			udelay(1);
		}
3127
		if (!ret_val) {
B
Bruce Allan 已提交
3128
			/* Successful in waiting for previous cycle to timeout,
3129 3130
			 * now set the Flash Cycle Done.
			 */
3131 3132 3133
			hsfsts.hsf_status.flcdone = 1;
			ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
		} else {
J
Joe Perches 已提交
3134
			e_dbg("Flash controller busy, cannot get access\n");
3135 3136 3137 3138 3139 3140 3141 3142 3143 3144 3145 3146 3147 3148 3149 3150 3151 3152 3153 3154 3155 3156 3157 3158 3159 3160 3161
		}
	}

	return ret_val;
}

/**
 *  e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
 *  @hw: pointer to the HW structure
 *  @timeout: maximum time to wait for completion
 *
 *  This function starts a flash cycle and waits for its completion.
 *  Returns 0 when the cycle completed without a flash cycle error,
 *  -E1000_ERR_NVM on timeout or error.
 **/
static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
{
	union ich8_hws_flash_ctrl hsflctl;
	union ich8_hws_flash_status hsfsts;
	u32 i = 0;

	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
	hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
	hsflctl.hsf_ctrl.flcgo = 1;
	ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);

	/* wait till FDONE bit is set to 1 */
	do {
		hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
		if (hsfsts.hsf_status.flcdone)
			break;
		udelay(1);
	} while (i++ < timeout);

	if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
		return 0;

	return -E1000_ERR_NVM;
}

/**
 *  e1000_read_flash_word_ich8lan - Read word from flash
 *  @hw: pointer to the HW structure
 *  @offset: offset to data location
 *  @data: pointer to the location for storing the data
 *
 *  Reads the flash word at offset into data.  Offset is converted
 *  to bytes before read.
 **/
static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
					 u16 *data)
{
	/* Must convert offset into bytes. */
	offset <<= 1;

	return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
}

3191 3192 3193 3194 3195 3196 3197 3198 3199 3200 3201 3202 3203 3204 3205 3206 3207 3208 3209 3210 3211 3212 3213
/**
 *  e1000_read_flash_byte_ich8lan - Read byte from flash
 *  @hw: pointer to the HW structure
 *  @offset: The offset of the byte to read.
 *  @data: Pointer to a byte to store the value read.
 *
 *  Reads a single byte from the NVM using the flash access registers.
 **/
static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
					 u8 *data)
{
	s32 ret_val;
	u16 word = 0;

	ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
	if (ret_val)
		return ret_val;

	*data = (u8)word;

	return 0;
}

3214 3215 3216 3217 3218 3219 3220 3221 3222 3223 3224 3225 3226 3227 3228 3229 3230 3231 3232
/**
 *  e1000_read_flash_data_ich8lan - Read byte or word from NVM
 *  @hw: pointer to the HW structure
 *  @offset: The offset (in bytes) of the byte or word to read.
 *  @size: Size of data to read, 1=byte 2=word
 *  @data: Pointer to the word to store the value read.
 *
 *  Reads a byte or word from the NVM using the flash access registers.
 **/
static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
					 u8 size, u16 *data)
{
	union ich8_hws_flash_status hsfsts;
	union ich8_hws_flash_ctrl hsflctl;
	u32 flash_linear_addr;
	u32 flash_data = 0;
	s32 ret_val = -E1000_ERR_NVM;
	u8 count = 0;

B
Bruce Allan 已提交
3233
	if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
3234 3235
		return -E1000_ERR_NVM;

3236 3237
	flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
			     hw->nvm.flash_base_addr);
3238 3239 3240 3241 3242

	do {
		udelay(1);
		/* Steps */
		ret_val = e1000_flash_cycle_init_ich8lan(hw);
3243
		if (ret_val)
3244 3245 3246 3247 3248 3249 3250 3251 3252 3253
			break;

		hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
		hsflctl.hsf_ctrl.fldbcount = size - 1;
		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
		ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);

		ew32flash(ICH_FLASH_FADDR, flash_linear_addr);

3254 3255 3256
		ret_val =
		    e1000_flash_cycle_ich8lan(hw,
					      ICH_FLASH_READ_COMMAND_TIMEOUT);
3257

B
Bruce Allan 已提交
3258
		/* Check if FCERR is set to 1, if set to 1, clear it
3259 3260
		 * and try the whole sequence a few more times, else
		 * read in (shift in) the Flash Data0, the order is
3261 3262
		 * least significant byte first msb to lsb
		 */
3263
		if (!ret_val) {
3264
			flash_data = er32flash(ICH_FLASH_FDATA0);
B
Bruce Allan 已提交
3265
			if (size == 1)
3266
				*data = (u8)(flash_data & 0x000000FF);
B
Bruce Allan 已提交
3267
			else if (size == 2)
3268 3269 3270
				*data = (u16)(flash_data & 0x0000FFFF);
			break;
		} else {
B
Bruce Allan 已提交
3271
			/* If we've gotten here, then things are probably
3272 3273 3274 3275 3276
			 * completely hosed, but if the error condition is
			 * detected, it won't hurt to give it another try...
			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
			 */
			hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
B
Bruce Allan 已提交
3277
			if (hsfsts.hsf_status.flcerr) {
3278 3279
				/* Repeat for some time before giving up. */
				continue;
B
Bruce Allan 已提交
3280
			} else if (!hsfsts.hsf_status.flcdone) {
3281
				e_dbg("Timeout error - flash cycle did not complete.\n");
3282 3283 3284 3285 3286 3287 3288 3289 3290 3291 3292 3293 3294 3295 3296 3297 3298 3299 3300 3301 3302 3303 3304 3305 3306 3307
				break;
			}
		}
	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);

	return ret_val;
}

/**
 *  e1000_write_nvm_ich8lan - Write word(s) to the NVM
 *  @hw: pointer to the HW structure
 *  @offset: The offset (in bytes) of the word(s) to write.
 *  @words: Size of data to write in words
 *  @data: Pointer to the word(s) to write at offset.
 *
 *  Writes a byte or word to the NVM using the flash access registers.
 *  The words are only staged in the shadow RAM here; they are committed
 *  to the flash by e1000_update_nvm_checksum_ich8lan().
 **/
static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
				   u16 *data)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u16 i;

	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
	    (words == 0)) {
		e_dbg("nvm parameter(s) out of bounds\n");
		return -E1000_ERR_NVM;
	}

	nvm->ops.acquire(hw);

	for (i = 0; i < words; i++) {
		dev_spec->shadow_ram[offset + i].modified = true;
		dev_spec->shadow_ram[offset + i].value = data[i];
	}

	nvm->ops.release(hw);

	return 0;
}

/**
 *  e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
 *  @hw: pointer to the HW structure
 *
 *  The NVM checksum is updated by calling the generic update_nvm_checksum,
 *  which writes the checksum to the shadow ram.  The changes in the shadow
 *  ram are then committed to the EEPROM by processing each bank at a time
 *  checking for the modified bit and writing only the pending changes.
3332
 *  After a successful commit, the shadow ram is cleared and is ready for
3333 3334 3335 3336 3337 3338
 *  future writes.
 **/
static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3339
	u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
3340 3341 3342 3343 3344
	s32 ret_val;
	u16 data;

	ret_val = e1000e_update_nvm_checksum_generic(hw);
	if (ret_val)
3345
		goto out;
3346 3347

	if (nvm->type != e1000_nvm_flash_sw)
3348
		goto out;
3349

3350
	nvm->ops.acquire(hw);
3351

B
Bruce Allan 已提交
3352
	/* We're writing to the opposite bank so if we're on bank 1,
3353
	 * write to bank 0 etc.  We also need to erase the segment that
3354 3355
	 * is going to be written
	 */
B
Bruce Allan 已提交
3356
	ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3357
	if (ret_val) {
3358
		e_dbg("Could not detect valid bank, assuming bank 0\n");
3359
		bank = 0;
3360
	}
3361 3362

	if (bank == 0) {
3363 3364
		new_bank_offset = nvm->flash_bank_size;
		old_bank_offset = 0;
3365
		ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
3366 3367
		if (ret_val)
			goto release;
3368 3369 3370
	} else {
		old_bank_offset = nvm->flash_bank_size;
		new_bank_offset = 0;
3371
		ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
3372 3373
		if (ret_val)
			goto release;
3374 3375 3376
	}

	for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
B
Bruce Allan 已提交
3377
		/* Determine whether to write the value stored
3378
		 * in the other NVM bank or a modified value stored
3379 3380
		 * in the shadow RAM
		 */
3381 3382 3383
		if (dev_spec->shadow_ram[i].modified) {
			data = dev_spec->shadow_ram[i].value;
		} else {
3384
			ret_val = e1000_read_flash_word_ich8lan(hw, i +
3385 3386
								old_bank_offset,
								&data);
3387 3388
			if (ret_val)
				break;
3389 3390
		}

B
Bruce Allan 已提交
3391
		/* If the word is 0x13, then make sure the signature bits
3392 3393 3394 3395
		 * (15:14) are 11b until the commit has completed.
		 * This will allow us to write 10b which indicates the
		 * signature is valid.  We want to do this after the write
		 * has completed so that we don't mark the segment valid
3396 3397
		 * while the write is still in progress
		 */
3398 3399 3400 3401 3402 3403
		if (i == E1000_ICH_NVM_SIG_WORD)
			data |= E1000_ICH_NVM_SIG_MASK;

		/* Convert offset to bytes. */
		act_offset = (i + new_bank_offset) << 1;

3404
		usleep_range(100, 200);
3405 3406 3407 3408 3409 3410 3411
		/* Write the bytes to the new bank. */
		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
							       act_offset,
							       (u8)data);
		if (ret_val)
			break;

3412
		usleep_range(100, 200);
3413
		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
3414 3415
							       act_offset + 1,
							       (u8)(data >> 8));
3416 3417 3418 3419
		if (ret_val)
			break;
	}

B
Bruce Allan 已提交
3420
	/* Don't bother writing the segment valid bits if sector
3421 3422
	 * programming failed.
	 */
3423
	if (ret_val) {
3424
		/* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */
3425
		e_dbg("Flash commit failed.\n");
3426
		goto release;
3427 3428
	}

B
Bruce Allan 已提交
3429
	/* Finally validate the new segment by setting bit 15:14
3430 3431
	 * to 10b in word 0x13 , this can be done without an
	 * erase as well since these bits are 11 to start with
3432 3433
	 * and we need to change bit 14 to 0b
	 */
3434
	act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
3435
	ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
3436 3437 3438
	if (ret_val)
		goto release;

3439 3440 3441 3442
	data &= 0xBFFF;
	ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
						       act_offset * 2 + 1,
						       (u8)(data >> 8));
3443 3444
	if (ret_val)
		goto release;
3445

B
Bruce Allan 已提交
3446
	/* And invalidate the previously valid segment by setting
3447 3448
	 * its signature word (0x13) high_byte to 0b. This can be
	 * done without an erase because flash erase sets all bits
3449 3450
	 * to 1's. We can write 1's to 0's without an erase
	 */
3451 3452
	act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
	ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
3453 3454
	if (ret_val)
		goto release;
3455 3456 3457

	/* Great!  Everything worked, we can now clear the cached entries. */
	for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
3458
		dev_spec->shadow_ram[i].modified = false;
3459 3460 3461
		dev_spec->shadow_ram[i].value = 0xFFFF;
	}

3462
release:
3463
	nvm->ops.release(hw);
3464

B
Bruce Allan 已提交
3465
	/* Reload the EEPROM, or else modifications will not appear
3466 3467
	 * until after the next adapter reset.
	 */
3468
	if (!ret_val) {
3469
		nvm->ops.reload(hw);
3470
		usleep_range(10000, 20000);
3471
	}
3472

3473 3474
out:
	if (ret_val)
3475
		e_dbg("NVM update error: %d\n", ret_val);
3476

3477 3478 3479 3480 3481 3482 3483 3484 3485 3486 3487 3488 3489 3490 3491
	return ret_val;
}

/**
 *  e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
 *  @hw: pointer to the HW structure
 *
 *  Check to see if checksum needs to be fixed by reading bit 6 in word 0x19.
 *  If the bit is 0, that the EEPROM had been modified, but the checksum was not
 *  calculated, in which case we need to calculate the checksum and set bit 6.
 **/
static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val;
	u16 data;
3492 3493
	u16 word;
	u16 valid_csum_mask;
3494

3495 3496 3497 3498
	/* Read NVM and check Invalid Image CSUM bit.  If this bit is 0,
	 * the checksum needs to be fixed.  This bit is an indication that
	 * the NVM was prepared by OEM software and did not calculate
	 * the checksum...a likely scenario.
3499
	 */
3500 3501 3502 3503 3504 3505 3506 3507 3508 3509 3510 3511
	switch (hw->mac.type) {
	case e1000_pch_lpt:
		word = NVM_COMPAT;
		valid_csum_mask = NVM_COMPAT_VALID_CSUM;
		break;
	default:
		word = NVM_FUTURE_INIT_WORD1;
		valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM;
		break;
	}

	ret_val = e1000_read_nvm(hw, word, 1, &data);
3512 3513 3514
	if (ret_val)
		return ret_val;

3515 3516 3517
	if (!(data & valid_csum_mask)) {
		data |= valid_csum_mask;
		ret_val = e1000_write_nvm(hw, word, 1, &data);
3518 3519 3520 3521 3522 3523 3524 3525 3526 3527
		if (ret_val)
			return ret_val;
		ret_val = e1000e_update_nvm_checksum(hw);
		if (ret_val)
			return ret_val;
	}

	return e1000e_validate_nvm_checksum_generic(hw);
}

3528 3529 3530 3531 3532 3533 3534 3535 3536 3537 3538 3539
/**
 *  e1000e_write_protect_nvm_ich8lan - Make the NVM read-only
 *  @hw: pointer to the HW structure
 *
 *  To prevent malicious write/erase of the NVM, set it to be read-only
 *  so that the hardware ignores all write/erase cycles of the NVM via
 *  the flash control registers.  The shadow-ram copy of the NVM will
 *  still be updated, however any updates to this copy will not stick
 *  across driver reloads.
 **/
void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw)
{
3540
	struct e1000_nvm_info *nvm = &hw->nvm;
3541 3542 3543 3544
	union ich8_flash_protected_range pr0;
	union ich8_hws_flash_status hsfsts;
	u32 gfpreg;

3545
	nvm->ops.acquire(hw);
3546 3547 3548 3549 3550 3551 3552 3553 3554 3555

	gfpreg = er32flash(ICH_FLASH_GFPREG);

	/* Write-protect GbE Sector of NVM */
	pr0.regval = er32flash(ICH_FLASH_PR0);
	pr0.range.base = gfpreg & FLASH_GFPREG_BASE_MASK;
	pr0.range.limit = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK);
	pr0.range.wpe = true;
	ew32flash(ICH_FLASH_PR0, pr0.regval);

B
Bruce Allan 已提交
3556
	/* Lock down a subset of GbE Flash Control Registers, e.g.
3557 3558 3559 3560 3561 3562 3563 3564
	 * PR0 to prevent the write-protection from being lifted.
	 * Once FLOCKDN is set, the registers protected by it cannot
	 * be written until FLOCKDN is cleared by a hardware reset.
	 */
	hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
	hsfsts.hsf_status.flockdn = true;
	ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval);

3565
	nvm->ops.release(hw);
3566 3567
}

3568 3569 3570 3571 3572 3573 3574 3575 3576 3577 3578 3579 3580 3581 3582 3583 3584 3585 3586 3587 3588 3589 3590
/**
 *  e1000_write_flash_data_ich8lan - Writes bytes to the NVM
 *  @hw: pointer to the HW structure
 *  @offset: The offset (in bytes) of the byte/word to read.
 *  @size: Size of data to read, 1=byte 2=word
 *  @data: The byte(s) to write to the NVM.
 *
 *  Writes one/two bytes to the NVM using the flash access registers.
 **/
static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
					  u8 size, u16 data)
{
	union ich8_hws_flash_status hsfsts;
	union ich8_hws_flash_ctrl hsflctl;
	u32 flash_linear_addr;
	u32 flash_data = 0;
	s32 ret_val;
	u8 count = 0;

	if (size < 1 || size > 2 || data > size * 0xff ||
	    offset > ICH_FLASH_LINEAR_ADDR_MASK)
		return -E1000_ERR_NVM;

3591 3592
	flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
			     hw->nvm.flash_base_addr);
3593 3594 3595 3596 3597 3598 3599 3600 3601 3602

	do {
		udelay(1);
		/* Steps */
		ret_val = e1000_flash_cycle_init_ich8lan(hw);
		if (ret_val)
			break;

		hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3603
		hsflctl.hsf_ctrl.fldbcount = size - 1;
3604 3605 3606 3607 3608 3609 3610 3611 3612 3613 3614 3615
		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
		ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);

		ew32flash(ICH_FLASH_FADDR, flash_linear_addr);

		if (size == 1)
			flash_data = (u32)data & 0x00FF;
		else
			flash_data = (u32)data;

		ew32flash(ICH_FLASH_FDATA0, flash_data);

B
Bruce Allan 已提交
3616
		/* check if FCERR is set to 1 , if set to 1, clear it
3617 3618
		 * and try the whole sequence a few more times else done
		 */
3619 3620 3621
		ret_val =
		    e1000_flash_cycle_ich8lan(hw,
					      ICH_FLASH_WRITE_COMMAND_TIMEOUT);
3622 3623 3624
		if (!ret_val)
			break;

B
Bruce Allan 已提交
3625
		/* If we're here, then things are most likely
3626 3627 3628 3629 3630
		 * completely hosed, but if the error condition
		 * is detected, it won't hurt to give it another
		 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
		 */
		hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
B
Bruce Allan 已提交
3631
		if (hsfsts.hsf_status.flcerr)
3632 3633
			/* Repeat for some time before giving up. */
			continue;
B
Bruce Allan 已提交
3634
		if (!hsfsts.hsf_status.flcdone) {
3635
			e_dbg("Timeout error - flash cycle did not complete.\n");
3636 3637 3638 3639 3640 3641 3642 3643 3644 3645 3646 3647 3648 3649 3650 3651 3652 3653 3654 3655 3656 3657 3658 3659 3660 3661 3662 3663 3664 3665 3666 3667 3668 3669 3670 3671 3672 3673 3674 3675 3676 3677 3678
			break;
		}
	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);

	return ret_val;
}

/**
 *  e1000_write_flash_byte_ich8lan - Write a single byte to NVM
 *  @hw: pointer to the HW structure
 *  @offset: The index of the byte to read.
 *  @data: The byte to write to the NVM.
 *
 *  Writes a single byte to the NVM using the flash access registers.
 **/
static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
					  u8 data)
{
	u16 word = (u16)data;

	return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
}

/**
 *  e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
 *  @hw: pointer to the HW structure
 *  @offset: The offset of the byte to write.
 *  @byte: The byte to write to the NVM.
 *
 *  Writes a single byte to the NVM using the flash access registers.
 *  Goes through a retry algorithm before giving up.
 **/
static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
						u32 offset, u8 byte)
{
	s32 ret_val;
	u16 program_retries;

	ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
	if (!ret_val)
		return ret_val;

	for (program_retries = 0; program_retries < 100; program_retries++) {
		e_dbg("Retrying Byte %2.2X at offset %u\n", byte, offset);
		usleep_range(100, 200);
		ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
		if (!ret_val)
			break;
	}
	if (program_retries == 100)
		return -E1000_ERR_NVM;

	return 0;
}

/**
 *  e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
 *  @hw: pointer to the HW structure
 *  @bank: 0 for first bank, 1 for second bank, etc.
 *
 *  Erases the bank specified. Each bank is a 4k block. Banks are 0 based.
 *  bank N is 4096 * N + flash_reg_addr.
 **/
static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	union ich8_hws_flash_status hsfsts;
	union ich8_hws_flash_ctrl hsflctl;
	u32 flash_linear_addr;
	/* bank size is in 16bit words - adjust to bytes */
	u32 flash_bank_size = nvm->flash_bank_size * 2;
	s32 ret_val;
	s32 count = 0;
	s32 j, iteration, sector_size;

	hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);

	/* Determine HW Sector size: Read BERASE bits of hw flash status
	 * register
	 * 00: The Hw sector is 256 bytes, hence we need to erase 16
	 *     consecutive sectors.  The start index for the nth Hw sector
	 *     can be calculated as = bank * 4096 + n * 256
	 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
	 *     The start index for the nth Hw sector can be calculated
	 *     as = bank * 4096
	 * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
	 *     (ich9 only, otherwise error condition)
	 * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
	 */
	switch (hsfsts.hsf_status.berasesz) {
	case 0:
		/* Hw sector size 256 */
		sector_size = ICH_FLASH_SEG_SIZE_256;
		iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
		break;
	case 1:
		sector_size = ICH_FLASH_SEG_SIZE_4K;
		iteration = 1;
		break;
	case 2:
		sector_size = ICH_FLASH_SEG_SIZE_8K;
		iteration = 1;
		break;
	case 3:
		sector_size = ICH_FLASH_SEG_SIZE_64K;
		iteration = 1;
		break;
	default:
		return -E1000_ERR_NVM;
	}

	/* Start with the base address, then add the sector offset. */
	flash_linear_addr = hw->nvm.flash_base_addr;
	flash_linear_addr += (bank) ? flash_bank_size : 0;

	for (j = 0; j < iteration; j++) {
		do {
			u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT;

			/* Steps */
			ret_val = e1000_flash_cycle_init_ich8lan(hw);
			if (ret_val)
				return ret_val;

			/* Write a value 11 (block Erase) in Flash
			 * Cycle field in hw flash control
			 */
			hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
			hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
			ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);

			/* Write the last 24 bits of an index within the
			 * block into Flash Linear address field in Flash
			 * Address.
			 */
			flash_linear_addr += (j * sector_size);
			ew32flash(ICH_FLASH_FADDR, flash_linear_addr);

			ret_val = e1000_flash_cycle_ich8lan(hw, timeout);
			if (!ret_val)
				break;

			/* Check if FCERR is set to 1.  If 1,
			 * clear it and try the whole sequence
			 * a few more times else Done
			 */
			hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
			if (hsfsts.hsf_status.flcerr)
				/* repeat for some time before giving up */
				continue;
			else if (!hsfsts.hsf_status.flcdone)
				return ret_val;
		} while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
	}

	return 0;
}

/**
 *  e1000_valid_led_default_ich8lan - Set the default LED settings
 *  @hw: pointer to the HW structure
 *  @data: Pointer to the LED settings
 *
 *  Reads the LED default settings from the NVM to data.  If the NVM LED
 *  settings is all 0's or F's, set the LED default to a valid LED default
 *  setting.
 **/
static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
{
	s32 ret_val;

	ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
	if (ret_val) {
		e_dbg("NVM Read Error\n");
		return ret_val;
	}

	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
		*data = ID_LED_DEFAULT_ICH8LAN;

	return 0;
}

3819 3820 3821 3822 3823 3824 3825 3826 3827
/**
 *  e1000_id_led_init_pchlan - store LED configurations
 *  @hw: pointer to the HW structure
 *
 *  PCH does not control LEDs via the LEDCTL register, rather it uses
 *  the PHY LED configuration register.
 *
 *  PCH also does not have an "always on" or "always off" mode which
 *  complicates the ID feature.  Instead of using the "on" mode to indicate
 *  in ledctl_mode2 the LEDs to use for ID (see e1000e_id_led_init_generic()),
 *  use "link_up" mode.  The LEDs will still ID on request if there is no
 *  link based on logic in e1000_led_[on|off]_pchlan().
 **/
static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
	const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
	u16 data, i, temp, shift;

	/* Get default ID LED modes */
	ret_val = hw->nvm.ops.valid_led_default(hw, &data);
	if (ret_val)
		return ret_val;

	mac->ledctl_default = er32(LEDCTL);
	mac->ledctl_mode1 = mac->ledctl_default;
	mac->ledctl_mode2 = mac->ledctl_default;

	for (i = 0; i < 4; i++) {
		/* NVM packs 4 bits of mode per LED; the PHY LED config
		 * register uses a 5-bit field per LED, hence the two
		 * different shift widths below.
		 */
		temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
		shift = (i * 5);
		/* ledctl_mode1: modes where LED1 is "off" or "on" */
		switch (temp) {
		case ID_LED_ON1_DEF2:
		case ID_LED_ON1_ON2:
		case ID_LED_ON1_OFF2:
			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
			mac->ledctl_mode1 |= (ledctl_on << shift);
			break;
		case ID_LED_OFF1_DEF2:
		case ID_LED_OFF1_ON2:
		case ID_LED_OFF1_OFF2:
			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
			mac->ledctl_mode1 |= (ledctl_off << shift);
			break;
		default:
			/* Do nothing */
			break;
		}
		/* ledctl_mode2: modes where LED2 is "off" or "on" */
		switch (temp) {
		case ID_LED_DEF1_ON2:
		case ID_LED_ON1_ON2:
		case ID_LED_OFF1_ON2:
			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
			mac->ledctl_mode2 |= (ledctl_on << shift);
			break;
		case ID_LED_DEF1_OFF2:
		case ID_LED_ON1_OFF2:
		case ID_LED_OFF1_OFF2:
			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
			mac->ledctl_mode2 |= (ledctl_off << shift);
			break;
		default:
			/* Do nothing */
			break;
		}
	}

	return 0;
}

3891 3892 3893 3894 3895 3896 3897 3898 3899 3900 3901 3902 3903 3904
/**
 *  e1000_get_bus_info_ich8lan - Get/Set the bus type and width
 *  @hw: pointer to the HW structure
 *
 *  ICH8 use the PCI Express bus, but does not contain a PCI Express
 *  Capability register, so the bus width is hard coded.
 **/
static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
{
	struct e1000_bus_info *bus = &hw->bus;
	s32 ret_val;

	ret_val = e1000e_get_bus_info_pcie(hw);

	/* ICH devices are "PCI Express"-ish.  They have
	 * a configuration space, but do not contain
	 * PCI Express Capability registers, so bus width
	 * must be hardcoded.
	 */
	if (bus->width == e1000_bus_width_unknown)
		bus->width = e1000_bus_width_pcie_x1;

	return ret_val;
}

/**
 *  e1000_reset_hw_ich8lan - Reset the hardware
 *  @hw: pointer to the HW structure
 *
 *  Does a full reset of the hardware which includes a reset of the PHY and
 *  MAC.
 **/
static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
{
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u16 kum_cfg;
	u32 ctrl, reg;
	s32 ret_val;

	/* Prevent the PCI-E bus from sticking if there is no TLP connection
	 * on the last TLP read/write transaction when MAC is reset.
	 */
	ret_val = e1000e_disable_pcie_master(hw);
	if (ret_val)
		e_dbg("PCI-E Master disable polling has failed.\n");

	e_dbg("Masking off all interrupts\n");
	ew32(IMC, 0xffffffff);

	/* Disable the Transmit and Receive units.  Then delay to allow
	 * any pending transactions to complete before we hit the MAC
	 * with the global reset.
	 */
	ew32(RCTL, 0);
	ew32(TCTL, E1000_TCTL_PSP);
	e1e_flush();

	usleep_range(10000, 20000);

	/* Workaround for ICH8 bit corruption issue in FIFO memory */
	if (hw->mac.type == e1000_ich8lan) {
		/* Set Tx and Rx buffer allocation to 8k apiece. */
		ew32(PBA, E1000_PBA_8K);
		/* Set Packet Buffer Size to 16k. */
		ew32(PBS, E1000_PBS_16K);
	}

	if (hw->mac.type == e1000_pchlan) {
		/* Save the NVM K1 bit setting */
		ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg);
		if (ret_val)
			return ret_val;

		if (kum_cfg & E1000_NVM_K1_ENABLE)
			dev_spec->nvm_k1_enabled = true;
		else
			dev_spec->nvm_k1_enabled = false;
	}

	ctrl = er32(CTRL);

	if (!hw->phy.ops.check_reset_block(hw)) {
		/* Full-chip reset requires MAC and PHY reset at the same
		 * time to make sure the interface between MAC and the
		 * external PHY is reset.
		 */
		ctrl |= E1000_CTRL_PHY_RST;

		/* Gate automatic PHY configuration by hardware on
		 * non-managed 82579
		 */
		if ((hw->mac.type == e1000_pch2lan) &&
		    !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
			e1000_gate_hw_phy_config_ich8lan(hw, true);
	}
	ret_val = e1000_acquire_swflag_ich8lan(hw);
	e_dbg("Issuing a global reset to ich8lan\n");
	ew32(CTRL, (ctrl | E1000_CTRL_RST));
	/* cannot issue a flush here because it hangs the hardware */
	msleep(20);

	/* Set Phy Config Counter to 50msec */
	if (hw->mac.type == e1000_pch2lan) {
		reg = er32(FEXTNVM3);
		reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
		reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
		ew32(FEXTNVM3, reg);
	}

	/* Release the swflag only if it was successfully acquired above */
	if (!ret_val)
		clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state);

	if (ctrl & E1000_CTRL_PHY_RST) {
		ret_val = hw->phy.ops.get_cfg_done(hw);
		if (ret_val)
			return ret_val;

		ret_val = e1000_post_phy_reset_ich8lan(hw);
		if (ret_val)
			return ret_val;
	}

	/* For PCH, this write will make sure that any noise
	 * will be detected as a CRC error and be dropped rather than show up
	 * as a bad packet to the DMA engine.
	 */
	if (hw->mac.type == e1000_pchlan)
		ew32(CRC_OFFSET, 0x65656565);

	/* Mask and clear any pending interrupts left over from the reset */
	ew32(IMC, 0xffffffff);
	er32(ICR);

	reg = er32(KABGTXD);
	reg |= E1000_KABGTXD_BGSQLBIAS;
	ew32(KABGTXD, reg);

	return 0;
}

/**
 *  e1000_init_hw_ich8lan - Initialize the hardware
 *  @hw: pointer to the HW structure
 *
 *  Prepares the hardware for transmit and receive by doing the following:
 *   - initialize hardware bits
 *   - initialize LED identification
 *   - setup receive address registers
 *   - setup flow control
 *   - setup transmit descriptors
 *   - clear statistics
 **/
static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	u32 ctrl_ext, txdctl, snoop;
	s32 ret_val;
	u16 i;

	e1000_initialize_hw_bits_ich8lan(hw);

	/* Initialize identification LED */
	ret_val = mac->ops.id_led_init(hw);
	/* An error is not fatal and we should not stop init due to this */
	if (ret_val)
		e_dbg("Error initializing identification LED\n");

	/* Setup the receive address. */
	e1000e_init_rx_addrs(hw, mac->rar_entry_count);

	/* Zero out the Multicast HASH table */
	e_dbg("Zeroing the MTA\n");
	for (i = 0; i < mac->mta_reg_count; i++)
		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);

	/* The 82578 Rx buffer will stall if wakeup is enabled in host and
	 * the ME.  Disable wakeup by clearing the host wakeup bit.
	 * Reset the phy after disabling host wakeup to reset the Rx buffer.
	 */
	if (hw->phy.type == e1000_phy_82578) {
		e1e_rphy(hw, BM_PORT_GEN_CFG, &i);
		i &= ~BM_WUC_HOST_WU_BIT;
		e1e_wphy(hw, BM_PORT_GEN_CFG, i);
		ret_val = e1000_phy_hw_reset_ich8lan(hw);
		if (ret_val)
			return ret_val;
	}

	/* Setup link and flow control */
	ret_val = mac->ops.setup_link(hw);

	/* Set the transmit descriptor write-back policy for both queues */
	txdctl = er32(TXDCTL(0));
	txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
		  E1000_TXDCTL_FULL_TX_DESC_WB);
	txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
		  E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
	ew32(TXDCTL(0), txdctl);
	txdctl = er32(TXDCTL(1));
	txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
		  E1000_TXDCTL_FULL_TX_DESC_WB);
	txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
		  E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
	ew32(TXDCTL(1), txdctl);

	/* ICH8 has opposite polarity of no_snoop bits.
	 * By default, we should use snoop behavior.
	 */
	if (mac->type == e1000_ich8lan)
		snoop = PCIE_ICH8_SNOOP_ALL;
	else
		snoop = (u32)~(PCIE_NO_SNOOP_ALL);
	e1000e_set_pcie_no_snoop(hw, snoop);

	ctrl_ext = er32(CTRL_EXT);
	ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
	ew32(CTRL_EXT, ctrl_ext);

	/* Clear all of the statistics registers (clear on read).  It is
	 * important that we do this after we have tried to establish link
	 * because the symbol error count will increment wildly if there
	 * is no link.
	 */
	e1000_clear_hw_cntrs_ich8lan(hw);

	/* Note: propagates the setup_link() result; LED-init errors above
	 * are intentionally non-fatal.
	 */
	return ret_val;
}
4117

4118 4119 4120 4121 4122 4123 4124 4125 4126 4127 4128 4129 4130 4131
/**
 *  e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
 *  @hw: pointer to the HW structure
 *
 *  Sets/Clears required hardware bits necessary for correctly setting up the
 *  hardware for transmit and receive.
 **/
static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
{
	u32 reg;

	/* Extended Device Control */
	reg = er32(CTRL_EXT);
	reg |= (1 << 22);
	/* Enable PHY low-power state when MAC is at D3 w/o WoL */
	if (hw->mac.type >= e1000_pchlan)
		reg |= E1000_CTRL_EXT_PHYPDEN;
	ew32(CTRL_EXT, reg);

	/* Transmit Descriptor Control 0 */
	reg = er32(TXDCTL(0));
	reg |= (1 << 22);
	ew32(TXDCTL(0), reg);

	/* Transmit Descriptor Control 1 */
	reg = er32(TXDCTL(1));
	reg |= (1 << 22);
	ew32(TXDCTL(1), reg);

	/* Transmit Arbitration Control 0 */
	reg = er32(TARC(0));
	if (hw->mac.type == e1000_ich8lan)
		reg |= (1 << 28) | (1 << 29);
	reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
	ew32(TARC(0), reg);

	/* Transmit Arbitration Control 1 */
	reg = er32(TARC(1));
	/* Bit 28 must be cleared when multiple Tx requests are enabled */
	if (er32(TCTL) & E1000_TCTL_MULR)
		reg &= ~(1 << 28);
	else
		reg |= (1 << 28);
	reg |= (1 << 24) | (1 << 26) | (1 << 30);
	ew32(TARC(1), reg);

	/* Device Status */
	if (hw->mac.type == e1000_ich8lan) {
		reg = er32(STATUS);
		reg &= ~(1 << 31);
		ew32(STATUS, reg);
	}

	/* work-around descriptor data corruption issue during nfs v2 udp
	 * traffic, just disable the nfs filtering capability
	 */
	reg = er32(RFCTL);
	reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);

	/* Disable IPv6 extension header parsing because some malformed
	 * IPv6 headers can hang the Rx.
	 */
	if (hw->mac.type == e1000_ich8lan)
		reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
	ew32(RFCTL, reg);

	/* Enable ECC on Lynxpoint */
	if (hw->mac.type == e1000_pch_lpt) {
		reg = er32(PBECCSTS);
		reg |= E1000_PBECCSTS_ECC_ENABLE;
		ew32(PBECCSTS, reg);

		reg = er32(CTRL);
		reg |= E1000_CTRL_MEHE;
		ew32(CTRL, reg);
	}
}

/**
 *  e1000_setup_link_ich8lan - Setup flow control and link settings
 *  @hw: pointer to the HW structure
 *
 *  Determines which flow control settings to use, then configures flow
 *  control.  Calls the appropriate media-specific link configuration
 *  function.  Assuming the adapter has a valid link partner, a valid link
 *  should be established.  Assumes the hardware has previously been reset
 *  and the transmitter and receiver are not enabled.
 **/
static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val;

	/* If the PHY is blocked from reset, the manageability firmware owns
	 * link configuration; leave everything as-is.
	 */
	if (hw->phy.ops.check_reset_block(hw))
		return 0;

	/* ICH parts do not have a word in the NVM to determine
	 * the default flow control setting, so we explicitly
	 * set it to full.
	 */
	if (hw->fc.requested_mode == e1000_fc_default) {
		/* Workaround h/w hang when Tx flow control enabled */
		if (hw->mac.type == e1000_pchlan)
			hw->fc.requested_mode = e1000_fc_rx_pause;
		else
			hw->fc.requested_mode = e1000_fc_full;
	}

	/* Save off the requested flow control mode for use later.  Depending
	 * on the link partner's capabilities, we may or may not use this mode.
	 */
	hw->fc.current_mode = hw->fc.requested_mode;

	e_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode);

	/* Continue to configure the copper link. */
	ret_val = hw->mac.ops.setup_physical_interface(hw);
	if (ret_val)
		return ret_val;

	ew32(FCTTV, hw->fc.pause_time);
	/* PCH-family PHYs also need pause time programmed into the PHY */
	if ((hw->phy.type == e1000_phy_82578) ||
	    (hw->phy.type == e1000_phy_82579) ||
	    (hw->phy.type == e1000_phy_i217) ||
	    (hw->phy.type == e1000_phy_82577)) {
		ew32(FCRTV_PCH, hw->fc.refresh_time);

		ret_val = e1e_wphy(hw, PHY_REG(BM_PORT_CTRL_PAGE, 27),
				   hw->fc.pause_time);
		if (ret_val)
			return ret_val;
	}

	return e1000e_set_fc_watermarks(hw);
}

/**
 *  e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
 *  @hw: pointer to the HW structure
 *
 *  Configures the kumeran interface to the PHY to wait the appropriate time
 *  when polling the PHY, then call the generic setup_copper_link to finish
 *  configuring the copper link.
 **/
static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 ret_val;
	u16 reg_data;

	/* Force link up at the MAC; speed/duplex are still negotiated */
	ctrl = er32(CTRL);
	ctrl |= E1000_CTRL_SLU;
	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
	ew32(CTRL, ctrl);

	/* Set the mac to wait the maximum time between each iteration
	 * and increase the max iterations when polling the phy;
	 * this fixes erroneous timeouts at 10Mbps.
	 */
	ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_TIMEOUTS, 0xFFFF);
	if (ret_val)
		return ret_val;
	ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
				       &reg_data);
	if (ret_val)
		return ret_val;
	reg_data |= 0x3F;
	ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
					reg_data);
	if (ret_val)
		return ret_val;

	/* Dispatch PHY-specific copper link setup */
	switch (hw->phy.type) {
	case e1000_phy_igp_3:
		ret_val = e1000e_copper_link_setup_igp(hw);
		if (ret_val)
			return ret_val;
		break;
	case e1000_phy_bm:
	case e1000_phy_82578:
		ret_val = e1000e_copper_link_setup_m88(hw);
		if (ret_val)
			return ret_val;
		break;
	case e1000_phy_82577:
	case e1000_phy_82579:
		ret_val = e1000_copper_link_setup_82577(hw);
		if (ret_val)
			return ret_val;
		break;
	case e1000_phy_ife:
		/* IFE PHY: configure MDI/MDI-X per the requested setting */
		ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &reg_data);
		if (ret_val)
			return ret_val;

		reg_data &= ~IFE_PMC_AUTO_MDIX;

		switch (hw->phy.mdix) {
		case 1:
			reg_data &= ~IFE_PMC_FORCE_MDIX;
			break;
		case 2:
			reg_data |= IFE_PMC_FORCE_MDIX;
			break;
		case 0:
		default:
			reg_data |= IFE_PMC_AUTO_MDIX;
			break;
		}
		ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, reg_data);
		if (ret_val)
			return ret_val;
		break;
	default:
		break;
	}

	return e1000e_setup_copper_link(hw);
}

4336 4337 4338 4339 4340 4341 4342 4343 4344 4345 4346 4347 4348 4349 4350 4351 4352 4353 4354 4355 4356 4357 4358 4359 4360
/**
 *  e1000_setup_copper_link_pch_lpt - Configure MAC/PHY interface
 *  @hw: pointer to the HW structure
 *
 *  Calls the PHY specific link setup function and then calls the
 *  generic setup_copper_link to finish configuring the link for
 *  Lynxpoint PCH devices
 **/
static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw)
{
	s32 ret_val;
	u32 mac_ctrl = er32(CTRL);

	/* Force link up at the MAC while leaving speed/duplex to
	 * autonegotiation.
	 */
	mac_ctrl |= E1000_CTRL_SLU;
	mac_ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
	ew32(CTRL, mac_ctrl);

	ret_val = e1000_copper_link_setup_82577(hw);
	if (ret_val)
		return ret_val;

	return e1000e_setup_copper_link(hw);
}

4361 4362 4363 4364 4365 4366
/**
 *  e1000_get_link_up_info_ich8lan - Get current link speed and duplex
 *  @hw: pointer to the HW structure
 *  @speed: pointer to store current link speed
 *  @duplex: pointer to store the current link duplex
 *
 *  Calls the generic get_speed_and_duplex to retrieve the current link
 *  information and then calls the Kumeran lock loss workaround for links at
 *  gigabit speeds.
 **/
static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
					  u16 *duplex)
{
	s32 ret_val = e1000e_get_speed_and_duplex_copper(hw, speed, duplex);

	if (ret_val)
		return ret_val;

	/* The Kumeran lock loss workaround only applies to ICH8 parts with
	 * an IGP3 PHY running at gigabit speed.
	 */
	if ((hw->mac.type != e1000_ich8lan) ||
	    (hw->phy.type != e1000_phy_igp_3) || (*speed != SPEED_1000))
		return 0;

	return e1000_kmrn_lock_loss_workaround_ich8lan(hw);
}

/**
 *  e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
 *  @hw: pointer to the HW structure
 *
 *  Work-around for 82566 Kumeran PCS lock loss:
 *  On link status change (i.e. PCI reset, speed change) and link is up and
 *  speed is gigabit-
 *    0) if workaround is optionally disabled do nothing
 *    1) wait 1ms for Kumeran link to come up
 *    2) check Kumeran Diagnostic register PCS lock loss bit
 *    3) if not set the link is locked (all is good), otherwise...
 *    4) reset the PHY
 *    5) repeat up to 10 times
 *  Note: this is only called for IGP3 copper when speed is 1gb.
 **/
static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
{
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u32 phy_ctrl;
	s32 ret_val;
	u16 i, data;
	bool link;

	if (!dev_spec->kmrn_lock_loss_workaround_enabled)
		return 0;

	/* Make sure link is up before proceeding.  If not just return.
	 * Attempting this while link is negotiating fouled up link
	 * stability
	 */
	ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
	if (!link)
		return 0;

	for (i = 0; i < 10; i++) {
		/* read once to clear */
		ret_val = e1e_rphy(hw, IGP3_KMRN_DIAG, &data);
		if (ret_val)
			return ret_val;
		/* and again to get new status */
		ret_val = e1e_rphy(hw, IGP3_KMRN_DIAG, &data);
		if (ret_val)
			return ret_val;

		/* check for PCS lock */
		if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
			return 0;

		/* Issue PHY reset */
		e1000_phy_hw_reset(hw);
		mdelay(5);
	}
	/* All retries exhausted: disable GigE link negotiation */
	phy_ctrl = er32(PHY_CTRL);
	phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
		     E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
	ew32(PHY_CTRL, phy_ctrl);

	/* Call gig speed drop workaround on Gig disable before accessing
	 * any PHY registers
	 */
	e1000e_gig_downshift_workaround_ich8lan(hw);

	/* unable to acquire PCS lock */
	return -E1000_ERR_PHY;
}

/**
4456
 *  e1000e_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
4457
 *  @hw: pointer to the HW structure
4458
 *  @state: boolean value used to set the current Kumeran workaround state
4459
 *
4460 4461
 *  If ICH8, set the current Kumeran workaround state (enabled - true
 *  /disabled - false).
4462 4463
 **/
void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
4464
						  bool state)
4465 4466 4467 4468
{
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;

	if (hw->mac.type != e1000_ich8lan) {
4469
		e_dbg("Workaround applies to ICH8 only.\n");
4470 4471 4472 4473 4474 4475 4476 4477 4478 4479 4480 4481 4482 4483 4484 4485 4486 4487 4488 4489
		return;
	}

	dev_spec->kmrn_lock_loss_workaround_enabled = state;
}

/**
 *  e1000_ipg3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
 *  @hw: pointer to the HW structure
 *
 *  Workaround for 82566 power-down on D3 entry:
 *    1) disable gigabit link
 *    2) write VR power-down enable
 *    3) read it back
 *  Continue if successful, else issue LCD reset and repeat
 **/
void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
{
	u32 reg;
	u16 data;
	u8 retry = 0;

	if (hw->phy.type != e1000_phy_igp_3)
		return;

	/* Try the workaround twice (if needed) */
	do {
		/* Disable link */
		reg = er32(PHY_CTRL);
		reg |= (E1000_PHY_CTRL_GBE_DISABLE |
			E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
		ew32(PHY_CTRL, reg);

		/* Call gig speed drop workaround on Gig disable before
		 * accessing any PHY registers
		 */
		if (hw->mac.type == e1000_ich8lan)
			e1000e_gig_downshift_workaround_ich8lan(hw);

		/* Write VR power-down enable */
		e1e_rphy(hw, IGP3_VR_CTRL, &data);
		data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		e1e_wphy(hw, IGP3_VR_CTRL, data | IGP3_VR_CTRL_MODE_SHUTDOWN);

		/* Read it back and test */
		e1e_rphy(hw, IGP3_VR_CTRL, &data);
		data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
		if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
			break;

		/* Issue PHY reset and repeat at most one more time */
		reg = er32(CTRL);
		ew32(CTRL, reg | E1000_CTRL_PHY_RST);
		retry++;
	} while (retry);
}

/**
 *  e1000e_gig_downshift_workaround_ich8lan - WoL from S5 stops working
 *  @hw: pointer to the HW structure
 *
 *  Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC),
 *  LPLU, Gig disable, MDIC PHY reset):
 *    1) Set Kumeran Near-end loopback
 *    2) Clear Kumeran Near-end loopback
 *  Should only be called for ICH8[m] devices with any 1G Phy.
 **/
void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val;
	u16 reg_data;

	/* Workaround applies only to ICH8 with a non-IFE (i.e. 1G) PHY */
	if ((hw->mac.type != e1000_ich8lan) || (hw->phy.type == e1000_phy_ife))
		return;

	ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
				       &reg_data);
	if (ret_val)
		return;
	/* Pulse the Kumeran near-end loopback bit: set... */
	reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
	ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
					reg_data);
	if (ret_val)
		return;
	/* ...then clear (best effort; final write result is ignored) */
	reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
	e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, reg_data);
}

4558
/**
 *  e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
 *  @hw: pointer to the HW structure
 *
 *  During S0 to Sx transition, it is possible the link remains at gig
 *  instead of negotiating to a lower speed.  Before going to Sx, set
 *  'Gig Disable' to force link speed negotiation to a lower speed based on
 *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
 *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
 *  needs to be written.
 *
 *  Parts that support (and are linked to a partner which support) EEE in
 *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
 *  than 10Mbps w/o EEE.
 **/
void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
{
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u32 phy_ctrl;
	s32 ret_val;

	phy_ctrl = er32(PHY_CTRL);
	phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;

	if (hw->phy.type == e1000_phy_i217) {
		u16 phy_reg, device_id = hw->adapter->pdev->device;

		/* On I218 parts, stop requesting the PLL clock before Sx */
		if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
		    (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
		    (device_id == E1000_DEV_ID_PCH_I218_LM3) ||
		    (device_id == E1000_DEV_ID_PCH_I218_V3)) {
			u32 fextnvm6 = er32(FEXTNVM6);

			ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
		}

		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			goto out;

		if (!dev_spec->eee_disable) {
			u16 eee_advert;

			ret_val =
			    e1000_read_emi_reg_locked(hw,
						      I217_EEE_ADVERTISEMENT,
						      &eee_advert);
			if (ret_val)
				goto release;

			/* Disable LPLU if both link partners support 100BaseT
			 * EEE and 100Full is advertised on both ends of the
			 * link.
			 */
			if ((eee_advert & I82579_EEE_100_SUPPORTED) &&
			    (dev_spec->eee_lp_ability &
			     I82579_EEE_100_SUPPORTED) &&
			    (hw->phy.autoneg_advertised & ADVERTISE_100_FULL))
				phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
					      E1000_PHY_CTRL_NOND0A_LPLU);
		}

		/* For i217 Intel Rapid Start Technology support,
		 * when the system is going into Sx and no manageability engine
		 * is present, the driver must configure proxy to reset only on
		 * power good.  LPI (Low Power Idle) state must also reset only
		 * on power good, as well as the MTA (Multicast table array).
		 * The SMBus release must also be disabled on LCD reset.
		 */
		if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
			/* Enable proxy to reset only on power good. */
			e1e_rphy_locked(hw, I217_PROXY_CTRL, &phy_reg);
			phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
			e1e_wphy_locked(hw, I217_PROXY_CTRL, phy_reg);

			/* Set bit enable LPI (EEE) to reset only on
			 * power good.
			 */
			e1e_rphy_locked(hw, I217_SxCTRL, &phy_reg);
			phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET;
			e1e_wphy_locked(hw, I217_SxCTRL, phy_reg);

			/* Disable the SMB release on LCD reset. */
			e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg);
			phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE;
			e1e_wphy_locked(hw, I217_MEMPWR, phy_reg);
		}

		/* Enable MTA to reset for Intel Rapid Start Technology
		 * Support
		 */
		e1e_rphy_locked(hw, I217_CGFREG, &phy_reg);
		phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
		e1e_wphy_locked(hw, I217_CGFREG, phy_reg);

release:
		hw->phy.ops.release(hw);
	}
out:
	ew32(PHY_CTRL, phy_ctrl);

	if (hw->mac.type == e1000_ich8lan)
		e1000e_gig_downshift_workaround_ich8lan(hw);

	if (hw->mac.type >= e1000_pchlan) {
		e1000_oem_bits_config_ich8lan(hw, false);

		/* Reset PHY to activate OEM bits on 82577/8 */
		if (hw->mac.type == e1000_pchlan)
			e1000e_phy_hw_reset_generic(hw);

		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			return;
		e1000_write_smbus_addr(hw);
		hw->phy.ops.release(hw);
	}
}

4676 4677 4678 4679 4680 4681 4682 4683
/**
 *  e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
 *  @hw: pointer to the HW structure
 *
 *  During Sx to S0 transitions on non-managed devices or managed devices
 *  on which PHY resets are not blocked, if the PHY registers cannot be
 *  accessed properly by the s/w toggle the LANPHYPC value to power cycle
 *  the PHY.
 *  On i217, setup Intel Rapid Start Technology.
 **/
void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
{
	s32 ret_val;

	if (hw->mac.type < e1000_pch2lan)
		return;

	ret_val = e1000_init_phy_workarounds_pchlan(hw);
	if (ret_val) {
		e_dbg("Failed to init PHY flow ret_val=%d\n", ret_val);
		return;
	}

	/* For i217 Intel Rapid Start Technology support when the system
	 * is transitioning from Sx and no manageability engine is present
	 * configure SMBus to restore on reset, disable proxy, and enable
	 * the reset on MTA (Multicast table array).
	 */
	if (hw->phy.type == e1000_phy_i217) {
		u16 phy_reg;

		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val) {
			e_dbg("Failed to setup iRST\n");
			return;
		}

		if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
			/* Restore clear on SMB if no manageability engine
			 * is present
			 */
			ret_val = e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg);
			if (ret_val)
				goto release;
			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
			e1e_wphy_locked(hw, I217_MEMPWR, phy_reg);

			/* Disable Proxy */
			e1e_wphy_locked(hw, I217_PROXY_CTRL, 0);
		}
		/* Enable reset on MTA */
		ret_val = e1e_rphy_locked(hw, I217_CGFREG, &phy_reg);
		if (ret_val)
			goto release;
		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
		e1e_wphy_locked(hw, I217_CGFREG, phy_reg);
release:
		if (ret_val)
			e_dbg("Error %d in resume workarounds\n", ret_val);
		hw->phy.ops.release(hw);
	}
}

4739 4740 4741 4742 4743 4744 4745 4746 4747 4748 4749 4750 4751 4752 4753 4754
/**
 *  e1000_cleanup_led_ich8lan - Restore the default LED operation
 *  @hw: pointer to the HW structure
 *
 *  Return the LED back to the default configuration.
 **/
static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
{
	if (hw->phy.type != e1000_phy_ife) {
		/* Non-IFE PHYs: LEDs are driven by the MAC LEDCTL register */
		ew32(LEDCTL, hw->mac.ledctl_default);
		return 0;
	}

	/* IFE PHY: clear the special LED control register instead */
	return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0);
}

/**
 *  e1000_led_on_ich8lan - Turn LEDs on
 *  @hw: pointer to the HW structure
 *
 *  Turn on the LEDs.
 **/
static s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
{
	if (hw->phy.type != e1000_phy_ife) {
		/* Non-IFE PHYs: program the "on" LED mode into LEDCTL */
		ew32(LEDCTL, hw->mac.ledctl_mode2);
		return 0;
	}

	/* IFE PHY: force the LEDs on via the special control register */
	return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED,
			(IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
}

/**
4771
 *  e1000_led_off_ich8lan - Turn LEDs off
4772 4773
 *  @hw: pointer to the HW structure
 *
4774
 *  Turn off the LEDs.
4775 4776 4777 4778 4779
 **/
static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
{
	if (hw->phy.type == e1000_phy_ife)
		return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED,
4780 4781
				(IFE_PSCL_PROBE_MODE |
				 IFE_PSCL_PROBE_LEDS_OFF));
4782 4783 4784 4785 4786

	ew32(LEDCTL, hw->mac.ledctl_mode1);
	return 0;
}

4787 4788 4789 4790 4791 4792 4793 4794
/**
 *  e1000_setup_led_pchlan - Configures SW controllable LED
 *  @hw: pointer to the HW structure
 *
 *  This prepares the SW controllable LED for use.
 **/
static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
{
4795
	return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_mode1);
4796 4797 4798 4799 4800 4801 4802 4803 4804 4805
}

/**
 *  e1000_cleanup_led_pchlan - Restore the default LED operation
 *  @hw: pointer to the HW structure
 *
 *  Return the LED back to the default configuration.
 **/
static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
{
4806
	return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_default);
4807 4808 4809 4810 4811 4812 4813 4814 4815 4816 4817 4818 4819
}

/**
 *  e1000_led_on_pchlan - Turn LEDs on
 *  @hw: pointer to the HW structure
 *
 *  Turn on the LEDs.
 **/
static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
{
	u16 data = (u16)hw->mac.ledctl_mode2;
	u32 i, led;

B
Bruce Allan 已提交
4820
	/* If no link, then turn LED on by setting the invert bit
4821 4822 4823 4824 4825 4826 4827 4828 4829 4830 4831 4832 4833 4834 4835
	 * for each LED that's mode is "link_up" in ledctl_mode2.
	 */
	if (!(er32(STATUS) & E1000_STATUS_LU)) {
		for (i = 0; i < 3; i++) {
			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
			if ((led & E1000_PHY_LED0_MODE_MASK) !=
			    E1000_LEDCTL_MODE_LINK_UP)
				continue;
			if (led & E1000_PHY_LED0_IVRT)
				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
			else
				data |= (E1000_PHY_LED0_IVRT << (i * 5));
		}
	}

4836
	return e1e_wphy(hw, HV_LED_CONFIG, data);
4837 4838 4839 4840 4841 4842 4843 4844 4845 4846 4847 4848 4849
}

/**
 *  e1000_led_off_pchlan - Turn LEDs off
 *  @hw: pointer to the HW structure
 *
 *  Turn off the LEDs.
 **/
static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
{
	u16 data = (u16)hw->mac.ledctl_mode1;
	u32 i, led;

B
Bruce Allan 已提交
4850
	/* If no link, then turn LED off by clearing the invert bit
4851 4852 4853 4854 4855 4856 4857 4858 4859 4860 4861 4862 4863 4864 4865
	 * for each LED that's mode is "link_up" in ledctl_mode1.
	 */
	if (!(er32(STATUS) & E1000_STATUS_LU)) {
		for (i = 0; i < 3; i++) {
			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
			if ((led & E1000_PHY_LED0_MODE_MASK) !=
			    E1000_LEDCTL_MODE_LINK_UP)
				continue;
			if (led & E1000_PHY_LED0_IVRT)
				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
			else
				data |= (E1000_PHY_LED0_IVRT << (i * 5));
		}
	}

4866
	return e1e_wphy(hw, HV_LED_CONFIG, data);
4867 4868
}

4869
/**
4870
 *  e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
4871 4872
 *  @hw: pointer to the HW structure
 *
4873 4874 4875 4876 4877 4878 4879
 *  Read appropriate register for the config done bit for completion status
 *  and configure the PHY through s/w for EEPROM-less parts.
 *
 *  NOTE: some silicon which is EEPROM-less will fail trying to read the
 *  config done bit, so only an error is logged and continues.  If we were
 *  to return with error, EEPROM-less silicon would not be able to be reset
 *  or change link.
4880 4881 4882
 **/
static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
{
4883
	s32 ret_val = 0;
4884
	u32 bank = 0;
4885
	u32 status;
4886

4887
	e1000e_get_cfg_done_generic(hw);
4888

4889 4890 4891 4892 4893 4894
	/* Wait for indication from h/w that it has completed basic config */
	if (hw->mac.type >= e1000_ich10lan) {
		e1000_lan_init_done_ich8lan(hw);
	} else {
		ret_val = e1000e_get_auto_rd_done(hw);
		if (ret_val) {
B
Bruce Allan 已提交
4895
			/* When auto config read does not complete, do not
4896 4897 4898 4899 4900 4901
			 * return with an error. This can happen in situations
			 * where there is no eeprom and prevents getting link.
			 */
			e_dbg("Auto Read Done did not complete\n");
			ret_val = 0;
		}
4902 4903
	}

4904 4905 4906 4907 4908 4909
	/* Clear PHY Reset Asserted bit */
	status = er32(STATUS);
	if (status & E1000_STATUS_PHYRA)
		ew32(STATUS, status & ~E1000_STATUS_PHYRA);
	else
		e_dbg("PHY Reset Asserted not set - needs delay\n");
4910 4911

	/* If EEPROM is not marked present, init the IGP 3 PHY manually */
4912
	if (hw->mac.type <= e1000_ich9lan) {
B
Bruce Allan 已提交
4913
		if (!(er32(EECD) & E1000_EECD_PRES) &&
4914 4915 4916 4917 4918 4919
		    (hw->phy.type == e1000_phy_igp_3)) {
			e1000e_phy_init_script_igp3(hw);
		}
	} else {
		if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
			/* Maybe we should do a basic PHY config */
4920
			e_dbg("EEPROM not present\n");
4921
			ret_val = -E1000_ERR_CONFIG;
4922 4923 4924
		}
	}

4925
	return ret_val;
4926 4927
}

4928 4929 4930 4931 4932 4933 4934 4935 4936 4937 4938 4939 4940 4941 4942
/**
 * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
 * @hw: pointer to the HW structure
 *
 * In the case of a PHY power down to save power, or to turn off link during a
 * driver unload, or wake on lan is not enabled, remove the link.
 **/
static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
{
	/* If the management interface is not enabled, then power down */
	if (!(hw->mac.ops.check_mng_mode(hw) ||
	      hw->phy.ops.check_reset_block(hw)))
		e1000_power_down_phy_copper(hw);
}

4943 4944 4945 4946 4947 4948 4949 4950 4951
/**
 *  e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
 *  @hw: pointer to the HW structure
 *
 *  Clears hardware counters specific to the silicon family and calls
 *  clear_hw_cntrs_generic to clear all general purpose counters.
 **/
static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
{
4952
	u16 phy_data;
4953
	s32 ret_val;
4954 4955 4956

	e1000e_clear_hw_cntrs_base(hw);

4957 4958 4959 4960 4961 4962
	er32(ALGNERRC);
	er32(RXERRC);
	er32(TNCRS);
	er32(CEXTERR);
	er32(TSCTC);
	er32(TSCTFC);
4963

4964 4965 4966
	er32(MGTPRC);
	er32(MGTPDC);
	er32(MGTPTC);
4967

4968 4969
	er32(IAC);
	er32(ICRXOC);
4970

4971 4972
	/* Clear PHY statistics registers */
	if ((hw->phy.type == e1000_phy_82578) ||
4973
	    (hw->phy.type == e1000_phy_82579) ||
B
Bruce Allan 已提交
4974
	    (hw->phy.type == e1000_phy_i217) ||
4975
	    (hw->phy.type == e1000_phy_82577)) {
4976 4977 4978 4979 4980 4981 4982 4983 4984 4985 4986 4987 4988 4989 4990 4991 4992 4993 4994 4995 4996 4997 4998
		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			return;
		ret_val = hw->phy.ops.set_page(hw,
					       HV_STATS_PAGE << IGP_PAGE_SHIFT);
		if (ret_val)
			goto release;
		hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
release:
		hw->phy.ops.release(hw);
4999
	}
5000 5001
}

J
Jeff Kirsher 已提交
5002
static const struct e1000_mac_operations ich8_mac_ops = {
5003
	/* check_mng_mode dependent on mac type */
5004
	.check_for_link		= e1000_check_for_copper_link_ich8lan,
5005
	/* cleanup_led dependent on mac type */
5006 5007
	.clear_hw_cntrs		= e1000_clear_hw_cntrs_ich8lan,
	.get_bus_info		= e1000_get_bus_info_ich8lan,
5008
	.set_lan_id		= e1000_set_lan_id_single_port,
5009
	.get_link_up_info	= e1000_get_link_up_info_ich8lan,
5010 5011
	/* led_on dependent on mac type */
	/* led_off dependent on mac type */
5012
	.update_mc_addr_list	= e1000e_update_mc_addr_list_generic,
5013 5014 5015
	.reset_hw		= e1000_reset_hw_ich8lan,
	.init_hw		= e1000_init_hw_ich8lan,
	.setup_link		= e1000_setup_link_ich8lan,
5016
	.setup_physical_interface = e1000_setup_copper_link_ich8lan,
5017
	/* id_led_init dependent on mac type */
5018
	.config_collision_dist	= e1000e_config_collision_dist_generic,
5019
	.rar_set		= e1000e_rar_set_generic,
5020
	.rar_get_count		= e1000e_rar_get_count_generic,
5021 5022
};

J
Jeff Kirsher 已提交
5023
static const struct e1000_phy_operations ich8_phy_ops = {
5024
	.acquire		= e1000_acquire_swflag_ich8lan,
5025
	.check_reset_block	= e1000_check_reset_block_ich8lan,
5026
	.commit			= NULL,
5027
	.get_cfg_done		= e1000_get_cfg_done_ich8lan,
5028
	.get_cable_length	= e1000e_get_cable_length_igp_2,
5029 5030 5031
	.read_reg		= e1000e_read_phy_reg_igp,
	.release		= e1000_release_swflag_ich8lan,
	.reset			= e1000_phy_hw_reset_ich8lan,
5032 5033
	.set_d0_lplu_state	= e1000_set_d0_lplu_state_ich8lan,
	.set_d3_lplu_state	= e1000_set_d3_lplu_state_ich8lan,
5034
	.write_reg		= e1000e_write_phy_reg_igp,
5035 5036
};

J
Jeff Kirsher 已提交
5037
static const struct e1000_nvm_operations ich8_nvm_ops = {
5038
	.acquire		= e1000_acquire_nvm_ich8lan,
5039
	.read			= e1000_read_nvm_ich8lan,
5040
	.release		= e1000_release_nvm_ich8lan,
5041
	.reload			= e1000e_reload_nvm_generic,
5042
	.update			= e1000_update_nvm_checksum_ich8lan,
5043
	.valid_led_default	= e1000_valid_led_default_ich8lan,
5044 5045
	.validate		= e1000_validate_nvm_checksum_ich8lan,
	.write			= e1000_write_nvm_ich8lan,
5046 5047
};

J
Jeff Kirsher 已提交
5048
const struct e1000_info e1000_ich8_info = {
5049 5050
	.mac			= e1000_ich8lan,
	.flags			= FLAG_HAS_WOL
5051
				  | FLAG_IS_ICH
5052 5053 5054 5055 5056
				  | FLAG_HAS_CTRLEXT_ON_LOAD
				  | FLAG_HAS_AMT
				  | FLAG_HAS_FLASH
				  | FLAG_APME_IN_WUC,
	.pba			= 8,
5057
	.max_hw_frame_size	= ETH_FRAME_LEN + ETH_FCS_LEN,
J
Jeff Kirsher 已提交
5058
	.get_variants		= e1000_get_variants_ich8lan,
5059 5060 5061 5062 5063
	.mac_ops		= &ich8_mac_ops,
	.phy_ops		= &ich8_phy_ops,
	.nvm_ops		= &ich8_nvm_ops,
};

J
Jeff Kirsher 已提交
5064
const struct e1000_info e1000_ich9_info = {
5065 5066
	.mac			= e1000_ich9lan,
	.flags			= FLAG_HAS_JUMBO_FRAMES
5067
				  | FLAG_IS_ICH
5068 5069 5070 5071 5072
				  | FLAG_HAS_WOL
				  | FLAG_HAS_CTRLEXT_ON_LOAD
				  | FLAG_HAS_AMT
				  | FLAG_HAS_FLASH
				  | FLAG_APME_IN_WUC,
5073
	.pba			= 18,
5074
	.max_hw_frame_size	= DEFAULT_JUMBO,
J
Jeff Kirsher 已提交
5075
	.get_variants		= e1000_get_variants_ich8lan,
5076 5077 5078 5079 5080
	.mac_ops		= &ich8_mac_ops,
	.phy_ops		= &ich8_phy_ops,
	.nvm_ops		= &ich8_nvm_ops,
};

J
Jeff Kirsher 已提交
5081
const struct e1000_info e1000_ich10_info = {
5082 5083 5084 5085 5086 5087 5088 5089
	.mac			= e1000_ich10lan,
	.flags			= FLAG_HAS_JUMBO_FRAMES
				  | FLAG_IS_ICH
				  | FLAG_HAS_WOL
				  | FLAG_HAS_CTRLEXT_ON_LOAD
				  | FLAG_HAS_AMT
				  | FLAG_HAS_FLASH
				  | FLAG_APME_IN_WUC,
5090
	.pba			= 18,
5091
	.max_hw_frame_size	= DEFAULT_JUMBO,
5092 5093 5094 5095 5096
	.get_variants		= e1000_get_variants_ich8lan,
	.mac_ops		= &ich8_mac_ops,
	.phy_ops		= &ich8_phy_ops,
	.nvm_ops		= &ich8_nvm_ops,
};
5097

J
Jeff Kirsher 已提交
5098
const struct e1000_info e1000_pch_info = {
5099 5100 5101 5102 5103 5104 5105
	.mac			= e1000_pchlan,
	.flags			= FLAG_IS_ICH
				  | FLAG_HAS_WOL
				  | FLAG_HAS_CTRLEXT_ON_LOAD
				  | FLAG_HAS_AMT
				  | FLAG_HAS_FLASH
				  | FLAG_HAS_JUMBO_FRAMES
5106
				  | FLAG_DISABLE_FC_PAUSE_TIME /* errata */
5107
				  | FLAG_APME_IN_WUC,
5108
	.flags2			= FLAG2_HAS_PHY_STATS,
5109 5110 5111 5112 5113 5114 5115
	.pba			= 26,
	.max_hw_frame_size	= 4096,
	.get_variants		= e1000_get_variants_ich8lan,
	.mac_ops		= &ich8_mac_ops,
	.phy_ops		= &ich8_phy_ops,
	.nvm_ops		= &ich8_nvm_ops,
};
5116

J
Jeff Kirsher 已提交
5117
const struct e1000_info e1000_pch2_info = {
5118 5119 5120
	.mac			= e1000_pch2lan,
	.flags			= FLAG_IS_ICH
				  | FLAG_HAS_WOL
5121
				  | FLAG_HAS_HW_TIMESTAMP
5122 5123 5124 5125 5126
				  | FLAG_HAS_CTRLEXT_ON_LOAD
				  | FLAG_HAS_AMT
				  | FLAG_HAS_FLASH
				  | FLAG_HAS_JUMBO_FRAMES
				  | FLAG_APME_IN_WUC,
5127 5128
	.flags2			= FLAG2_HAS_PHY_STATS
				  | FLAG2_HAS_EEE,
5129
	.pba			= 26,
5130
	.max_hw_frame_size	= 9018,
5131 5132 5133 5134 5135
	.get_variants		= e1000_get_variants_ich8lan,
	.mac_ops		= &ich8_mac_ops,
	.phy_ops		= &ich8_phy_ops,
	.nvm_ops		= &ich8_nvm_ops,
};
B
Bruce Allan 已提交
5136 5137 5138 5139 5140

const struct e1000_info e1000_pch_lpt_info = {
	.mac			= e1000_pch_lpt,
	.flags			= FLAG_IS_ICH
				  | FLAG_HAS_WOL
5141
				  | FLAG_HAS_HW_TIMESTAMP
B
Bruce Allan 已提交
5142 5143 5144 5145 5146 5147 5148 5149
				  | FLAG_HAS_CTRLEXT_ON_LOAD
				  | FLAG_HAS_AMT
				  | FLAG_HAS_FLASH
				  | FLAG_HAS_JUMBO_FRAMES
				  | FLAG_APME_IN_WUC,
	.flags2			= FLAG2_HAS_PHY_STATS
				  | FLAG2_HAS_EEE,
	.pba			= 26,
5150
	.max_hw_frame_size	= 9018,
B
Bruce Allan 已提交
5151 5152 5153 5154 5155
	.get_variants		= e1000_get_variants_ich8lan,
	.mac_ops		= &ich8_mac_ops,
	.phy_ops		= &ich8_phy_ops,
	.nvm_ops		= &ich8_nvm_ops,
};