// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020, Intel Corporation
 */

#include <linux/clk-provider.h>
#include <linux/pci.h>
#include <linux/dmi.h>
#include "dwmac-intel.h"
#include "dwmac4.h"
#include "stmmac.h"
#include "stmmac_ptp.h"

struct intel_priv_data {
	int mdio_adhoc_addr;	/* mdio address for serdes & etc */
	unsigned long crossts_adj;
	bool is_pse;
};

/* This struct is used to associate the PCI function of the MAC controller
 * on a board, discovered via DMI, with the address of the PHY connected to
 * that MAC. A negative address value means the MAC controller is not
 * connected to a PHY.
 */
struct stmmac_pci_func_data {
	unsigned int func;
	int phy_addr;
};

struct stmmac_pci_dmi_data {
	const struct stmmac_pci_func_data *func;
	size_t nfuncs;
};

struct stmmac_pci_info {
	int (*setup)(struct pci_dev *pdev, struct plat_stmmacenet_data *plat);
};

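/* Look up the PHY address for this PCI function in the DMI match table.
 * Returns -ENODEV if the board or the function is not listed.
 */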
static int stmmac_pci_find_phy_addr(struct pci_dev *pdev,
				    const struct dmi_system_id *dmi_list)
{
	const struct stmmac_pci_func_data *func_data;
	const struct stmmac_pci_dmi_data *dmi_data;
	const struct dmi_system_id *dmi_id;
	int func = PCI_FUNC(pdev->devfn);
	size_t n;

	dmi_id = dmi_first_match(dmi_list);
	if (!dmi_id)
		return -ENODEV;

	dmi_data = dmi_id->driver_data;
	func_data = dmi_data->func;

	for (n = 0; n < dmi_data->nfuncs; n++, func_data++)
		if (func_data->func == func)
			return func_data->phy_addr;

	return -ENODEV;
}

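/* Poll a SerDes register over the ad-hoc MDIO address until the masked
 * value matches the expected one. Retries up to 10 times with a
 * POLL_DELAY_US delay between reads and returns -ETIMEDOUT on failure.
 */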
static int serdes_status_poll(struct stmmac_priv *priv, int phyaddr,
			      int phyreg, u32 mask, u32 val)
{
	unsigned int retries = 10;
	int val_rd;

	do {
		val_rd = mdiobus_read(priv->mii, phyaddr, phyreg);
		if ((val_rd & mask) == (val & mask))
			return 0;
		udelay(POLL_DELAY_US);
	} while (--retries);

	return -ETIMEDOUT;
}

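/* SerDes power-up sequence: program the SerDes rate and PCLK for the
 * configured maximum speed, assert clk_req and wait for clk_ack, assert
 * the lane reset and wait for it to be reflected, move the lane to power
 * state P0 and, on PSE instances, ungate the SGMII PHY Rx clock.
 */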
static int intel_serdes_powerup(struct net_device *ndev, void *priv_data)
{
	struct intel_priv_data *intel_priv = priv_data;
	struct stmmac_priv *priv = netdev_priv(ndev);
	int serdes_phy_addr = 0;
	u32 data = 0;

	if (!intel_priv->mdio_adhoc_addr)
		return 0;

	serdes_phy_addr = intel_priv->mdio_adhoc_addr;

	/* Set the serdes rate and the PCLK rate */
	data = mdiobus_read(priv->mii, serdes_phy_addr,
			    SERDES_GCR0);

	data &= ~SERDES_RATE_MASK;
	data &= ~SERDES_PCLK_MASK;

	if (priv->plat->max_speed == 2500)
		data |= SERDES_RATE_PCIE_GEN2 << SERDES_RATE_PCIE_SHIFT |
			SERDES_PCLK_37p5MHZ << SERDES_PCLK_SHIFT;
	else
		data |= SERDES_RATE_PCIE_GEN1 << SERDES_RATE_PCIE_SHIFT |
			SERDES_PCLK_70MHZ << SERDES_PCLK_SHIFT;

	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* assert clk_req */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
	data |= SERDES_PLL_CLK;
	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* check for clk_ack assertion */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_PLL_CLK,
				  SERDES_PLL_CLK);

	if (data) {
		dev_err(priv->device, "Serdes PLL clk request timeout\n");
		return data;
	}

	/* assert lane reset */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
	data |= SERDES_RST;
	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* check for assert lane reset reflection */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_RST,
				  SERDES_RST);

	if (data) {
		dev_err(priv->device, "Serdes assert lane reset timeout\n");
		return data;
	}

	/*  move power state to P0 */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);

	data &= ~SERDES_PWR_ST_MASK;
	data |= SERDES_PWR_ST_P0 << SERDES_PWR_ST_SHIFT;

	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* Check for P0 state */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_PWR_ST_MASK,
				  SERDES_PWR_ST_P0 << SERDES_PWR_ST_SHIFT);

	if (data) {
		dev_err(priv->device, "Serdes power state P0 timeout.\n");
		return data;
	}

	/* PSE only - ungate SGMII PHY Rx Clock */
	if (intel_priv->is_pse)
		mdiobus_modify(priv->mii, serdes_phy_addr, SERDES_GCR0,
			       0, SERDES_PHY_RX_CLK);

	return 0;
}

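/* SerDes power-down sequence (reverse of intel_serdes_powerup): gate the
 * PSE SGMII PHY Rx clock, move the lane to power state P3, then de-assert
 * clk_req and the lane reset, polling SERDES_GSR0 after each step.
 */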
static void intel_serdes_powerdown(struct net_device *ndev, void *intel_data)
{
	struct intel_priv_data *intel_priv = intel_data;
	struct stmmac_priv *priv = netdev_priv(ndev);
	int serdes_phy_addr = 0;
	u32 data = 0;

	if (!intel_priv->mdio_adhoc_addr)
		return;

	serdes_phy_addr = intel_priv->mdio_adhoc_addr;

	/* PSE only - gate SGMII PHY Rx Clock */
	if (intel_priv->is_pse)
		mdiobus_modify(priv->mii, serdes_phy_addr, SERDES_GCR0,
			       SERDES_PHY_RX_CLK, 0);

	/*  move power state to P3 */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);

	data &= ~SERDES_PWR_ST_MASK;
	data |= SERDES_PWR_ST_P3 << SERDES_PWR_ST_SHIFT;

	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* Check for P3 state */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_PWR_ST_MASK,
				  SERDES_PWR_ST_P3 << SERDES_PWR_ST_SHIFT);

	if (data) {
		dev_err(priv->device, "Serdes power state P3 timeout\n");
		return;
	}

	/* de-assert clk_req */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
	data &= ~SERDES_PLL_CLK;
	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* check for clk_ack de-assert */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_PLL_CLK,
				  (u32)~SERDES_PLL_CLK);

	if (data) {
		dev_err(priv->device, "Serdes PLL clk de-assert timeout\n");
		return;
	}

	/* de-assert lane reset */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
	data &= ~SERDES_RST;
	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* check for de-assert lane reset reflection */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_RST,
				  (u32)~SERDES_RST);

	if (data) {
		dev_err(priv->device, "Serdes de-assert lane reset timeout\n");
		return;
	}
}

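/* Read the SerDes link mode and configure the platform accordingly:
 * 2500base-X (2.5 Gbps, no in-band AN) or SGMII (1 Gbps, in-band AN).
 */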
static void intel_speed_mode_2500(struct net_device *ndev, void *intel_data)
{
	struct intel_priv_data *intel_priv = intel_data;
	struct stmmac_priv *priv = netdev_priv(ndev);
	int serdes_phy_addr = 0;
	u32 data = 0;

	serdes_phy_addr = intel_priv->mdio_adhoc_addr;

	/* Determine the link speed mode: 2.5Gbps/1Gbps */
	data = mdiobus_read(priv->mii, serdes_phy_addr,
			    SERDES_GCR);

	if (((data & SERDES_LINK_MODE_MASK) >> SERDES_LINK_MODE_SHIFT) ==
	    SERDES_LINK_MODE_2G5) {
		dev_info(priv->device, "Link Speed Mode: 2.5Gbps\n");
		priv->plat->max_speed = 2500;
		priv->plat->phy_interface = PHY_INTERFACE_MODE_2500BASEX;
		priv->plat->mdio_bus_data->xpcs_an_inband = false;
	} else {
		priv->plat->max_speed = 1000;
		priv->plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
		priv->plat->mdio_bus_data->xpcs_an_inband = true;
	}
}

/* Program the PTP clock frequency for the different variants of
 * Intel mGBE, which have slightly different GPO mappings.
 */
static void intel_mgbe_ptp_clk_freq_config(void *npriv)
{
	struct stmmac_priv *priv = (struct stmmac_priv *)npriv;
	struct intel_priv_data *intel_priv;
	u32 gpio_value;

	intel_priv = (struct intel_priv_data *)priv->plat->bsp_priv;

	gpio_value = readl(priv->ioaddr + GMAC_GPIO_STATUS);

	if (intel_priv->is_pse) {
		/* For PSE GbE, use 200MHz */
		gpio_value &= ~PSE_PTP_CLK_FREQ_MASK;
		gpio_value |= PSE_PTP_CLK_FREQ_200MHZ;
	} else {
		/* For PCH GbE, use 200MHz */
		gpio_value &= ~PCH_PTP_CLK_FREQ_MASK;
		gpio_value |= PCH_PTP_CLK_FREQ_200MHZ;
	}

	writel(gpio_value, priv->ioaddr + GMAC_GPIO_STATUS);
}

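/* Assemble the 64-bit ART timestamp from the four PMC_ART_VALUE* reads
 * over the ad-hoc MDIO address, most significant word first.
 */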
static void get_arttime(struct mii_bus *mii, int intel_adhoc_addr,
			u64 *art_time)
{
	u64 ns;

	ns = mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE3);
	ns <<= GMAC4_ART_TIME_SHIFT;
	ns |= mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE2);
	ns <<= GMAC4_ART_TIME_SHIFT;
	ns |= mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE1);
	ns <<= GMAC4_ART_TIME_SHIFT;
	ns |= mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE0);

	*art_time = ns;
}

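/* Hardware cross-timestamp callback: arm the selected auxiliary snapshot,
 * toggle GPO1 to generate the internal snapshot trigger, then pair the
 * captured MAC PTP time with the ART counter read over MDIO and scale the
 * ART cycles by the platform adjustment factor.
 */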
static int intel_crosststamp(ktime_t *device,
			     struct system_counterval_t *system,
			     void *ctx)
{
	struct intel_priv_data *intel_priv;

	struct stmmac_priv *priv = (struct stmmac_priv *)ctx;
	void __iomem *ptpaddr = priv->ptpaddr;
	void __iomem *ioaddr = priv->hw->pcsr;
	unsigned long flags;
	u64 art_time = 0;
	u64 ptp_time = 0;
	u32 num_snapshot;
	u32 gpio_value;
	u32 acr_value;
	int ret;
	u32 v;
	int i;

	if (!boot_cpu_has(X86_FEATURE_ART))
		return -EOPNOTSUPP;

	intel_priv = priv->plat->bsp_priv;

	/* Internal cross-timestamping and externally triggered event
	 * timestamping cannot run concurrently.
	 */
	if (priv->plat->ext_snapshot_en)
		return -EBUSY;

	mutex_lock(&priv->aux_ts_lock);
	/* Enable Internal snapshot trigger */
	acr_value = readl(ptpaddr + PTP_ACR);
	acr_value &= ~PTP_ACR_MASK;
	switch (priv->plat->int_snapshot_num) {
	case AUX_SNAPSHOT0:
		acr_value |= PTP_ACR_ATSEN0;
		break;
	case AUX_SNAPSHOT1:
		acr_value |= PTP_ACR_ATSEN1;
		break;
	case AUX_SNAPSHOT2:
		acr_value |= PTP_ACR_ATSEN2;
		break;
	case AUX_SNAPSHOT3:
		acr_value |= PTP_ACR_ATSEN3;
		break;
	default:
		mutex_unlock(&priv->aux_ts_lock);
		return -EINVAL;
	}
	writel(acr_value, ptpaddr + PTP_ACR);

	/* Clear FIFO */
	acr_value = readl(ptpaddr + PTP_ACR);
	acr_value |= PTP_ACR_ATSFC;
	writel(acr_value, ptpaddr + PTP_ACR);
	/* Release the mutex */
	mutex_unlock(&priv->aux_ts_lock);

	/* Trigger the internal snapshot signal: create a rising edge by
	 * toggling GPO1 low and back to high.
	 */
	gpio_value = readl(ioaddr + GMAC_GPIO_STATUS);
	gpio_value &= ~GMAC_GPO1;
	writel(gpio_value, ioaddr + GMAC_GPIO_STATUS);
	gpio_value |= GMAC_GPO1;
	writel(gpio_value, ioaddr + GMAC_GPIO_STATUS);

	/* Poll for time sync operation done */
	ret = readl_poll_timeout(priv->ioaddr + GMAC_INT_STATUS, v,
				 (v & GMAC_INT_TSIE), 100, 10000);

	if (ret == -ETIMEDOUT) {
		pr_err("%s: Wait for time sync operation timeout\n", __func__);
		return ret;
	}

	num_snapshot = (readl(ioaddr + GMAC_TIMESTAMP_STATUS) &
			GMAC_TIMESTAMP_ATSNS_MASK) >>
			GMAC_TIMESTAMP_ATSNS_SHIFT;

	/* Repeat until the timestamps are from the last FIFO segment */
	for (i = 0; i < num_snapshot; i++) {
		read_lock_irqsave(&priv->ptp_lock, flags);
		stmmac_get_ptptime(priv, ptpaddr, &ptp_time);
		*device = ns_to_ktime(ptp_time);
		read_unlock_irqrestore(&priv->ptp_lock, flags);
		get_arttime(priv->mii, intel_priv->mdio_adhoc_addr, &art_time);
		*system = convert_art_to_tsc(art_time);
	}

	system->cycles *= intel_priv->crossts_adj;

	return 0;
}

static void intel_mgbe_pse_crossts_adj(struct intel_priv_data *intel_priv,
				       int base)
{
	if (boot_cpu_has(X86_FEATURE_ART)) {
		unsigned int art_freq;

		/* On systems that support ART, ART frequency can be obtained
		 * from ECX register of CPUID leaf (0x15).
		 */
		art_freq = cpuid_ecx(ART_CPUID_LEAF);
		do_div(art_freq, base);
		intel_priv->crossts_adj = art_freq;
	}
}

static void common_default_data(struct plat_stmmacenet_data *plat)
{
	plat->clk_csr = 2;	/* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
	plat->has_gmac = 1;
	plat->force_sf_dma_mode = 1;

	plat->mdio_bus_data->needs_reset = true;

	/* Set default value for multicast hash bins */
	plat->multicast_filter_bins = HASH_TABLE_SIZE;

	/* Set default value for unicast filter entries */
	plat->unicast_filter_entries = 1;

	/* Set the maxmtu to a default of JUMBO_LEN */
	plat->maxmtu = JUMBO_LEN;

	/* Set default number of RX and TX queues to use */
	plat->tx_queues_to_use = 1;
	plat->rx_queues_to_use = 1;

	/* Disable Priority config by default */
	plat->tx_queues_cfg[0].use_prio = false;
	plat->rx_queues_cfg[0].use_prio = false;

	/* Disable RX queues routing by default */
	plat->rx_queues_cfg[0].pkt_route = 0x0;
}

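/* Platform data shared by all Intel mGbE (GMAC4) instances: queue, FIFO,
 * DMA and AXI defaults, a fixed-rate "stmmac" clock derived from the PTP
 * clock rate, xPCS setup for SGMII, cross-timestamping hooks and the MSI
 * vector layout of this controller.
 */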
static int intel_mgbe_common_data(struct pci_dev *pdev,
				  struct plat_stmmacenet_data *plat)
{
	char clk_name[20];
	int ret;
	int i;

	plat->pdev = pdev;
	plat->phy_addr = -1;
	plat->clk_csr = 5;
	plat->has_gmac = 0;
	plat->has_gmac4 = 1;
	plat->force_sf_dma_mode = 0;
	plat->tso_en = 1;

	/* Multiplying factor applied to the clk_eee_i clock time
	 * period to bring it closer to 100 ns. The value should be
	 * programmed such that clk_eee_time_period * (MULT_FACT_100NS + 1)
	 * falls within 80 ns to 120 ns:
	 * clk_eee frequency is 19.2 MHz,
	 * clk_eee_time_period is 52 ns,
	 * 52 ns * (1 + 1) = 104 ns,
	 * so MULT_FACT_100NS = 1.
	 */
	plat->mult_fact_100ns = 1;

	plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;

	for (i = 0; i < plat->rx_queues_to_use; i++) {
		plat->rx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;
		plat->rx_queues_cfg[i].chan = i;

		/* Disable Priority config by default */
		plat->rx_queues_cfg[i].use_prio = false;

		/* Disable RX queues routing by default */
		plat->rx_queues_cfg[i].pkt_route = 0x0;
	}

	for (i = 0; i < plat->tx_queues_to_use; i++) {
		plat->tx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;

		/* Disable Priority config by default */
		plat->tx_queues_cfg[i].use_prio = false;
		/* Default TX Q0 to use TSO and the rest of the TX queues for TBS */
		if (i > 0)
			plat->tx_queues_cfg[i].tbs_en = 1;
	}

	/* FIFO size is 4096 bytes for 1 tx/rx queue */
	plat->tx_fifo_size = plat->tx_queues_to_use * 4096;
	plat->rx_fifo_size = plat->rx_queues_to_use * 4096;

	plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR;
	plat->tx_queues_cfg[0].weight = 0x09;
	plat->tx_queues_cfg[1].weight = 0x0A;
	plat->tx_queues_cfg[2].weight = 0x0B;
	plat->tx_queues_cfg[3].weight = 0x0C;
	plat->tx_queues_cfg[4].weight = 0x0D;
	plat->tx_queues_cfg[5].weight = 0x0E;
	plat->tx_queues_cfg[6].weight = 0x0F;
	plat->tx_queues_cfg[7].weight = 0x10;

	plat->dma_cfg->pbl = 32;
	plat->dma_cfg->pblx8 = true;
	plat->dma_cfg->fixed_burst = 0;
	plat->dma_cfg->mixed_burst = 0;
	plat->dma_cfg->aal = 0;
	plat->dma_cfg->dche = true;

	plat->axi = devm_kzalloc(&pdev->dev, sizeof(*plat->axi),
				 GFP_KERNEL);
	if (!plat->axi)
		return -ENOMEM;

	plat->axi->axi_lpi_en = 0;
	plat->axi->axi_xit_frm = 0;
	plat->axi->axi_wr_osr_lmt = 1;
	plat->axi->axi_rd_osr_lmt = 1;
	plat->axi->axi_blen[0] = 4;
	plat->axi->axi_blen[1] = 8;
	plat->axi->axi_blen[2] = 16;

	plat->ptp_max_adj = plat->clk_ptp_rate;
	plat->eee_usecs_rate = plat->clk_ptp_rate;

	/* Set system clock */
	sprintf(clk_name, "%s-%s", "stmmac", pci_name(pdev));

	plat->stmmac_clk = clk_register_fixed_rate(&pdev->dev,
						   clk_name, NULL, 0,
						   plat->clk_ptp_rate);

	if (IS_ERR(plat->stmmac_clk)) {
		dev_warn(&pdev->dev, "Fail to register stmmac-clk\n");
		plat->stmmac_clk = NULL;
	}

	ret = clk_prepare_enable(plat->stmmac_clk);
	if (ret) {
		clk_unregister_fixed_rate(plat->stmmac_clk);
		return ret;
	}

	plat->ptp_clk_freq_config = intel_mgbe_ptp_clk_freq_config;

	/* Set default value for multicast hash bins */
	plat->multicast_filter_bins = HASH_TABLE_SIZE;

	/* Set default value for unicast filter entries */
	plat->unicast_filter_entries = 1;

	/* Set the maxmtu to a default of JUMBO_LEN */
	plat->maxmtu = JUMBO_LEN;

	plat->vlan_fail_q_en = true;

	/* Use the last Rx queue */
	plat->vlan_fail_q = plat->rx_queues_to_use - 1;

	/* Intel mgbe SGMII interface uses pcs-xpcs */
	if (plat->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		plat->mdio_bus_data->has_xpcs = true;
		plat->mdio_bus_data->xpcs_an_inband = true;
	}

	/* Ensure mdio bus scan skips intel serdes and pcs-xpcs */
	plat->mdio_bus_data->phy_mask = 1 << INTEL_MGBE_ADHOC_ADDR;
	plat->mdio_bus_data->phy_mask |= 1 << INTEL_MGBE_XPCS_ADDR;

	plat->int_snapshot_num = AUX_SNAPSHOT1;
	plat->ext_snapshot_num = AUX_SNAPSHOT0;

	plat->has_crossts = true;
	plat->crosststamp = intel_crosststamp;

	/* Setup MSI vector offset specific to Intel mGbE controller */
	plat->msi_mac_vec = 29;
	plat->msi_lpi_vec = 28;
	plat->msi_sfty_ce_vec = 27;
	plat->msi_sfty_ue_vec = 26;
	plat->msi_rx_base_vec = 0;
	plat->msi_tx_base_vec = 1;

	return 0;
}

static int ehl_common_data(struct pci_dev *pdev,
			   struct plat_stmmacenet_data *plat)
{
	plat->rx_queues_to_use = 8;
	plat->tx_queues_to_use = 8;
	plat->clk_ptp_rate = 200000000;
	plat->use_phy_wol = 1;

	plat->safety_feat_cfg->tsoee = 1;
	plat->safety_feat_cfg->mrxpee = 1;
	plat->safety_feat_cfg->mestee = 1;
	plat->safety_feat_cfg->mrxee = 1;
	plat->safety_feat_cfg->mtxee = 1;
	plat->safety_feat_cfg->epsi = 0;
	plat->safety_feat_cfg->edpp = 0;
	plat->safety_feat_cfg->prtyen = 0;
	plat->safety_feat_cfg->tmouten = 0;

	return intel_mgbe_common_data(pdev, plat);
}

static int ehl_sgmii_data(struct pci_dev *pdev,
			  struct plat_stmmacenet_data *plat)
{
	plat->bus_id = 1;
	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
	plat->speed_mode_2500 = intel_speed_mode_2500;
	plat->serdes_powerup = intel_serdes_powerup;
	plat->serdes_powerdown = intel_serdes_powerdown;

	return ehl_common_data(pdev, plat);
}

static struct stmmac_pci_info ehl_sgmii1g_info = {
	.setup = ehl_sgmii_data,
};

static int ehl_rgmii_data(struct pci_dev *pdev,
			  struct plat_stmmacenet_data *plat)
{
	plat->bus_id = 1;
	plat->phy_interface = PHY_INTERFACE_MODE_RGMII;

	return ehl_common_data(pdev, plat);
}

static struct stmmac_pci_info ehl_rgmii1g_info = {
	.setup = ehl_rgmii_data,
};

static int ehl_pse0_common_data(struct pci_dev *pdev,
				struct plat_stmmacenet_data *plat)
{
	struct intel_priv_data *intel_priv = plat->bsp_priv;

	intel_priv->is_pse = true;
	plat->bus_id = 2;
	plat->addr64 = 32;

	intel_mgbe_pse_crossts_adj(intel_priv, EHL_PSE_ART_MHZ);

	return ehl_common_data(pdev, plat);
}

static int ehl_pse0_rgmii1g_data(struct pci_dev *pdev,
				 struct plat_stmmacenet_data *plat)
{
	plat->phy_interface = PHY_INTERFACE_MODE_RGMII_ID;
	return ehl_pse0_common_data(pdev, plat);
}

static struct stmmac_pci_info ehl_pse0_rgmii1g_info = {
	.setup = ehl_pse0_rgmii1g_data,
};

static int ehl_pse0_sgmii1g_data(struct pci_dev *pdev,
				 struct plat_stmmacenet_data *plat)
{
	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
	plat->speed_mode_2500 = intel_speed_mode_2500;
	plat->serdes_powerup = intel_serdes_powerup;
	plat->serdes_powerdown = intel_serdes_powerdown;
	return ehl_pse0_common_data(pdev, plat);
}

static struct stmmac_pci_info ehl_pse0_sgmii1g_info = {
	.setup = ehl_pse0_sgmii1g_data,
};

static int ehl_pse1_common_data(struct pci_dev *pdev,
				struct plat_stmmacenet_data *plat)
{
	struct intel_priv_data *intel_priv = plat->bsp_priv;

	intel_priv->is_pse = true;
	plat->bus_id = 3;
	plat->addr64 = 32;

	intel_mgbe_pse_crossts_adj(intel_priv, EHL_PSE_ART_MHZ);

	return ehl_common_data(pdev, plat);
}

static int ehl_pse1_rgmii1g_data(struct pci_dev *pdev,
				 struct plat_stmmacenet_data *plat)
{
	plat->phy_interface = PHY_INTERFACE_MODE_RGMII_ID;
	return ehl_pse1_common_data(pdev, plat);
}

static struct stmmac_pci_info ehl_pse1_rgmii1g_info = {
	.setup = ehl_pse1_rgmii1g_data,
};

static int ehl_pse1_sgmii1g_data(struct pci_dev *pdev,
				 struct plat_stmmacenet_data *plat)
{
	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
	plat->speed_mode_2500 = intel_speed_mode_2500;
	plat->serdes_powerup = intel_serdes_powerup;
	plat->serdes_powerdown = intel_serdes_powerdown;
	return ehl_pse1_common_data(pdev, plat);
}

static struct stmmac_pci_info ehl_pse1_sgmii1g_info = {
	.setup = ehl_pse1_sgmii1g_data,
};

static int tgl_common_data(struct pci_dev *pdev,
			   struct plat_stmmacenet_data *plat)
{
	plat->rx_queues_to_use = 6;
	plat->tx_queues_to_use = 4;
	plat->clk_ptp_rate = 200000000;

	plat->safety_feat_cfg->tsoee = 1;
	plat->safety_feat_cfg->mrxpee = 0;
	plat->safety_feat_cfg->mestee = 1;
	plat->safety_feat_cfg->mrxee = 1;
	plat->safety_feat_cfg->mtxee = 1;
	plat->safety_feat_cfg->epsi = 0;
	plat->safety_feat_cfg->edpp = 0;
	plat->safety_feat_cfg->prtyen = 0;
	plat->safety_feat_cfg->tmouten = 0;

	return intel_mgbe_common_data(pdev, plat);
}

static int tgl_sgmii_phy0_data(struct pci_dev *pdev,
			       struct plat_stmmacenet_data *plat)
{
	plat->bus_id = 1;
	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
	plat->speed_mode_2500 = intel_speed_mode_2500;
	plat->serdes_powerup = intel_serdes_powerup;
	plat->serdes_powerdown = intel_serdes_powerdown;
	return tgl_common_data(pdev, plat);
}

static struct stmmac_pci_info tgl_sgmii1g_phy0_info = {
	.setup = tgl_sgmii_phy0_data,
};

static int tgl_sgmii_phy1_data(struct pci_dev *pdev,
			       struct plat_stmmacenet_data *plat)
{
	plat->bus_id = 2;
	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
	plat->speed_mode_2500 = intel_speed_mode_2500;
	plat->serdes_powerup = intel_serdes_powerup;
	plat->serdes_powerdown = intel_serdes_powerdown;
	return tgl_common_data(pdev, plat);
}

static struct stmmac_pci_info tgl_sgmii1g_phy1_info = {
	.setup = tgl_sgmii_phy1_data,
};

static int adls_sgmii_phy0_data(struct pci_dev *pdev,
				struct plat_stmmacenet_data *plat)
{
	plat->bus_id = 1;
	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;

	/* SerDes power up and power down are done in BIOS for ADL */

	return tgl_common_data(pdev, plat);
}

static struct stmmac_pci_info adls_sgmii1g_phy0_info = {
	.setup = adls_sgmii_phy0_data,
};

static int adls_sgmii_phy1_data(struct pci_dev *pdev,
				struct plat_stmmacenet_data *plat)
{
	plat->bus_id = 2;
	plat->phy_interface = PHY_INTERFACE_MODE_SGMII;

	/* SerDes power up and power down are done in BIOS for ADL */

	return tgl_common_data(pdev, plat);
}

static struct stmmac_pci_info adls_sgmii1g_phy1_info = {
	.setup = adls_sgmii_phy1_data,
};
static const struct stmmac_pci_func_data galileo_stmmac_func_data[] = {
	{
		.func = 6,
		.phy_addr = 1,
	},
};

static const struct stmmac_pci_dmi_data galileo_stmmac_dmi_data = {
	.func = galileo_stmmac_func_data,
	.nfuncs = ARRAY_SIZE(galileo_stmmac_func_data),
};

static const struct stmmac_pci_func_data iot2040_stmmac_func_data[] = {
	{
		.func = 6,
		.phy_addr = 1,
	},
	{
		.func = 7,
		.phy_addr = 1,
	},
};

static const struct stmmac_pci_dmi_data iot2040_stmmac_dmi_data = {
	.func = iot2040_stmmac_func_data,
	.nfuncs = ARRAY_SIZE(iot2040_stmmac_func_data),
};

static const struct dmi_system_id quark_pci_dmi[] = {
	{
		.matches = {
			DMI_EXACT_MATCH(DMI_BOARD_NAME, "Galileo"),
		},
		.driver_data = (void *)&galileo_stmmac_dmi_data,
	},
	{
		.matches = {
			DMI_EXACT_MATCH(DMI_BOARD_NAME, "GalileoGen2"),
		},
		.driver_data = (void *)&galileo_stmmac_dmi_data,
	},
	/* There are 2 types of SIMATIC IOT2000: IOT2020 and IOT2040.
	 * The asset tag "6ES7647-0AA00-0YA2" is only for IOT2020 which
	 * has only one pci network device while other asset tags are
	 * for IOT2040 which has two.
	 */
	{
		.matches = {
			DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
			DMI_EXACT_MATCH(DMI_BOARD_ASSET_TAG,
					"6ES7647-0AA00-0YA2"),
		},
		.driver_data = (void *)&galileo_stmmac_dmi_data,
	},
	{
		.matches = {
			DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
		},
		.driver_data = (void *)&iot2040_stmmac_dmi_data,
	},
	{}
};

static int quark_default_data(struct pci_dev *pdev,
			      struct plat_stmmacenet_data *plat)
{
	int ret;

	/* Set common default data first */
	common_default_data(plat);

	/* Refuse to load the driver and register net device if MAC controller
	 * does not connect to any PHY interface.
	 */
	ret = stmmac_pci_find_phy_addr(pdev, quark_pci_dmi);
	if (ret < 0) {
		/* Return error to the caller on DMI enabled boards. */
		if (dmi_get_system_info(DMI_BOARD_NAME))
			return ret;

		/* Galileo boards with old firmware don't support DMI. We always
		 * use 1 here as PHY address, so at least the first found MAC
		 * controller would be probed.
		 */
		ret = 1;
	}

	plat->bus_id = pci_dev_id(pdev);
	plat->phy_addr = ret;
	plat->phy_interface = PHY_INTERFACE_MODE_RMII;

	plat->dma_cfg->pbl = 16;
	plat->dma_cfg->pblx8 = true;
	plat->dma_cfg->fixed_burst = 1;
	/* AXI (TODO) */

	return 0;
}

static const struct stmmac_pci_info quark_info = {
	.setup = quark_default_data,
};

static int stmmac_config_single_msi(struct pci_dev *pdev,
				    struct plat_stmmacenet_data *plat,
				    struct stmmac_resources *res)
{
	int ret;

	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
	if (ret < 0) {
		dev_info(&pdev->dev, "%s: Single IRQ enablement failed\n",
			 __func__);
		return ret;
	}

	res->irq = pci_irq_vector(pdev, 0);
	res->wol_irq = res->irq;
	plat->multi_msi_en = 0;
	dev_info(&pdev->dev, "%s: Single IRQ enablement successful\n",
		 __func__);

	return 0;
}

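/* Request the full MSI/MSI-X vector range and map the per-queue IRQs:
 * RX queue i uses vector msi_rx_base_vec + 2 * i and TX queue i uses
 * msi_tx_base_vec + 2 * i, so RX and TX interleave on even/odd vectors.
 */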
static int stmmac_config_multi_msi(struct pci_dev *pdev,
				   struct plat_stmmacenet_data *plat,
				   struct stmmac_resources *res)
{
	int ret;
	int i;

	if (plat->msi_rx_base_vec >= STMMAC_MSI_VEC_MAX ||
	    plat->msi_tx_base_vec >= STMMAC_MSI_VEC_MAX) {
		dev_info(&pdev->dev, "%s: Invalid RX & TX vector defined\n",
			 __func__);
		return -1;
	}

	ret = pci_alloc_irq_vectors(pdev, 2, STMMAC_MSI_VEC_MAX,
				    PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (ret < 0) {
		dev_info(&pdev->dev, "%s: multi MSI enablement failed\n",
			 __func__);
		return ret;
	}

	/* For RX MSI */
	for (i = 0; i < plat->rx_queues_to_use; i++) {
		res->rx_irq[i] = pci_irq_vector(pdev,
						plat->msi_rx_base_vec + i * 2);
	}

	/* For TX MSI */
	for (i = 0; i < plat->tx_queues_to_use; i++) {
		res->tx_irq[i] = pci_irq_vector(pdev,
						plat->msi_tx_base_vec + i * 2);
	}

	if (plat->msi_mac_vec < STMMAC_MSI_VEC_MAX)
		res->irq = pci_irq_vector(pdev, plat->msi_mac_vec);
	if (plat->msi_wol_vec < STMMAC_MSI_VEC_MAX)
		res->wol_irq = pci_irq_vector(pdev, plat->msi_wol_vec);
	if (plat->msi_lpi_vec < STMMAC_MSI_VEC_MAX)
		res->lpi_irq = pci_irq_vector(pdev, plat->msi_lpi_vec);
	if (plat->msi_sfty_ce_vec < STMMAC_MSI_VEC_MAX)
		res->sfty_ce_irq = pci_irq_vector(pdev, plat->msi_sfty_ce_vec);
	if (plat->msi_sfty_ue_vec < STMMAC_MSI_VEC_MAX)
		res->sfty_ue_irq = pci_irq_vector(pdev, plat->msi_sfty_ue_vec);

	plat->multi_msi_en = 1;
	dev_info(&pdev->dev, "%s: multi MSI enablement successful\n", __func__);

	return 0;
}

/**
 * intel_eth_pci_probe
 *
 * @pdev: pci device pointer
 * @id: pointer to the table of device IDs
 *
 * Description: This probing function gets called for all PCI devices which
 * match the ID table and are not "owned" by another driver yet. This function
 * gets passed a "struct pci_dev *" for each device whose entry in the ID table
 * matches the device. The probe function returns zero when the driver chooses
 * to take "ownership" of the device or a negative error code otherwise.
 */
static int intel_eth_pci_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	struct stmmac_pci_info *info = (struct stmmac_pci_info *)id->driver_data;
	struct intel_priv_data *intel_priv;
	struct plat_stmmacenet_data *plat;
	struct stmmac_resources res;
	int ret;

	intel_priv = devm_kzalloc(&pdev->dev, sizeof(*intel_priv), GFP_KERNEL);
	if (!intel_priv)
		return -ENOMEM;

	plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
	if (!plat)
		return -ENOMEM;

	plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
					   sizeof(*plat->mdio_bus_data),
					   GFP_KERNEL);
	if (!plat->mdio_bus_data)
		return -ENOMEM;

	plat->dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*plat->dma_cfg),
				     GFP_KERNEL);
	if (!plat->dma_cfg)
		return -ENOMEM;

	plat->safety_feat_cfg = devm_kzalloc(&pdev->dev,
					     sizeof(*plat->safety_feat_cfg),
					     GFP_KERNEL);
	if (!plat->safety_feat_cfg)
		return -ENOMEM;

	/* Enable pci device */
	ret = pcim_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n",
			__func__);
		return ret;
	}

	ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
	if (ret)
		return ret;

	pci_set_master(pdev);

	plat->bsp_priv = intel_priv;
	intel_priv->mdio_adhoc_addr = INTEL_MGBE_ADHOC_ADDR;
	intel_priv->crossts_adj = 1;

	/* Initialize all MSI vectors to invalid so that they can be set
	 * according to the platform data settings below.
	 * Note: MSI vectors take values from 0 up to 31 (STMMAC_MSI_VEC_MAX).
	 */
	plat->msi_mac_vec = STMMAC_MSI_VEC_MAX;
	plat->msi_wol_vec = STMMAC_MSI_VEC_MAX;
	plat->msi_lpi_vec = STMMAC_MSI_VEC_MAX;
	plat->msi_sfty_ce_vec = STMMAC_MSI_VEC_MAX;
	plat->msi_sfty_ue_vec = STMMAC_MSI_VEC_MAX;
	plat->msi_rx_base_vec = STMMAC_MSI_VEC_MAX;
	plat->msi_tx_base_vec = STMMAC_MSI_VEC_MAX;

	ret = info->setup(pdev, plat);
	if (ret)
		return ret;

	memset(&res, 0, sizeof(res));
	res.addr = pcim_iomap_table(pdev)[0];

	if (plat->eee_usecs_rate > 0) {
		u32 tx_lpi_usec;

		tx_lpi_usec = (plat->eee_usecs_rate / 1000000) - 1;
		writel(tx_lpi_usec, res.addr + GMAC_1US_TIC_COUNTER);
	}

	ret = stmmac_config_multi_msi(pdev, plat, &res);
	if (ret) {
		ret = stmmac_config_single_msi(pdev, plat, &res);
		if (ret) {
			dev_err(&pdev->dev, "%s: ERROR: failed to enable IRQ\n",
				__func__);
			goto err_alloc_irq;
		}
	}

	ret = stmmac_dvr_probe(&pdev->dev, plat, &res);
	if (ret) {
		goto err_dvr_probe;
	}

	return 0;

err_dvr_probe:
	pci_free_irq_vectors(pdev);
err_alloc_irq:
	clk_disable_unprepare(plat->stmmac_clk);
	clk_unregister_fixed_rate(plat->stmmac_clk);
	return ret;
}

/**
 * intel_eth_pci_remove
 *
 * @pdev: pci device pointer
 * Description: this function calls the main driver cleanup to free the net
 * resources and releases the PCI resources.
 */
static void intel_eth_pci_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = dev_get_drvdata(&pdev->dev);
	struct stmmac_priv *priv = netdev_priv(ndev);

	stmmac_dvr_remove(&pdev->dev);

	clk_unregister_fixed_rate(priv->plat->stmmac_clk);

	pcim_iounmap_regions(pdev, BIT(0));
}

static int __maybe_unused intel_eth_pci_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int ret;

	ret = stmmac_suspend(dev);
	if (ret)
		return ret;

	ret = pci_save_state(pdev);
	if (ret)
		return ret;

	pci_wake_from_d3(pdev, true);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

static int __maybe_unused intel_eth_pci_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int ret;

	pci_restore_state(pdev);
	pci_set_power_state(pdev, PCI_D0);

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	pci_set_master(pdev);

	return stmmac_resume(dev);
}

static SIMPLE_DEV_PM_OPS(intel_eth_pm_ops, intel_eth_pci_suspend,
			 intel_eth_pci_resume);

#define PCI_DEVICE_ID_INTEL_QUARK		0x0937
#define PCI_DEVICE_ID_INTEL_EHL_RGMII1G		0x4b30
#define PCI_DEVICE_ID_INTEL_EHL_SGMII1G		0x4b31
#define PCI_DEVICE_ID_INTEL_EHL_SGMII2G5	0x4b32
/* Intel(R) Programmable Services Engine (Intel(R) PSE) consists of 2 MACs
 * which are named PSE0 and PSE1
 */
#define PCI_DEVICE_ID_INTEL_EHL_PSE0_RGMII1G	0x4ba0
#define PCI_DEVICE_ID_INTEL_EHL_PSE0_SGMII1G	0x4ba1
#define PCI_DEVICE_ID_INTEL_EHL_PSE0_SGMII2G5	0x4ba2
#define PCI_DEVICE_ID_INTEL_EHL_PSE1_RGMII1G	0x4bb0
#define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII1G	0x4bb1
#define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII2G5	0x4bb2
#define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_0	0x43ac
#define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_1	0x43a2
#define PCI_DEVICE_ID_INTEL_TGL_SGMII1G		0xa0ac
#define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_0	0x7aac
#define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_1	0x7aad

static const struct pci_device_id intel_eth_pci_id_table[] = {
	{ PCI_DEVICE_DATA(INTEL, QUARK, &quark_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_RGMII1G, &ehl_rgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_SGMII1G, &ehl_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_SGMII2G5, &ehl_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_PSE0_RGMII1G, &ehl_pse0_rgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII1G, &ehl_pse0_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII2G5, &ehl_pse0_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_PSE1_RGMII1G, &ehl_pse1_rgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII1G, &ehl_pse1_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII2G5, &ehl_pse1_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, TGL_SGMII1G, &tgl_sgmii1g_phy0_info) },
	{ PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_0, &tgl_sgmii1g_phy0_info) },
	{ PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_1, &tgl_sgmii1g_phy1_info) },
	{ PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_0, &adls_sgmii1g_phy0_info) },
	{ PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_1, &adls_sgmii1g_phy1_info) },
	{}
};
MODULE_DEVICE_TABLE(pci, intel_eth_pci_id_table);

static struct pci_driver intel_eth_pci_driver = {
	.name = "intel-eth-pci",
	.id_table = intel_eth_pci_id_table,
	.probe = intel_eth_pci_probe,
	.remove = intel_eth_pci_remove,
	.driver         = {
		.pm     = &intel_eth_pm_ops,
	},
};

module_pci_driver(intel_eth_pci_driver);

MODULE_DESCRIPTION("INTEL 10/100/1000 Ethernet PCI driver");
MODULE_AUTHOR("Voon Weifeng <weifeng.voon@intel.com>");
MODULE_LICENSE("GPL v2");