/* Renesas Ethernet AVB device driver
 *
 * Copyright (C) 2014-2015 Renesas Electronics Corporation
 * Copyright (C) 2015 Renesas Solutions Corp.
 * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com>
 *
 * Based on the SuperH Ethernet driver
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License version 2,
 * as published by the Free Software Foundation.
 */

#include <linux/cache.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/sys_soc.h>

#include <asm/div64.h>

#include "ravb.h"

#define RAVB_DEF_MSG_ENABLE \
		(NETIF_MSG_LINK	  | \
		 NETIF_MSG_TIMER  | \
		 NETIF_MSG_RX_ERR | \
		 NETIF_MSG_TX_ERR)

static const char *ravb_rx_irqs[NUM_RX_QUEUE] = {
	"ch0", /* RAVB_BE */
	"ch1", /* RAVB_NC */
};

static const char *ravb_tx_irqs[NUM_TX_QUEUE] = {
	"ch18", /* RAVB_BE */
	"ch19", /* RAVB_NC */
};

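/* Read-modify-write helper: clear the @clear bits in @reg, then set the
 * @set bits.  E.g. ravb_config() below uses it to switch the AVB-DMAC
 * operating mode without disturbing the other CCC bits:
 *
 *	ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
 */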
void ravb_modify(struct net_device *ndev, enum ravb_reg reg, u32 clear,
		 u32 set)
{
	ravb_write(ndev, (ravb_read(ndev, reg) & ~clear) | set, reg);
}

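/* Poll @reg until the bits selected by @mask read back as @value; give up
 * after 10000 polls of 10 us each (~100 ms worst case) and return
 * -ETIMEDOUT.
 */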
int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value)
{
	int i;

	for (i = 0; i < 10000; i++) {
		if ((ravb_read(ndev, reg) & mask) == value)
			return 0;
		udelay(10);
	}
	return -ETIMEDOUT;
}

static int ravb_config(struct net_device *ndev)
{
	int error;

	/* Set config mode */
	ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
	/* Check if the operating mode is changed to the config mode */
	error = ravb_wait(ndev, CSR, CSR_OPS, CSR_OPS_CONFIG);
	if (error)
		netdev_err(ndev, "failed to switch device to config mode\n");

	return error;
}

static void ravb_set_duplex(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);

	ravb_modify(ndev, ECMR, ECMR_DM, priv->duplex ? ECMR_DM : 0);
}

static void ravb_set_rate(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);

	switch (priv->speed) {
	case 100:		/* 100BASE */
		ravb_write(ndev, GECMR_SPEED_100, GECMR);
		break;
	case 1000:		/* 1000BASE */
		ravb_write(ndev, GECMR_SPEED_1000, GECMR);
		break;
	}
}

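/* Reserve headroom so that skb->data ends up aligned to RAVB_ALIGN.  The
 * callers allocate RX skbs with RAVB_ALIGN - 1 spare bytes for exactly this
 * purpose, presumably to satisfy the AVB-DMAC's alignment requirement for
 * RX buffer pointers.
 */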
static void ravb_set_buffer_align(struct sk_buff *skb)
{
	u32 reserve = (unsigned long)skb->data & (RAVB_ALIGN - 1);

	if (reserve)
		skb_reserve(skb, RAVB_ALIGN - reserve);
}

/* Get MAC address from the MAC address registers
 *
 * The Ethernet AVB device doesn't have a ROM for the MAC address.
 * This function gets the MAC address that was set by the bootloader.
 */
static void ravb_read_mac_address(struct net_device *ndev, const u8 *mac)
{
	if (mac) {
		ether_addr_copy(ndev->dev_addr, mac);
	} else {
		u32 mahr = ravb_read(ndev, MAHR);
		u32 malr = ravb_read(ndev, MALR);

		ndev->dev_addr[0] = (mahr >> 24) & 0xFF;
		ndev->dev_addr[1] = (mahr >> 16) & 0xFF;
		ndev->dev_addr[2] = (mahr >>  8) & 0xFF;
		ndev->dev_addr[3] = (mahr >>  0) & 0xFF;
		ndev->dev_addr[4] = (malr >>  8) & 0xFF;
		ndev->dev_addr[5] = (malr >>  0) & 0xFF;
	}
}

static void ravb_mdio_ctrl(struct mdiobb_ctrl *ctrl, u32 mask, int set)
{
	struct ravb_private *priv = container_of(ctrl, struct ravb_private,
						 mdiobb);

	ravb_modify(priv->ndev, PIR, mask, set ? mask : 0);
}

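/* The MDIO bus is bit-banged through the PIR register: the callbacks below
 * toggle the MDC clock, the MMD direction bit and the MDO/MDI data bits,
 * and the generic mdio-bitbang layer builds MDIO frames on top of them.
 */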
/* MDC pin control */
static void ravb_set_mdc(struct mdiobb_ctrl *ctrl, int level)
{
	ravb_mdio_ctrl(ctrl, PIR_MDC, level);
}

/* Data I/O pin control */
static void ravb_set_mdio_dir(struct mdiobb_ctrl *ctrl, int output)
{
	ravb_mdio_ctrl(ctrl, PIR_MMD, output);
}

/* Set data bit */
static void ravb_set_mdio_data(struct mdiobb_ctrl *ctrl, int value)
{
	ravb_mdio_ctrl(ctrl, PIR_MDO, value);
}

/* Get data bit */
static int ravb_get_mdio_data(struct mdiobb_ctrl *ctrl)
{
	struct ravb_private *priv = container_of(ctrl, struct ravb_private,
						 mdiobb);

	return (ravb_read(priv->ndev, PIR) & PIR_MDI) != 0;
}

/* MDIO bus control struct */
static struct mdiobb_ops bb_ops = {
	.owner = THIS_MODULE,
	.set_mdc = ravb_set_mdc,
	.set_mdio_dir = ravb_set_mdio_dir,
	.set_mdio_data = ravb_set_mdio_data,
	.get_mdio_data = ravb_get_mdio_data,
};

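/* Note on the TX ring layout: every transmitted skb occupies NUM_TX_DESC
 * consecutive descriptors (one for the DPTR_ALIGN-aligned copy of the frame
 * head, one for the remainder), so ring indices below are scaled by
 * NUM_TX_DESC and tx_skb[] is indexed by entry / NUM_TX_DESC.
 */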
/* Free TX skb function for AVB-IP */
static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
{
	struct ravb_private *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &priv->stats[q];
	struct ravb_tx_desc *desc;
	int free_num = 0;
	int entry;
	u32 size;

	for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
		bool txed;

		entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
					     NUM_TX_DESC);
		desc = &priv->tx_ring[q][entry];
		txed = desc->die_dt == DT_FEMPTY;
		if (free_txed_only && !txed)
			break;
		/* Descriptor type must be checked before all other reads */
		dma_rmb();
		size = le16_to_cpu(desc->ds_tagl) & TX_DS;
		/* Free the original skb. */
		if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
			dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
					 size, DMA_TO_DEVICE);
			/* Last packet descriptor? */
			if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
				entry /= NUM_TX_DESC;
				dev_kfree_skb_any(priv->tx_skb[q][entry]);
				priv->tx_skb[q][entry] = NULL;
				if (txed)
					stats->tx_packets++;
			}
			free_num++;
		}
		if (txed)
			stats->tx_bytes += size;
		desc->die_dt = DT_EEMPTY;
	}
	return free_num;
}

/* Free skb's and DMA buffers for Ethernet AVB */
static void ravb_ring_free(struct net_device *ndev, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	int ring_size;
	int i;

	if (priv->rx_ring[q]) {
		for (i = 0; i < priv->num_rx_ring[q]; i++) {
			struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];

			if (!dma_mapping_error(ndev->dev.parent,
					       le32_to_cpu(desc->dptr)))
				dma_unmap_single(ndev->dev.parent,
						 le32_to_cpu(desc->dptr),
						 PKT_BUF_SZ,
						 DMA_FROM_DEVICE);
		}
		ring_size = sizeof(struct ravb_ex_rx_desc) *
			    (priv->num_rx_ring[q] + 1);
		dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q],
				  priv->rx_desc_dma[q]);
		priv->rx_ring[q] = NULL;
	}

	if (priv->tx_ring[q]) {
		ravb_tx_free(ndev, q, false);

		ring_size = sizeof(struct ravb_tx_desc) *
			    (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
		dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q],
				  priv->tx_desc_dma[q]);
		priv->tx_ring[q] = NULL;
	}

	/* Free RX skb ringbuffer */
	if (priv->rx_skb[q]) {
		for (i = 0; i < priv->num_rx_ring[q]; i++)
			dev_kfree_skb(priv->rx_skb[q][i]);
	}
	kfree(priv->rx_skb[q]);
	priv->rx_skb[q] = NULL;

	/* Free aligned TX buffers */
	kfree(priv->tx_align[q]);
	priv->tx_align[q] = NULL;

	/* Free TX skb ringbuffer.
	 * SKBs are freed by ravb_tx_free() call above.
	 */
	kfree(priv->tx_skb[q]);
	priv->tx_skb[q] = NULL;
}

/* Format skb and descriptor buffer for Ethernet AVB */
static void ravb_ring_format(struct net_device *ndev, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	struct ravb_ex_rx_desc *rx_desc;
	struct ravb_tx_desc *tx_desc;
	struct ravb_desc *desc;
	int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
	int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] *
			   NUM_TX_DESC;
	dma_addr_t dma_addr;
	int i;

	priv->cur_rx[q] = 0;
	priv->cur_tx[q] = 0;
	priv->dirty_rx[q] = 0;
	priv->dirty_tx[q] = 0;

	memset(priv->rx_ring[q], 0, rx_ring_size);
	/* Build RX ring buffer */
	for (i = 0; i < priv->num_rx_ring[q]; i++) {
		/* RX descriptor */
		rx_desc = &priv->rx_ring[q][i];
		rx_desc->ds_cc = cpu_to_le16(PKT_BUF_SZ);
		dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
					  PKT_BUF_SZ,
					  DMA_FROM_DEVICE);
		/* We just set the data size to 0 for a failed mapping which
		 * should prevent DMA from happening...
		 */
		if (dma_mapping_error(ndev->dev.parent, dma_addr))
			rx_desc->ds_cc = cpu_to_le16(0);
		rx_desc->dptr = cpu_to_le32(dma_addr);
		rx_desc->die_dt = DT_FEMPTY;
	}
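	/* Terminate the ring with a link descriptor that points back to the
	 * first entry, so the AVB-DMAC wraps around automatically.
	 */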
	rx_desc = &priv->rx_ring[q][i];
	rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
	rx_desc->die_dt = DT_LINKFIX; /* type */

	memset(priv->tx_ring[q], 0, tx_ring_size);
	/* Build TX ring buffer */
	for (i = 0, tx_desc = priv->tx_ring[q]; i < priv->num_tx_ring[q];
	     i++, tx_desc++) {
		tx_desc->die_dt = DT_EEMPTY;
		tx_desc++;
		tx_desc->die_dt = DT_EEMPTY;
	}
	tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
	tx_desc->die_dt = DT_LINKFIX; /* type */

	/* RX descriptor base address for best effort */
	desc = &priv->desc_bat[RX_QUEUE_OFFSET + q];
	desc->die_dt = DT_LINKFIX; /* type */
	desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);

	/* TX descriptor base address for best effort */
	desc = &priv->desc_bat[q];
	desc->die_dt = DT_LINKFIX; /* type */
	desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
}

/* Init skb and descriptor buffer for Ethernet AVB */
static int ravb_ring_init(struct net_device *ndev, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	struct sk_buff *skb;
	int ring_size;
	int i;

	/* Allocate RX and TX skb rings */
	priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
				  sizeof(*priv->rx_skb[q]), GFP_KERNEL);
	priv->tx_skb[q] = kcalloc(priv->num_tx_ring[q],
				  sizeof(*priv->tx_skb[q]), GFP_KERNEL);
	if (!priv->rx_skb[q] || !priv->tx_skb[q])
		goto error;

	for (i = 0; i < priv->num_rx_ring[q]; i++) {
		skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RAVB_ALIGN - 1);
		if (!skb)
			goto error;
		ravb_set_buffer_align(skb);
		priv->rx_skb[q][i] = skb;
	}

	/* Allocate rings for the aligned buffers */
	priv->tx_align[q] = kmalloc(DPTR_ALIGN * priv->num_tx_ring[q] +
				    DPTR_ALIGN - 1, GFP_KERNEL);
	if (!priv->tx_align[q])
		goto error;

	/* Allocate all RX descriptors. */
	ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
	priv->rx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
					      &priv->rx_desc_dma[q],
					      GFP_KERNEL);
	if (!priv->rx_ring[q])
		goto error;

	priv->dirty_rx[q] = 0;

	/* Allocate all TX descriptors. */
	ring_size = sizeof(struct ravb_tx_desc) *
		    (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
	priv->tx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
					      &priv->tx_desc_dma[q],
					      GFP_KERNEL);
	if (!priv->tx_ring[q])
		goto error;

	return 0;

error:
	ravb_ring_free(ndev, q);

	return -ENOMEM;
}

/* E-MAC init function */
static void ravb_emac_init(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);

	/* Receive frame limit set register */
	ravb_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, RFLR);

	/* PAUSE prohibition */
	ravb_write(ndev, ECMR_ZPF | (priv->duplex ? ECMR_DM : 0) |
		   ECMR_TE | ECMR_RE, ECMR);

	ravb_set_rate(ndev);

	/* Set MAC address */
	ravb_write(ndev,
		   (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
		   (ndev->dev_addr[2] << 8)  | (ndev->dev_addr[3]), MAHR);
	ravb_write(ndev,
		   (ndev->dev_addr[4] << 8)  | (ndev->dev_addr[5]), MALR);

	/* E-MAC status register clear */
	ravb_write(ndev, ECSR_ICD | ECSR_MPD, ECSR);

	/* E-MAC interrupt enable register */
	ravb_write(ndev, ECSIPR_ICDIP | ECSIPR_MPDIP | ECSIPR_LCHNGIP, ECSIPR);
}

/* Device init function for Ethernet AVB */
static int ravb_dmac_init(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	int error;

	/* Set CONFIG mode */
	error = ravb_config(ndev);
	if (error)
		return error;

	error = ravb_ring_init(ndev, RAVB_BE);
	if (error)
		return error;
	error = ravb_ring_init(ndev, RAVB_NC);
	if (error) {
		ravb_ring_free(ndev, RAVB_BE);
		return error;
	}

	/* Descriptor format */
	ravb_ring_format(ndev, RAVB_BE);
	ravb_ring_format(ndev, RAVB_NC);

#if defined(__LITTLE_ENDIAN)
	ravb_modify(ndev, CCC, CCC_BOC, 0);
#else
	ravb_modify(ndev, CCC, CCC_BOC, CCC_BOC);
#endif

	/* Set AVB RX */
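	/* The 0x18000000 literal below presumably programs the reception
	 * FIFO critical level; it is left as a raw value here.
	 */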
	ravb_write(ndev,
		   RCR_EFFS | RCR_ENCF | RCR_ETS0 | RCR_ESF | 0x18000000, RCR);

	/* Set FIFO size */
	ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00222200, TGC);

	/* Timestamp enable */
	ravb_write(ndev, TCCR_TFEN, TCCR);

	/* Interrupt init: */
	if (priv->chip_id == RCAR_GEN3) {
		/* Clear DIL.DPLx */
		ravb_write(ndev, 0, DIL);
		/* Set queue specific interrupt */
		ravb_write(ndev, CIE_CRIE | CIE_CTIE | CIE_CL0M, CIE);
	}
	/* Frame receive */
	ravb_write(ndev, RIC0_FRE0 | RIC0_FRE1, RIC0);
	/* Disable FIFO full warning */
	ravb_write(ndev, 0, RIC1);
	/* Receive FIFO full error, descriptor empty */
	ravb_write(ndev, RIC2_QFE0 | RIC2_QFE1 | RIC2_RFFE, RIC2);
	/* Frame transmitted, timestamp FIFO updated */
	ravb_write(ndev, TIC_FTE0 | TIC_FTE1 | TIC_TFUE, TIC);

	/* Setting the control will start the AVB-DMAC process. */
	ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_OPERATION);

	return 0;
}

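/* Drain the TX timestamp FIFO: TSR.TFFL holds the fill level, TFA0-TFA2
 * expose the oldest captured timestamp plus its tag, and each entry is
 * matched by tag against the skbs queued on ts_skb_list.
 */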
static void ravb_get_tx_tstamp(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	struct ravb_tstamp_skb *ts_skb, *ts_skb2;
	struct skb_shared_hwtstamps shhwtstamps;
	struct sk_buff *skb;
	struct timespec64 ts;
	u16 tag, tfa_tag;
	int count;
	u32 tfa2;

	count = (ravb_read(ndev, TSR) & TSR_TFFL) >> 8;
	while (count--) {
		tfa2 = ravb_read(ndev, TFA2);
		tfa_tag = (tfa2 & TFA2_TST) >> 16;
		ts.tv_nsec = (u64)ravb_read(ndev, TFA0);
		ts.tv_sec = ((u64)(tfa2 & TFA2_TSV) << 32) |
			    ravb_read(ndev, TFA1);
		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
		shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
		list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list,
					 list) {
			skb = ts_skb->skb;
			tag = ts_skb->tag;
			list_del(&ts_skb->list);
			kfree(ts_skb);
			if (tag == tfa_tag) {
				skb_tstamp_tx(skb, &shhwtstamps);
				break;
			}
		}
		ravb_modify(ndev, TCCR, TCCR_TFR, TCCR_TFR);
	}
}

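/* Process up to *quota received frames on queue @q, refill the ring, and
 * return true when the quota was exhausted so the NAPI poll loop knows that
 * more work may be pending.
 */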
/* Packet receive function for Ethernet AVB */
static bool ravb_rx(struct net_device *ndev, int *quota, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	int entry = priv->cur_rx[q] % priv->num_rx_ring[q];
	int boguscnt = (priv->dirty_rx[q] + priv->num_rx_ring[q]) -
			priv->cur_rx[q];
	struct net_device_stats *stats = &priv->stats[q];
	struct ravb_ex_rx_desc *desc;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	struct timespec64 ts;
	u8  desc_status;
	u16 pkt_len;
	int limit;

	boguscnt = min(boguscnt, *quota);
	limit = boguscnt;
	desc = &priv->rx_ring[q][entry];
	while (desc->die_dt != DT_FEMPTY) {
		/* Descriptor type must be checked before all other reads */
		dma_rmb();
		desc_status = desc->msc;
		pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;

		if (--boguscnt < 0)
			break;

		/* We use 0-byte descriptors to mark the DMA mapping errors */
		if (!pkt_len)
			continue;

		if (desc_status & MSC_MC)
			stats->multicast++;

		if (desc_status & (MSC_CRC | MSC_RFE | MSC_RTSF | MSC_RTLF |
				   MSC_CEEF)) {
			stats->rx_errors++;
			if (desc_status & MSC_CRC)
				stats->rx_crc_errors++;
			if (desc_status & MSC_RFE)
				stats->rx_frame_errors++;
			if (desc_status & (MSC_RTLF | MSC_RTSF))
				stats->rx_length_errors++;
			if (desc_status & MSC_CEEF)
				stats->rx_missed_errors++;
		} else {
			u32 get_ts = priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE;

			skb = priv->rx_skb[q][entry];
			priv->rx_skb[q][entry] = NULL;
			dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
					 PKT_BUF_SZ,
					 DMA_FROM_DEVICE);
			get_ts &= (q == RAVB_NC) ?
					RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
					~RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
			if (get_ts) {
				struct skb_shared_hwtstamps *shhwtstamps;

				shhwtstamps = skb_hwtstamps(skb);
				memset(shhwtstamps, 0, sizeof(*shhwtstamps));
				ts.tv_sec = ((u64) le16_to_cpu(desc->ts_sh) <<
					     32) | le32_to_cpu(desc->ts_sl);
				ts.tv_nsec = le32_to_cpu(desc->ts_n);
				shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
			}
			skb_put(skb, pkt_len);
			skb->protocol = eth_type_trans(skb, ndev);
			napi_gro_receive(&priv->napi[q], skb);
			stats->rx_packets++;
			stats->rx_bytes += pkt_len;
		}

		entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
		desc = &priv->rx_ring[q][entry];
	}

	/* Refill the RX ring buffers. */
	for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
		entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
		desc = &priv->rx_ring[q][entry];
		desc->ds_cc = cpu_to_le16(PKT_BUF_SZ);

		if (!priv->rx_skb[q][entry]) {
			skb = netdev_alloc_skb(ndev,
					       PKT_BUF_SZ + RAVB_ALIGN - 1);
			if (!skb)
				break;	/* Better luck next round. */
			ravb_set_buffer_align(skb);
			dma_addr = dma_map_single(ndev->dev.parent, skb->data,
						  le16_to_cpu(desc->ds_cc),
						  DMA_FROM_DEVICE);
			skb_checksum_none_assert(skb);
			/* We just set the data size to 0 for a failed mapping
			 * which should prevent DMA from happening...
			 */
			if (dma_mapping_error(ndev->dev.parent, dma_addr))
				desc->ds_cc = cpu_to_le16(0);
			desc->dptr = cpu_to_le32(dma_addr);
			priv->rx_skb[q][entry] = skb;
		}
		/* Descriptor type must be set after all the above writes */
		dma_wmb();
		desc->die_dt = DT_FEMPTY;
	}

	*quota -= limit - (++boguscnt);

	return boguscnt <= 0;
}

static void ravb_rcv_snd_disable(struct net_device *ndev)
{
	/* Disable TX and RX */
	ravb_modify(ndev, ECMR, ECMR_RE | ECMR_TE, 0);
}

static void ravb_rcv_snd_enable(struct net_device *ndev)
{
	/* Enable TX and RX */
	ravb_modify(ndev, ECMR, ECMR_RE | ECMR_TE, ECMR_RE | ECMR_TE);
}

/* Stop the DMA processes and wait for them to finish */
static int ravb_stop_dma(struct net_device *ndev)
{
	int error;

	/* Wait for stopping the hardware TX process */
	error = ravb_wait(ndev, TCCR,
			  TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3, 0);
	if (error)
		return error;

	error = ravb_wait(ndev, CSR, CSR_TPO0 | CSR_TPO1 | CSR_TPO2 | CSR_TPO3,
			  0);
	if (error)
		return error;

	/* Stop the E-MAC's RX/TX processes. */
	ravb_rcv_snd_disable(ndev);

	/* Wait for stopping the RX DMA process */
	error = ravb_wait(ndev, CSR, CSR_RPO, 0);
	if (error)
		return error;

	/* Stop AVB-DMAC process */
	return ravb_config(ndev);
}

/* E-MAC interrupt handler */
static void ravb_emac_interrupt_unlocked(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	u32 ecsr, psr;

	ecsr = ravb_read(ndev, ECSR);
	ravb_write(ndev, ecsr, ECSR);	/* clear interrupt */
	if (ecsr & ECSR_ICD)
		ndev->stats.tx_carrier_errors++;
	if (ecsr & ECSR_LCHNG) {
		/* Link changed */
		if (priv->no_avb_link)
			return;
		psr = ravb_read(ndev, PSR);
		if (priv->avb_link_active_low)
			psr ^= PSR_LMON;
		if (!(psr & PSR_LMON)) {
			/* Disable RX and TX */
			ravb_rcv_snd_disable(ndev);
		} else {
			/* Enable RX and TX */
			ravb_rcv_snd_enable(ndev);
		}
	}
}

static irqreturn_t ravb_emac_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct ravb_private *priv = netdev_priv(ndev);

	spin_lock(&priv->lock);
	ravb_emac_interrupt_unlocked(ndev);
	mmiowb();
	spin_unlock(&priv->lock);
	return IRQ_HANDLED;
}

/* Error interrupt handler */
static void ravb_error_interrupt(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	u32 eis, ris2;

	eis = ravb_read(ndev, EIS);
	ravb_write(ndev, ~EIS_QFS, EIS);
	if (eis & EIS_QFS) {
		ris2 = ravb_read(ndev, RIS2);
		ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF), RIS2);

		/* Receive Descriptor Empty int */
		if (ris2 & RIS2_QFF0)
			priv->stats[RAVB_BE].rx_over_errors++;

		/* Receive Descriptor Empty int */
		if (ris2 & RIS2_QFF1)
			priv->stats[RAVB_NC].rx_over_errors++;

		/* Receive FIFO Overflow int */
		if (ris2 & RIS2_RFFF)
			priv->rx_fifo_errors++;
	}
}

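/* Per-queue RX/TX interrupt dispatch.  On R-Car Gen2 the line is masked by
 * rewriting the RIC0/TIC enable registers; Gen3 has dedicated disable
 * registers, so writing BIT(q) to RID0/TID masks just that queue without a
 * read-modify-write.
 */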
static bool ravb_queue_interrupt(struct net_device *ndev, int q)
{
	struct ravb_private *priv = netdev_priv(ndev);
	u32 ris0 = ravb_read(ndev, RIS0);
	u32 ric0 = ravb_read(ndev, RIC0);
	u32 tis  = ravb_read(ndev, TIS);
	u32 tic  = ravb_read(ndev, TIC);

	if (((ris0 & ric0) & BIT(q)) || ((tis  & tic)  & BIT(q))) {
		if (napi_schedule_prep(&priv->napi[q])) {
			/* Mask RX and TX interrupts */
			if (priv->chip_id == RCAR_GEN2) {
				ravb_write(ndev, ric0 & ~BIT(q), RIC0);
				ravb_write(ndev, tic & ~BIT(q), TIC);
			} else {
				ravb_write(ndev, BIT(q), RID0);
				ravb_write(ndev, BIT(q), TID);
			}
			__napi_schedule(&priv->napi[q]);
		} else {
			netdev_warn(ndev,
				    "ignoring interrupt, rx status 0x%08x, rx mask 0x%08x,\n",
				    ris0, ric0);
			netdev_warn(ndev,
				    "                    tx status 0x%08x, tx mask 0x%08x.\n",
				    tis, tic);
		}
		return true;
	}
	return false;
}

static bool ravb_timestamp_interrupt(struct net_device *ndev)
{
	u32 tis = ravb_read(ndev, TIS);

	if (tis & TIS_TFUF) {
		ravb_write(ndev, ~TIS_TFUF, TIS);
		ravb_get_tx_tstamp(ndev);
		return true;
	}
	return false;
}

static irqreturn_t ravb_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct ravb_private *priv = netdev_priv(ndev);
	irqreturn_t result = IRQ_NONE;
	u32 iss;

	spin_lock(&priv->lock);
	/* Get interrupt status */
	iss = ravb_read(ndev, ISS);

	/* Received and transmitted interrupts */
	if (iss & (ISS_FRS | ISS_FTS | ISS_TFUS)) {
		int q;

		/* Timestamp updated */
		if (ravb_timestamp_interrupt(ndev))
			result = IRQ_HANDLED;

		/* Network control and best effort queue RX/TX */
		for (q = RAVB_NC; q >= RAVB_BE; q--) {
			if (ravb_queue_interrupt(ndev, q))
				result = IRQ_HANDLED;
		}
	}

	/* E-MAC status summary */
	if (iss & ISS_MS) {
		ravb_emac_interrupt_unlocked(ndev);
		result = IRQ_HANDLED;
	}

	/* Error status summary */
	if (iss & ISS_ES) {
		ravb_error_interrupt(ndev);
		result = IRQ_HANDLED;
	}

	/* gPTP interrupt status summary */
	if (iss & ISS_CGIS) {
		ravb_ptp_interrupt(ndev);
		result = IRQ_HANDLED;
	}

	mmiowb();
	spin_unlock(&priv->lock);
	return result;
}

833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855
/* Timestamp/Error/gPTP interrupt handler */
static irqreturn_t ravb_multi_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct ravb_private *priv = netdev_priv(ndev);
	irqreturn_t result = IRQ_NONE;
	u32 iss;

	spin_lock(&priv->lock);
	/* Get interrupt status */
	iss = ravb_read(ndev, ISS);

	/* Timestamp updated */
	if ((iss & ISS_TFUS) && ravb_timestamp_interrupt(ndev))
		result = IRQ_HANDLED;

	/* Error status summary */
	if (iss & ISS_ES) {
		ravb_error_interrupt(ndev);
		result = IRQ_HANDLED;
	}

	/* gPTP interrupt status summary */
	if (iss & ISS_CGIS) {
		ravb_ptp_interrupt(ndev);
		result = IRQ_HANDLED;
	}

	mmiowb();
	spin_unlock(&priv->lock);
	return result;
}

static irqreturn_t ravb_dma_interrupt(int irq, void *dev_id, int q)
{
	struct net_device *ndev = dev_id;
	struct ravb_private *priv = netdev_priv(ndev);
	irqreturn_t result = IRQ_NONE;

	spin_lock(&priv->lock);

	/* Network control/Best effort queue RX/TX */
	if (ravb_queue_interrupt(ndev, q))
		result = IRQ_HANDLED;

	mmiowb();
	spin_unlock(&priv->lock);
	return result;
}

static irqreturn_t ravb_be_interrupt(int irq, void *dev_id)
{
	return ravb_dma_interrupt(irq, dev_id, RAVB_BE);
}

static irqreturn_t ravb_nc_interrupt(int irq, void *dev_id)
{
	return ravb_dma_interrupt(irq, dev_id, RAVB_NC);
}

static int ravb_poll(struct napi_struct *napi, int budget)
{
	struct net_device *ndev = napi->dev;
	struct ravb_private *priv = netdev_priv(ndev);
	unsigned long flags;
	int q = napi - priv->napi;
	int mask = BIT(q);
	int quota = budget;
	u32 ris0, tis;

	for (;;) {
		tis = ravb_read(ndev, TIS);
		ris0 = ravb_read(ndev, RIS0);
		if (!((ris0 & mask) || (tis & mask)))
			break;

		/* Processing RX Descriptor Ring */
		if (ris0 & mask) {
			/* Clear RX interrupt */
			ravb_write(ndev, ~mask, RIS0);
			if (ravb_rx(ndev, &quota, q))
				goto out;
		}
		/* Processing TX Descriptor Ring */
		if (tis & mask) {
			spin_lock_irqsave(&priv->lock, flags);
			/* Clear TX interrupt */
			ravb_write(ndev, ~mask, TIS);
			ravb_tx_free(ndev, q, true);
			netif_wake_subqueue(ndev, q);
			mmiowb();
			spin_unlock_irqrestore(&priv->lock, flags);
		}
	}

	napi_complete(napi);

	/* Re-enable RX/TX interrupts */
	spin_lock_irqsave(&priv->lock, flags);
	if (priv->chip_id == RCAR_GEN2) {
		ravb_modify(ndev, RIC0, mask, mask);
		ravb_modify(ndev, TIC,  mask, mask);
	} else {
		ravb_write(ndev, mask, RIE0);
		ravb_write(ndev, mask, TIE);
	}
	mmiowb();
	spin_unlock_irqrestore(&priv->lock, flags);

	/* Receive error message handling */
	priv->rx_over_errors =  priv->stats[RAVB_BE].rx_over_errors;
	priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
	if (priv->rx_over_errors != ndev->stats.rx_over_errors)
		ndev->stats.rx_over_errors = priv->rx_over_errors;
	if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors)
		ndev->stats.rx_fifo_errors = priv->rx_fifo_errors;
out:
	return budget - quota;
}

/* PHY state control function */
static void ravb_adjust_link(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;
	bool new_state = false;

	if (phydev->link) {
		if (phydev->duplex != priv->duplex) {
			new_state = true;
			priv->duplex = phydev->duplex;
			ravb_set_duplex(ndev);
		}

		if (phydev->speed != priv->speed) {
			new_state = true;
			priv->speed = phydev->speed;
			ravb_set_rate(ndev);
		}
		if (!priv->link) {
			ravb_modify(ndev, ECMR, ECMR_TXF, 0);
			new_state = true;
			priv->link = phydev->link;
			if (priv->no_avb_link)
				ravb_rcv_snd_enable(ndev);
		}
	} else if (priv->link) {
		new_state = true;
		priv->link = 0;
		priv->speed = 0;
		priv->duplex = -1;
		if (priv->no_avb_link)
			ravb_rcv_snd_disable(ndev);
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);
}

static const struct soc_device_attribute r8a7795es10[] = {
	{ .soc_id = "r8a7795", .revision = "ES1.0", },
	{ /* sentinel */ }
};

/* PHY init function */
static int ravb_phy_init(struct net_device *ndev)
{
	struct device_node *np = ndev->dev.parent->of_node;
	struct ravb_private *priv = netdev_priv(ndev);
	struct phy_device *phydev;
	struct device_node *pn;
	int err;

	priv->link = 0;
	priv->speed = 0;
	priv->duplex = -1;

	/* Try connecting to PHY */
	pn = of_parse_phandle(np, "phy-handle", 0);
	if (!pn) {
	if (!pn) {
		/* In the case of a fixed PHY, the DT node associated
		 * with the PHY is the Ethernet MAC DT node.
		 */
		if (of_phy_is_fixed_link(np)) {
			err = of_phy_register_fixed_link(np);
			if (err)
				return err;
		}
		pn = of_node_get(np);
	}
	phydev = of_phy_connect(ndev, pn, ravb_adjust_link, 0,
				priv->phy_interface);
	of_node_put(pn);
	if (!phydev) {
		netdev_err(ndev, "failed to connect PHY\n");
		err = -ENOENT;
		goto err_deregister_fixed_link;
	}

	/* This driver only supports 10/100Mbit speeds on R-Car H3 ES1.0
	 * at this time.
	 */
	if (soc_device_match(r8a7795es10)) {
		err = phy_set_max_speed(phydev, SPEED_100);
		if (err) {
			netdev_err(ndev, "failed to limit PHY to 100Mbit/s\n");
			goto err_phy_disconnect;
		}

		netdev_info(ndev, "limited PHY to 100Mbit/s\n");
	}

	/* 10BASE is not supported */
	phydev->supported &= ~PHY_10BT_FEATURES;

	phy_attached_info(phydev);

	return 0;

err_phy_disconnect:
	phy_disconnect(phydev);
err_deregister_fixed_link:
	if (of_phy_is_fixed_link(np))
		of_phy_deregister_fixed_link(np);

	return err;
1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069
}

/* PHY control start function */
static int ravb_phy_start(struct net_device *ndev)
{
	int error;

	error = ravb_phy_init(ndev);
	if (error)
		return error;

	phy_start(ndev->phydev);

	return 0;
}

static int ravb_get_link_ksettings(struct net_device *ndev,
				   struct ethtool_link_ksettings *cmd)
{
	struct ravb_private *priv = netdev_priv(ndev);
	int error = -ENODEV;
	unsigned long flags;

	if (ndev->phydev) {
		spin_lock_irqsave(&priv->lock, flags);
		error = phy_ethtool_ksettings_get(ndev->phydev, cmd);
		spin_unlock_irqrestore(&priv->lock, flags);
	}

	return error;
}

static int ravb_set_link_ksettings(struct net_device *ndev,
				   const struct ethtool_link_ksettings *cmd)
{
	struct ravb_private *priv = netdev_priv(ndev);
	unsigned long flags;
	int error;

	if (!ndev->phydev)
		return -ENODEV;

	spin_lock_irqsave(&priv->lock, flags);

	/* Disable TX and RX */
	ravb_rcv_snd_disable(ndev);

	error = phy_ethtool_ksettings_set(ndev->phydev, cmd);
	if (error)
		goto error_exit;

	if (cmd->base.duplex == DUPLEX_FULL)
		priv->duplex = 1;
	else
		priv->duplex = 0;

	ravb_set_duplex(ndev);

error_exit:
	mdelay(1);

	/* Enable TX and RX */
	ravb_rcv_snd_enable(ndev);

	mmiowb();
	spin_unlock_irqrestore(&priv->lock, flags);

	return error;
}

static int ravb_nway_reset(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	int error = -ENODEV;
	unsigned long flags;

	if (ndev->phydev) {
		spin_lock_irqsave(&priv->lock, flags);
		error = phy_start_aneg(ndev->phydev);
		spin_unlock_irqrestore(&priv->lock, flags);
	}

	return error;
}

static u32 ravb_get_msglevel(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);

	return priv->msg_enable;
}

static void ravb_set_msglevel(struct net_device *ndev, u32 value)
{
	struct ravb_private *priv = netdev_priv(ndev);

	priv->msg_enable = value;
}

static const char ravb_gstrings_stats[][ETH_GSTRING_LEN] = {
	"rx_queue_0_current",
	"tx_queue_0_current",
	"rx_queue_0_dirty",
	"tx_queue_0_dirty",
	"rx_queue_0_packets",
	"tx_queue_0_packets",
	"rx_queue_0_bytes",
	"tx_queue_0_bytes",
	"rx_queue_0_mcast_packets",
	"rx_queue_0_errors",
	"rx_queue_0_crc_errors",
	"rx_queue_0_frame_errors",
	"rx_queue_0_length_errors",
	"rx_queue_0_missed_errors",
	"rx_queue_0_over_errors",

	"rx_queue_1_current",
	"tx_queue_1_current",
	"rx_queue_1_dirty",
	"tx_queue_1_dirty",
	"rx_queue_1_packets",
	"tx_queue_1_packets",
	"rx_queue_1_bytes",
	"tx_queue_1_bytes",
	"rx_queue_1_mcast_packets",
	"rx_queue_1_errors",
	"rx_queue_1_crc_errors",
	"rx_queue_1_frame_errors",
	"rx_queue_1_length_errors",
	"rx_queue_1_missed_errors",
	"rx_queue_1_over_errors",
};

#define RAVB_STATS_LEN	ARRAY_SIZE(ravb_gstrings_stats)

static int ravb_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return RAVB_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void ravb_get_ethtool_stats(struct net_device *ndev,
				   struct ethtool_stats *stats, u64 *data)
{
	struct ravb_private *priv = netdev_priv(ndev);
	int i = 0;
	int q;

	/* Device-specific stats */
	for (q = RAVB_BE; q < NUM_RX_QUEUE; q++) {
		struct net_device_stats *stats = &priv->stats[q];

		data[i++] = priv->cur_rx[q];
		data[i++] = priv->cur_tx[q];
		data[i++] = priv->dirty_rx[q];
		data[i++] = priv->dirty_tx[q];
		data[i++] = stats->rx_packets;
		data[i++] = stats->tx_packets;
		data[i++] = stats->rx_bytes;
		data[i++] = stats->tx_bytes;
		data[i++] = stats->multicast;
		data[i++] = stats->rx_errors;
		data[i++] = stats->rx_crc_errors;
		data[i++] = stats->rx_frame_errors;
		data[i++] = stats->rx_length_errors;
		data[i++] = stats->rx_missed_errors;
		data[i++] = stats->rx_over_errors;
	}
}

static void ravb_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(data, *ravb_gstrings_stats, sizeof(ravb_gstrings_stats));
		break;
	}
}

static void ravb_get_ringparam(struct net_device *ndev,
			       struct ethtool_ringparam *ring)
{
	struct ravb_private *priv = netdev_priv(ndev);

	ring->rx_max_pending = BE_RX_RING_MAX;
	ring->tx_max_pending = BE_TX_RING_MAX;
	ring->rx_pending = priv->num_rx_ring[RAVB_BE];
	ring->tx_pending = priv->num_tx_ring[RAVB_BE];
}

static int ravb_set_ringparam(struct net_device *ndev,
			      struct ethtool_ringparam *ring)
{
	struct ravb_private *priv = netdev_priv(ndev);
	int error;

	if (ring->tx_pending > BE_TX_RING_MAX ||
	    ring->rx_pending > BE_RX_RING_MAX ||
	    ring->tx_pending < BE_TX_RING_MIN ||
	    ring->rx_pending < BE_RX_RING_MIN)
		return -EINVAL;
	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
		return -EINVAL;

	if (netif_running(ndev)) {
		netif_device_detach(ndev);
		/* Stop PTP Clock driver */
		if (priv->chip_id == RCAR_GEN2)
			ravb_ptp_stop(ndev);
		/* Wait for DMA stopping */
		error = ravb_stop_dma(ndev);
		if (error) {
			netdev_err(ndev,
				   "cannot set ringparam! Any AVB processes are still running?\n");
			return error;
		}
		synchronize_irq(ndev->irq);

		/* Free all the skb's in the RX queue and the DMA buffers. */
		ravb_ring_free(ndev, RAVB_BE);
		ravb_ring_free(ndev, RAVB_NC);
	}

	/* Set new parameters */
	priv->num_rx_ring[RAVB_BE] = ring->rx_pending;
	priv->num_tx_ring[RAVB_BE] = ring->tx_pending;

	if (netif_running(ndev)) {
		error = ravb_dmac_init(ndev);
		if (error) {
			netdev_err(ndev,
				   "%s: ravb_dmac_init() failed, error %d\n",
				   __func__, error);
			return error;
		}

		ravb_emac_init(ndev);

		/* Initialise PTP Clock driver */
		if (priv->chip_id == RCAR_GEN2)
			ravb_ptp_init(ndev, priv->pdev);

		netif_device_attach(ndev);
	}

	return 0;
}

static int ravb_get_ts_info(struct net_device *ndev,
			    struct ethtool_ts_info *info)
{
	struct ravb_private *priv = netdev_priv(ndev);

	info->so_timestamping =
		SOF_TIMESTAMPING_TX_SOFTWARE |
		SOF_TIMESTAMPING_RX_SOFTWARE |
		SOF_TIMESTAMPING_SOFTWARE |
		SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE;
	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
	info->rx_filters =
		(1 << HWTSTAMP_FILTER_NONE) |
		(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
		(1 << HWTSTAMP_FILTER_ALL);
	info->phc_index = ptp_clock_index(priv->ptp.clock);

	return 0;
}

static const struct ethtool_ops ravb_ethtool_ops = {
	.nway_reset		= ravb_nway_reset,
	.get_msglevel		= ravb_get_msglevel,
	.set_msglevel		= ravb_set_msglevel,
	.get_link		= ethtool_op_get_link,
	.get_strings		= ravb_get_strings,
	.get_ethtool_stats	= ravb_get_ethtool_stats,
	.get_sset_count		= ravb_get_sset_count,
	.get_ringparam		= ravb_get_ringparam,
	.set_ringparam		= ravb_set_ringparam,
	.get_ts_info		= ravb_get_ts_info,
	.get_link_ksettings	= ravb_get_link_ksettings,
	.set_link_ksettings	= ravb_set_link_ksettings,
};

static inline int ravb_hook_irq(unsigned int irq, irq_handler_t handler,
				struct net_device *ndev, struct device *dev,
				const char *ch)
{
	char *name;
	int error;

	name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s", ndev->name, ch);
	if (!name)
		return -ENOMEM;
	error = request_irq(irq, handler, 0, name, ndev);
	if (error)
		netdev_err(ndev, "cannot request IRQ %s\n", name);

	return error;
}

/* Network device open function for Ethernet AVB */
static int ravb_open(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	struct platform_device *pdev = priv->pdev;
	struct device *dev = &pdev->dev;
	int error;

	napi_enable(&priv->napi[RAVB_BE]);
	napi_enable(&priv->napi[RAVB_NC]);

	if (priv->chip_id == RCAR_GEN2) {
		error = request_irq(ndev->irq, ravb_interrupt, IRQF_SHARED,
				    ndev->name, ndev);
		if (error) {
			netdev_err(ndev, "cannot request IRQ\n");
			goto out_napi_off;
		}
	} else {
		error = ravb_hook_irq(ndev->irq, ravb_multi_interrupt, ndev,
				      dev, "ch22:multi");
		if (error)
			goto out_napi_off;
		error = ravb_hook_irq(priv->emac_irq, ravb_emac_interrupt, ndev,
				      dev, "ch24:emac");
		if (error)
			goto out_free_irq;
		error = ravb_hook_irq(priv->rx_irqs[RAVB_BE], ravb_be_interrupt,
				      ndev, dev, "ch0:rx_be");
		if (error)
			goto out_free_irq_emac;
		error = ravb_hook_irq(priv->tx_irqs[RAVB_BE], ravb_be_interrupt,
				      ndev, dev, "ch18:tx_be");
		if (error)
			goto out_free_irq_be_rx;
		error = ravb_hook_irq(priv->rx_irqs[RAVB_NC], ravb_nc_interrupt,
				      ndev, dev, "ch1:rx_nc");
		if (error)
			goto out_free_irq_be_tx;
		error = ravb_hook_irq(priv->tx_irqs[RAVB_NC], ravb_nc_interrupt,
				      ndev, dev, "ch19:tx_nc");
		if (error)
			goto out_free_irq_nc_rx;
	}

	/* Device init */
	error = ravb_dmac_init(ndev);
	if (error)
		goto out_free_irq_nc_tx;
	ravb_emac_init(ndev);

	/* Initialise PTP Clock driver */
	if (priv->chip_id == RCAR_GEN2)
		ravb_ptp_init(ndev, priv->pdev);

	netif_tx_start_all_queues(ndev);

	/* PHY control start */
	error = ravb_phy_start(ndev);
	if (error)
		goto out_ptp_stop;

	return 0;

out_ptp_stop:
	/* Stop PTP Clock driver */
	if (priv->chip_id == RCAR_GEN2)
		ravb_ptp_stop(ndev);
out_free_irq_nc_tx:
	if (priv->chip_id == RCAR_GEN2)
		goto out_free_irq;
	free_irq(priv->tx_irqs[RAVB_NC], ndev);
out_free_irq_nc_rx:
	free_irq(priv->rx_irqs[RAVB_NC], ndev);
out_free_irq_be_tx:
	free_irq(priv->tx_irqs[RAVB_BE], ndev);
out_free_irq_be_rx:
	free_irq(priv->rx_irqs[RAVB_BE], ndev);
out_free_irq_emac:
	free_irq(priv->emac_irq, ndev);
out_free_irq:
	free_irq(ndev->irq, ndev);
out_napi_off:
	napi_disable(&priv->napi[RAVB_NC]);
	napi_disable(&priv->napi[RAVB_BE]);
	return error;
}

/* Timeout function for Ethernet AVB */
static void ravb_tx_timeout(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);

	netif_err(priv, tx_err, ndev,
		  "transmit timed out, status %08x, resetting...\n",
		  ravb_read(ndev, ISS));

	/* Bump the TX error count */
	ndev->stats.tx_errors++;

	schedule_work(&priv->work);
}

static void ravb_tx_timeout_work(struct work_struct *work)
{
	struct ravb_private *priv = container_of(work, struct ravb_private,
						 work);
	struct net_device *ndev = priv->ndev;

	netif_tx_stop_all_queues(ndev);

	/* Stop PTP Clock driver */
	if (priv->chip_id == RCAR_GEN2)
		ravb_ptp_stop(ndev);

	/* Wait for DMA stopping */
	ravb_stop_dma(ndev);

	ravb_ring_free(ndev, RAVB_BE);
	ravb_ring_free(ndev, RAVB_NC);

	/* Device init */
	ravb_dmac_init(ndev);
	ravb_emac_init(ndev);

	/* Initialise PTP Clock driver */
	if (priv->chip_id == RCAR_GEN2)
		ravb_ptp_init(ndev, priv->pdev);

	netif_tx_start_all_queues(ndev);
}

/* Packet transmit function for Ethernet AVB */
static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	u16 q = skb_get_queue_mapping(skb);
	struct ravb_tstamp_skb *ts_skb;
	struct ravb_tx_desc *desc;
	unsigned long flags;
	u32 dma_addr;
	void *buffer;
	u32 entry;
	u32 len;

	spin_lock_irqsave(&priv->lock, flags);
	if (priv->cur_tx[q] - priv->dirty_tx[q] > (priv->num_tx_ring[q] - 1) *
	    NUM_TX_DESC) {
		netif_err(priv, tx_queued, ndev,
			  "still transmitting with the full ring!\n");
		netif_stop_subqueue(ndev, q);
		spin_unlock_irqrestore(&priv->lock, flags);
		return NETDEV_TX_BUSY;
	}

	if (skb_put_padto(skb, ETH_ZLEN))
		goto exit;

	entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * NUM_TX_DESC);
	priv->tx_skb[q][entry / NUM_TX_DESC] = skb;
	buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
		 entry / NUM_TX_DESC * DPTR_ALIGN;
	len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;
	/* Zero length DMA descriptors are problematic as they seem to
	 * terminate DMA transfers. Avoid them by simply using a length of
	 * DPTR_ALIGN (4) when skb data is aligned to DPTR_ALIGN.
	 *
	 * As skb is guaranteed to have at least ETH_ZLEN (60) bytes of
	 * data by the call to skb_put_padto() above this is safe with
	 * respect to both the length of the first DMA descriptor (len)
	 * overflowing the available data and the length of the second DMA
	 * descriptor (skb->len - len) being negative.
	 */
	if (len == 0)
		len = DPTR_ALIGN;

	memcpy(buffer, skb->data, len);
	dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE);
	if (dma_mapping_error(ndev->dev.parent, dma_addr))
		goto drop;

	desc = &priv->tx_ring[q][entry];
	desc->ds_tagl = cpu_to_le16(len);
	desc->dptr = cpu_to_le32(dma_addr);

	buffer = skb->data + len;
	len = skb->len - len;
	dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE);
	if (dma_mapping_error(ndev->dev.parent, dma_addr))
		goto unmap;

	desc++;
	desc->ds_tagl = cpu_to_le16(len);
	desc->dptr = cpu_to_le32(dma_addr);

	/* TX timestamp required */
	if (q == RAVB_NC) {
		ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC);
		if (!ts_skb) {
			desc--;
			dma_unmap_single(ndev->dev.parent, dma_addr, len,
					 DMA_TO_DEVICE);
			goto unmap;
		}
		ts_skb->skb = skb;
		ts_skb->tag = priv->ts_skb_tag++;
		priv->ts_skb_tag &= 0x3ff;
		list_add_tail(&ts_skb->list, &priv->ts_skb_list);

		/* TAG and timestamp required flag */
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR;
		desc->ds_tagl |= cpu_to_le16(ts_skb->tag << 12);
	}

	skb_tx_timestamp(skb);
	/* Descriptor type must be set after all the above writes */
	dma_wmb();
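	/* Mark the end descriptor (DT_FEND) before the start descriptor
	 * (DT_FSTART) so the hardware never sees a started frame without
	 * its end.
	 */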
	desc->die_dt = DT_FEND;
	desc--;
	desc->die_dt = DT_FSTART;

	ravb_modify(ndev, TCCR, TCCR_TSRQ0 << q, TCCR_TSRQ0 << q);

	priv->cur_tx[q] += NUM_TX_DESC;
	if (priv->cur_tx[q] - priv->dirty_tx[q] >
	    (priv->num_tx_ring[q] - 1) * NUM_TX_DESC &&
	    !ravb_tx_free(ndev, q, true))
		netif_stop_subqueue(ndev, q);

exit:
	mmiowb();
	spin_unlock_irqrestore(&priv->lock, flags);
	return NETDEV_TX_OK;

unmap:
	dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
			 le16_to_cpu(desc->ds_tagl), DMA_TO_DEVICE);
drop:
	dev_kfree_skb_any(skb);
	priv->tx_skb[q][entry / NUM_TX_DESC] = NULL;
	goto exit;
}

static u16 ravb_select_queue(struct net_device *ndev, struct sk_buff *skb,
			     void *accel_priv, select_queue_fallback_t fallback)
{
	/* If skb needs TX timestamp, it is handled in network control queue */
	return (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ? RAVB_NC :
							       RAVB_BE;
}

static struct net_device_stats *ravb_get_stats(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	struct net_device_stats *nstats, *stats0, *stats1;

	nstats = &ndev->stats;
	stats0 = &priv->stats[RAVB_BE];
	stats1 = &priv->stats[RAVB_NC];

	nstats->tx_dropped += ravb_read(ndev, TROCR);
	ravb_write(ndev, 0, TROCR);	/* (write clear) */
	nstats->collisions += ravb_read(ndev, CDCR);
	ravb_write(ndev, 0, CDCR);	/* (write clear) */
	nstats->tx_carrier_errors += ravb_read(ndev, LCCR);
	ravb_write(ndev, 0, LCCR);	/* (write clear) */

	nstats->tx_carrier_errors += ravb_read(ndev, CERCR);
	ravb_write(ndev, 0, CERCR);	/* (write clear) */
	nstats->tx_carrier_errors += ravb_read(ndev, CEECR);
	ravb_write(ndev, 0, CEECR);	/* (write clear) */

	nstats->rx_packets = stats0->rx_packets + stats1->rx_packets;
	nstats->tx_packets = stats0->tx_packets + stats1->tx_packets;
	nstats->rx_bytes = stats0->rx_bytes + stats1->rx_bytes;
	nstats->tx_bytes = stats0->tx_bytes + stats1->tx_bytes;
	nstats->multicast = stats0->multicast + stats1->multicast;
	nstats->rx_errors = stats0->rx_errors + stats1->rx_errors;
	nstats->rx_crc_errors = stats0->rx_crc_errors + stats1->rx_crc_errors;
	nstats->rx_frame_errors =
		stats0->rx_frame_errors + stats1->rx_frame_errors;
	nstats->rx_length_errors =
		stats0->rx_length_errors + stats1->rx_length_errors;
	nstats->rx_missed_errors =
		stats0->rx_missed_errors + stats1->rx_missed_errors;
	nstats->rx_over_errors =
		stats0->rx_over_errors + stats1->rx_over_errors;

	return nstats;
}

/* Update promiscuous bit */
static void ravb_set_rx_mode(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	ravb_modify(ndev, ECMR, ECMR_PRM,
		    ndev->flags & IFF_PROMISC ? ECMR_PRM : 0);
	mmiowb();
	spin_unlock_irqrestore(&priv->lock, flags);
}

/* Device close function for Ethernet AVB */
static int ravb_close(struct net_device *ndev)
{
	struct device_node *np = ndev->dev.parent->of_node;
	struct ravb_private *priv = netdev_priv(ndev);
	struct ravb_tstamp_skb *ts_skb, *ts_skb2;

	netif_tx_stop_all_queues(ndev);

	/* Disable interrupts by clearing the interrupt masks. */
	ravb_write(ndev, 0, RIC0);
	ravb_write(ndev, 0, RIC2);
	ravb_write(ndev, 0, TIC);

	/* Stop PTP Clock driver */
	if (priv->chip_id == RCAR_GEN2)
		ravb_ptp_stop(ndev);

	/* Set the config mode to stop the AVB-DMAC's processes */
	if (ravb_stop_dma(ndev) < 0)
		netdev_err(ndev,
			   "device will be stopped after h/w processes are done.\n");

	/* Clear the timestamp list */
	list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) {
		list_del(&ts_skb->list);
		kfree(ts_skb);
	}

	/* PHY disconnect */
	if (ndev->phydev) {
		phy_stop(ndev->phydev);
		phy_disconnect(ndev->phydev);
		if (of_phy_is_fixed_link(np))
			of_phy_deregister_fixed_link(np);
	}

	if (priv->chip_id != RCAR_GEN2) {
		free_irq(priv->tx_irqs[RAVB_NC], ndev);
		free_irq(priv->rx_irqs[RAVB_NC], ndev);
		free_irq(priv->tx_irqs[RAVB_BE], ndev);
		free_irq(priv->rx_irqs[RAVB_BE], ndev);
		free_irq(priv->emac_irq, ndev);
	}
	free_irq(ndev->irq, ndev);

	napi_disable(&priv->napi[RAVB_NC]);
	napi_disable(&priv->napi[RAVB_BE]);

	/* Free all the skb's in the RX queue and the DMA buffers. */
	ravb_ring_free(ndev, RAVB_BE);
	ravb_ring_free(ndev, RAVB_NC);

	return 0;
}

static int ravb_hwtstamp_get(struct net_device *ndev, struct ifreq *req)
{
	struct ravb_private *priv = netdev_priv(ndev);
	struct hwtstamp_config config;

	config.flags = 0;
	config.tx_type = priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON :
						HWTSTAMP_TX_OFF;
	if (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE_V2_L2_EVENT)
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
	else if (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE_ALL)
		config.rx_filter = HWTSTAMP_FILTER_ALL;
	else
		config.rx_filter = HWTSTAMP_FILTER_NONE;

	return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}

/* Control hardware time stamping */
static int ravb_hwtstamp_set(struct net_device *ndev, struct ifreq *req)
{
	struct ravb_private *priv = netdev_priv(ndev);
	struct hwtstamp_config config;
	u32 tstamp_rx_ctrl = RAVB_RXTSTAMP_ENABLED;
	u32 tstamp_tx_ctrl;

	if (copy_from_user(&config, req->ifr_data, sizeof(config)))
		return -EFAULT;

	/* Reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		tstamp_tx_ctrl = 0;
		break;
	case HWTSTAMP_TX_ON:
		tstamp_tx_ctrl = RAVB_TXTSTAMP_ENABLED;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		tstamp_rx_ctrl = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
		break;
	default:
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_ALL;
	}

	priv->tstamp_tx_ctrl = tstamp_tx_ctrl;
	priv->tstamp_rx_ctrl = tstamp_rx_ctrl;

	return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}

/* ioctl to device function */
static int ravb_do_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
{
	struct phy_device *phydev = ndev->phydev;

	if (!netif_running(ndev))
		return -EINVAL;

	if (!phydev)
		return -ENODEV;

	switch (cmd) {
	case SIOCGHWTSTAMP:
		return ravb_hwtstamp_get(ndev, req);
	case SIOCSHWTSTAMP:
		return ravb_hwtstamp_set(ndev, req);
	}

	return phy_mii_ioctl(phydev, req, cmd);
}

static const struct net_device_ops ravb_netdev_ops = {
	.ndo_open		= ravb_open,
	.ndo_stop		= ravb_close,
	.ndo_start_xmit		= ravb_start_xmit,
	.ndo_select_queue	= ravb_select_queue,
	.ndo_get_stats		= ravb_get_stats,
	.ndo_set_rx_mode	= ravb_set_rx_mode,
	.ndo_tx_timeout		= ravb_tx_timeout,
	.ndo_do_ioctl		= ravb_do_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
};

/* MDIO bus init function */
static int ravb_mdio_init(struct ravb_private *priv)
{
	struct platform_device *pdev = priv->pdev;
	struct device *dev = &pdev->dev;
	int error;

	/* Bitbang init */
	priv->mdiobb.ops = &bb_ops;

	/* MII controller setting */
	priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb);
	if (!priv->mii_bus)
		return -ENOMEM;

	/* Hook up MII support for ethtool */
	priv->mii_bus->name = "ravb_mii";
	priv->mii_bus->parent = dev;
	snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		 pdev->name, pdev->id);

	/* Register MDIO bus */
	error = of_mdiobus_register(priv->mii_bus, dev->of_node);
	if (error)
		goto out_free_bus;

	return 0;

out_free_bus:
	free_mdio_bitbang(priv->mii_bus);
	return error;
}

/* MDIO bus release function */
static int ravb_mdio_release(struct ravb_private *priv)
{
	/* Unregister mdio bus */
	mdiobus_unregister(priv->mii_bus);

	/* Free bitbang info */
	free_mdio_bitbang(priv->mii_bus);

	return 0;
}

static const struct of_device_id ravb_match_table[] = {
	{ .compatible = "renesas,etheravb-r8a7790", .data = (void *)RCAR_GEN2 },
	{ .compatible = "renesas,etheravb-r8a7794", .data = (void *)RCAR_GEN2 },
	{ .compatible = "renesas,etheravb-rcar-gen2", .data = (void *)RCAR_GEN2 },
	{ .compatible = "renesas,etheravb-r8a7795", .data = (void *)RCAR_GEN3 },
	{ .compatible = "renesas,etheravb-rcar-gen3", .data = (void *)RCAR_GEN3 },
	{ }
};
MODULE_DEVICE_TABLE(of, ravb_match_table);

static int ravb_set_gti(struct net_device *ndev)
{
	struct device *dev = ndev->dev.parent;
	struct device_node *np = dev->of_node;
	unsigned long rate;
	struct clk *clk;
	uint64_t inc;

	clk = of_clk_get(np, 0);
	if (IS_ERR(clk)) {
		dev_err(dev, "could not get clock\n");
		return PTR_ERR(clk);
	}

	rate = clk_get_rate(clk);
	clk_put(clk);

	if (!rate)
		return -EINVAL;

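	/* GTI.TIV holds the gPTP timer increment per AVB clock cycle as a
	 * fixed-point nanosecond value with a 20-bit fractional part,
	 * hence (10^9 << 20) / rate below.  Worked example, assuming a
	 * hypothetical 133 MHz clock:
	 *
	 *	10^9 / (133 * 10^6) ~= 7.52 ns/cycle
	 *	inc = 7.52 * 2^20 ~= 0x784cfe
	 */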
	inc = 1000000000ULL << 20;
	do_div(inc, rate);

	if (inc < GTI_TIV_MIN || inc > GTI_TIV_MAX) {
		dev_err(dev, "gti.tiv increment 0x%llx is outside the range 0x%x - 0x%x\n",
			inc, GTI_TIV_MIN, GTI_TIV_MAX);
		return -EINVAL;
	}

	ravb_write(ndev, inc, GTI);

	return 0;
}

static void ravb_set_config_mode(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);

	if (priv->chip_id == RCAR_GEN2) {
		ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
		/* Set CSEL value */
		ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB);
	} else {
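		/* On R-Car Gen3, keep the gPTP clock active (CCC_GAC, with
		 * the HPB clock selected) even in config mode, so the PTP
		 * clock keeps running; this is why ravb_ptp_init() and
		 * ravb_ptp_stop() are called from probe and remove on Gen3
		 * rather than from open and close.
		 */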
		ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG |
			    CCC_GAC | CCC_CSEL_HPB);
	}
}

/* Set tx and rx clock internal delay modes */
static void ravb_set_delay_mode(struct net_device *ndev)
{
	struct ravb_private *priv = netdev_priv(ndev);
	int set = 0;

	if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
	    priv->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID)
		set |= APSR_DM_RDM;

	if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
	    priv->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID)
		set |= APSR_DM_TDM;

	ravb_modify(ndev, APSR, APSR_DM, set);
}

static int ravb_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct ravb_private *priv;
	enum ravb_chip_id chip_id;
	struct net_device *ndev;
	int error, irq, q;
	struct resource *res;
	int i;

	if (!np) {
		dev_err(&pdev->dev,
			"this driver is required to be instantiated from device tree\n");
		return -EINVAL;
	}

	/* Get base address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "invalid resource\n");
		return -EINVAL;
	}

	ndev = alloc_etherdev_mqs(sizeof(struct ravb_private),
				  NUM_TX_QUEUE, NUM_RX_QUEUE);
	if (!ndev)
		return -ENOMEM;

	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	/* The Ether-specific entries in the device structure. */
	ndev->base_addr = res->start;

	chip_id = (enum ravb_chip_id)of_device_get_match_data(&pdev->dev);

	if (chip_id == RCAR_GEN3)
		irq = platform_get_irq_byname(pdev, "ch22");
	else
		irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		error = irq;
		goto out_release;
	}
	ndev->irq = irq;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	priv = netdev_priv(ndev);
	priv->ndev = ndev;
	priv->pdev = pdev;
	priv->num_tx_ring[RAVB_BE] = BE_TX_RING_SIZE;
	priv->num_rx_ring[RAVB_BE] = BE_RX_RING_SIZE;
	priv->num_tx_ring[RAVB_NC] = NC_TX_RING_SIZE;
	priv->num_rx_ring[RAVB_NC] = NC_RX_RING_SIZE;
	priv->addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(priv->addr)) {
		error = PTR_ERR(priv->addr);
		goto out_release;
	}

	spin_lock_init(&priv->lock);
	INIT_WORK(&priv->work, ravb_tx_timeout_work);

	priv->phy_interface = of_get_phy_mode(np);

	priv->no_avb_link = of_property_read_bool(np, "renesas,no-ether-link");
	priv->avb_link_active_low =
		of_property_read_bool(np, "renesas,ether-link-active-low");

	if (chip_id == RCAR_GEN3) {
		irq = platform_get_irq_byname(pdev, "ch24");
		if (irq < 0) {
			error = irq;
			goto out_release;
		}
		priv->emac_irq = irq;
		for (i = 0; i < NUM_RX_QUEUE; i++) {
			irq = platform_get_irq_byname(pdev, ravb_rx_irqs[i]);
			if (irq < 0) {
				error = irq;
				goto out_release;
			}
			priv->rx_irqs[i] = irq;
		}
		for (i = 0; i < NUM_TX_QUEUE; i++) {
			irq = platform_get_irq_byname(pdev, ravb_tx_irqs[i]);
			if (irq < 0) {
				error = irq;
				goto out_release;
			}
			priv->tx_irqs[i] = irq;
		}
	}

	priv->chip_id = chip_id;

	/* Set function */
	ndev->netdev_ops = &ravb_netdev_ops;
	ndev->ethtool_ops = &ravb_ethtool_ops;

	/* Set AVB config mode */
	ravb_set_config_mode(ndev);

	/* Set GTI value */
	error = ravb_set_gti(ndev);
	if (error)
		goto out_release;

	/* Request GTI loading */
	ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);

	if (priv->chip_id != RCAR_GEN2)
		ravb_set_delay_mode(ndev);

	/* Allocate descriptor base address table */
	priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM;
	priv->desc_bat = dma_alloc_coherent(ndev->dev.parent, priv->desc_bat_size,
					    &priv->desc_bat_dma, GFP_KERNEL);
	if (!priv->desc_bat) {
		dev_err(&pdev->dev,
			"Cannot allocate desc base address table (size %d bytes)\n",
			priv->desc_bat_size);
		error = -ENOMEM;
		goto out_release;
	}
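	/* Terminate every DBAT entry up front (DT_EOS marks it unused);
	 * the entries for the live RX/TX queues are pointed at real
	 * descriptor rings later, when the rings are formatted (see
	 * ravb_ring_format() earlier in this file).
	 */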
	for (q = RAVB_BE; q < DBAT_ENTRY_NUM; q++)
		priv->desc_bat[q].die_dt = DT_EOS;
	ravb_write(ndev, priv->desc_bat_dma, DBAT);

	/* Initialise HW timestamp list */
	INIT_LIST_HEAD(&priv->ts_skb_list);

	/* Initialise PTP Clock driver */
	if (chip_id != RCAR_GEN2)
		ravb_ptp_init(ndev, pdev);

	/* Debug message level */
	priv->msg_enable = RAVB_DEF_MSG_ENABLE;

	/* Read and set MAC address */
	ravb_read_mac_address(ndev, of_get_mac_address(np));
	if (!is_valid_ether_addr(ndev->dev_addr)) {
		dev_warn(&pdev->dev,
			 "no valid MAC address supplied, using a random one\n");
		eth_hw_addr_random(ndev);
	}

	/* MDIO bus init */
	error = ravb_mdio_init(priv);
	if (error) {
		dev_err(&pdev->dev, "failed to initialize MDIO\n");
		goto out_dma_free;
	}

	netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll, 64);
	netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll, 64);

	/* Network device register */
	error = register_netdev(ndev);
	if (error)
		goto out_napi_del;

	/* Print device information */
	netdev_info(ndev, "Base address at %#x, %pM, IRQ %d.\n",
		    (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);

	platform_set_drvdata(pdev, ndev);

	return 0;

out_napi_del:
	netif_napi_del(&priv->napi[RAVB_NC]);
	netif_napi_del(&priv->napi[RAVB_BE]);
	ravb_mdio_release(priv);
out_dma_free:
	dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
			  priv->desc_bat_dma);

	/* Stop PTP Clock driver */
	if (chip_id != RCAR_GEN2)
		ravb_ptp_stop(ndev);
out_release:
	if (ndev)
		free_netdev(ndev);

	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return error;
}

static int ravb_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct ravb_private *priv = netdev_priv(ndev);

	/* Stop PTP Clock driver */
	if (priv->chip_id != RCAR_GEN2)
		ravb_ptp_stop(ndev);

	dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
			  priv->desc_bat_dma);
	/* Set reset mode */
	ravb_write(ndev, CCC_OPC_RESET, CCC);
	pm_runtime_put_sync(&pdev->dev);
	unregister_netdev(ndev);
	netif_napi_del(&priv->napi[RAVB_NC]);
	netif_napi_del(&priv->napi[RAVB_BE]);
	ravb_mdio_release(priv);
	pm_runtime_disable(&pdev->dev);
	free_netdev(ndev);
	platform_set_drvdata(pdev, NULL);

	return 0;
}

static int __maybe_unused ravb_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	int ret = 0;

	if (netif_running(ndev)) {
		netif_device_detach(ndev);
		ret = ravb_close(ndev);
	}

	return ret;
}

static int __maybe_unused ravb_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct ravb_private *priv = netdev_priv(ndev);
	int ret = 0;

	/* All registers have been reset to default values.
	 * Restore all registers that were set up at probe time and
	 * reopen the device if it was running before the system
	 * suspended.
	 */

	/* Set AVB config mode */
	ravb_set_config_mode(ndev);

	/* Set GTI value */
	ret = ravb_set_gti(ndev);
	if (ret)
		return ret;

	/* Request GTI loading */
	ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);

	if (priv->chip_id != RCAR_GEN2)
		ravb_set_delay_mode(ndev);

	/* Restore descriptor base address table */
	ravb_write(ndev, priv->desc_bat_dma, DBAT);

	if (netif_running(ndev)) {
		ret = ravb_open(ndev);
		if (ret < 0)
			return ret;
		netif_device_attach(ndev);
	}

	return ret;
}

static int __maybe_unused ravb_runtime_nop(struct device *dev)
{
	/* Runtime PM callback shared between ->runtime_suspend()
	 * and ->runtime_resume(). Simply returns success.
	 *
	 * This driver re-initializes all registers after
	 * pm_runtime_get_sync() anyway so there is no need
	 * to save and restore registers here.
	 */
	return 0;
}

static const struct dev_pm_ops ravb_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(ravb_suspend, ravb_resume)
	SET_RUNTIME_PM_OPS(ravb_runtime_nop, ravb_runtime_nop, NULL)
};

static struct platform_driver ravb_driver = {
	.probe		= ravb_probe,
	.remove		= ravb_remove,
	.driver = {
		.name	= "ravb",
		.pm	= &ravb_dev_pm_ops,
		.of_match_table = ravb_match_table,
	},
};

module_platform_driver(ravb_driver);

MODULE_AUTHOR("Mitsuhiro Kimura, Masaru Nagai");
MODULE_DESCRIPTION("Renesas Ethernet AVB driver");
MODULE_LICENSE("GPL v2");