cpsw.c 70.8 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26
/*
 * Texas Instruments Ethernet Switch Driver
 *
 * Copyright (C) 2012 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/irqreturn.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
27
#include <linux/net_tstamp.h>
28 29 30
#include <linux/phy.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
31
#include <linux/pm_runtime.h>
32
#include <linux/gpio.h>
33
#include <linux/of.h>
34
#include <linux/of_mdio.h>
35 36
#include <linux/of_net.h>
#include <linux/of_device.h>
37
#include <linux/if_vlan.h>
38

39
#include <linux/pinctrl/consumer.h>
40

41
#include "cpsw.h"
42
#include "cpsw_ale.h"
43
#include "cpts.h"
44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78
#include "davinci_cpdma.h"

#define CPSW_DEBUG	(NETIF_MSG_HW		| NETIF_MSG_WOL		| \
			 NETIF_MSG_DRV		| NETIF_MSG_LINK	| \
			 NETIF_MSG_IFUP		| NETIF_MSG_INTR	| \
			 NETIF_MSG_PROBE	| NETIF_MSG_TIMER	| \
			 NETIF_MSG_IFDOWN	| NETIF_MSG_RX_ERR	| \
			 NETIF_MSG_TX_ERR	| NETIF_MSG_TX_DONE	| \
			 NETIF_MSG_PKTDATA	| NETIF_MSG_TX_QUEUED	| \
			 NETIF_MSG_RX_STATUS)

#define cpsw_info(priv, type, format, ...)		\
do {								\
	if (netif_msg_##type(priv) && net_ratelimit())		\
		dev_info(priv->dev, format, ## __VA_ARGS__);	\
} while (0)

#define cpsw_err(priv, type, format, ...)		\
do {								\
	if (netif_msg_##type(priv) && net_ratelimit())		\
		dev_err(priv->dev, format, ## __VA_ARGS__);	\
} while (0)

#define cpsw_dbg(priv, type, format, ...)		\
do {								\
	if (netif_msg_##type(priv) && net_ratelimit())		\
		dev_dbg(priv->dev, format, ## __VA_ARGS__);	\
} while (0)

#define cpsw_notice(priv, type, format, ...)		\
do {								\
	if (netif_msg_##type(priv) && net_ratelimit())		\
		dev_notice(priv->dev, format, ## __VA_ARGS__);	\
} while (0)

79 80
#define ALE_ALL_PORTS		0x7

81 82 83 84
#define CPSW_MAJOR_VERSION(reg)		(reg >> 8 & 0x7)
#define CPSW_MINOR_VERSION(reg)		(reg & 0xff)
#define CPSW_RTL_VERSION(reg)		((reg >> 11) & 0x1f)

85 86
#define CPSW_VERSION_1		0x19010a
#define CPSW_VERSION_2		0x19010c
87
#define CPSW_VERSION_3		0x19010f
88
#define CPSW_VERSION_4		0x190112
89 90 91 92 93 94 95 96 97

#define HOST_PORT_NUM		0
#define SLIVER_SIZE		0x40

#define CPSW1_HOST_PORT_OFFSET	0x028
#define CPSW1_SLAVE_OFFSET	0x050
#define CPSW1_SLAVE_SIZE	0x040
#define CPSW1_CPDMA_OFFSET	0x100
#define CPSW1_STATERAM_OFFSET	0x200
98
#define CPSW1_HW_STATS		0x400
99 100 101 102 103 104 105 106
#define CPSW1_CPTS_OFFSET	0x500
#define CPSW1_ALE_OFFSET	0x600
#define CPSW1_SLIVER_OFFSET	0x700

#define CPSW2_HOST_PORT_OFFSET	0x108
#define CPSW2_SLAVE_OFFSET	0x200
#define CPSW2_SLAVE_SIZE	0x100
#define CPSW2_CPDMA_OFFSET	0x800
107
#define CPSW2_HW_STATS		0x900
108 109 110 111 112 113
#define CPSW2_STATERAM_OFFSET	0xa00
#define CPSW2_CPTS_OFFSET	0xc00
#define CPSW2_ALE_OFFSET	0xd00
#define CPSW2_SLIVER_OFFSET	0xd80
#define CPSW2_BD_OFFSET		0x2000

114 115 116 117 118 119 120 121 122 123 124 125 126 127 128
#define CPDMA_RXTHRESH		0x0c0
#define CPDMA_RXFREE		0x0e0
#define CPDMA_TXHDP		0x00
#define CPDMA_RXHDP		0x20
#define CPDMA_TXCP		0x40
#define CPDMA_RXCP		0x60

#define CPSW_POLL_WEIGHT	64
#define CPSW_MIN_PACKET_SIZE	60
#define CPSW_MAX_PACKET_SIZE	(1500 + 14 + 4 + 4)

#define RX_PRIORITY_MAPPING	0x76543210
#define TX_PRIORITY_MAPPING	0x33221100
#define CPDMA_TX_PRIORITY_MAP	0x76543210

129 130 131
#define CPSW_VLAN_AWARE		BIT(1)
#define CPSW_ALE_VLAN_AWARE	1

132 133 134
#define CPSW_FIFO_NORMAL_MODE		(0 << 16)
#define CPSW_FIFO_DUAL_MAC_MODE		(1 << 16)
#define CPSW_FIFO_RATE_LIMIT_MODE	(2 << 16)
135

136 137 138 139 140 141 142
#define CPSW_INTPACEEN		(0x3f << 16)
#define CPSW_INTPRESCALE_MASK	(0x7FF << 0)
#define CPSW_CMINTMAX_CNT	63
#define CPSW_CMINTMIN_CNT	2
#define CPSW_CMINTMAX_INTVL	(1000 / CPSW_CMINTMIN_CNT)
#define CPSW_CMINTMIN_INTVL	((1000 / CPSW_CMINTMAX_CNT) + 1)

143 144 145
#define cpsw_slave_index(cpsw, priv)				\
		((cpsw->data.dual_emac) ? priv->emac_port :	\
		cpsw->data.active_slave)
146
#define IRQ_NUM			2
147

148 149 150 151 152 153 154 155 156 157 158 159
static int debug_level;
module_param(debug_level, int, 0);
MODULE_PARM_DESC(debug_level, "cpsw debug level (NETIF_MSG bits)");

static int ale_ageout = 10;
module_param(ale_ageout, int, 0);
MODULE_PARM_DESC(ale_ageout, "cpsw ale ageout interval (seconds)");

static int rx_packet_max = CPSW_MAX_PACKET_SIZE;
module_param(rx_packet_max, int, 0);
MODULE_PARM_DESC(rx_packet_max, "maximum receive packet size (bytes)");

160
/* CPSW wrapper (WR) module register layout: per-channel rx/tx interrupt
 * enable/status and the interrupt pacing registers.  Mapped at the
 * wr_regs base in struct cpsw_common.
 */
struct cpsw_wr_regs {
	u32	id_ver;
	u32	soft_reset;
	u32	control;
	u32	int_control;	/* pacing enable + prescale (see cpsw_set_coalesce) */
	u32	rx_thresh_en;
	u32	rx_en;		/* rx interrupt enable mask */
	u32	tx_en;		/* tx interrupt enable mask */
	u32	misc_en;
	u32	mem_allign1[8];	/* padding to next register group (sic: "allign") */
	u32	rx_thresh_stat;
	u32	rx_stat;
	u32	tx_stat;
	u32	misc_stat;
	u32	mem_allign2[8];
	u32	rx_imax;	/* rx interrupts-per-ms pacing limit */
	u32	tx_imax;	/* tx interrupts-per-ms pacing limit */

};

180
/* CPSW subsystem (switch core) register layout, mapped at the regs base
 * in struct cpsw_common.
 */
struct cpsw_ss_regs {
	u32	id_ver;
	u32	control;
	u32	soft_reset;
	u32	stat_port_en;
	u32	ptype;
	u32	soft_idle;
	u32	thru_rate;
	u32	gap_thresh;
	u32	tx_start_wds;
	u32	flow_control;
	u32	vlan_ltype;
	u32	ts_ltype;
	u32	dlr_ltype;
};

196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240
/* CPSW_PORT_V1 */
#define CPSW1_MAX_BLKS      0x00 /* Maximum FIFO Blocks */
#define CPSW1_BLK_CNT       0x04 /* FIFO Block Usage Count (Read Only) */
#define CPSW1_TX_IN_CTL     0x08 /* Transmit FIFO Control */
#define CPSW1_PORT_VLAN     0x0c /* VLAN Register */
#define CPSW1_TX_PRI_MAP    0x10 /* Tx Header Priority to Switch Pri Mapping */
#define CPSW1_TS_CTL        0x14 /* Time Sync Control */
#define CPSW1_TS_SEQ_LTYPE  0x18 /* Time Sync Sequence ID Offset and Msg Type */
#define CPSW1_TS_VLAN       0x1c /* Time Sync VLAN1 and VLAN2 */

/* CPSW_PORT_V2 */
#define CPSW2_CONTROL       0x00 /* Control Register */
#define CPSW2_MAX_BLKS      0x08 /* Maximum FIFO Blocks */
#define CPSW2_BLK_CNT       0x0c /* FIFO Block Usage Count (Read Only) */
#define CPSW2_TX_IN_CTL     0x10 /* Transmit FIFO Control */
#define CPSW2_PORT_VLAN     0x14 /* VLAN Register */
#define CPSW2_TX_PRI_MAP    0x18 /* Tx Header Priority to Switch Pri Mapping */
#define CPSW2_TS_SEQ_MTYPE  0x1c /* Time Sync Sequence ID Offset and Msg Type */

/* CPSW_PORT_V1 and V2 */
#define SA_LO               0x20 /* CPGMAC_SL Source Address Low */
#define SA_HI               0x24 /* CPGMAC_SL Source Address High */
#define SEND_PERCENT        0x28 /* Transmit Queue Send Percentages */

/* CPSW_PORT_V2 only */
#define RX_DSCP_PRI_MAP0    0x30 /* Rx DSCP Priority to Rx Packet Mapping */
#define RX_DSCP_PRI_MAP1    0x34 /* Rx DSCP Priority to Rx Packet Mapping */
#define RX_DSCP_PRI_MAP2    0x38 /* Rx DSCP Priority to Rx Packet Mapping */
#define RX_DSCP_PRI_MAP3    0x3c /* Rx DSCP Priority to Rx Packet Mapping */
#define RX_DSCP_PRI_MAP4    0x40 /* Rx DSCP Priority to Rx Packet Mapping */
#define RX_DSCP_PRI_MAP5    0x44 /* Rx DSCP Priority to Rx Packet Mapping */
#define RX_DSCP_PRI_MAP6    0x48 /* Rx DSCP Priority to Rx Packet Mapping */
#define RX_DSCP_PRI_MAP7    0x4c /* Rx DSCP Priority to Rx Packet Mapping */

/* Bit definitions for the CPSW2_CONTROL register */
#define PASS_PRI_TAGGED     (1<<24) /* Pass Priority Tagged */
#define VLAN_LTYPE2_EN      (1<<21) /* VLAN LTYPE 2 enable */
#define VLAN_LTYPE1_EN      (1<<20) /* VLAN LTYPE 1 enable */
#define DSCP_PRI_EN         (1<<16) /* DSCP Priority Enable */
#define TS_320              (1<<14) /* Time Sync Dest Port 320 enable */
#define TS_319              (1<<13) /* Time Sync Dest Port 319 enable */
#define TS_132              (1<<12) /* Time Sync Dest IP Addr 132 enable */
#define TS_131              (1<<11) /* Time Sync Dest IP Addr 131 enable */
#define TS_130              (1<<10) /* Time Sync Dest IP Addr 130 enable */
#define TS_129              (1<<9)  /* Time Sync Dest IP Addr 129 enable */
241 242
#define TS_TTL_NONZERO      (1<<8)  /* Time Sync Time To Live Non-zero enable */
#define TS_ANNEX_F_EN       (1<<6)  /* Time Sync Annex F enable */
243 244 245 246 247 248
#define TS_ANNEX_D_EN       (1<<4)  /* Time Sync Annex D enable */
#define TS_LTYPE2_EN        (1<<3)  /* Time Sync LTYPE 2 enable */
#define TS_LTYPE1_EN        (1<<2)  /* Time Sync LTYPE 1 enable */
#define TS_TX_EN            (1<<1)  /* Time Sync Transmit Enable */
#define TS_RX_EN            (1<<0)  /* Time Sync Receive Enable */

249 250 251
#define CTRL_V2_TS_BITS \
	(TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
	 TS_TTL_NONZERO  | TS_ANNEX_D_EN | TS_LTYPE1_EN)
252

253 254 255 256 257 258 259 260 261 262 263 264 265
#define CTRL_V2_ALL_TS_MASK (CTRL_V2_TS_BITS | TS_TX_EN | TS_RX_EN)
#define CTRL_V2_TX_TS_BITS  (CTRL_V2_TS_BITS | TS_TX_EN)
#define CTRL_V2_RX_TS_BITS  (CTRL_V2_TS_BITS | TS_RX_EN)


#define CTRL_V3_TS_BITS \
	(TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
	 TS_TTL_NONZERO | TS_ANNEX_F_EN | TS_ANNEX_D_EN |\
	 TS_LTYPE1_EN)

#define CTRL_V3_ALL_TS_MASK (CTRL_V3_TS_BITS | TS_TX_EN | TS_RX_EN)
#define CTRL_V3_TX_TS_BITS  (CTRL_V3_TS_BITS | TS_TX_EN)
#define CTRL_V3_RX_TS_BITS  (CTRL_V3_TS_BITS | TS_RX_EN)
266 267 268 269 270 271 272 273 274

/* Bit definitions for the CPSW2_TS_SEQ_MTYPE register */
#define TS_SEQ_ID_OFFSET_SHIFT   (16)    /* Time Sync Sequence ID Offset */
#define TS_SEQ_ID_OFFSET_MASK    (0x3f)
#define TS_MSG_TYPE_EN_SHIFT     (0)     /* Time Sync Message Type Enable */
#define TS_MSG_TYPE_EN_MASK      (0xffff)

/* The PTP event messages - Sync, Delay_Req, Pdelay_Req, and Pdelay_Resp. */
#define EVENT_MSG_BITS ((1<<0) | (1<<1) | (1<<2) | (1<<3))
275

276 277 278 279 280 281 282 283
/* Bit definitions for the CPSW1_TS_CTL register */
#define CPSW_V1_TS_RX_EN		BIT(0)
#define CPSW_V1_TS_TX_EN		BIT(4)
#define CPSW_V1_MSG_TYPE_OFS		16

/* Bit definitions for the CPSW1_TS_SEQ_LTYPE register */
#define CPSW_V1_SEQ_ID_OFS_SHIFT	16

284 285 286
/* Host (CPU) port register layout within the CPSW port region. */
struct cpsw_host_regs {
	u32	max_blks;
	u32	blk_cnt;
	u32	tx_in_ctl;
	u32	port_vlan;
	u32	tx_pri_map;
	u32	cpdma_tx_pri_map;
	u32	cpdma_rx_chan_map;
};

/* Per-slave CPGMAC_SL "sliver" (MAC) register layout; pointed to by
 * cpsw_slave::sliver.
 */
struct cpsw_sliver_regs {
	u32	id_ver;
	u32	mac_control;	/* speed/duplex/flow-control bits, see _cpsw_adjust_link() */
	u32	mac_status;
	u32	soft_reset;
	u32	rx_maxlen;
	u32	__reserved_0;
	u32	rx_pause;
	u32	tx_pause;
	u32	__reserved_1;
	u32	rx_pri_map;
};

307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344
/* Hardware statistics counter block.  Field order mirrors the hardware
 * stats register map (read through cpsw_common::hw_stats with the
 * offsets baked into cpsw_gstrings_stats).
 */
struct cpsw_hw_stats {
	u32	rxgoodframes;
	u32	rxbroadcastframes;
	u32	rxmulticastframes;
	u32	rxpauseframes;
	u32	rxcrcerrors;
	u32	rxaligncodeerrors;
	u32	rxoversizedframes;
	u32	rxjabberframes;
	u32	rxundersizedframes;
	u32	rxfragments;
	u32	__pad_0[2];
	u32	rxoctets;
	u32	txgoodframes;
	u32	txbroadcastframes;
	u32	txmulticastframes;
	u32	txpauseframes;
	u32	txdeferredframes;
	u32	txcollisionframes;
	u32	txsinglecollframes;
	u32	txmultcollframes;
	u32	txexcessivecollisions;
	u32	txlatecollisions;
	u32	txunderrun;
	u32	txcarriersenseerrors;
	u32	txoctets;
	u32	octetframes64;
	u32	octetframes65t127;
	u32	octetframes128t255;
	u32	octetframes256t511;
	u32	octetframes512t1023;
	u32	octetframes1024tup;
	u32	netoctets;
	u32	rxsofoverruns;
	u32	rxmofoverruns;
	u32	rxdmaoverruns;
};

345
/* Per-slave (external Ethernet port) state. */
struct cpsw_slave {
	void __iomem			*regs;		/* slave port register window */
	struct cpsw_sliver_regs __iomem	*sliver;	/* MAC (sliver) registers */
	int				slave_num;	/* 0-based index; port number is slave_num + 1 */
	u32				mac_control;	/* last value written to sliver mac_control */
	struct cpsw_slave_data		*data;
	struct phy_device		*phy;
	struct net_device		*ndev;
	u32				port_vlan;	/* port VLAN id used in dual EMAC mode */
	u32				open_stat;	/* non-zero while this interface is open */
};

357 358 359 360 361 362 363 364 365 366
/* Read a 32-bit slave-port register at the given byte offset. */
static inline u32 slave_read(struct cpsw_slave *slave, u32 offset)
{
	void __iomem *addr = slave->regs + offset;

	return __raw_readl(addr);
}

/* Write a 32-bit value to a slave-port register at the given byte offset. */
static inline void slave_write(struct cpsw_slave *slave, u32 val, u32 offset)
{
	void __iomem *addr = slave->regs + offset;

	__raw_writel(val, addr);
}

367
/* State shared by all network interfaces on one CPSW instance (register
 * mappings, DMA channels, ALE, NAPI contexts).
 */
struct cpsw_common {
	struct device			*dev;
	struct cpsw_platform_data	data;
	struct napi_struct		napi_rx;
	struct napi_struct		napi_tx;
	struct cpsw_ss_regs __iomem	*regs;
	struct cpsw_wr_regs __iomem	*wr_regs;
	u8 __iomem			*hw_stats;	/* hardware statistics block, see cpsw_hw_stats */
	struct cpsw_host_regs __iomem	*host_port_regs;
	u32				version;
	u32				coal_intvl;	/* current interrupt coalescing interval (usecs) */
	u32				bus_freq_mhz;
	int				rx_packet_max;	/* rx buffer allocation size */
	struct cpsw_slave		*slaves;	/* array of data.slaves entries */
	struct cpdma_ctlr		*dma;
	struct cpdma_chan		*txch, *rxch;
	struct cpsw_ale			*ale;
	bool				quirk_irq;	/* irq line must stay disabled while polling */
	bool				rx_irq_disabled;
	bool				tx_irq_disabled;
	u32 irqs_table[IRQ_NUM];	/* [0] = rx irq, [1] = tx irq */
	struct cpts			*cpts;
};

/* Per-net_device private state; the shared per-instance state lives in
 * ->cpsw (struct cpsw_common).
 */
struct cpsw_priv {
	struct net_device		*ndev;
	struct device			*dev;
	u32				msg_enable;	/* NETIF_MSG_* bitmask for cpsw_* log macros */
	u8				mac_addr[ETH_ALEN];
	bool				rx_pause;	/* rx pause (flow control) requested */
	bool				tx_pause;	/* tx pause (flow control) requested */
	u32 emac_port;			/* owning slave index in dual EMAC mode */
	struct cpsw_common *cpsw;
};

402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489
/* Describes one entry of the ethtool statistics table. */
struct cpsw_stats {
	char stat_string[ETH_GSTRING_LEN];	/* name reported to ethtool */
	int type;		/* CPSW_STATS, CPDMA_RX_STATS or CPDMA_TX_STATS */
	int sizeof_stat;
	int stat_offset;	/* offset into cpsw_hw_stats or cpdma_chan_stats */
};

/* Statistic source selector for cpsw_stats::type. */
enum {
	CPSW_STATS,
	CPDMA_RX_STATS,
	CPDMA_TX_STATS,
};

#define CPSW_STAT(m)		CPSW_STATS,				\
				sizeof(((struct cpsw_hw_stats *)0)->m), \
				offsetof(struct cpsw_hw_stats, m)
#define CPDMA_RX_STAT(m)	CPDMA_RX_STATS,				   \
				sizeof(((struct cpdma_chan_stats *)0)->m), \
				offsetof(struct cpdma_chan_stats, m)
#define CPDMA_TX_STAT(m)	CPDMA_TX_STATS,				   \
				sizeof(((struct cpdma_chan_stats *)0)->m), \
				offsetof(struct cpdma_chan_stats, m)

static const struct cpsw_stats cpsw_gstrings_stats[] = {
	{ "Good Rx Frames", CPSW_STAT(rxgoodframes) },
	{ "Broadcast Rx Frames", CPSW_STAT(rxbroadcastframes) },
	{ "Multicast Rx Frames", CPSW_STAT(rxmulticastframes) },
	{ "Pause Rx Frames", CPSW_STAT(rxpauseframes) },
	{ "Rx CRC Errors", CPSW_STAT(rxcrcerrors) },
	{ "Rx Align/Code Errors", CPSW_STAT(rxaligncodeerrors) },
	{ "Oversize Rx Frames", CPSW_STAT(rxoversizedframes) },
	{ "Rx Jabbers", CPSW_STAT(rxjabberframes) },
	{ "Undersize (Short) Rx Frames", CPSW_STAT(rxundersizedframes) },
	{ "Rx Fragments", CPSW_STAT(rxfragments) },
	{ "Rx Octets", CPSW_STAT(rxoctets) },
	{ "Good Tx Frames", CPSW_STAT(txgoodframes) },
	{ "Broadcast Tx Frames", CPSW_STAT(txbroadcastframes) },
	{ "Multicast Tx Frames", CPSW_STAT(txmulticastframes) },
	{ "Pause Tx Frames", CPSW_STAT(txpauseframes) },
	{ "Deferred Tx Frames", CPSW_STAT(txdeferredframes) },
	{ "Collisions", CPSW_STAT(txcollisionframes) },
	{ "Single Collision Tx Frames", CPSW_STAT(txsinglecollframes) },
	{ "Multiple Collision Tx Frames", CPSW_STAT(txmultcollframes) },
	{ "Excessive Collisions", CPSW_STAT(txexcessivecollisions) },
	{ "Late Collisions", CPSW_STAT(txlatecollisions) },
	{ "Tx Underrun", CPSW_STAT(txunderrun) },
	{ "Carrier Sense Errors", CPSW_STAT(txcarriersenseerrors) },
	{ "Tx Octets", CPSW_STAT(txoctets) },
	{ "Rx + Tx 64 Octet Frames", CPSW_STAT(octetframes64) },
	{ "Rx + Tx 65-127 Octet Frames", CPSW_STAT(octetframes65t127) },
	{ "Rx + Tx 128-255 Octet Frames", CPSW_STAT(octetframes128t255) },
	{ "Rx + Tx 256-511 Octet Frames", CPSW_STAT(octetframes256t511) },
	{ "Rx + Tx 512-1023 Octet Frames", CPSW_STAT(octetframes512t1023) },
	{ "Rx + Tx 1024-Up Octet Frames", CPSW_STAT(octetframes1024tup) },
	{ "Net Octets", CPSW_STAT(netoctets) },
	{ "Rx Start of Frame Overruns", CPSW_STAT(rxsofoverruns) },
	{ "Rx Middle of Frame Overruns", CPSW_STAT(rxmofoverruns) },
	{ "Rx DMA Overruns", CPSW_STAT(rxdmaoverruns) },
	{ "Rx DMA chan: head_enqueue", CPDMA_RX_STAT(head_enqueue) },
	{ "Rx DMA chan: tail_enqueue", CPDMA_RX_STAT(tail_enqueue) },
	{ "Rx DMA chan: pad_enqueue", CPDMA_RX_STAT(pad_enqueue) },
	{ "Rx DMA chan: misqueued", CPDMA_RX_STAT(misqueued) },
	{ "Rx DMA chan: desc_alloc_fail", CPDMA_RX_STAT(desc_alloc_fail) },
	{ "Rx DMA chan: pad_alloc_fail", CPDMA_RX_STAT(pad_alloc_fail) },
	{ "Rx DMA chan: runt_receive_buf", CPDMA_RX_STAT(runt_receive_buff) },
	{ "Rx DMA chan: runt_transmit_buf", CPDMA_RX_STAT(runt_transmit_buff) },
	{ "Rx DMA chan: empty_dequeue", CPDMA_RX_STAT(empty_dequeue) },
	{ "Rx DMA chan: busy_dequeue", CPDMA_RX_STAT(busy_dequeue) },
	{ "Rx DMA chan: good_dequeue", CPDMA_RX_STAT(good_dequeue) },
	{ "Rx DMA chan: requeue", CPDMA_RX_STAT(requeue) },
	{ "Rx DMA chan: teardown_dequeue", CPDMA_RX_STAT(teardown_dequeue) },
	{ "Tx DMA chan: head_enqueue", CPDMA_TX_STAT(head_enqueue) },
	{ "Tx DMA chan: tail_enqueue", CPDMA_TX_STAT(tail_enqueue) },
	{ "Tx DMA chan: pad_enqueue", CPDMA_TX_STAT(pad_enqueue) },
	{ "Tx DMA chan: misqueued", CPDMA_TX_STAT(misqueued) },
	{ "Tx DMA chan: desc_alloc_fail", CPDMA_TX_STAT(desc_alloc_fail) },
	{ "Tx DMA chan: pad_alloc_fail", CPDMA_TX_STAT(pad_alloc_fail) },
	{ "Tx DMA chan: runt_receive_buf", CPDMA_TX_STAT(runt_receive_buff) },
	{ "Tx DMA chan: runt_transmit_buf", CPDMA_TX_STAT(runt_transmit_buff) },
	{ "Tx DMA chan: empty_dequeue", CPDMA_TX_STAT(empty_dequeue) },
	{ "Tx DMA chan: busy_dequeue", CPDMA_TX_STAT(busy_dequeue) },
	{ "Tx DMA chan: good_dequeue", CPDMA_TX_STAT(good_dequeue) },
	{ "Tx DMA chan: requeue", CPDMA_TX_STAT(requeue) },
	{ "Tx DMA chan: teardown_dequeue", CPDMA_TX_STAT(teardown_dequeue) },
};

#define CPSW_STATS_LEN	ARRAY_SIZE(cpsw_gstrings_stats)

490
#define ndev_to_cpsw(ndev) (((struct cpsw_priv *)netdev_priv(ndev))->cpsw)
/* Relies on the argument being literally named napi_rx or napi_tx at the
 * call site: the macro parameter is substituted into the container_of()
 * member position too, selecting the matching cpsw_common field.
 */
#define napi_to_cpsw(napi)	container_of(napi, struct cpsw_common, napi)
492 493
/* Invoke func(slave, arg...) on the relevant slave(s): only the port
 * owned by this netdev in dual EMAC mode, otherwise every slave.
 */
#define for_each_slave(priv, func, arg...)				\
	do {								\
		struct cpsw_slave *slave;				\
		struct cpsw_common *cpsw = (priv)->cpsw;		\
		int n;							\
		if (cpsw->data.dual_emac)				\
			(func)((cpsw)->slaves + priv->emac_port, ##arg);\
		else							\
			for (n = cpsw->data.slaves,			\
					slave = cpsw->slaves;		\
					n; n--)				\
				(func)(slave++, ##arg);			\
	} while (0)

506
/* In dual EMAC mode, re-target ndev/skb to the net_device owning the
 * hardware port a frame arrived on (source port decoded from the CPDMA
 * descriptor status).  No-op in switch mode.
 */
#define cpsw_dual_emac_src_port_detect(cpsw, status, ndev, skb)		\
	do {								\
		if (!cpsw->data.dual_emac)				\
			break;						\
		if (CPDMA_RX_SOURCE_PORT(status) == 1) {		\
			ndev = cpsw->slaves[0].ndev;			\
			skb->dev = ndev;				\
		} else if (CPDMA_RX_SOURCE_PORT(status) == 2) {		\
			ndev = cpsw->slaves[1].ndev;			\
			skb->dev = ndev;				\
		}							\
	} while (0)
518
/* Add a multicast address to the ALE: restricted to this netdev's slave
 * port (+ host port) and its port VLAN in dual EMAC mode, visible on all
 * ports otherwise.
 */
#define cpsw_add_mcast(cpsw, priv, addr)				\
	do {								\
		if (cpsw->data.dual_emac) {				\
			struct cpsw_slave *slave = cpsw->slaves +	\
						priv->emac_port;	\
			int slave_port = cpsw_get_slave_port(		\
						slave->slave_num);	\
			cpsw_ale_add_mcast(cpsw->ale, addr,		\
				1 << slave_port | ALE_PORT_HOST,	\
				ALE_VLAN, slave->port_vlan, 0);		\
		} else {						\
			cpsw_ale_add_mcast(cpsw->ale, addr,		\
				ALE_ALL_PORTS,				\
				0, 0, 0);				\
		}							\
	} while (0)

535
/* Map a 0-based slave index to its CPSW port number: the host is port 0,
 * so slaves occupy ports 1..n.
 */
static inline int cpsw_get_slave_port(u32 slave_num)
{
	return 1 + slave_num;
}
539

540 541
/* Switch the ALE in or out of promiscuous operation.  In dual EMAC mode
 * this toggles ALE bypass, which is shared by both interfaces; in switch
 * mode it disables/enables address learning and floods unicast to the
 * host port instead.
 */
static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	struct cpsw_ale *ale = cpsw->ale;
	int i;

	if (cpsw->data.dual_emac) {
		bool flag = false;

		/* Enabling promiscuous mode for one interface will be
		 * common for both the interface as the interface shares
		 * the same hardware resource.
		 */
		for (i = 0; i < cpsw->data.slaves; i++)
			if (cpsw->slaves[i].ndev->flags & IFF_PROMISC)
				flag = true;

		/* refuse to drop bypass while the sibling port still wants it */
		if (!enable && flag) {
			enable = true;
			dev_err(&ndev->dev, "promiscuity not disabled as the other interface is still in promiscuity mode\n");
		}

		if (enable) {
			/* Enable Bypass */
			cpsw_ale_control_set(ale, 0, ALE_BYPASS, 1);

			dev_dbg(&ndev->dev, "promiscuity enabled\n");
		} else {
			/* Disable Bypass */
			cpsw_ale_control_set(ale, 0, ALE_BYPASS, 0);
			dev_dbg(&ndev->dev, "promiscuity disabled\n");
		}
	} else {
		if (enable) {
			unsigned long timeout = jiffies + HZ;

			/* Disable Learn for all ports (host is port 0 and slaves are port 1 and up */
			for (i = 0; i <= cpsw->data.slaves; i++) {
				cpsw_ale_control_set(ale, i,
						     ALE_PORT_NOLEARN, 1);
				cpsw_ale_control_set(ale, i,
						     ALE_PORT_NO_SA_UPDATE, 1);
			}

			/* Clear All Untouched entries */
			cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
			do {
				cpu_relax();
				/* ALE_AGEOUT reads back once the ageout pass completed */
				if (cpsw_ale_control_get(ale, 0, ALE_AGEOUT))
					break;
			} while (time_after(timeout, jiffies));
			cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);

			/* Clear all mcast from ALE */
			cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS, -1);

			/* Flood All Unicast Packets to Host port */
			cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
			dev_dbg(&ndev->dev, "promiscuity enabled\n");
		} else {
			/* Don't Flood All Unicast Packets to Host port */
			cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0);

			/* Enable Learn for all ports (host is port 0 and slaves are port 1 and up */
			for (i = 0; i <= cpsw->data.slaves; i++) {
				cpsw_ale_control_set(ale, i,
						     ALE_PORT_NOLEARN, 0);
				cpsw_ale_control_set(ale, i,
						     ALE_PORT_NO_SA_UPDATE, 0);
			}
			dev_dbg(&ndev->dev, "promiscuity disabled\n");
		}
	}
}

615 616 617
/* ndo_set_rx_mode handler: sync the device's promiscuous/allmulti flags
 * and multicast address list into the ALE.
 */
static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int vid;

	/* dual EMAC: multicast entries are scoped to this port's VLAN;
	 * switch mode: use the switch-wide default VLAN
	 */
	if (cpsw->data.dual_emac)
		vid = cpsw->slaves[priv->emac_port].port_vlan;
	else
		vid = cpsw->data.default_vlan;

	if (ndev->flags & IFF_PROMISC) {
		/* Enable promiscuous mode */
		cpsw_set_promiscious(ndev, true);
		/* NOTE(review): passes the constant IFF_ALLMULTI itself
		 * (always non-zero), not a test of ndev->flags — confirm
		 * this is intended
		 */
		cpsw_ale_set_allmulti(cpsw->ale, IFF_ALLMULTI);
		return;
	} else {
		/* Disable promiscuous mode */
		cpsw_set_promiscious(ndev, false);
	}

	/* Restore allmulti on vlans if necessary */
	cpsw_ale_set_allmulti(cpsw->ale, priv->ndev->flags & IFF_ALLMULTI);

	/* Clear all mcast from ALE */
	cpsw_ale_flush_multicast(cpsw->ale, ALE_ALL_PORTS, vid);

	if (!netdev_mc_empty(ndev)) {
		struct netdev_hw_addr *ha;

		/* program multicast address list into ALE register */
		netdev_for_each_mc_addr(ha, ndev) {
			cpsw_add_mcast(cpsw, priv, (u8 *)ha->addr);
		}
	}
}

652
static void cpsw_intr_enable(struct cpsw_common *cpsw)
653
{
654 655
	__raw_writel(0xFF, &cpsw->wr_regs->tx_en);
	__raw_writel(0xFF, &cpsw->wr_regs->rx_en);
656

657
	cpdma_ctlr_int_ctrl(cpsw->dma, true);
658 659 660
	return;
}

661
static void cpsw_intr_disable(struct cpsw_common *cpsw)
662
{
663 664
	__raw_writel(0, &cpsw->wr_regs->tx_en);
	__raw_writel(0, &cpsw->wr_regs->rx_en);
665

666
	cpdma_ctlr_int_ctrl(cpsw->dma, false);
667 668 669
	return;
}

670
/* CPDMA tx completion callback: account the transmitted packet, deliver
 * the PTP tx timestamp if one was requested, free the skb, and restart
 * the queue if it was stopped on descriptor exhaustion.
 */
static void cpsw_tx_handler(void *token, int len, int status)
{
	struct sk_buff		*skb = token;
	struct net_device	*ndev = skb->dev;
	struct cpsw_common	*cpsw = ndev_to_cpsw(ndev);

	/* Check whether the queue is stopped due to stalled tx dma, if the
	 * queue is stopped then start the queue as we have free desc for tx
	 */
	if (unlikely(netif_queue_stopped(ndev)))
		netif_wake_queue(ndev);
	cpts_tx_timestamp(cpsw->cpts, skb);
	ndev->stats.tx_packets++;
	ndev->stats.tx_bytes += len;
	dev_kfree_skb_any(skb);
}

687
/* CPDMA rx completion callback: hand a received skb up the stack and
 * submit a fresh buffer to the rx channel so the descriptor ring stays
 * full.  On error/down paths the old buffer may be recycled instead.
 */
static void cpsw_rx_handler(void *token, int len, int status)
{
	struct sk_buff		*skb = token;
	struct sk_buff		*new_skb;
	struct net_device	*ndev = skb->dev;
	int			ret = 0;
	struct cpsw_common	*cpsw = ndev_to_cpsw(ndev);

	/* dual EMAC: re-target ndev/skb to the port the frame arrived on */
	cpsw_dual_emac_src_port_detect(cpsw, status, ndev, skb);

	if (unlikely(status < 0) || unlikely(!netif_running(ndev))) {
		bool ndev_status = false;
		struct cpsw_slave *slave = cpsw->slaves;
		int n;

		if (cpsw->data.dual_emac) {
			/* In dual emac mode check for all interfaces */
			for (n = cpsw->data.slaves; n; n--, slave++)
				if (netif_running(slave->ndev))
					ndev_status = true;
		}

		if (ndev_status && (status >= 0)) {
			/* The packet received is for the interface which
			 * is already down and the other interface is up
			 * and running, instead of freeing which results
			 * in reducing of the number of rx descriptor in
			 * DMA engine, requeue skb back to cpdma.
			 */
			new_skb = skb;
			goto requeue;
		}

		/* the interface is going down, skbs are purged */
		dev_kfree_skb_any(skb);
		return;
	}

	/* allocate the replacement buffer before consuming this one */
	new_skb = netdev_alloc_skb_ip_align(ndev, cpsw->rx_packet_max);
	if (new_skb) {
		skb_put(skb, len);
		cpts_rx_timestamp(cpsw->cpts, skb);
		skb->protocol = eth_type_trans(skb, ndev);
		netif_receive_skb(skb);
		ndev->stats.rx_bytes += len;
		ndev->stats.rx_packets++;
		/* buffer is handed to cpdma below; not a leak */
		kmemleak_not_leak(new_skb);
	} else {
		/* allocation failed: drop this packet, recycle its buffer */
		ndev->stats.rx_dropped++;
		new_skb = skb;
	}

requeue:
	ret = cpdma_chan_submit(cpsw->rxch, new_skb, new_skb->data,
				skb_tailroom(new_skb), 0);
	if (WARN_ON(ret < 0))
		dev_kfree_skb_any(new_skb);
}

746
/* Tx hard irq handler: mask tx interrupts, ack the interrupt line (EOI)
 * and defer the completion work to the tx NAPI poller.
 */
static irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id)
{
	struct cpsw_common *cpsw = dev_id;

	writel(0, &cpsw->wr_regs->tx_en);
	cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_TX);

	if (cpsw->quirk_irq) {
		/* also mask at the irq line; re-enabled by cpsw_tx_poll() */
		disable_irq_nosync(cpsw->irqs_table[1]);
		cpsw->tx_irq_disabled = true;
	}

	napi_schedule(&cpsw->napi_tx);
	return IRQ_HANDLED;
}

/* Rx hard irq handler: ack the interrupt line (EOI), mask rx interrupts
 * and defer the receive work to the rx NAPI poller.
 */
static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
{
	struct cpsw_common *cpsw = dev_id;

	cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_RX);
	writel(0, &cpsw->wr_regs->rx_en);

	if (cpsw->quirk_irq) {
		/* also mask at the irq line; re-enabled by cpsw_rx_poll() */
		disable_irq_nosync(cpsw->irqs_table[0]);
		cpsw->rx_irq_disabled = true;
	}

	napi_schedule(&cpsw->napi_rx);
	return IRQ_HANDLED;
}

778 779
/* Tx NAPI poll: process completed tx descriptors up to budget; when the
 * budget is not exhausted, complete NAPI and re-enable tx interrupts
 * (and the irq line on quirk_irq hardware).
 */
static int cpsw_tx_poll(struct napi_struct *napi_tx, int budget)
{
	struct cpsw_common	*cpsw = napi_to_cpsw(napi_tx);
	int			num_tx;

	num_tx = cpdma_chan_process(cpsw->txch, budget);
	if (num_tx < budget) {
		napi_complete(napi_tx);
		/* unmask tx interrupts (masked in cpsw_tx_interrupt) */
		writel(0xff, &cpsw->wr_regs->tx_en);
		if (cpsw->quirk_irq && cpsw->tx_irq_disabled) {
			cpsw->tx_irq_disabled = false;
			enable_irq(cpsw->irqs_table[1]);
		}
	}

	return num_tx;
}

/* Rx NAPI poll: process received packets up to budget; when the budget
 * is not exhausted, complete NAPI and re-enable rx interrupts (and the
 * irq line on quirk_irq hardware).
 */
static int cpsw_rx_poll(struct napi_struct *napi_rx, int budget)
{
	struct cpsw_common	*cpsw = napi_to_cpsw(napi_rx);
	int			num_rx;

	num_rx = cpdma_chan_process(cpsw->rxch, budget);
	if (num_rx < budget) {
		napi_complete(napi_rx);
		/* unmask rx interrupts (masked in cpsw_rx_interrupt) */
		writel(0xff, &cpsw->wr_regs->rx_en);
		if (cpsw->quirk_irq && cpsw->rx_irq_disabled) {
			cpsw->rx_irq_disabled = false;
			enable_irq(cpsw->irqs_table[0]);
		}
	}

	return num_rx;
}

/* Set a module's soft-reset bit and busy-wait (up to 1s) for hardware to
 * clear it; warn with the module name if the reset did not complete.
 */
static inline void soft_reset(const char *module, void __iomem *reg)
{
	unsigned long timeout = jiffies + HZ;

	__raw_writel(1, reg);
	do {
		cpu_relax();
	} while ((__raw_readl(reg) & 1) && time_after(timeout, jiffies));

	WARN(__raw_readl(reg) & 1, "failed to soft-reset %s\n", module);
}

#define mac_hi(mac)	(((mac)[0] << 0) | ((mac)[1] << 8) |	\
			 ((mac)[2] << 16) | ((mac)[3] << 24))
#define mac_lo(mac)	(((mac)[4] << 0) | ((mac)[5] << 8))

/* Program the slave port's source-address registers from priv->mac_addr. */
static void cpsw_set_slave_mac(struct cpsw_slave *slave,
			       struct cpsw_priv *priv)
{
	slave_write(slave, mac_hi(priv->mac_addr), SA_HI);
	slave_write(slave, mac_lo(priv->mac_addr), SA_LO);
}

/* Apply one slave's current PHY state to the hardware: build the
 * mac_control value for the negotiated speed/duplex/pause settings and
 * open or close the ALE port accordingly.  Sets *link to true when this
 * slave's PHY reports link up (never clears it, so the caller can OR
 * across slaves).
 */
static void _cpsw_adjust_link(struct cpsw_slave *slave,
			      struct cpsw_priv *priv, bool *link)
{
	struct phy_device	*phy = slave->phy;
	u32			mac_control = 0;
	u32			slave_port;
	struct cpsw_common *cpsw = priv->cpsw;

	if (!phy)
		return;

	slave_port = cpsw_get_slave_port(slave->slave_num);

	if (phy->link) {
		mac_control = cpsw->data.mac_control;

		/* enable forwarding */
		cpsw_ale_control_set(cpsw->ale, slave_port,
				     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);

		if (phy->speed == 1000)
			mac_control |= BIT(7);	/* GIGABITEN	*/
		if (phy->duplex)
			mac_control |= BIT(0);	/* FULLDUPLEXEN	*/

		/* set speed_in input in case RMII mode is used in 100Mbps */
		if (phy->speed == 100)
			mac_control |= BIT(15);
		else if (phy->speed == 10)
			mac_control |= BIT(18); /* In Band mode */

		if (priv->rx_pause)
			mac_control |= BIT(3);	/* rx pause (flow control) */

		if (priv->tx_pause)
			mac_control |= BIT(4);	/* tx pause (flow control) */

		*link = true;
	} else {
		mac_control = 0;
		/* disable forwarding */
		cpsw_ale_control_set(cpsw->ale, slave_port,
				     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
	}

	/* only touch the register (and log) when something changed */
	if (mac_control != slave->mac_control) {
		phy_print_status(phy);
		__raw_writel(mac_control, &slave->sliver->mac_control);
	}

	slave->mac_control = mac_control;
}

/* PHY adjust_link callback: refresh every relevant slave from its PHY
 * state, then bring the netdev carrier and tx queue in line with the
 * aggregate link state.
 */
static void cpsw_adjust_link(struct net_device *ndev)
{
	struct cpsw_priv	*priv = netdev_priv(ndev);
	bool			have_link = false;

	for_each_slave(priv, _cpsw_adjust_link, priv, &have_link);

	if (!have_link) {
		netif_carrier_off(ndev);
		netif_stop_queue(ndev);
		return;
	}

	netif_carrier_on(ndev);
	if (netif_running(ndev))
		netif_wake_queue(ndev);
}

907 908 909
static int cpsw_get_coalesce(struct net_device *ndev,
				struct ethtool_coalesce *coal)
{
910
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
911

912
	coal->rx_coalesce_usecs = cpsw->coal_intvl;
913 914 915 916 917 918 919 920 921 922 923 924
	return 0;
}

/* ethtool set_coalesce: program the wrapper's interrupt pacer for the
 * requested rx_coalesce_usecs (0 disables pacing).  The effective value
 * is clamped to [CPSW_CMINTMIN_INTVL, CPSW_CMINTMAX_INTVL], optionally
 * stretched by an additional prescale divider for longer intervals.
 */
static int cpsw_set_coalesce(struct net_device *ndev,
				struct ethtool_coalesce *coal)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	u32 int_ctrl;
	u32 num_interrupts = 0;
	u32 prescale = 0;
	u32 addnl_dvdr = 1;
	u32 coal_intvl = 0;
	struct cpsw_common *cpsw = priv->cpsw;

	coal_intvl = coal->rx_coalesce_usecs;

	int_ctrl =  readl(&cpsw->wr_regs->int_control);
	/* bus clocks per 4us pacer pulse */
	prescale = cpsw->bus_freq_mhz * 4;

	if (!coal->rx_coalesce_usecs) {
		/* zero disables interrupt pacing entirely */
		int_ctrl &= ~(CPSW_INTPRESCALE_MASK | CPSW_INTPACEEN);
		goto update_return;
	}

	if (coal_intvl < CPSW_CMINTMIN_INTVL)
		coal_intvl = CPSW_CMINTMIN_INTVL;

	if (coal_intvl > CPSW_CMINTMAX_INTVL) {
		/* Interrupt pacer works with 4us Pulse, we can
		 * throttle further by dilating the 4us pulse.
		 */
		addnl_dvdr = CPSW_INTPRESCALE_MASK / prescale;

		if (addnl_dvdr > 1) {
			prescale *= addnl_dvdr;
			if (coal_intvl > (CPSW_CMINTMAX_INTVL * addnl_dvdr))
				coal_intvl = (CPSW_CMINTMAX_INTVL
						* addnl_dvdr);
		} else {
			addnl_dvdr = 1;
			coal_intvl = CPSW_CMINTMAX_INTVL;
		}
	}

	/* convert the interval into an interrupts-per-millisecond limit */
	num_interrupts = (1000 * addnl_dvdr) / coal_intvl;
	writel(num_interrupts, &cpsw->wr_regs->rx_imax);
	writel(num_interrupts, &cpsw->wr_regs->tx_imax);

	int_ctrl |= CPSW_INTPACEEN;
	int_ctrl &= (~CPSW_INTPRESCALE_MASK);
	int_ctrl |= (prescale & CPSW_INTPRESCALE_MASK);

update_return:
	writel(int_ctrl, &cpsw->wr_regs->int_control);

	cpsw_notice(priv, timer, "Set coalesce to %d usecs.\n", coal_intvl);
	cpsw->coal_intvl = coal_intvl;

	return 0;
}

974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007
static int cpsw_get_sset_count(struct net_device *ndev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return CPSW_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

/* ethtool: copy the statistic names into @data, one fixed-size
 * ETH_GSTRING_LEN slot per entry of cpsw_gstrings_stats.
 */
static void cpsw_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
	int idx;

	if (stringset != ETH_SS_STATS)
		return;

	for (idx = 0; idx < CPSW_STATS_LEN; idx++) {
		memcpy(data, cpsw_gstrings_stats[idx].stat_string,
		       ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}
}

/* ethtool: fill @data with hardware MIB counters (read from hw_stats
 * registers) and the CPDMA rx/tx channel software statistics.
 */
static void cpsw_get_ethtool_stats(struct net_device *ndev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct cpdma_chan_stats rx_stats;
	struct cpdma_chan_stats tx_stats;
	u32 val;
	u8 *p;
	int i;
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	/* Collect Davinci CPDMA stats for Rx and Tx Channel */
	cpdma_chan_get_stats(cpsw->rxch, &rx_stats);
	cpdma_chan_get_stats(cpsw->txch, &tx_stats);

	for (i = 0; i < CPSW_STATS_LEN; i++) {
		switch (cpsw_gstrings_stats[i].type) {
		case CPSW_STATS:
			val = readl(cpsw->hw_stats +
				    cpsw_gstrings_stats[i].stat_offset);
			data[i] = val;
			break;

		case CPDMA_RX_STATS:
			p = (u8 *)&rx_stats +
				cpsw_gstrings_stats[i].stat_offset;
			data[i] = *(u32 *)p;
			break;

		case CPDMA_TX_STATS:
			p = (u8 *)&tx_stats +
				cpsw_gstrings_stats[i].stat_offset;
			data[i] = *(u32 *)p;
			break;
		}
	}
}

1037
static int cpsw_common_res_usage_state(struct cpsw_common *cpsw)
1038 1039 1040 1041
{
	u32 i;
	u32 usage_count = 0;

1042
	if (!cpsw->data.dual_emac)
1043 1044
		return 0;

1045 1046
	for (i = 0; i < cpsw->data.slaves; i++)
		if (cpsw->slaves[i].open_stat)
1047 1048 1049 1050 1051
			usage_count++;

	return usage_count;
}

1052 1053
static inline int cpsw_tx_packet_submit(struct cpsw_priv *priv,
					struct sk_buff *skb)
1054
{
1055 1056 1057
	struct cpsw_common *cpsw = priv->cpsw;

	return cpdma_chan_submit(cpsw->txch, skb, skb->data, skb->len,
1058
				 priv->emac_port + cpsw->data.dual_emac);
1059 1060 1061 1062 1063 1064
}

/* Install the default ALE entries that isolate a dual-EMAC slave port:
 * its reserved port VLAN, the broadcast mcast entry on that VLAN, and a
 * secure ucast entry for the port's own MAC on the host port.
 */
static inline void cpsw_add_dual_emac_def_ale_entries(
		struct cpsw_priv *priv, struct cpsw_slave *slave,
		u32 slave_port)
{
	struct cpsw_common *cpsw = priv->cpsw;
	u32 port_mask = 1 << slave_port | ALE_PORT_HOST;

	if (cpsw->version == CPSW_VERSION_1)
		slave_write(slave, slave->port_vlan, CPSW1_PORT_VLAN);
	else
		slave_write(slave, slave->port_vlan, CPSW2_PORT_VLAN);
	cpsw_ale_add_vlan(cpsw->ale, slave->port_vlan, port_mask,
			  port_mask, port_mask, 0);
	cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
			   port_mask, ALE_VLAN, slave->port_vlan, 0);
	cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
			   HOST_PORT_NUM, ALE_VLAN |
			   ALE_SECURE, slave->port_vlan);
}

1081
static void soft_reset_slave(struct cpsw_slave *slave)
1082 1083 1084
{
	char name[32];

1085
	snprintf(name, sizeof(name), "slave-%d", slave->slave_num);
1086
	soft_reset(name, &slave->sliver->soft_reset);
1087 1088 1089 1090 1091
}

static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	u32 slave_port;
1092
	struct cpsw_common *cpsw = priv->cpsw;
1093 1094

	soft_reset_slave(slave);
1095 1096 1097

	/* setup priority mapping */
	__raw_writel(RX_PRIORITY_MAPPING, &slave->sliver->rx_pri_map);
1098

1099
	switch (cpsw->version) {
1100 1101 1102 1103
	case CPSW_VERSION_1:
		slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP);
		break;
	case CPSW_VERSION_2:
1104
	case CPSW_VERSION_3:
1105
	case CPSW_VERSION_4:
1106 1107 1108
		slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP);
		break;
	}
1109 1110

	/* setup max packet size, and mac address */
1111
	__raw_writel(cpsw->rx_packet_max, &slave->sliver->rx_maxlen);
1112 1113 1114 1115
	cpsw_set_slave_mac(slave, priv);

	slave->mac_control = 0;	/* no link yet */

1116
	slave_port = cpsw_get_slave_port(slave->slave_num);
1117

1118
	if (cpsw->data.dual_emac)
1119 1120
		cpsw_add_dual_emac_def_ale_entries(priv, slave, slave_port);
	else
1121
		cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
1122
				   1 << slave_port, 0, 0, ALE_MCAST_FWD_2);
1123

1124
	if (slave->data->phy_node) {
1125
		slave->phy = of_phy_connect(priv->ndev, slave->data->phy_node,
1126
				 &cpsw_adjust_link, 0, slave->data->phy_if);
1127 1128 1129 1130 1131 1132 1133
		if (!slave->phy) {
			dev_err(priv->dev, "phy \"%s\" not found on slave %d\n",
				slave->data->phy_node->full_name,
				slave->slave_num);
			return;
		}
	} else {
1134
		slave->phy = phy_connect(priv->ndev, slave->data->phy_id,
1135
				 &cpsw_adjust_link, slave->data->phy_if);
1136 1137 1138 1139 1140 1141 1142 1143 1144
		if (IS_ERR(slave->phy)) {
			dev_err(priv->dev,
				"phy \"%s\" not found on slave %d, err %ld\n",
				slave->data->phy_id, slave->slave_num,
				PTR_ERR(slave->phy));
			slave->phy = NULL;
			return;
		}
	}
1145

1146
	phy_attached_info(slave->phy);
1147

1148 1149 1150
	phy_start(slave->phy);

	/* Configure GMII_SEL register */
1151
	cpsw_phy_sel(cpsw->dev, slave->phy->interface, slave->slave_num);
1152 1153
}

1154 1155
static inline void cpsw_add_default_vlan(struct cpsw_priv *priv)
{
1156 1157
	struct cpsw_common *cpsw = priv->cpsw;
	const int vlan = cpsw->data.default_vlan;
1158 1159
	u32 reg;
	int i;
1160
	int unreg_mcast_mask;
1161

1162
	reg = (cpsw->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN :
1163 1164
	       CPSW2_PORT_VLAN;

1165
	writel(vlan, &cpsw->host_port_regs->port_vlan);
1166

1167 1168
	for (i = 0; i < cpsw->data.slaves; i++)
		slave_write(cpsw->slaves + i, vlan, reg);
1169

1170 1171 1172 1173 1174
	if (priv->ndev->flags & IFF_ALLMULTI)
		unreg_mcast_mask = ALE_ALL_PORTS;
	else
		unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2;

1175
	cpsw_ale_add_vlan(cpsw->ale, vlan, ALE_ALL_PORTS,
1176 1177
			  ALE_ALL_PORTS, ALE_ALL_PORTS,
			  unreg_mcast_mask);
1178 1179
}

1180 1181
static void cpsw_init_host_port(struct cpsw_priv *priv)
{
1182
	u32 fifo_mode;
1183 1184
	u32 control_reg;
	struct cpsw_common *cpsw = priv->cpsw;
1185

1186
	/* soft reset the controller and initialize ale */
1187
	soft_reset("cpsw", &cpsw->regs->soft_reset);
1188
	cpsw_ale_start(cpsw->ale);
1189 1190

	/* switch to vlan unaware mode */
1191
	cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_VLAN_AWARE,
1192
			     CPSW_ALE_VLAN_AWARE);
1193
	control_reg = readl(&cpsw->regs->control);
1194
	control_reg |= CPSW_VLAN_AWARE;
1195
	writel(control_reg, &cpsw->regs->control);
1196
	fifo_mode = (cpsw->data.dual_emac) ? CPSW_FIFO_DUAL_MAC_MODE :
1197
		     CPSW_FIFO_NORMAL_MODE;
1198
	writel(fifo_mode, &cpsw->host_port_regs->tx_in_ctl);
1199 1200 1201

	/* setup host port priority mapping */
	__raw_writel(CPDMA_TX_PRIORITY_MAP,
1202 1203
		     &cpsw->host_port_regs->cpdma_tx_pri_map);
	__raw_writel(0, &cpsw->host_port_regs->cpdma_rx_chan_map);
1204

1205
	cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM,
1206 1207
			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);

1208
	if (!cpsw->data.dual_emac) {
1209
		cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM,
1210
				   0, 0);
1211
		cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
1212
				   ALE_PORT_HOST, 0, 0, ALE_MCAST_FWD_2);
1213
	}
1214 1215
}

1216
static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_common *cpsw)
1217
{
1218 1219
	u32 slave_port;

1220
	slave_port = cpsw_get_slave_port(slave->slave_num);
1221

1222 1223 1224 1225 1226
	if (!slave->phy)
		return;
	phy_stop(slave->phy);
	phy_disconnect(slave->phy);
	slave->phy = NULL;
1227
	cpsw_ale_control_set(cpsw->ale, slave_port,
1228
			     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
1229
	soft_reset_slave(slave);
1230 1231
}

1232 1233 1234
static int cpsw_ndo_open(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
1235
	struct cpsw_common *cpsw = priv->cpsw;
1236 1237 1238
	int i, ret;
	u32 reg;

1239
	ret = pm_runtime_get_sync(cpsw->dev);
1240
	if (ret < 0) {
1241
		pm_runtime_put_noidle(cpsw->dev);
1242 1243
		return ret;
	}
1244

1245
	if (!cpsw_common_res_usage_state(cpsw))
1246
		cpsw_intr_disable(cpsw);
1247 1248
	netif_carrier_off(ndev);

1249
	reg = cpsw->version;
1250 1251 1252 1253 1254 1255

	dev_info(priv->dev, "initializing cpsw version %d.%d (%d)\n",
		 CPSW_MAJOR_VERSION(reg), CPSW_MINOR_VERSION(reg),
		 CPSW_RTL_VERSION(reg));

	/* initialize host and slave ports */
1256
	if (!cpsw_common_res_usage_state(cpsw))
1257
		cpsw_init_host_port(priv);
1258 1259
	for_each_slave(priv, cpsw_slave_open, priv);

1260
	/* Add default VLAN */
1261
	if (!cpsw->data.dual_emac)
1262 1263
		cpsw_add_default_vlan(priv);
	else
1264
		cpsw_ale_add_vlan(cpsw->ale, cpsw->data.default_vlan,
1265
				  ALE_ALL_PORTS, ALE_ALL_PORTS, 0, 0);
1266

1267
	if (!cpsw_common_res_usage_state(cpsw)) {
1268
		int buf_num;
1269

1270
		/* setup tx dma to fixed prio and zero offset */
1271 1272
		cpdma_control_set(cpsw->dma, CPDMA_TX_PRIO_FIXED, 1);
		cpdma_control_set(cpsw->dma, CPDMA_RX_BUFFER_OFFSET, 0);
1273

1274
		/* disable priority elevation */
1275
		__raw_writel(0, &cpsw->regs->ptype);
1276

1277
		/* enable statistics collection only on all ports */
1278
		__raw_writel(0x7, &cpsw->regs->stat_port_en);
1279

1280
		/* Enable internal fifo flow control */
1281
		writel(0x7, &cpsw->regs->flow_control);
1282

1283 1284
		napi_enable(&cpsw->napi_rx);
		napi_enable(&cpsw->napi_tx);
1285

1286 1287 1288
		if (cpsw->tx_irq_disabled) {
			cpsw->tx_irq_disabled = false;
			enable_irq(cpsw->irqs_table[1]);
1289 1290
		}

1291 1292 1293
		if (cpsw->rx_irq_disabled) {
			cpsw->rx_irq_disabled = false;
			enable_irq(cpsw->irqs_table[0]);
1294 1295
		}

1296
		buf_num = cpdma_chan_get_rx_buf_num(cpsw->dma);
1297
		for (i = 0; i < buf_num; i++) {
1298
			struct sk_buff *skb;
1299

1300
			ret = -ENOMEM;
1301
			skb = __netdev_alloc_skb_ip_align(priv->ndev,
1302
					cpsw->rx_packet_max, GFP_KERNEL);
1303
			if (!skb)
1304
				goto err_cleanup;
1305 1306
			ret = cpdma_chan_submit(cpsw->rxch, skb, skb->data,
						skb_tailroom(skb), 0);
1307 1308 1309 1310
			if (ret < 0) {
				kfree_skb(skb);
				goto err_cleanup;
			}
1311
			kmemleak_not_leak(skb);
1312 1313 1314 1315 1316
		}
		/* continue even if we didn't manage to submit all
		 * receive descs
		 */
		cpsw_info(priv, ifup, "submitted %d rx descriptors\n", i);
1317

1318
		if (cpts_register(cpsw->dev, cpsw->cpts,
1319 1320
				  cpsw->data.cpts_clock_mult,
				  cpsw->data.cpts_clock_shift))
1321 1322
			dev_err(priv->dev, "error registering cpts device\n");

1323 1324
	}

1325
	/* Enable Interrupt pacing if configured */
1326
	if (cpsw->coal_intvl != 0) {
1327 1328
		struct ethtool_coalesce coal;

1329
		coal.rx_coalesce_usecs = cpsw->coal_intvl;
1330 1331 1332
		cpsw_set_coalesce(ndev, &coal);
	}

1333 1334
	cpdma_ctlr_start(cpsw->dma);
	cpsw_intr_enable(cpsw);
1335

1336 1337
	if (cpsw->data.dual_emac)
		cpsw->slaves[priv->emac_port].open_stat = true;
1338 1339
	return 0;

1340
err_cleanup:
1341
	cpdma_ctlr_stop(cpsw->dma);
1342
	for_each_slave(priv, cpsw_slave_stop, cpsw);
1343
	pm_runtime_put_sync(cpsw->dev);
1344 1345
	netif_carrier_off(priv->ndev);
	return ret;
1346 1347 1348 1349 1350
}

static int cpsw_ndo_stop(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
1351
	struct cpsw_common *cpsw = priv->cpsw;
1352 1353 1354 1355

	cpsw_info(priv, ifdown, "shutting down cpsw device\n");
	netif_stop_queue(priv->ndev);
	netif_carrier_off(priv->ndev);
1356

1357
	if (cpsw_common_res_usage_state(cpsw) <= 1) {
1358 1359
		napi_disable(&cpsw->napi_rx);
		napi_disable(&cpsw->napi_tx);
1360
		cpts_unregister(cpsw->cpts);
1361 1362
		cpsw_intr_disable(cpsw);
		cpdma_ctlr_stop(cpsw->dma);
1363
		cpsw_ale_stop(cpsw->ale);
1364
	}
1365
	for_each_slave(priv, cpsw_slave_stop, cpsw);
1366
	pm_runtime_put_sync(cpsw->dev);
1367 1368
	if (cpsw->data.dual_emac)
		cpsw->slaves[priv->emac_port].open_stat = false;
1369 1370 1371 1372 1373 1374 1375 1376
	return 0;
}

static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
				       struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	int ret;
1377
	struct cpsw_common *cpsw = priv->cpsw;
1378

1379
	netif_trans_update(ndev);
1380 1381 1382

	if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) {
		cpsw_err(priv, tx_err, "packet pad failed\n");
1383
		ndev->stats.tx_dropped++;
1384 1385 1386
		return NETDEV_TX_OK;
	}

1387
	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
1388
				cpsw->cpts->tx_enable)
1389 1390 1391 1392
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

	skb_tx_timestamp(skb);

1393
	ret = cpsw_tx_packet_submit(priv, skb);
1394 1395 1396 1397 1398
	if (unlikely(ret != 0)) {
		cpsw_err(priv, tx_err, "desc submit failed\n");
		goto fail;
	}

1399 1400 1401
	/* If there is no more tx desc left free then we need to
	 * tell the kernel to stop sending us tx frames.
	 */
1402
	if (unlikely(!cpdma_check_free_tx_desc(cpsw->txch)))
1403 1404
		netif_stop_queue(ndev);

1405 1406
	return NETDEV_TX_OK;
fail:
1407
	ndev->stats.tx_dropped++;
1408 1409 1410 1411
	netif_stop_queue(ndev);
	return NETDEV_TX_BUSY;
}

#ifdef CONFIG_TI_CPTS

/* Program CPSW v1 per-slave PTP timestamping: enable/disable tx and rx
 * timestamping on the active slave and set the 1588 sequence-id/ltype.
 */
static void cpsw_hwtstamp_v1(struct cpsw_common *cpsw)
{
	struct cpsw_slave *slave = &cpsw->slaves[cpsw->data.active_slave];
	u32 ts_en, seq_id;

	if (!cpsw->cpts->tx_enable && !cpsw->cpts->rx_enable) {
		slave_write(slave, 0, CPSW1_TS_CTL);
		return;
	}

	seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
	ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS;

	if (cpsw->cpts->tx_enable)
		ts_en |= CPSW_V1_TS_TX_EN;

	if (cpsw->cpts->rx_enable)
		ts_en |= CPSW_V1_TS_RX_EN;

	slave_write(slave, ts_en, CPSW1_TS_CTL);
	slave_write(slave, seq_id, CPSW1_TS_SEQ_LTYPE);
}

static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
{
1439
	struct cpsw_slave *slave;
1440
	struct cpsw_common *cpsw = priv->cpsw;
1441 1442
	u32 ctrl, mtype;

1443 1444
	if (cpsw->data.dual_emac)
		slave = &cpsw->slaves[priv->emac_port];
1445
	else
1446
		slave = &cpsw->slaves[cpsw->data.active_slave];
1447

1448
	ctrl = slave_read(slave, CPSW2_CONTROL);
1449
	switch (cpsw->version) {
1450 1451
	case CPSW_VERSION_2:
		ctrl &= ~CTRL_V2_ALL_TS_MASK;
1452

1453
		if (cpsw->cpts->tx_enable)
1454
			ctrl |= CTRL_V2_TX_TS_BITS;
1455

1456
		if (cpsw->cpts->rx_enable)
1457
			ctrl |= CTRL_V2_RX_TS_BITS;
1458
		break;
1459 1460 1461 1462
	case CPSW_VERSION_3:
	default:
		ctrl &= ~CTRL_V3_ALL_TS_MASK;

1463
		if (cpsw->cpts->tx_enable)
1464 1465
			ctrl |= CTRL_V3_TX_TS_BITS;

1466
		if (cpsw->cpts->rx_enable)
1467
			ctrl |= CTRL_V3_RX_TS_BITS;
1468
		break;
1469
	}
1470 1471 1472 1473 1474

	mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS;

	slave_write(slave, mtype, CPSW2_TS_SEQ_MTYPE);
	slave_write(slave, ctrl, CPSW2_CONTROL);
1475
	__raw_writel(ETH_P_1588, &cpsw->regs->ts_ltype);
1476 1477
}

1478
static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
1479
{
1480
	struct cpsw_priv *priv = netdev_priv(dev);
1481
	struct hwtstamp_config cfg;
1482 1483
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpts *cpts = cpsw->cpts;
1484

1485 1486 1487
	if (cpsw->version != CPSW_VERSION_1 &&
	    cpsw->version != CPSW_VERSION_2 &&
	    cpsw->version != CPSW_VERSION_3)
1488 1489
		return -EOPNOTSUPP;

1490 1491 1492 1493 1494 1495 1496
	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;

	/* reserved for future extensions */
	if (cfg.flags)
		return -EINVAL;

1497
	if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
1498 1499 1500 1501 1502 1503 1504 1505 1506 1507 1508 1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524
		return -ERANGE;

	switch (cfg.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		cpts->rx_enable = 0;
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		return -ERANGE;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		cpts->rx_enable = 1;
		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		break;
	default:
		return -ERANGE;
	}

1525 1526
	cpts->tx_enable = cfg.tx_type == HWTSTAMP_TX_ON;

1527
	switch (cpsw->version) {
1528
	case CPSW_VERSION_1:
1529
		cpsw_hwtstamp_v1(cpsw);
1530 1531
		break;
	case CPSW_VERSION_2:
1532
	case CPSW_VERSION_3:
1533 1534 1535
		cpsw_hwtstamp_v2(priv);
		break;
	default:
1536
		WARN_ON(1);
1537 1538 1539 1540 1541
	}

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}

1542 1543
static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
1544 1545
	struct cpsw_common *cpsw = ndev_to_cpsw(dev);
	struct cpts *cpts = cpsw->cpts;
1546 1547
	struct hwtstamp_config cfg;

1548 1549 1550
	if (cpsw->version != CPSW_VERSION_1 &&
	    cpsw->version != CPSW_VERSION_2 &&
	    cpsw->version != CPSW_VERSION_3)
1551 1552 1553 1554 1555 1556 1557 1558 1559 1560
		return -EOPNOTSUPP;

	cfg.flags = 0;
	cfg.tx_type = cpts->tx_enable ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
	cfg.rx_filter = (cpts->rx_enable ?
			 HWTSTAMP_FILTER_PTP_V2_EVENT : HWTSTAMP_FILTER_NONE);

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}

1561 1562 1563 1564
#endif /*CONFIG_TI_CPTS*/

static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
1565
	struct cpsw_priv *priv = netdev_priv(dev);
1566 1567
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);
1568

1569 1570 1571
	if (!netif_running(dev))
		return -EINVAL;

1572
	switch (cmd) {
1573
#ifdef CONFIG_TI_CPTS
1574
	case SIOCSHWTSTAMP:
1575 1576 1577
		return cpsw_hwtstamp_set(dev, req);
	case SIOCGHWTSTAMP:
		return cpsw_hwtstamp_get(dev, req);
1578
#endif
1579 1580
	}

1581
	if (!cpsw->slaves[slave_no].phy)
1582
		return -EOPNOTSUPP;
1583
	return phy_mii_ioctl(cpsw->slaves[slave_no].phy, req, cmd);
1584 1585
}

1586 1587 1588
static void cpsw_ndo_tx_timeout(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
1589
	struct cpsw_common *cpsw = priv->cpsw;
1590 1591

	cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n");
1592
	ndev->stats.tx_errors++;
1593 1594 1595 1596
	cpsw_intr_disable(cpsw);
	cpdma_chan_stop(cpsw->txch);
	cpdma_chan_start(cpsw->txch);
	cpsw_intr_enable(cpsw);
1597 1598
}

1599 1600 1601 1602
static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct sockaddr *addr = (struct sockaddr *)p;
1603
	struct cpsw_common *cpsw = priv->cpsw;
1604 1605
	int flags = 0;
	u16 vid = 0;
1606
	int ret;
1607 1608 1609 1610

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

1611
	ret = pm_runtime_get_sync(cpsw->dev);
1612
	if (ret < 0) {
1613
		pm_runtime_put_noidle(cpsw->dev);
1614 1615 1616
		return ret;
	}

1617 1618
	if (cpsw->data.dual_emac) {
		vid = cpsw->slaves[priv->emac_port].port_vlan;
1619 1620 1621
		flags = ALE_VLAN;
	}

1622
	cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM,
1623
			   flags, vid);
1624
	cpsw_ale_add_ucast(cpsw->ale, addr->sa_data, HOST_PORT_NUM,
1625 1626 1627 1628 1629 1630
			   flags, vid);

	memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
	memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
	for_each_slave(priv, cpsw_set_slave_mac, priv);

1631
	pm_runtime_put(cpsw->dev);
1632

1633 1634 1635
	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: run the rx and tx interrupt handlers directly with
 * device interrupts masked.
 */
static void cpsw_ndo_poll_controller(struct net_device *ndev)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	cpsw_intr_disable(cpsw);
	cpsw_rx_interrupt(cpsw->irqs_table[0], cpsw);
	cpsw_tx_interrupt(cpsw->irqs_table[1], cpsw);
	cpsw_intr_enable(cpsw);
}
#endif

1648 1649 1650 1651
static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
				unsigned short vid)
{
	int ret;
1652 1653
	int unreg_mcast_mask = 0;
	u32 port_mask;
1654
	struct cpsw_common *cpsw = priv->cpsw;
1655

1656
	if (cpsw->data.dual_emac) {
1657
		port_mask = (1 << (priv->emac_port + 1)) | ALE_PORT_HOST;
1658

1659 1660 1661 1662 1663 1664 1665 1666 1667 1668
		if (priv->ndev->flags & IFF_ALLMULTI)
			unreg_mcast_mask = port_mask;
	} else {
		port_mask = ALE_ALL_PORTS;

		if (priv->ndev->flags & IFF_ALLMULTI)
			unreg_mcast_mask = ALE_ALL_PORTS;
		else
			unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2;
	}
1669

1670
	ret = cpsw_ale_add_vlan(cpsw->ale, vid, port_mask, 0, port_mask,
1671
				unreg_mcast_mask);
1672 1673 1674
	if (ret != 0)
		return ret;

1675
	ret = cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
1676
				 HOST_PORT_NUM, ALE_VLAN, vid);
1677 1678 1679
	if (ret != 0)
		goto clean_vid;

1680
	ret = cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
1681
				 port_mask, ALE_VLAN, vid, 0);
1682 1683 1684 1685 1686
	if (ret != 0)
		goto clean_vlan_ucast;
	return 0;

clean_vlan_ucast:
1687
	cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
1688
			   HOST_PORT_NUM, ALE_VLAN, vid);
1689
clean_vid:
1690
	cpsw_ale_del_vlan(cpsw->ale, vid, 0);
1691 1692 1693 1694
	return ret;
}

static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
1695
				    __be16 proto, u16 vid)
1696 1697
{
	struct cpsw_priv *priv = netdev_priv(ndev);
1698
	struct cpsw_common *cpsw = priv->cpsw;
1699
	int ret;
1700

1701
	if (vid == cpsw->data.default_vlan)
1702 1703
		return 0;

1704
	ret = pm_runtime_get_sync(cpsw->dev);
1705
	if (ret < 0) {
1706
		pm_runtime_put_noidle(cpsw->dev);
1707 1708 1709
		return ret;
	}

1710
	if (cpsw->data.dual_emac) {
1711 1712 1713 1714 1715 1716
		/* In dual EMAC, reserved VLAN id should not be used for
		 * creating VLAN interfaces as this can break the dual
		 * EMAC port separation
		 */
		int i;

1717 1718
		for (i = 0; i < cpsw->data.slaves; i++) {
			if (vid == cpsw->slaves[i].port_vlan)
1719 1720 1721 1722
				return -EINVAL;
		}
	}

1723
	dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
1724 1725
	ret = cpsw_add_vlan_ale_entry(priv, vid);

1726
	pm_runtime_put(cpsw->dev);
1727
	return ret;
1728 1729 1730
}

static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
1731
				     __be16 proto, u16 vid)
1732 1733
{
	struct cpsw_priv *priv = netdev_priv(ndev);
1734
	struct cpsw_common *cpsw = priv->cpsw;
1735 1736
	int ret;

1737
	if (vid == cpsw->data.default_vlan)
1738 1739
		return 0;

1740
	ret = pm_runtime_get_sync(cpsw->dev);
1741
	if (ret < 0) {
1742
		pm_runtime_put_noidle(cpsw->dev);
1743 1744 1745
		return ret;
	}

1746
	if (cpsw->data.dual_emac) {
1747 1748
		int i;

1749 1750
		for (i = 0; i < cpsw->data.slaves; i++) {
			if (vid == cpsw->slaves[i].port_vlan)
1751 1752 1753 1754
				return -EINVAL;
		}
	}

1755
	dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid);
1756
	ret = cpsw_ale_del_vlan(cpsw->ale, vid, 0);
1757 1758 1759
	if (ret != 0)
		return ret;

1760
	ret = cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
1761
				 HOST_PORT_NUM, ALE_VLAN, vid);
1762 1763 1764
	if (ret != 0)
		return ret;

1765
	ret = cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
1766
				 0, ALE_VLAN, vid);
1767
	pm_runtime_put(cpsw->dev);
1768
	return ret;
1769 1770
}

1771 1772 1773 1774
static const struct net_device_ops cpsw_netdev_ops = {
	.ndo_open		= cpsw_ndo_open,
	.ndo_stop		= cpsw_ndo_stop,
	.ndo_start_xmit		= cpsw_ndo_start_xmit,
1775
	.ndo_set_mac_address	= cpsw_ndo_set_mac_address,
1776
	.ndo_do_ioctl		= cpsw_ndo_ioctl,
1777
	.ndo_validate_addr	= eth_validate_addr,
1778
	.ndo_change_mtu		= eth_change_mtu,
1779
	.ndo_tx_timeout		= cpsw_ndo_tx_timeout,
1780
	.ndo_set_rx_mode	= cpsw_ndo_set_rx_mode,
1781 1782 1783
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cpsw_ndo_poll_controller,
#endif
1784 1785
	.ndo_vlan_rx_add_vid	= cpsw_ndo_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= cpsw_ndo_vlan_rx_kill_vid,
1786 1787
};

1788 1789
static int cpsw_get_regs_len(struct net_device *ndev)
{
1790
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
1791

1792
	return cpsw->data.ale_entries * ALE_ENTRY_WORDS * sizeof(u32);
1793 1794 1795 1796 1797 1798
}

static void cpsw_get_regs(struct net_device *ndev,
			  struct ethtool_regs *regs, void *p)
{
	u32 *reg = p;
1799
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
1800 1801

	/* update CPSW IP version */
1802
	regs->version = cpsw->version;
1803

1804
	cpsw_ale_dump(cpsw->ale, reg);
1805 1806
}

1807 1808 1809
static void cpsw_get_drvinfo(struct net_device *ndev,
			     struct ethtool_drvinfo *info)
{
1810
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
1811
	struct platform_device	*pdev = to_platform_device(cpsw->dev);
1812

1813
	strlcpy(info->driver, "cpsw", sizeof(info->driver));
1814
	strlcpy(info->version, "1.0", sizeof(info->version));
1815
	strlcpy(info->bus_info, pdev->name, sizeof(info->bus_info));
1816 1817 1818 1819 1820 1821 1822 1823 1824 1825 1826 1827 1828 1829
}

/* ethtool: return the current netif message-level bitmask. */
static u32 cpsw_get_msglevel(struct net_device *ndev)
{
	struct cpsw_priv *cpsw_dev = netdev_priv(ndev);

	return cpsw_dev->msg_enable;
}

/* ethtool: set the netif message-level bitmask. */
static void cpsw_set_msglevel(struct net_device *ndev, u32 value)
{
	struct cpsw_priv *cpsw_dev = netdev_priv(ndev);

	cpsw_dev->msg_enable = value;
}

1830 1831 1832 1833
static int cpsw_get_ts_info(struct net_device *ndev,
			    struct ethtool_ts_info *info)
{
#ifdef CONFIG_TI_CPTS
1834
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
1835 1836 1837 1838 1839 1840 1841 1842

	info->so_timestamping =
		SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_TX_SOFTWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RX_SOFTWARE |
		SOF_TIMESTAMPING_SOFTWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE;
1843
	info->phc_index = cpsw->cpts->phc_index;
1844 1845 1846 1847 1848 1849 1850 1851 1852 1853 1854 1855 1856 1857 1858 1859 1860 1861
	info->tx_types =
		(1 << HWTSTAMP_TX_OFF) |
		(1 << HWTSTAMP_TX_ON);
	info->rx_filters =
		(1 << HWTSTAMP_FILTER_NONE) |
		(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
#else
	info->so_timestamping =
		SOF_TIMESTAMPING_TX_SOFTWARE |
		SOF_TIMESTAMPING_RX_SOFTWARE |
		SOF_TIMESTAMPING_SOFTWARE;
	info->phc_index = -1;
	info->tx_types = 0;
	info->rx_filters = 0;
#endif
	return 0;
}

1862 1863 1864 1865
static int cpsw_get_settings(struct net_device *ndev,
			     struct ethtool_cmd *ecmd)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
1866 1867
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);
1868

1869 1870
	if (cpsw->slaves[slave_no].phy)
		return phy_ethtool_gset(cpsw->slaves[slave_no].phy, ecmd);
1871 1872 1873 1874 1875 1876 1877
	else
		return -EOPNOTSUPP;
}

static int cpsw_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
1878 1879
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);
1880

1881 1882
	if (cpsw->slaves[slave_no].phy)
		return phy_ethtool_sset(cpsw->slaves[slave_no].phy, ecmd);
1883 1884 1885 1886
	else
		return -EOPNOTSUPP;
}

1887 1888 1889
static void cpsw_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
1890 1891
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);
1892 1893 1894 1895

	wol->supported = 0;
	wol->wolopts = 0;

1896 1897
	if (cpsw->slaves[slave_no].phy)
		phy_ethtool_get_wol(cpsw->slaves[slave_no].phy, wol);
1898 1899 1900 1901 1902
}

static int cpsw_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
1903 1904
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);
1905

1906 1907
	if (cpsw->slaves[slave_no].phy)
		return phy_ethtool_set_wol(cpsw->slaves[slave_no].phy, wol);
1908 1909 1910 1911
	else
		return -EOPNOTSUPP;
}

1912 1913 1914 1915 1916 1917 1918 1919 1920 1921 1922 1923 1924 1925 1926 1927 1928 1929 1930 1931 1932 1933 1934
static void cpsw_get_pauseparam(struct net_device *ndev,
				struct ethtool_pauseparam *pause)
{
	struct cpsw_priv *priv = netdev_priv(ndev);

	pause->autoneg = AUTONEG_DISABLE;
	pause->rx_pause = priv->rx_pause ? true : false;
	pause->tx_pause = priv->tx_pause ? true : false;
}

/* ethtool: store the requested pause flags and re-run the link
 * adjustment on every slave so the MAC control is reprogrammed.
 */
static int cpsw_set_pauseparam(struct net_device *ndev,
			       struct ethtool_pauseparam *pause)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	bool link;

	priv->rx_pause = !!pause->rx_pause;
	priv->tx_pause = !!pause->tx_pause;

	for_each_slave(priv, _cpsw_adjust_link, priv, &link);
	return 0;
}

1935 1936 1937
static int cpsw_ethtool_op_begin(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
1938
	struct cpsw_common *cpsw = priv->cpsw;
1939 1940
	int ret;

1941
	ret = pm_runtime_get_sync(cpsw->dev);
1942 1943
	if (ret < 0) {
		cpsw_err(priv, drv, "ethtool begin failed %d\n", ret);
1944
		pm_runtime_put_noidle(cpsw->dev);
1945 1946 1947 1948 1949 1950 1951 1952 1953 1954
	}

	return ret;
}

static void cpsw_ethtool_op_complete(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	int ret;

1955
	ret = pm_runtime_put(priv->cpsw->dev);
1956 1957 1958 1959
	if (ret < 0)
		cpsw_err(priv, drv, "ethtool complete failed %d\n", ret);
}

1960 1961 1962 1963 1964
static const struct ethtool_ops cpsw_ethtool_ops = {
	.get_drvinfo	= cpsw_get_drvinfo,
	.get_msglevel	= cpsw_get_msglevel,
	.set_msglevel	= cpsw_set_msglevel,
	.get_link	= ethtool_op_get_link,
1965
	.get_ts_info	= cpsw_get_ts_info,
1966 1967
	.get_settings	= cpsw_get_settings,
	.set_settings	= cpsw_set_settings,
1968 1969
	.get_coalesce	= cpsw_get_coalesce,
	.set_coalesce	= cpsw_set_coalesce,
1970 1971 1972
	.get_sset_count		= cpsw_get_sset_count,
	.get_strings		= cpsw_get_strings,
	.get_ethtool_stats	= cpsw_get_ethtool_stats,
1973 1974
	.get_pauseparam		= cpsw_get_pauseparam,
	.set_pauseparam		= cpsw_set_pauseparam,
1975 1976
	.get_wol	= cpsw_get_wol,
	.set_wol	= cpsw_set_wol,
1977 1978
	.get_regs_len	= cpsw_get_regs_len,
	.get_regs	= cpsw_get_regs,
1979 1980
	.begin		= cpsw_ethtool_op_begin,
	.complete	= cpsw_ethtool_op_complete,
1981 1982
};

1983
static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_common *cpsw,
1984
			    u32 slave_reg_ofs, u32 sliver_reg_ofs)
1985
{
1986
	void __iomem		*regs = cpsw->regs;
1987
	int			slave_num = slave->slave_num;
1988
	struct cpsw_slave_data	*data = cpsw->data.slave_data + slave_num;
1989 1990

	slave->data	= data;
1991 1992
	slave->regs	= regs + slave_reg_ofs;
	slave->sliver	= regs + sliver_reg_ofs;
1993
	slave->port_vlan = data->dual_emac_res_vlan;
1994 1995
}

/* cpsw_probe_dt - fill platform data from the cpsw device-tree node
 * @data: platform data block to populate
 * @pdev: cpsw platform device whose of_node is parsed
 *
 * Reads the mandatory switch-wide properties (slaves, active_slave,
 * cpts_clock_mult/shift, cpdma_channels, ale_entries, bd_ram_size,
 * mac_control), then walks each "slave" child node collecting the PHY
 * connection (phy-handle, fixed-link, or legacy phy_id), the MAC
 * address, and - in dual_emac mode - the reserved VLAN per port.
 *
 * Returns 0 on success or a negative errno on a fatal parse error.
 * A slave without any usable PHY description is skipped (not fatal).
 */
static int cpsw_probe_dt(struct cpsw_platform_data *data,
			 struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct device_node *slave_node;
	int i = 0, ret;
	u32 prop;

	if (!node)
		return -EINVAL;

	if (of_property_read_u32(node, "slaves", &prop)) {
		dev_err(&pdev->dev, "Missing slaves property in the DT.\n");
		return -EINVAL;
	}
	data->slaves = prop;

	if (of_property_read_u32(node, "active_slave", &prop)) {
		dev_err(&pdev->dev, "Missing active_slave property in the DT.\n");
		return -EINVAL;
	}
	data->active_slave = prop;

	if (of_property_read_u32(node, "cpts_clock_mult", &prop)) {
		dev_err(&pdev->dev, "Missing cpts_clock_mult property in the DT.\n");
		return -EINVAL;
	}
	data->cpts_clock_mult = prop;

	if (of_property_read_u32(node, "cpts_clock_shift", &prop)) {
		dev_err(&pdev->dev, "Missing cpts_clock_shift property in the DT.\n");
		return -EINVAL;
	}
	data->cpts_clock_shift = prop;

	data->slave_data = devm_kzalloc(&pdev->dev, data->slaves
					* sizeof(struct cpsw_slave_data),
					GFP_KERNEL);
	if (!data->slave_data)
		return -ENOMEM;

	if (of_property_read_u32(node, "cpdma_channels", &prop)) {
		dev_err(&pdev->dev, "Missing cpdma_channels property in the DT.\n");
		return -EINVAL;
	}
	data->channels = prop;

	if (of_property_read_u32(node, "ale_entries", &prop)) {
		dev_err(&pdev->dev, "Missing ale_entries property in the DT.\n");
		return -EINVAL;
	}
	data->ale_entries = prop;

	if (of_property_read_u32(node, "bd_ram_size", &prop)) {
		dev_err(&pdev->dev, "Missing bd_ram_size property in the DT.\n");
		return -EINVAL;
	}
	data->bd_ram_size = prop;

	if (of_property_read_u32(node, "mac_control", &prop)) {
		dev_err(&pdev->dev, "Missing mac_control property in the DT.\n");
		return -EINVAL;
	}
	data->mac_control = prop;

	if (of_property_read_bool(node, "dual_emac"))
		data->dual_emac = 1;

	/*
	 * Populate all the child nodes here...
	 */
	ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
	/* We do not want to force this, as in some cases may not have child */
	if (ret)
		dev_warn(&pdev->dev, "Doesn't have any child node\n");

	for_each_available_child_of_node(node, slave_node) {
		struct cpsw_slave_data *slave_data = data->slave_data + i;
		const void *mac_addr = NULL;
		int lenp;
		const __be32 *parp;

		/* This is no slave child node, continue */
		if (strcmp(slave_node->name, "slave"))
			continue;

		/* Preferred binding: explicit phy-handle phandle */
		slave_data->phy_node = of_parse_phandle(slave_node,
							"phy-handle", 0);
		parp = of_get_property(slave_node, "phy_id", &lenp);
		if (slave_data->phy_node) {
			dev_dbg(&pdev->dev,
				"slave[%d] using phy-handle=\"%s\"\n",
				i, slave_data->phy_node->full_name);
		} else if (of_phy_is_fixed_link(slave_node)) {
			/* In the case of a fixed PHY, the DT node associated
			 * to the PHY is the Ethernet MAC DT node.
			 */
			ret = of_phy_register_fixed_link(slave_node);
			if (ret)
				return ret;
			slave_data->phy_node = of_node_get(slave_node);
		} else if (parp) {
			/* Legacy binding: phy_id = <&mdio_node phy_addr> */
			u32 phyid;
			struct device_node *mdio_node;
			struct platform_device *mdio;

			if (lenp != (sizeof(__be32) * 2)) {
				dev_err(&pdev->dev, "Invalid slave[%d] phy_id property\n", i);
				goto no_phy_slave;
			}
			mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
			phyid = be32_to_cpup(parp+1);
			mdio = of_find_device_by_node(mdio_node);
			of_node_put(mdio_node);
			if (!mdio) {
				dev_err(&pdev->dev, "Missing mdio platform device\n");
				return -EINVAL;
			}
			snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
				 PHY_ID_FMT, mdio->name, phyid);
		} else {
			dev_err(&pdev->dev,
				"No slave[%d] phy_id, phy-handle, or fixed-link property\n",
				i);
			goto no_phy_slave;
		}
		slave_data->phy_if = of_get_phy_mode(slave_node);
		if (slave_data->phy_if < 0) {
			dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n",
				i);
			return slave_data->phy_if;
		}

no_phy_slave:
		mac_addr = of_get_mac_address(slave_node);
		if (mac_addr) {
			memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);
		} else {
			/* no DT MAC: try the SoC control-module efuse */
			ret = ti_cm_get_macid(&pdev->dev, i,
					      slave_data->mac_addr);
			if (ret)
				return ret;
		}
		if (data->dual_emac) {
			if (of_property_read_u32(slave_node, "dual_emac_res_vlan",
						 &prop)) {
				dev_err(&pdev->dev, "Missing dual_emac_res_vlan in DT.\n");
				/* fall back to slave number + 1 as reserved VLAN */
				slave_data->dual_emac_res_vlan = i+1;
				dev_err(&pdev->dev, "Using %d as Reserved VLAN for %d slave\n",
					slave_data->dual_emac_res_vlan, i);
			} else {
				slave_data->dual_emac_res_vlan = prop;
			}
		}

		i++;
		if (i == data->slaves)
			break;
	}

	return 0;
}

2159
static int cpsw_probe_dual_emac(struct cpsw_priv *priv)
2160
{
2161 2162
	struct cpsw_common		*cpsw = priv->cpsw;
	struct cpsw_platform_data	*data = &cpsw->data;
2163 2164
	struct net_device		*ndev;
	struct cpsw_priv		*priv_sl2;
2165
	int ret = 0;
2166 2167 2168

	ndev = alloc_etherdev(sizeof(struct cpsw_priv));
	if (!ndev) {
2169
		dev_err(cpsw->dev, "cpsw: error allocating net_device\n");
2170 2171 2172 2173
		return -ENOMEM;
	}

	priv_sl2 = netdev_priv(ndev);
2174
	priv_sl2->cpsw = cpsw;
2175 2176 2177 2178 2179 2180 2181
	priv_sl2->ndev = ndev;
	priv_sl2->dev  = &ndev->dev;
	priv_sl2->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);

	if (is_valid_ether_addr(data->slave_data[1].mac_addr)) {
		memcpy(priv_sl2->mac_addr, data->slave_data[1].mac_addr,
			ETH_ALEN);
2182 2183
		dev_info(cpsw->dev, "cpsw: Detected MACID = %pM\n",
			 priv_sl2->mac_addr);
2184 2185
	} else {
		random_ether_addr(priv_sl2->mac_addr);
2186 2187
		dev_info(cpsw->dev, "cpsw: Random MACID = %pM\n",
			 priv_sl2->mac_addr);
2188 2189 2190 2191
	}
	memcpy(ndev->dev_addr, priv_sl2->mac_addr, ETH_ALEN);

	priv_sl2->emac_port = 1;
2192
	cpsw->slaves[1].ndev = ndev;
2193
	ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
2194 2195

	ndev->netdev_ops = &cpsw_netdev_ops;
2196
	ndev->ethtool_ops = &cpsw_ethtool_ops;
2197 2198

	/* register the network device */
2199
	SET_NETDEV_DEV(ndev, cpsw->dev);
2200 2201
	ret = register_netdev(ndev);
	if (ret) {
2202
		dev_err(cpsw->dev, "cpsw: error registering net device\n");
2203 2204 2205 2206 2207 2208 2209
		free_netdev(ndev);
		ret = -ENODEV;
	}

	return ret;
}

/* Quirk flag: SoCs whose CPSW cannot mask IRQs at the wrapper level and
 * therefore need disable_irq()/enable_irq() handling in the driver.
 */
#define CPSW_QUIRK_IRQ		BIT(0)

static struct platform_device_id cpsw_devtype[] = {
	{
		/* keep it for existing compatibles */
		.name = "cpsw",
		.driver_data = CPSW_QUIRK_IRQ,
	}, {
		.name = "am335x-cpsw",
		.driver_data = CPSW_QUIRK_IRQ,
	}, {
		.name = "am4372-cpsw",
		.driver_data = 0,
	}, {
		.name = "dra7-cpsw",
		.driver_data = 0,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(platform, cpsw_devtype);

/* Indexes into cpsw_devtype[], used by the OF match table below */
enum ti_cpsw_type {
	CPSW = 0,
	AM335X_CPSW,
	AM4372_CPSW,
	DRA7_CPSW,
};

/* OF match table; .data points at the matching cpsw_devtype[] entry so
 * probe can pick up the per-SoC quirk flags.
 */
static const struct of_device_id cpsw_of_mtable[] = {
	{ .compatible = "ti,cpsw", .data = &cpsw_devtype[CPSW], },
	{ .compatible = "ti,am335x-cpsw", .data = &cpsw_devtype[AM335X_CPSW], },
	{ .compatible = "ti,am4372-cpsw", .data = &cpsw_devtype[AM4372_CPSW], },
	{ .compatible = "ti,dra7-cpsw", .data = &cpsw_devtype[DRA7_CPSW], },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, cpsw_of_mtable);

B
Bill Pemberton 已提交
2248
static int cpsw_probe(struct platform_device *pdev)
2249
{
2250
	struct clk			*clk;
2251
	struct cpsw_platform_data	*data;
2252 2253 2254 2255
	struct net_device		*ndev;
	struct cpsw_priv		*priv;
	struct cpdma_params		dma_params;
	struct cpsw_ale_params		ale_params;
2256 2257
	void __iomem			*ss_regs;
	struct resource			*res, *ss_res;
2258
	const struct of_device_id	*of_id;
2259
	struct gpio_descs		*mode;
2260
	u32 slave_offset, sliver_offset, slave_size;
2261
	struct cpsw_common		*cpsw;
2262 2263
	int ret = 0, i;
	int irq;
2264

2265
	cpsw = devm_kzalloc(&pdev->dev, sizeof(struct cpsw_common), GFP_KERNEL);
2266
	cpsw->dev = &pdev->dev;
2267

2268 2269
	ndev = alloc_etherdev(sizeof(struct cpsw_priv));
	if (!ndev) {
2270
		dev_err(&pdev->dev, "error allocating net_device\n");
2271 2272 2273 2274 2275
		return -ENOMEM;
	}

	platform_set_drvdata(pdev, ndev);
	priv = netdev_priv(ndev);
2276
	priv->cpsw = cpsw;
2277 2278 2279
	priv->ndev = ndev;
	priv->dev  = &ndev->dev;
	priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
2280 2281 2282
	cpsw->rx_packet_max = max(rx_packet_max, 128);
	cpsw->cpts = devm_kzalloc(&pdev->dev, sizeof(struct cpts), GFP_KERNEL);
	if (!cpsw->cpts) {
2283
		dev_err(&pdev->dev, "error allocating cpts\n");
2284
		ret = -ENOMEM;
2285 2286
		goto clean_ndev_ret;
	}
2287

2288 2289 2290 2291 2292 2293 2294
	mode = devm_gpiod_get_array_optional(&pdev->dev, "mode", GPIOD_OUT_LOW);
	if (IS_ERR(mode)) {
		ret = PTR_ERR(mode);
		dev_err(&pdev->dev, "gpio request failed, ret %d\n", ret);
		goto clean_ndev_ret;
	}

2295 2296 2297 2298 2299
	/*
	 * This may be required here for child devices.
	 */
	pm_runtime_enable(&pdev->dev);

2300 2301 2302
	/* Select default pin state */
	pinctrl_pm_select_default_state(&pdev->dev);

2303
	if (cpsw_probe_dt(&cpsw->data, pdev)) {
2304
		dev_err(&pdev->dev, "cpsw: platform data missing\n");
2305
		ret = -ENODEV;
2306
		goto clean_runtime_disable_ret;
2307
	}
2308
	data = &cpsw->data;
2309

2310 2311
	if (is_valid_ether_addr(data->slave_data[0].mac_addr)) {
		memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN);
2312
		dev_info(&pdev->dev, "Detected MACID = %pM\n", priv->mac_addr);
2313
	} else {
J
Joe Perches 已提交
2314
		eth_random_addr(priv->mac_addr);
2315
		dev_info(&pdev->dev, "Random MACID = %pM\n", priv->mac_addr);
2316 2317 2318 2319
	}

	memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);

2320
	cpsw->slaves = devm_kzalloc(&pdev->dev,
2321 2322
				    sizeof(struct cpsw_slave) * data->slaves,
				    GFP_KERNEL);
2323
	if (!cpsw->slaves) {
2324 2325
		ret = -ENOMEM;
		goto clean_runtime_disable_ret;
2326 2327
	}
	for (i = 0; i < data->slaves; i++)
2328
		cpsw->slaves[i].slave_num = i;
2329

2330
	cpsw->slaves[0].ndev = ndev;
2331 2332
	priv->emac_port = 0;

2333 2334
	clk = devm_clk_get(&pdev->dev, "fck");
	if (IS_ERR(clk)) {
2335
		dev_err(priv->dev, "fck is not found\n");
2336
		ret = -ENODEV;
2337
		goto clean_runtime_disable_ret;
2338
	}
2339
	cpsw->bus_freq_mhz = clk_get_rate(clk) / 1000000;
2340

2341 2342 2343 2344 2345
	ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ss_regs = devm_ioremap_resource(&pdev->dev, ss_res);
	if (IS_ERR(ss_regs)) {
		ret = PTR_ERR(ss_regs);
		goto clean_runtime_disable_ret;
2346
	}
2347
	cpsw->regs = ss_regs;
2348

2349 2350 2351
	/* Need to enable clocks with runtime PM api to access module
	 * registers
	 */
2352 2353 2354 2355 2356
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(&pdev->dev);
		goto clean_runtime_disable_ret;
	}
2357
	cpsw->version = readl(&cpsw->regs->id_ver);
2358 2359
	pm_runtime_put_sync(&pdev->dev);

2360
	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2361 2362 2363
	cpsw->wr_regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(cpsw->wr_regs)) {
		ret = PTR_ERR(cpsw->wr_regs);
2364
		goto clean_runtime_disable_ret;
2365 2366 2367
	}

	memset(&dma_params, 0, sizeof(dma_params));
2368 2369
	memset(&ale_params, 0, sizeof(ale_params));

2370
	switch (cpsw->version) {
2371
	case CPSW_VERSION_1:
2372
		cpsw->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET;
2373
		cpsw->cpts->reg      = ss_regs + CPSW1_CPTS_OFFSET;
2374
		cpsw->hw_stats	     = ss_regs + CPSW1_HW_STATS;
2375 2376 2377 2378 2379 2380 2381 2382 2383
		dma_params.dmaregs   = ss_regs + CPSW1_CPDMA_OFFSET;
		dma_params.txhdp     = ss_regs + CPSW1_STATERAM_OFFSET;
		ale_params.ale_regs  = ss_regs + CPSW1_ALE_OFFSET;
		slave_offset         = CPSW1_SLAVE_OFFSET;
		slave_size           = CPSW1_SLAVE_SIZE;
		sliver_offset        = CPSW1_SLIVER_OFFSET;
		dma_params.desc_mem_phys = 0;
		break;
	case CPSW_VERSION_2:
2384
	case CPSW_VERSION_3:
2385
	case CPSW_VERSION_4:
2386
		cpsw->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET;
2387
		cpsw->cpts->reg      = ss_regs + CPSW2_CPTS_OFFSET;
2388
		cpsw->hw_stats	     = ss_regs + CPSW2_HW_STATS;
2389 2390 2391 2392 2393 2394 2395
		dma_params.dmaregs   = ss_regs + CPSW2_CPDMA_OFFSET;
		dma_params.txhdp     = ss_regs + CPSW2_STATERAM_OFFSET;
		ale_params.ale_regs  = ss_regs + CPSW2_ALE_OFFSET;
		slave_offset         = CPSW2_SLAVE_OFFSET;
		slave_size           = CPSW2_SLAVE_SIZE;
		sliver_offset        = CPSW2_SLIVER_OFFSET;
		dma_params.desc_mem_phys =
2396
			(u32 __force) ss_res->start + CPSW2_BD_OFFSET;
2397 2398
		break;
	default:
2399
		dev_err(priv->dev, "unknown version 0x%08x\n", cpsw->version);
2400
		ret = -ENODEV;
2401
		goto clean_runtime_disable_ret;
2402
	}
2403 2404 2405 2406
	for (i = 0; i < cpsw->data.slaves; i++) {
		struct cpsw_slave *slave = &cpsw->slaves[i];

		cpsw_slave_init(slave, cpsw, slave_offset, sliver_offset);
2407 2408 2409 2410
		slave_offset  += slave_size;
		sliver_offset += SLIVER_SIZE;
	}

2411
	dma_params.dev		= &pdev->dev;
2412 2413 2414 2415 2416
	dma_params.rxthresh	= dma_params.dmaregs + CPDMA_RXTHRESH;
	dma_params.rxfree	= dma_params.dmaregs + CPDMA_RXFREE;
	dma_params.rxhdp	= dma_params.txhdp + CPDMA_RXHDP;
	dma_params.txcp		= dma_params.txhdp + CPDMA_TXCP;
	dma_params.rxcp		= dma_params.txhdp + CPDMA_RXCP;
2417 2418 2419 2420 2421 2422 2423

	dma_params.num_chan		= data->channels;
	dma_params.has_soft_reset	= true;
	dma_params.min_packet_size	= CPSW_MIN_PACKET_SIZE;
	dma_params.desc_mem_size	= data->bd_ram_size;
	dma_params.desc_align		= 16;
	dma_params.has_ext_regs		= true;
2424
	dma_params.desc_hw_addr         = dma_params.desc_mem_phys;
2425

2426 2427
	cpsw->dma = cpdma_ctlr_create(&dma_params);
	if (!cpsw->dma) {
2428 2429
		dev_err(priv->dev, "error initializing dma\n");
		ret = -ENOMEM;
2430
		goto clean_runtime_disable_ret;
2431 2432
	}

2433
	cpsw->txch = cpdma_chan_create(cpsw->dma, tx_chan_num(0),
2434
				       cpsw_tx_handler);
2435
	cpsw->rxch = cpdma_chan_create(cpsw->dma, rx_chan_num(0),
2436 2437
				       cpsw_rx_handler);

2438
	if (WARN_ON(!cpsw->txch || !cpsw->rxch)) {
2439 2440 2441 2442 2443 2444 2445 2446 2447 2448
		dev_err(priv->dev, "error initializing dma channels\n");
		ret = -ENOMEM;
		goto clean_dma_ret;
	}

	ale_params.dev			= &ndev->dev;
	ale_params.ale_ageout		= ale_ageout;
	ale_params.ale_entries		= data->ale_entries;
	ale_params.ale_ports		= data->slaves;

2449 2450
	cpsw->ale = cpsw_ale_create(&ale_params);
	if (!cpsw->ale) {
2451 2452 2453 2454 2455
		dev_err(priv->dev, "error initializing ale engine\n");
		ret = -ENODEV;
		goto clean_dma_ret;
	}

2456
	ndev->irq = platform_get_irq(pdev, 1);
2457 2458
	if (ndev->irq < 0) {
		dev_err(priv->dev, "error getting irq resource\n");
2459
		ret = ndev->irq;
2460 2461 2462
		goto clean_ale_ret;
	}

2463 2464 2465 2466
	of_id = of_match_device(cpsw_of_mtable, &pdev->dev);
	if (of_id) {
		pdev->id_entry = of_id->data;
		if (pdev->id_entry->driver_data)
2467
			cpsw->quirk_irq = true;
2468 2469
	}

2470 2471 2472 2473 2474 2475 2476
	/* Grab RX and TX IRQs. Note that we also have RX_THRESHOLD and
	 * MISC IRQs which are always kept disabled with this driver so
	 * we will not request them.
	 *
	 * If anyone wants to implement support for those, make sure to
	 * first request and append them to irqs_table array.
	 */
2477

2478
	/* RX IRQ */
2479
	irq = platform_get_irq(pdev, 1);
2480 2481
	if (irq < 0) {
		ret = irq;
2482
		goto clean_ale_ret;
2483
	}
2484

2485
	cpsw->irqs_table[0] = irq;
2486
	ret = devm_request_irq(&pdev->dev, irq, cpsw_rx_interrupt,
2487
			       0, dev_name(&pdev->dev), cpsw);
2488 2489 2490 2491 2492
	if (ret < 0) {
		dev_err(priv->dev, "error attaching irq (%d)\n", ret);
		goto clean_ale_ret;
	}

2493
	/* TX IRQ */
2494
	irq = platform_get_irq(pdev, 2);
2495 2496
	if (irq < 0) {
		ret = irq;
2497
		goto clean_ale_ret;
2498
	}
2499

2500
	cpsw->irqs_table[1] = irq;
2501
	ret = devm_request_irq(&pdev->dev, irq, cpsw_tx_interrupt,
2502
			       0, dev_name(&pdev->dev), cpsw);
2503 2504 2505
	if (ret < 0) {
		dev_err(priv->dev, "error attaching irq (%d)\n", ret);
		goto clean_ale_ret;
2506
	}
2507

2508
	ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
2509 2510

	ndev->netdev_ops = &cpsw_netdev_ops;
2511
	ndev->ethtool_ops = &cpsw_ethtool_ops;
2512 2513
	netif_napi_add(ndev, &cpsw->napi_rx, cpsw_rx_poll, CPSW_POLL_WEIGHT);
	netif_tx_napi_add(ndev, &cpsw->napi_tx, cpsw_tx_poll, CPSW_POLL_WEIGHT);
2514 2515 2516 2517 2518 2519 2520

	/* register the network device */
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ret = register_netdev(ndev);
	if (ret) {
		dev_err(priv->dev, "error registering net device\n");
		ret = -ENODEV;
2521
		goto clean_ale_ret;
2522 2523
	}

2524 2525
	cpsw_notice(priv, probe, "initialized device (regs %pa, irq %d)\n",
		    &ss_res->start, ndev->irq);
2526

2527
	if (cpsw->data.dual_emac) {
2528
		ret = cpsw_probe_dual_emac(priv);
2529 2530
		if (ret) {
			cpsw_err(priv, probe, "error probe slave 2 emac interface\n");
2531
			goto clean_ale_ret;
2532 2533 2534
		}
	}

2535 2536 2537
	return 0;

clean_ale_ret:
2538
	cpsw_ale_destroy(cpsw->ale);
2539
clean_dma_ret:
2540
	cpdma_ctlr_destroy(cpsw->dma);
2541
clean_runtime_disable_ret:
2542
	pm_runtime_disable(&pdev->dev);
2543
clean_ndev_ret:
2544
	free_netdev(priv->ndev);
2545 2546 2547
	return ret;
}

/* cpsw_remove - tear down everything cpsw_probe() set up
 *
 * Unregisters both net_devices (dual EMAC), destroys the ALE and CPDMA
 * state, depopulates DT children and balances the runtime PM enable.
 */
static int cpsw_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	int ret;

	/* resume the hardware so teardown can touch registers */
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(&pdev->dev);
		return ret;
	}

	if (cpsw->data.dual_emac)
		unregister_netdev(cpsw->slaves[1].ndev);
	unregister_netdev(ndev);

	cpsw_ale_destroy(cpsw->ale);
	cpdma_ctlr_destroy(cpsw->dma);
	of_platform_depopulate(&pdev->dev);
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	if (cpsw->data.dual_emac)
		free_netdev(cpsw->slaves[1].ndev);
	free_netdev(ndev);
	return 0;
}

#ifdef CONFIG_PM_SLEEP
/* cpsw_suspend - system sleep: stop every running interface, then move
 * the pins to their sleep state.
 */
static int cpsw_suspend(struct device *dev)
{
	struct platform_device	*pdev = to_platform_device(dev);
	struct net_device	*ndev = platform_get_drvdata(pdev);
	struct cpsw_common	*cpsw = ndev_to_cpsw(ndev);

	if (cpsw->data.dual_emac) {
		int i;

		for (i = 0; i < cpsw->data.slaves; i++) {
			if (netif_running(cpsw->slaves[i].ndev))
				cpsw_ndo_stop(cpsw->slaves[i].ndev);
		}
	} else {
		if (netif_running(ndev))
			cpsw_ndo_stop(ndev);
	}

	/* Select sleep pin state */
	pinctrl_pm_select_sleep_state(dev);

	return 0;
}

static int cpsw_resume(struct device *dev)
{
	struct platform_device	*pdev = to_platform_device(dev);
	struct net_device	*ndev = platform_get_drvdata(pdev);
2604
	struct cpsw_common	*cpsw = netdev_priv(ndev);
2605

2606
	/* Select default pin state */
2607
	pinctrl_pm_select_default_state(dev);
2608

2609
	if (cpsw->data.dual_emac) {
2610 2611
		int i;

2612 2613 2614
		for (i = 0; i < cpsw->data.slaves; i++) {
			if (netif_running(cpsw->slaves[i].ndev))
				cpsw_ndo_open(cpsw->slaves[i].ndev);
2615 2616 2617 2618 2619
		}
	} else {
		if (netif_running(ndev))
			cpsw_ndo_open(ndev);
	}
2620 2621
	return 0;
}
#endif

/* PM ops are empty when CONFIG_PM_SLEEP is off (SIMPLE_DEV_PM_OPS) */
static SIMPLE_DEV_PM_OPS(cpsw_pm_ops, cpsw_suspend, cpsw_resume);

static struct platform_driver cpsw_driver = {
	.driver = {
		.name	 = "cpsw",
		.pm	 = &cpsw_pm_ops,
		.of_match_table = cpsw_of_mtable,
	},
	.probe = cpsw_probe,
	.remove = cpsw_remove,
};

module_platform_driver(cpsw_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cyril Chemparathy <cyril@ti.com>");
MODULE_AUTHOR("Mugunthan V N <mugunthanvnm@ti.com>");
MODULE_DESCRIPTION("TI CPSW Ethernet driver");