/*
 * Texas Instruments Ethernet Switch Driver
 *
 * Copyright (C) 2012 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/irqreturn.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/net_tstamp.h>
#include <linux/phy.h>
#include <linux/phy/phy.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_device.h>
#include <linux/if_vlan.h>
#include <linux/kmemleak.h>
#include <linux/sys_soc.h>

#include <linux/pinctrl/consumer.h>
#include <net/pkt_cls.h>

#include "cpsw.h"
#include "cpsw_ale.h"
#include "cpts.h"
#include "davinci_cpdma.h"

#include <net/pkt_sched.h>

#define CPSW_DEBUG	(NETIF_MSG_HW		| NETIF_MSG_WOL		| \
			 NETIF_MSG_DRV		| NETIF_MSG_LINK	| \
			 NETIF_MSG_IFUP		| NETIF_MSG_INTR	| \
			 NETIF_MSG_PROBE	| NETIF_MSG_TIMER	| \
			 NETIF_MSG_IFDOWN	| NETIF_MSG_RX_ERR	| \
			 NETIF_MSG_TX_ERR	| NETIF_MSG_TX_DONE	| \
			 NETIF_MSG_PKTDATA	| NETIF_MSG_TX_QUEUED	| \
			 NETIF_MSG_RX_STATUS)

#define cpsw_info(priv, type, format, ...)		\
do {								\
	if (netif_msg_##type(priv) && net_ratelimit())		\
		dev_info(priv->dev, format, ## __VA_ARGS__);	\
} while (0)

#define cpsw_err(priv, type, format, ...)		\
do {								\
	if (netif_msg_##type(priv) && net_ratelimit())		\
		dev_err(priv->dev, format, ## __VA_ARGS__);	\
} while (0)

#define cpsw_dbg(priv, type, format, ...)		\
do {								\
	if (netif_msg_##type(priv) && net_ratelimit())		\
		dev_dbg(priv->dev, format, ## __VA_ARGS__);	\
} while (0)

#define cpsw_notice(priv, type, format, ...)		\
do {								\
	if (netif_msg_##type(priv) && net_ratelimit())		\
		dev_notice(priv->dev, format, ## __VA_ARGS__);	\
} while (0)

#define ALE_ALL_PORTS		0x7

#define CPSW_MAJOR_VERSION(reg)		(reg >> 8 & 0x7)
#define CPSW_MINOR_VERSION(reg)		(reg & 0xff)
#define CPSW_RTL_VERSION(reg)		((reg >> 11) & 0x1f)

#define CPSW_VERSION_1		0x19010a
#define CPSW_VERSION_2		0x19010c
#define CPSW_VERSION_3		0x19010f
#define CPSW_VERSION_4		0x190112
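/* For illustration: an ID register value of CPSW_VERSION_2 (0x19010c)
 * decodes via the macros above as major 1, minor 0x0c and rtl 0.
 */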

#define HOST_PORT_NUM		0
#define CPSW_ALE_PORTS_NUM	3
#define SLIVER_SIZE		0x40

#define CPSW1_HOST_PORT_OFFSET	0x028
#define CPSW1_SLAVE_OFFSET	0x050
#define CPSW1_SLAVE_SIZE	0x040
#define CPSW1_CPDMA_OFFSET	0x100
#define CPSW1_STATERAM_OFFSET	0x200
#define CPSW1_HW_STATS		0x400
#define CPSW1_CPTS_OFFSET	0x500
#define CPSW1_ALE_OFFSET	0x600
#define CPSW1_SLIVER_OFFSET	0x700

#define CPSW2_HOST_PORT_OFFSET	0x108
#define CPSW2_SLAVE_OFFSET	0x200
#define CPSW2_SLAVE_SIZE	0x100
#define CPSW2_CPDMA_OFFSET	0x800
#define CPSW2_HW_STATS		0x900
#define CPSW2_STATERAM_OFFSET	0xa00
#define CPSW2_CPTS_OFFSET	0xc00
#define CPSW2_ALE_OFFSET	0xd00
#define CPSW2_SLIVER_OFFSET	0xd80
#define CPSW2_BD_OFFSET		0x2000

#define CPDMA_RXTHRESH		0x0c0
#define CPDMA_RXFREE		0x0e0
#define CPDMA_TXHDP		0x00
#define CPDMA_RXHDP		0x20
#define CPDMA_TXCP		0x40
#define CPDMA_RXCP		0x60

#define CPSW_POLL_WEIGHT	64
#define CPSW_RX_VLAN_ENCAP_HDR_SIZE		4
#define CPSW_MIN_PACKET_SIZE	(VLAN_ETH_ZLEN)
#define CPSW_MAX_PACKET_SIZE	(VLAN_ETH_FRAME_LEN +\
				 ETH_FCS_LEN +\
				 CPSW_RX_VLAN_ENCAP_HDR_SIZE)

#define RX_PRIORITY_MAPPING	0x76543210
#define TX_PRIORITY_MAPPING	0x33221100
#define CPDMA_TX_PRIORITY_MAP	0x76543210

#define CPSW_VLAN_AWARE		BIT(1)
#define CPSW_RX_VLAN_ENCAP	BIT(2)
#define CPSW_ALE_VLAN_AWARE	1

#define CPSW_FIFO_NORMAL_MODE		(0 << 16)
#define CPSW_FIFO_DUAL_MAC_MODE		(1 << 16)
#define CPSW_FIFO_RATE_LIMIT_MODE	(2 << 16)

#define CPSW_INTPACEEN		(0x3f << 16)
#define CPSW_INTPRESCALE_MASK	(0x7FF << 0)
#define CPSW_CMINTMAX_CNT	63
#define CPSW_CMINTMIN_CNT	2
#define CPSW_CMINTMAX_INTVL	(1000 / CPSW_CMINTMIN_CNT)
#define CPSW_CMINTMIN_INTVL	((1000 / CPSW_CMINTMAX_CNT) + 1)
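/* For illustration: with CPSW_CMINTMIN_CNT = 2 and CPSW_CMINTMAX_CNT = 63,
 * the derived limits above allow coalescing intervals from
 * CPSW_CMINTMIN_INTVL = 1000 / 63 + 1 = 16 usecs up to
 * CPSW_CMINTMAX_INTVL = 1000 / 2 = 500 usecs.
 */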

#define cpsw_slave_index(cpsw, priv)				\
		((cpsw->data.dual_emac) ? priv->emac_port :	\
		cpsw->data.active_slave)
#define IRQ_NUM			2
#define CPSW_MAX_QUEUES		8
#define CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT 256
#define CPSW_FIFO_QUEUE_TYPE_SHIFT	16
#define CPSW_FIFO_SHAPE_EN_SHIFT	16
#define CPSW_FIFO_RATE_EN_SHIFT		20
#define CPSW_TC_NUM			4
#define CPSW_FIFO_SHAPERS_NUM		(CPSW_TC_NUM - 1)
#define CPSW_PCT_MASK			0x7f

#define CPSW_RX_VLAN_ENCAP_HDR_PRIO_SHIFT	29
#define CPSW_RX_VLAN_ENCAP_HDR_PRIO_MSK		GENMASK(2, 0)
#define CPSW_RX_VLAN_ENCAP_HDR_VID_SHIFT	16
#define CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_SHIFT	8
#define CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_MSK	GENMASK(1, 0)
enum {
	CPSW_RX_VLAN_ENCAP_HDR_PKT_VLAN_TAG = 0,
	CPSW_RX_VLAN_ENCAP_HDR_PKT_RESERV,
	CPSW_RX_VLAN_ENCAP_HDR_PKT_PRIO_TAG,
	CPSW_RX_VLAN_ENCAP_HDR_PKT_UNTAG,
};
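/* Sketch of the RX VLAN encapsulation word implied by the shifts and
 * masks above (see cpsw_rx_vlan_encap() below):
 *
 *   bits 31..29 - header packet priority
 *   bits 27..16 - VLAN id (12 bits, masked with VLAN_VID_MASK)
 *   bits  9..8  - packet type, one of the enum values above
 */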

static int debug_level;
module_param(debug_level, int, 0);
MODULE_PARM_DESC(debug_level, "cpsw debug level (NETIF_MSG bits)");

static int ale_ageout = 10;
module_param(ale_ageout, int, 0);
MODULE_PARM_DESC(ale_ageout, "cpsw ale ageout interval (seconds)");

static int rx_packet_max = CPSW_MAX_PACKET_SIZE;
module_param(rx_packet_max, int, 0);
MODULE_PARM_DESC(rx_packet_max, "maximum receive packet size (bytes)");

static int descs_pool_size = CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT;
module_param(descs_pool_size, int, 0444);
MODULE_PARM_DESC(descs_pool_size, "Number of CPDMA CPPI descriptors in pool");

struct cpsw_wr_regs {
	u32	id_ver;
	u32	soft_reset;
	u32	control;
	u32	int_control;
	u32	rx_thresh_en;
	u32	rx_en;
	u32	tx_en;
	u32	misc_en;
	u32	mem_allign1[8];
	u32	rx_thresh_stat;
	u32	rx_stat;
	u32	tx_stat;
	u32	misc_stat;
	u32	mem_allign2[8];
	u32	rx_imax;
	u32	tx_imax;
};

struct cpsw_ss_regs {
	u32	id_ver;
	u32	control;
	u32	soft_reset;
	u32	stat_port_en;
	u32	ptype;
	u32	soft_idle;
	u32	thru_rate;
	u32	gap_thresh;
	u32	tx_start_wds;
	u32	flow_control;
	u32	vlan_ltype;
	u32	ts_ltype;
	u32	dlr_ltype;
};

/* CPSW_PORT_V1 */
#define CPSW1_MAX_BLKS      0x00 /* Maximum FIFO Blocks */
#define CPSW1_BLK_CNT       0x04 /* FIFO Block Usage Count (Read Only) */
#define CPSW1_TX_IN_CTL     0x08 /* Transmit FIFO Control */
#define CPSW1_PORT_VLAN     0x0c /* VLAN Register */
#define CPSW1_TX_PRI_MAP    0x10 /* Tx Header Priority to Switch Pri Mapping */
#define CPSW1_TS_CTL        0x14 /* Time Sync Control */
#define CPSW1_TS_SEQ_LTYPE  0x18 /* Time Sync Sequence ID Offset and Msg Type */
#define CPSW1_TS_VLAN       0x1c /* Time Sync VLAN1 and VLAN2 */

/* CPSW_PORT_V2 */
#define CPSW2_CONTROL       0x00 /* Control Register */
#define CPSW2_MAX_BLKS      0x08 /* Maximum FIFO Blocks */
#define CPSW2_BLK_CNT       0x0c /* FIFO Block Usage Count (Read Only) */
#define CPSW2_TX_IN_CTL     0x10 /* Transmit FIFO Control */
#define CPSW2_PORT_VLAN     0x14 /* VLAN Register */
#define CPSW2_TX_PRI_MAP    0x18 /* Tx Header Priority to Switch Pri Mapping */
#define CPSW2_TS_SEQ_MTYPE  0x1c /* Time Sync Sequence ID Offset and Msg Type */

/* CPSW_PORT_V1 and V2 */
#define SA_LO               0x20 /* CPGMAC_SL Source Address Low */
#define SA_HI               0x24 /* CPGMAC_SL Source Address High */
#define SEND_PERCENT        0x28 /* Transmit Queue Send Percentages */

/* CPSW_PORT_V2 only */
#define RX_DSCP_PRI_MAP0    0x30 /* Rx DSCP Priority to Rx Packet Mapping */
#define RX_DSCP_PRI_MAP1    0x34 /* Rx DSCP Priority to Rx Packet Mapping */
#define RX_DSCP_PRI_MAP2    0x38 /* Rx DSCP Priority to Rx Packet Mapping */
#define RX_DSCP_PRI_MAP3    0x3c /* Rx DSCP Priority to Rx Packet Mapping */
#define RX_DSCP_PRI_MAP4    0x40 /* Rx DSCP Priority to Rx Packet Mapping */
#define RX_DSCP_PRI_MAP5    0x44 /* Rx DSCP Priority to Rx Packet Mapping */
#define RX_DSCP_PRI_MAP6    0x48 /* Rx DSCP Priority to Rx Packet Mapping */
#define RX_DSCP_PRI_MAP7    0x4c /* Rx DSCP Priority to Rx Packet Mapping */

/* Bit definitions for the CPSW2_CONTROL register */
#define PASS_PRI_TAGGED     BIT(24) /* Pass Priority Tagged */
#define VLAN_LTYPE2_EN      BIT(21) /* VLAN LTYPE 2 enable */
#define VLAN_LTYPE1_EN      BIT(20) /* VLAN LTYPE 1 enable */
#define DSCP_PRI_EN         BIT(16) /* DSCP Priority Enable */
#define TS_107              BIT(15) /* Time Sync Dest IP Address 107 */
#define TS_320              BIT(14) /* Time Sync Dest Port 320 enable */
#define TS_319              BIT(13) /* Time Sync Dest Port 319 enable */
#define TS_132              BIT(12) /* Time Sync Dest IP Addr 132 enable */
#define TS_131              BIT(11) /* Time Sync Dest IP Addr 131 enable */
#define TS_130              BIT(10) /* Time Sync Dest IP Addr 130 enable */
#define TS_129              BIT(9)  /* Time Sync Dest IP Addr 129 enable */
#define TS_TTL_NONZERO      BIT(8)  /* Time Sync Time To Live Non-zero enable */
#define TS_ANNEX_F_EN       BIT(6)  /* Time Sync Annex F enable */
#define TS_ANNEX_D_EN       BIT(4)  /* Time Sync Annex D enable */
#define TS_LTYPE2_EN        BIT(3)  /* Time Sync LTYPE 2 enable */
#define TS_LTYPE1_EN        BIT(2)  /* Time Sync LTYPE 1 enable */
#define TS_TX_EN            BIT(1)  /* Time Sync Transmit Enable */
#define TS_RX_EN            BIT(0)  /* Time Sync Receive Enable */

#define CTRL_V2_TS_BITS \
	(TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
	 TS_TTL_NONZERO  | TS_ANNEX_D_EN | TS_LTYPE1_EN | VLAN_LTYPE1_EN)

#define CTRL_V2_ALL_TS_MASK (CTRL_V2_TS_BITS | TS_TX_EN | TS_RX_EN)
#define CTRL_V2_TX_TS_BITS  (CTRL_V2_TS_BITS | TS_TX_EN)
#define CTRL_V2_RX_TS_BITS  (CTRL_V2_TS_BITS | TS_RX_EN)


#define CTRL_V3_TS_BITS \
	(TS_107 | TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
	 TS_TTL_NONZERO | TS_ANNEX_F_EN | TS_ANNEX_D_EN |\
	 TS_LTYPE1_EN | VLAN_LTYPE1_EN)

#define CTRL_V3_ALL_TS_MASK (CTRL_V3_TS_BITS | TS_TX_EN | TS_RX_EN)
#define CTRL_V3_TX_TS_BITS  (CTRL_V3_TS_BITS | TS_TX_EN)
#define CTRL_V3_RX_TS_BITS  (CTRL_V3_TS_BITS | TS_RX_EN)

/* Bit definitions for the CPSW2_TS_SEQ_MTYPE register */
#define TS_SEQ_ID_OFFSET_SHIFT   (16)    /* Time Sync Sequence ID Offset */
#define TS_SEQ_ID_OFFSET_MASK    (0x3f)
#define TS_MSG_TYPE_EN_SHIFT     (0)     /* Time Sync Message Type Enable */
#define TS_MSG_TYPE_EN_MASK      (0xffff)

/* The PTP event messages - Sync, Delay_Req, Pdelay_Req, and Pdelay_Resp. */
#define EVENT_MSG_BITS ((1<<0) | (1<<1) | (1<<2) | (1<<3))

/* Bit definitions for the CPSW1_TS_CTL register */
#define CPSW_V1_TS_RX_EN		BIT(0)
#define CPSW_V1_TS_TX_EN		BIT(4)
#define CPSW_V1_MSG_TYPE_OFS		16

/* Bit definitions for the CPSW1_TS_SEQ_LTYPE register */
#define CPSW_V1_SEQ_ID_OFS_SHIFT	16

#define CPSW_MAX_BLKS_TX		15
#define CPSW_MAX_BLKS_TX_SHIFT		4
#define CPSW_MAX_BLKS_RX		5

struct cpsw_host_regs {
	u32	max_blks;
	u32	blk_cnt;
	u32	tx_in_ctl;
	u32	port_vlan;
	u32	tx_pri_map;
	u32	cpdma_tx_pri_map;
	u32	cpdma_rx_chan_map;
};

struct cpsw_sliver_regs {
	u32	id_ver;
	u32	mac_control;
	u32	mac_status;
	u32	soft_reset;
	u32	rx_maxlen;
	u32	__reserved_0;
	u32	rx_pause;
	u32	tx_pause;
	u32	__reserved_1;
	u32	rx_pri_map;
};

struct cpsw_hw_stats {
	u32	rxgoodframes;
	u32	rxbroadcastframes;
	u32	rxmulticastframes;
	u32	rxpauseframes;
	u32	rxcrcerrors;
	u32	rxaligncodeerrors;
	u32	rxoversizedframes;
	u32	rxjabberframes;
	u32	rxundersizedframes;
	u32	rxfragments;
	u32	__pad_0[2];
	u32	rxoctets;
	u32	txgoodframes;
	u32	txbroadcastframes;
	u32	txmulticastframes;
	u32	txpauseframes;
	u32	txdeferredframes;
	u32	txcollisionframes;
	u32	txsinglecollframes;
	u32	txmultcollframes;
	u32	txexcessivecollisions;
	u32	txlatecollisions;
	u32	txunderrun;
	u32	txcarriersenseerrors;
	u32	txoctets;
	u32	octetframes64;
	u32	octetframes65t127;
	u32	octetframes128t255;
	u32	octetframes256t511;
	u32	octetframes512t1023;
	u32	octetframes1024tup;
	u32	netoctets;
	u32	rxsofoverruns;
	u32	rxmofoverruns;
	u32	rxdmaoverruns;
};

struct cpsw_slave_data {
	struct device_node *phy_node;
	char		phy_id[MII_BUS_ID_SIZE];
	int		phy_if;
	u8		mac_addr[ETH_ALEN];
	u16		dual_emac_res_vlan;	/* Reserved VLAN for DualEMAC */
	struct phy	*ifphy;
};

struct cpsw_platform_data {
	struct cpsw_slave_data	*slave_data;
	u32	ss_reg_ofs;	/* Subsystem control register offset */
	u32	channels;	/* number of cpdma channels (symmetric) */
	u32	slaves;		/* number of slave cpgmac ports */
	u32	active_slave; /* time stamping, ethtool and SIOCGMIIPHY slave */
	u32	ale_entries;	/* ale table size */
	u32	bd_ram_size;  /*buffer descriptor ram size */
	u32	mac_control;	/* Mac control register */
	u16	default_vlan;	/* Def VLAN for ALE lookup in VLAN aware mode*/
	bool	dual_emac;	/* Enable Dual EMAC mode */
};

struct cpsw_slave {
	void __iomem			*regs;
	struct cpsw_sliver_regs __iomem	*sliver;
	int				slave_num;
	u32				mac_control;
	struct cpsw_slave_data		*data;
	struct phy_device		*phy;
	struct net_device		*ndev;
	u32				port_vlan;
};

static inline u32 slave_read(struct cpsw_slave *slave, u32 offset)
{
	return readl_relaxed(slave->regs + offset);
}

static inline void slave_write(struct cpsw_slave *slave, u32 val, u32 offset)
{
	writel_relaxed(val, slave->regs + offset);
}

struct cpsw_vector {
	struct cpdma_chan *ch;
	int budget;
};

struct cpsw_common {
	struct device			*dev;
	struct cpsw_platform_data	data;
	struct napi_struct		napi_rx;
	struct napi_struct		napi_tx;
	struct cpsw_ss_regs __iomem	*regs;
	struct cpsw_wr_regs __iomem	*wr_regs;
	u8 __iomem			*hw_stats;
	struct cpsw_host_regs __iomem	*host_port_regs;
	u32				version;
	u32				coal_intvl;
	u32				bus_freq_mhz;
	int				rx_packet_max;
	struct cpsw_slave		*slaves;
	struct cpdma_ctlr		*dma;
	struct cpsw_vector		txv[CPSW_MAX_QUEUES];
	struct cpsw_vector		rxv[CPSW_MAX_QUEUES];
	struct cpsw_ale			*ale;
	bool				quirk_irq;
	bool				rx_irq_disabled;
	bool				tx_irq_disabled;
	u32 irqs_table[IRQ_NUM];
	struct cpts			*cpts;
	int				rx_ch_num, tx_ch_num;
	int				speed;
	int				usage_count;
};

struct cpsw_priv {
	struct net_device		*ndev;
	struct device			*dev;
	u32				msg_enable;
	u8				mac_addr[ETH_ALEN];
	bool				rx_pause;
	bool				tx_pause;
	bool				mqprio_hw;
	int				fifo_bw[CPSW_TC_NUM];
	int				shp_cfg_speed;
	int				tx_ts_enabled;
	int				rx_ts_enabled;
	u32 emac_port;
	struct cpsw_common *cpsw;
};

struct cpsw_stats {
	char stat_string[ETH_GSTRING_LEN];
	int type;
	int sizeof_stat;
	int stat_offset;
};

enum {
	CPSW_STATS,
	CPDMA_RX_STATS,
	CPDMA_TX_STATS,
};

#define CPSW_STAT(m)		CPSW_STATS,				\
				FIELD_SIZEOF(struct cpsw_hw_stats, m), \
				offsetof(struct cpsw_hw_stats, m)
#define CPDMA_RX_STAT(m)	CPDMA_RX_STATS,				   \
				FIELD_SIZEOF(struct cpdma_chan_stats, m), \
				offsetof(struct cpdma_chan_stats, m)
#define CPDMA_TX_STAT(m)	CPDMA_TX_STATS,				   \
				FIELD_SIZEOF(struct cpdma_chan_stats, m), \
				offsetof(struct cpdma_chan_stats, m)

static const struct cpsw_stats cpsw_gstrings_stats[] = {
	{ "Good Rx Frames", CPSW_STAT(rxgoodframes) },
	{ "Broadcast Rx Frames", CPSW_STAT(rxbroadcastframes) },
	{ "Multicast Rx Frames", CPSW_STAT(rxmulticastframes) },
	{ "Pause Rx Frames", CPSW_STAT(rxpauseframes) },
	{ "Rx CRC Errors", CPSW_STAT(rxcrcerrors) },
	{ "Rx Align/Code Errors", CPSW_STAT(rxaligncodeerrors) },
	{ "Oversize Rx Frames", CPSW_STAT(rxoversizedframes) },
	{ "Rx Jabbers", CPSW_STAT(rxjabberframes) },
	{ "Undersize (Short) Rx Frames", CPSW_STAT(rxundersizedframes) },
	{ "Rx Fragments", CPSW_STAT(rxfragments) },
	{ "Rx Octets", CPSW_STAT(rxoctets) },
	{ "Good Tx Frames", CPSW_STAT(txgoodframes) },
	{ "Broadcast Tx Frames", CPSW_STAT(txbroadcastframes) },
	{ "Multicast Tx Frames", CPSW_STAT(txmulticastframes) },
	{ "Pause Tx Frames", CPSW_STAT(txpauseframes) },
	{ "Deferred Tx Frames", CPSW_STAT(txdeferredframes) },
	{ "Collisions", CPSW_STAT(txcollisionframes) },
	{ "Single Collision Tx Frames", CPSW_STAT(txsinglecollframes) },
	{ "Multiple Collision Tx Frames", CPSW_STAT(txmultcollframes) },
	{ "Excessive Collisions", CPSW_STAT(txexcessivecollisions) },
	{ "Late Collisions", CPSW_STAT(txlatecollisions) },
	{ "Tx Underrun", CPSW_STAT(txunderrun) },
	{ "Carrier Sense Errors", CPSW_STAT(txcarriersenseerrors) },
	{ "Tx Octets", CPSW_STAT(txoctets) },
	{ "Rx + Tx 64 Octet Frames", CPSW_STAT(octetframes64) },
	{ "Rx + Tx 65-127 Octet Frames", CPSW_STAT(octetframes65t127) },
	{ "Rx + Tx 128-255 Octet Frames", CPSW_STAT(octetframes128t255) },
	{ "Rx + Tx 256-511 Octet Frames", CPSW_STAT(octetframes256t511) },
	{ "Rx + Tx 512-1023 Octet Frames", CPSW_STAT(octetframes512t1023) },
	{ "Rx + Tx 1024-Up Octet Frames", CPSW_STAT(octetframes1024tup) },
	{ "Net Octets", CPSW_STAT(netoctets) },
	{ "Rx Start of Frame Overruns", CPSW_STAT(rxsofoverruns) },
	{ "Rx Middle of Frame Overruns", CPSW_STAT(rxmofoverruns) },
	{ "Rx DMA Overruns", CPSW_STAT(rxdmaoverruns) },
};

static const struct cpsw_stats cpsw_gstrings_ch_stats[] = {
	{ "head_enqueue", CPDMA_RX_STAT(head_enqueue) },
	{ "tail_enqueue", CPDMA_RX_STAT(tail_enqueue) },
	{ "pad_enqueue", CPDMA_RX_STAT(pad_enqueue) },
	{ "misqueued", CPDMA_RX_STAT(misqueued) },
	{ "desc_alloc_fail", CPDMA_RX_STAT(desc_alloc_fail) },
	{ "pad_alloc_fail", CPDMA_RX_STAT(pad_alloc_fail) },
	{ "runt_receive_buf", CPDMA_RX_STAT(runt_receive_buff) },
	{ "runt_transmit_buf", CPDMA_RX_STAT(runt_transmit_buff) },
	{ "empty_dequeue", CPDMA_RX_STAT(empty_dequeue) },
	{ "busy_dequeue", CPDMA_RX_STAT(busy_dequeue) },
	{ "good_dequeue", CPDMA_RX_STAT(good_dequeue) },
	{ "requeue", CPDMA_RX_STAT(requeue) },
	{ "teardown_dequeue", CPDMA_RX_STAT(teardown_dequeue) },
};

#define CPSW_STATS_COMMON_LEN	ARRAY_SIZE(cpsw_gstrings_stats)
#define CPSW_STATS_CH_LEN	ARRAY_SIZE(cpsw_gstrings_ch_stats)

#define ndev_to_cpsw(ndev) (((struct cpsw_priv *)netdev_priv(ndev))->cpsw)
#define napi_to_cpsw(napi)	container_of(napi, struct cpsw_common, napi)
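/* Note: the macro argument deliberately reuses the member name, so
 * napi_to_cpsw(napi_rx) expands to
 * container_of(napi_rx, struct cpsw_common, napi_rx); it must only be
 * called with napi_rx or napi_tx.
 */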
#define for_each_slave(priv, func, arg...)				\
	do {								\
		struct cpsw_slave *slave;				\
		struct cpsw_common *cpsw = (priv)->cpsw;		\
		int n;							\
		if (cpsw->data.dual_emac)				\
			(func)((cpsw)->slaves + priv->emac_port, ##arg);\
		else							\
			for (n = cpsw->data.slaves,			\
					slave = cpsw->slaves;		\
					n; n--)				\
				(func)(slave++, ##arg);			\
	} while (0)

static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
				    __be16 proto, u16 vid);

static inline int cpsw_get_slave_port(u32 slave_num)
{
	return slave_num + 1;
}

static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	struct cpsw_ale *ale = cpsw->ale;
	int i;

	if (cpsw->data.dual_emac) {
		bool flag = false;

		/* Enabling promiscuous mode for one interface is common
		 * to both interfaces, as they share the same hardware
		 * resource.
		 */
		for (i = 0; i < cpsw->data.slaves; i++)
			if (cpsw->slaves[i].ndev->flags & IFF_PROMISC)
				flag = true;

		if (!enable && flag) {
			enable = true;
			dev_err(&ndev->dev, "promiscuity not disabled as the other interface is still in promiscuity mode\n");
		}

		if (enable) {
			/* Enable Bypass */
			cpsw_ale_control_set(ale, 0, ALE_BYPASS, 1);

			dev_dbg(&ndev->dev, "promiscuity enabled\n");
		} else {
			/* Disable Bypass */
			cpsw_ale_control_set(ale, 0, ALE_BYPASS, 0);
			dev_dbg(&ndev->dev, "promiscuity disabled\n");
		}
	} else {
		if (enable) {
			unsigned long timeout = jiffies + HZ;

			/* Disable Learn for all ports (host is port 0 and slaves are port 1 and up) */
			for (i = 0; i <= cpsw->data.slaves; i++) {
				cpsw_ale_control_set(ale, i,
						     ALE_PORT_NOLEARN, 1);
				cpsw_ale_control_set(ale, i,
						     ALE_PORT_NO_SA_UPDATE, 1);
			}

			/* Clear All Untouched entries */
			cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
			do {
				cpu_relax();
				if (cpsw_ale_control_get(ale, 0, ALE_AGEOUT))
					break;
			} while (time_after(timeout, jiffies));
			cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);

			/* Clear all mcast from ALE */
			cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS, -1);
			__hw_addr_ref_unsync_dev(&ndev->mc, ndev, NULL);

			/* Flood All Unicast Packets to Host port */
			cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
			dev_dbg(&ndev->dev, "promiscuity enabled\n");
		} else {
			/* Don't Flood All Unicast Packets to Host port */
			cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0);

			/* Enable Learn for all ports (host is port 0 and slaves are port 1 and up) */
			for (i = 0; i <= cpsw->data.slaves; i++) {
				cpsw_ale_control_set(ale, i,
						     ALE_PORT_NOLEARN, 0);
				cpsw_ale_control_set(ale, i,
						     ALE_PORT_NO_SA_UPDATE, 0);
			}
			dev_dbg(&ndev->dev, "promiscuity disabled\n");
		}
	}
}

struct addr_sync_ctx {
	struct net_device *ndev;
	const u8 *addr;		/* address to be synched */
	int consumed;		/* number of address instances */
	int flush;		/* flush flag */
};

/**
 * cpsw_set_mc - add a multicast ALE entry if it is not present yet, or
 * delete it when it is no longer needed
 * @ndev: device to sync
 * @addr: address to be added or deleted
 * @vid: vlan id, if vid < 0 set/unset address for real device
 * @add: add address if the flag is set or remove otherwise
 */
static int cpsw_set_mc(struct net_device *ndev, const u8 *addr,
		       int vid, int add)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int mask, flags, ret;

	if (vid < 0) {
		if (cpsw->data.dual_emac)
			vid = cpsw->slaves[priv->emac_port].port_vlan;
		else
			vid = 0;
	}

	mask = cpsw->data.dual_emac ? ALE_PORT_HOST : ALE_ALL_PORTS;
	flags = vid ? ALE_VLAN : 0;

	if (add)
		ret = cpsw_ale_add_mcast(cpsw->ale, addr, mask, flags, vid, 0);
	else
		ret = cpsw_ale_del_mcast(cpsw->ale, addr, 0, flags, vid);

	return ret;
}
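/* e.g. cpsw_set_mc(ndev, addr, -1, 1) adds @addr for the real device:
 * vid < 0 resolves to the slave's dual EMAC port VLAN, or to 0 in
 * non-dual-EMAC mode, before the ALE entry is updated.
 */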

static int cpsw_update_vlan_mc(struct net_device *vdev, int vid, void *ctx)
{
	struct addr_sync_ctx *sync_ctx = ctx;
	struct netdev_hw_addr *ha;
	int found = 0, ret = 0;

	if (!vdev || !(vdev->flags & IFF_UP))
		return 0;

	/* vlan address is relevant if its sync_cnt != 0 */
	netdev_for_each_mc_addr(ha, vdev) {
		if (ether_addr_equal(ha->addr, sync_ctx->addr)) {
			found = ha->sync_cnt;
			break;
		}
	}

	if (found)
		sync_ctx->consumed++;

	if (sync_ctx->flush) {
		if (!found)
			cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0);
		return 0;
	}

	if (found)
		ret = cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 1);

	return ret;
}

static int cpsw_add_mc_addr(struct net_device *ndev, const u8 *addr, int num)
{
	struct addr_sync_ctx sync_ctx;
	int ret;

	sync_ctx.consumed = 0;
	sync_ctx.addr = addr;
	sync_ctx.ndev = ndev;
	sync_ctx.flush = 0;

	ret = vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx);
	if (sync_ctx.consumed < num && !ret)
		ret = cpsw_set_mc(ndev, addr, -1, 1);

	return ret;
}

static int cpsw_del_mc_addr(struct net_device *ndev, const u8 *addr, int num)
{
	struct addr_sync_ctx sync_ctx;

	sync_ctx.consumed = 0;
	sync_ctx.addr = addr;
	sync_ctx.ndev = ndev;
	sync_ctx.flush = 1;

	vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx);
	if (sync_ctx.consumed == num)
		cpsw_set_mc(ndev, addr, -1, 0);

	return 0;
}

static int cpsw_purge_vlan_mc(struct net_device *vdev, int vid, void *ctx)
{
	struct addr_sync_ctx *sync_ctx = ctx;
	struct netdev_hw_addr *ha;
	int found = 0;

	if (!vdev || !(vdev->flags & IFF_UP))
		return 0;

	/* vlan address is relevant if its sync_cnt != 0 */
	netdev_for_each_mc_addr(ha, vdev) {
		if (ether_addr_equal(ha->addr, sync_ctx->addr)) {
			found = ha->sync_cnt;
			break;
		}
	}

	if (!found)
		return 0;

	sync_ctx->consumed++;
	cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0);
	return 0;
}

static int cpsw_purge_all_mc(struct net_device *ndev, const u8 *addr, int num)
{
	struct addr_sync_ctx sync_ctx;

	sync_ctx.addr = addr;
	sync_ctx.ndev = ndev;
	sync_ctx.consumed = 0;

	vlan_for_each(ndev, cpsw_purge_vlan_mc, &sync_ctx);
	if (sync_ctx.consumed < num)
		cpsw_set_mc(ndev, addr, -1, 0);

	return 0;
}

static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	if (ndev->flags & IFF_PROMISC) {
		/* Enable promiscuous mode */
		cpsw_set_promiscious(ndev, true);
		cpsw_ale_set_allmulti(cpsw->ale, IFF_ALLMULTI);
		return;
	} else {
		/* Disable promiscuous mode */
		cpsw_set_promiscious(ndev, false);
	}

	/* Restore allmulti on vlans if necessary */
	cpsw_ale_set_allmulti(cpsw->ale, ndev->flags & IFF_ALLMULTI);

	/* add/remove mcast address either for real netdev or for vlan */
	__hw_addr_ref_sync_dev(&ndev->mc, ndev, cpsw_add_mc_addr,
			       cpsw_del_mc_addr);
}

static void cpsw_intr_enable(struct cpsw_common *cpsw)
{
	writel_relaxed(0xFF, &cpsw->wr_regs->tx_en);
	writel_relaxed(0xFF, &cpsw->wr_regs->rx_en);

	cpdma_ctlr_int_ctrl(cpsw->dma, true);
}

static void cpsw_intr_disable(struct cpsw_common *cpsw)
{
	writel_relaxed(0, &cpsw->wr_regs->tx_en);
	writel_relaxed(0, &cpsw->wr_regs->rx_en);

	cpdma_ctlr_int_ctrl(cpsw->dma, false);
}

static void cpsw_tx_handler(void *token, int len, int status)
{
	struct netdev_queue	*txq;
	struct sk_buff		*skb = token;
	struct net_device	*ndev = skb->dev;
	struct cpsw_common	*cpsw = ndev_to_cpsw(ndev);

	/* Check whether the queue is stopped due to stalled tx dma; if the
	 * queue is stopped then start the queue as we have free desc for tx
	 */
	txq = netdev_get_tx_queue(ndev, skb_get_queue_mapping(skb));
	if (unlikely(netif_tx_queue_stopped(txq)))
		netif_tx_wake_queue(txq);

	cpts_tx_timestamp(cpsw->cpts, skb);
	ndev->stats.tx_packets++;
	ndev->stats.tx_bytes += len;
	dev_kfree_skb_any(skb);
}

static void cpsw_rx_vlan_encap(struct sk_buff *skb)
{
	struct cpsw_priv *priv = netdev_priv(skb->dev);
	struct cpsw_common *cpsw = priv->cpsw;
	u32 rx_vlan_encap_hdr = *((u32 *)skb->data);
	u16 vtag, vid, prio, pkt_type;

	/* Remove VLAN header encapsulation word */
	skb_pull(skb, CPSW_RX_VLAN_ENCAP_HDR_SIZE);

	pkt_type = (rx_vlan_encap_hdr >>
		    CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_SHIFT) &
		    CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_MSK;
	/* Ignore unknown & Priority-tagged packets*/
	if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_RESERV ||
	    pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_PRIO_TAG)
		return;

	vid = (rx_vlan_encap_hdr >>
	       CPSW_RX_VLAN_ENCAP_HDR_VID_SHIFT) &
	       VLAN_VID_MASK;
	/* Ignore vid 0 and pass packet as is */
	if (!vid)
		return;
	/* Ignore default vlans in dual mac mode */
	if (cpsw->data.dual_emac &&
	    vid == cpsw->slaves[priv->emac_port].port_vlan)
		return;

	prio = (rx_vlan_encap_hdr >>
		CPSW_RX_VLAN_ENCAP_HDR_PRIO_SHIFT) &
		CPSW_RX_VLAN_ENCAP_HDR_PRIO_MSK;

	vtag = (prio << VLAN_PRIO_SHIFT) | vid;
	__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);

	/* strip vlan tag for VLAN-tagged packet */
	if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_VLAN_TAG) {
		memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
		skb_pull(skb, VLAN_HLEN);
	}
}

static void cpsw_rx_handler(void *token, int len, int status)
{
	struct cpdma_chan	*ch;
	struct sk_buff		*skb = token;
	struct sk_buff		*new_skb;
	struct net_device	*ndev = skb->dev;
	int			ret = 0, port;
	struct cpsw_common	*cpsw = ndev_to_cpsw(ndev);
	struct cpsw_priv	*priv;

	if (cpsw->data.dual_emac) {
		port = CPDMA_RX_SOURCE_PORT(status);
		if (port) {
			ndev = cpsw->slaves[--port].ndev;
			skb->dev = ndev;
		}
	}

	if (unlikely(status < 0) || unlikely(!netif_running(ndev))) {
		/* In dual emac mode check for all interfaces */
		if (cpsw->data.dual_emac && cpsw->usage_count &&
		    (status >= 0)) {
			/* The packet received is for the interface which
			 * is already down and the other interface is up
			 * and running, instead of freeing which results
			 * in reducing of the number of rx descriptor in
			 * DMA engine, requeue skb back to cpdma.
			 */
			new_skb = skb;
			goto requeue;
		}

		/* the interface is going down, skbs are purged */
		dev_kfree_skb_any(skb);
		return;
	}

	new_skb = netdev_alloc_skb_ip_align(ndev, cpsw->rx_packet_max);
	if (new_skb) {
		skb_copy_queue_mapping(new_skb, skb);
		skb_put(skb, len);
		if (status & CPDMA_RX_VLAN_ENCAP)
			cpsw_rx_vlan_encap(skb);
		priv = netdev_priv(ndev);
		if (priv->rx_ts_enabled)
			cpts_rx_timestamp(cpsw->cpts, skb);
		skb->protocol = eth_type_trans(skb, ndev);
		netif_receive_skb(skb);
		ndev->stats.rx_bytes += len;
		ndev->stats.rx_packets++;
		kmemleak_not_leak(new_skb);
	} else {
		ndev->stats.rx_dropped++;
		new_skb = skb;
	}

requeue:
	if (netif_dormant(ndev)) {
		dev_kfree_skb_any(new_skb);
		return;
	}

	ch = cpsw->rxv[skb_get_queue_mapping(new_skb)].ch;
	ret = cpdma_chan_submit(ch, new_skb, new_skb->data,
				skb_tailroom(new_skb), 0);
	if (WARN_ON(ret < 0))
		dev_kfree_skb_any(new_skb);
}

static void cpsw_split_res(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	u32 consumed_rate = 0, bigest_rate = 0;
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_vector *txv = cpsw->txv;
	int i, ch_weight, rlim_ch_num = 0;
	int budget, bigest_rate_ch = 0;
	u32 ch_rate, max_rate;
	int ch_budget = 0;

	for (i = 0; i < cpsw->tx_ch_num; i++) {
		ch_rate = cpdma_chan_get_rate(txv[i].ch);
		if (!ch_rate)
			continue;

		rlim_ch_num++;
		consumed_rate += ch_rate;
	}

	if (cpsw->tx_ch_num == rlim_ch_num) {
		max_rate = consumed_rate;
	} else if (!rlim_ch_num) {
		ch_budget = CPSW_POLL_WEIGHT / cpsw->tx_ch_num;
		bigest_rate = 0;
		max_rate = consumed_rate;
	} else {
		max_rate = cpsw->speed * 1000;

		/* if max_rate is less than expected due to reduced link speed,
		 * split proportionally according to next potential max speed
		 */
		if (max_rate < consumed_rate)
			max_rate *= 10;

		if (max_rate < consumed_rate)
			max_rate *= 10;

		ch_budget = (consumed_rate * CPSW_POLL_WEIGHT) / max_rate;
		ch_budget = (CPSW_POLL_WEIGHT - ch_budget) /
			    (cpsw->tx_ch_num - rlim_ch_num);
		bigest_rate = (max_rate - consumed_rate) /
			      (cpsw->tx_ch_num - rlim_ch_num);
	}
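	/* Worked example (illustrative): CPSW_POLL_WEIGHT = 64, two tx
	 * channels on a 1000Mbps link, ch0 rate limited to 100000 kbps and
	 * ch1 unlimited: max_rate = 1000000, ch0 budget =
	 * 100000 * 64 / 1000000 = 6, ch1 budget = (64 - 6) / 1 = 58; any
	 * remainder after the loop below tops up the biggest-rate channel.
	 */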

	/* split tx weight/budget */
	budget = CPSW_POLL_WEIGHT;
	for (i = 0; i < cpsw->tx_ch_num; i++) {
		ch_rate = cpdma_chan_get_rate(txv[i].ch);
		if (ch_rate) {
			txv[i].budget = (ch_rate * CPSW_POLL_WEIGHT) / max_rate;
			if (!txv[i].budget)
				txv[i].budget++;
			if (ch_rate > bigest_rate) {
				bigest_rate_ch = i;
				bigest_rate = ch_rate;
			}

			ch_weight = (ch_rate * 100) / max_rate;
			if (!ch_weight)
				ch_weight++;
			cpdma_chan_set_weight(cpsw->txv[i].ch, ch_weight);
		} else {
			txv[i].budget = ch_budget;
			if (!bigest_rate_ch)
				bigest_rate_ch = i;
			cpdma_chan_set_weight(cpsw->txv[i].ch, 0);
		}

		budget -= txv[i].budget;
	}

	if (budget)
		txv[bigest_rate_ch].budget += budget;

	/* split rx budget */
	budget = CPSW_POLL_WEIGHT;
	ch_budget = budget / cpsw->rx_ch_num;
	for (i = 0; i < cpsw->rx_ch_num; i++) {
		cpsw->rxv[i].budget = ch_budget;
		budget -= ch_budget;
	}

	if (budget)
		cpsw->rxv[0].budget += budget;
}

static irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id)
{
	struct cpsw_common *cpsw = dev_id;

	writel(0, &cpsw->wr_regs->tx_en);
	cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_TX);

	if (cpsw->quirk_irq) {
		disable_irq_nosync(cpsw->irqs_table[1]);
		cpsw->tx_irq_disabled = true;
	}

	napi_schedule(&cpsw->napi_tx);
	return IRQ_HANDLED;
}

static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
{
	struct cpsw_common *cpsw = dev_id;

	cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_RX);
	writel(0, &cpsw->wr_regs->rx_en);

	if (cpsw->quirk_irq) {
		disable_irq_nosync(cpsw->irqs_table[0]);
		cpsw->rx_irq_disabled = true;
	}

	napi_schedule(&cpsw->napi_rx);
	return IRQ_HANDLED;
}

static int cpsw_tx_mq_poll(struct napi_struct *napi_tx, int budget)
{
	u32			ch_map;
	int			num_tx, cur_budget, ch;
	struct cpsw_common	*cpsw = napi_to_cpsw(napi_tx);
	struct cpsw_vector	*txv;

	/* process every unprocessed channel */
	ch_map = cpdma_ctrl_txchs_state(cpsw->dma);
	for (ch = 0, num_tx = 0; ch_map & 0xff; ch_map <<= 1, ch++) {
		if (!(ch_map & 0x80))
			continue;

		txv = &cpsw->txv[ch];
		if (unlikely(txv->budget > budget - num_tx))
			cur_budget = budget - num_tx;
		else
			cur_budget = txv->budget;

		num_tx += cpdma_chan_process(txv->ch, cur_budget);
		if (num_tx >= budget)
			break;
	}

	if (num_tx < budget) {
		napi_complete(napi_tx);
		writel(0xff, &cpsw->wr_regs->tx_en);
	}

	return num_tx;
}

static int cpsw_tx_poll(struct napi_struct *napi_tx, int budget)
{
	struct cpsw_common *cpsw = napi_to_cpsw(napi_tx);
	int num_tx;

	num_tx = cpdma_chan_process(cpsw->txv[0].ch, budget);
	if (num_tx < budget) {
		napi_complete(napi_tx);
		writel(0xff, &cpsw->wr_regs->tx_en);
		if (cpsw->tx_irq_disabled) {
			cpsw->tx_irq_disabled = false;
			enable_irq(cpsw->irqs_table[1]);
		}
	}

	return num_tx;
}

static int cpsw_rx_mq_poll(struct napi_struct *napi_rx, int budget)
{
	u32			ch_map;
	int			num_rx, cur_budget, ch;
	struct cpsw_common	*cpsw = napi_to_cpsw(napi_rx);
	struct cpsw_vector	*rxv;

	/* process every unprocessed channel */
	ch_map = cpdma_ctrl_rxchs_state(cpsw->dma);
	for (ch = 0, num_rx = 0; ch_map; ch_map >>= 1, ch++) {
		if (!(ch_map & 0x01))
			continue;

		rxv = &cpsw->rxv[ch];
		if (unlikely(rxv->budget > budget - num_rx))
			cur_budget = budget - num_rx;
		else
			cur_budget = rxv->budget;

		num_rx += cpdma_chan_process(rxv->ch, cur_budget);
		if (num_rx >= budget)
			break;
	}

	if (num_rx < budget) {
		napi_complete_done(napi_rx, num_rx);
		writel(0xff, &cpsw->wr_regs->rx_en);
	}

	return num_rx;
}

static int cpsw_rx_poll(struct napi_struct *napi_rx, int budget)
{
	struct cpsw_common *cpsw = napi_to_cpsw(napi_rx);
	int num_rx;

	num_rx = cpdma_chan_process(cpsw->rxv[0].ch, budget);
	if (num_rx < budget) {
		napi_complete_done(napi_rx, num_rx);
		writel(0xff, &cpsw->wr_regs->rx_en);
		if (cpsw->rx_irq_disabled) {
			cpsw->rx_irq_disabled = false;
			enable_irq(cpsw->irqs_table[0]);
		}
	}

	return num_rx;
}

static inline void soft_reset(const char *module, void __iomem *reg)
{
	unsigned long timeout = jiffies + HZ;

	writel_relaxed(1, reg);
	do {
		cpu_relax();
	} while ((readl_relaxed(reg) & 1) && time_after(timeout, jiffies));

	WARN(readl_relaxed(reg) & 1, "failed to soft-reset %s\n", module);
}

static void cpsw_set_slave_mac(struct cpsw_slave *slave,
			       struct cpsw_priv *priv)
{
	slave_write(slave, mac_hi(priv->mac_addr), SA_HI);
	slave_write(slave, mac_lo(priv->mac_addr), SA_LO);
}

static bool cpsw_shp_is_off(struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	u32 shift, mask, val;

	val = readl_relaxed(&cpsw->regs->ptype);

	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
	mask = 7 << shift;
	val = val & mask;

	return !val;
}

static void cpsw_fifo_shp_on(struct cpsw_priv *priv, int fifo, int on)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	u32 shift, mask, val;

	val = readl_relaxed(&cpsw->regs->ptype);

	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
	mask = (1 << --fifo) << shift;
	val = on ? val | mask : val & ~mask;

	writel_relaxed(val, &cpsw->regs->ptype);
}
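/* Note on the ptype layout used above: each slave has a 3-bit shaper
 * enable field starting at bit 16 (CPSW_FIFO_SHAPE_EN_SHIFT), so slave 0
 * uses bits 18..16 and slave 1 bits 21..19; fifo n maps to bit (n - 1)
 * within the field.
 */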

static void _cpsw_adjust_link(struct cpsw_slave *slave,
			      struct cpsw_priv *priv, bool *link)
{
	struct phy_device	*phy = slave->phy;
	u32			mac_control = 0;
	u32			slave_port;
	struct cpsw_common *cpsw = priv->cpsw;

	if (!phy)
		return;

	slave_port = cpsw_get_slave_port(slave->slave_num);

	if (phy->link) {
		mac_control = cpsw->data.mac_control;

		/* enable forwarding */
		cpsw_ale_control_set(cpsw->ale, slave_port,
				     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);

		if (phy->speed == 1000)
			mac_control |= BIT(7);	/* GIGABITEN	*/
		if (phy->duplex)
			mac_control |= BIT(0);	/* FULLDUPLEXEN	*/

		/* set speed_in input in case RMII mode is used in 100Mbps */
		if (phy->speed == 100)
			mac_control |= BIT(15);
		/* in band mode only works in 10Mbps RGMII mode */
		else if ((phy->speed == 10) && phy_interface_is_rgmii(phy))
			mac_control |= BIT(18); /* In Band mode */

		if (priv->rx_pause)
			mac_control |= BIT(3);

		if (priv->tx_pause)
			mac_control |= BIT(4);

		*link = true;

		if (priv->shp_cfg_speed &&
		    priv->shp_cfg_speed != slave->phy->speed &&
		    !cpsw_shp_is_off(priv))
			dev_warn(priv->dev,
				 "Speed was changed, CBS shaper speeds are changed!");
	} else {
		mac_control = 0;
		/* disable forwarding */
		cpsw_ale_control_set(cpsw->ale, slave_port,
				     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
	}

	if (mac_control != slave->mac_control) {
		phy_print_status(phy);
		writel_relaxed(mac_control, &slave->sliver->mac_control);
	}

	slave->mac_control = mac_control;
}

static int cpsw_get_common_speed(struct cpsw_common *cpsw)
{
	int i, speed;

	for (i = 0, speed = 0; i < cpsw->data.slaves; i++)
		if (cpsw->slaves[i].phy && cpsw->slaves[i].phy->link)
			speed += cpsw->slaves[i].phy->speed;

	return speed;
}

static int cpsw_need_resplit(struct cpsw_common *cpsw)
{
	int i, rlim_ch_num;
	int speed, ch_rate;

	/* re-split resources only in case speed was changed */
	speed = cpsw_get_common_speed(cpsw);
	if (speed == cpsw->speed || !speed)
		return 0;

	cpsw->speed = speed;

	for (i = 0, rlim_ch_num = 0; i < cpsw->tx_ch_num; i++) {
		ch_rate = cpdma_chan_get_rate(cpsw->txv[i].ch);
		if (!ch_rate)
			break;

		rlim_ch_num++;
	}

	/* cases not dependent on speed */
	if (!rlim_ch_num || rlim_ch_num == cpsw->tx_ch_num)
		return 0;

	return 1;
}

static void cpsw_adjust_link(struct net_device *ndev)
{
	struct cpsw_priv	*priv = netdev_priv(ndev);
	struct cpsw_common	*cpsw = priv->cpsw;
	bool			link = false;

	for_each_slave(priv, _cpsw_adjust_link, priv, &link);

	if (link) {
		if (cpsw_need_resplit(cpsw))
			cpsw_split_res(ndev);

		netif_carrier_on(ndev);
		if (netif_running(ndev))
			netif_tx_wake_all_queues(ndev);
	} else {
		netif_carrier_off(ndev);
		netif_tx_stop_all_queues(ndev);
	}
}

static int cpsw_get_coalesce(struct net_device *ndev,
				struct ethtool_coalesce *coal)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	coal->rx_coalesce_usecs = cpsw->coal_intvl;
	return 0;
}

static int cpsw_set_coalesce(struct net_device *ndev,
				struct ethtool_coalesce *coal)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	u32 int_ctrl;
	u32 num_interrupts = 0;
	u32 prescale = 0;
	u32 addnl_dvdr = 1;
	u32 coal_intvl = 0;
	struct cpsw_common *cpsw = priv->cpsw;

	coal_intvl = coal->rx_coalesce_usecs;

	int_ctrl =  readl(&cpsw->wr_regs->int_control);
	prescale = cpsw->bus_freq_mhz * 4;

	if (!coal->rx_coalesce_usecs) {
		int_ctrl &= ~(CPSW_INTPRESCALE_MASK | CPSW_INTPACEEN);
		goto update_return;
	}

	if (coal_intvl < CPSW_CMINTMIN_INTVL)
		coal_intvl = CPSW_CMINTMIN_INTVL;

	if (coal_intvl > CPSW_CMINTMAX_INTVL) {
		/* Interrupt pacer works with 4us Pulse, we can
		 * throttle further by dilating the 4us pulse.
		 */
		addnl_dvdr = CPSW_INTPRESCALE_MASK / prescale;

		if (addnl_dvdr > 1) {
			prescale *= addnl_dvdr;
			if (coal_intvl > (CPSW_CMINTMAX_INTVL * addnl_dvdr))
				coal_intvl = (CPSW_CMINTMAX_INTVL
						* addnl_dvdr);
		} else {
			addnl_dvdr = 1;
			coal_intvl = CPSW_CMINTMAX_INTVL;
		}
	}

	num_interrupts = (1000 * addnl_dvdr) / coal_intvl;
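	/* e.g. bus_freq_mhz = 250 gives prescale = 1000 bus clocks (a 4us
	 * pulse); a requested interval of 500 usecs then yields
	 * num_interrupts = 1000 / 500 = 2, i.e. at most two paced
	 * interrupts per millisecond.
	 */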
	writel(num_interrupts, &cpsw->wr_regs->rx_imax);
	writel(num_interrupts, &cpsw->wr_regs->tx_imax);

	int_ctrl |= CPSW_INTPACEEN;
	int_ctrl &= (~CPSW_INTPRESCALE_MASK);
	int_ctrl |= (prescale & CPSW_INTPRESCALE_MASK);

update_return:
	writel(int_ctrl, &cpsw->wr_regs->int_control);

	cpsw_notice(priv, timer, "Set coalesce to %d usecs.\n", coal_intvl);
	cpsw->coal_intvl = coal_intvl;

	return 0;
}

static int cpsw_get_sset_count(struct net_device *ndev, int sset)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	switch (sset) {
	case ETH_SS_STATS:
		return (CPSW_STATS_COMMON_LEN +
		       (cpsw->rx_ch_num + cpsw->tx_ch_num) *
		       CPSW_STATS_CH_LEN);
	default:
		return -EOPNOTSUPP;
	}
}

static void cpsw_add_ch_strings(u8 **p, int ch_num, int rx_dir)
{
	int ch_stats_len;
	int line;
	int i;

	ch_stats_len = CPSW_STATS_CH_LEN * ch_num;
	for (i = 0; i < ch_stats_len; i++) {
		line = i % CPSW_STATS_CH_LEN;
		snprintf(*p, ETH_GSTRING_LEN,
			 "%s DMA chan %ld: %s", rx_dir ? "Rx" : "Tx",
			 (long)(i / CPSW_STATS_CH_LEN),
			 cpsw_gstrings_ch_stats[line].stat_string);
		*p += ETH_GSTRING_LEN;
	}
}

static void cpsw_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < CPSW_STATS_COMMON_LEN; i++) {
			memcpy(p, cpsw_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		cpsw_add_ch_strings(&p, cpsw->rx_ch_num, 1);
		cpsw_add_ch_strings(&p, cpsw->tx_ch_num, 0);
		break;
	}
}

static void cpsw_get_ethtool_stats(struct net_device *ndev,
				    struct ethtool_stats *stats, u64 *data)
{
	u8 *p;
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	struct cpdma_chan_stats ch_stats;
	int i, l, ch;

	/* Collect CPSW hw stats, then Davinci CPDMA stats for each
	 * Rx and Tx channel
	 */
	for (l = 0; l < CPSW_STATS_COMMON_LEN; l++)
		data[l] = readl(cpsw->hw_stats +
				cpsw_gstrings_stats[l].stat_offset);

	for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
		cpdma_chan_get_stats(cpsw->rxv[ch].ch, &ch_stats);
		for (i = 0; i < CPSW_STATS_CH_LEN; i++, l++) {
			p = (u8 *)&ch_stats +
				cpsw_gstrings_ch_stats[i].stat_offset;
			data[l] = *(u32 *)p;
		}
	}

	for (ch = 0; ch < cpsw->tx_ch_num; ch++) {
		cpdma_chan_get_stats(cpsw->txv[ch].ch, &ch_stats);
		for (i = 0; i < CPSW_STATS_CH_LEN; i++, l++) {
			p = (u8 *)&ch_stats +
				cpsw_gstrings_ch_stats[i].stat_offset;
			data[l] = *(u32 *)p;
		}
	}
}

static inline int cpsw_tx_packet_submit(struct cpsw_priv *priv,
					struct sk_buff *skb,
					struct cpdma_chan *txch)
{
	struct cpsw_common *cpsw = priv->cpsw;

	skb_tx_timestamp(skb);
	return cpdma_chan_submit(txch, skb, skb->data, skb->len,
				 priv->emac_port + cpsw->data.dual_emac);
}

static inline void cpsw_add_dual_emac_def_ale_entries(
		struct cpsw_priv *priv, struct cpsw_slave *slave,
		u32 slave_port)
{
	struct cpsw_common *cpsw = priv->cpsw;
	u32 port_mask = 1 << slave_port | ALE_PORT_HOST;

	if (cpsw->version == CPSW_VERSION_1)
		slave_write(slave, slave->port_vlan, CPSW1_PORT_VLAN);
	else
		slave_write(slave, slave->port_vlan, CPSW2_PORT_VLAN);
	cpsw_ale_add_vlan(cpsw->ale, slave->port_vlan, port_mask,
			  port_mask, port_mask, 0);
	cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
			   ALE_PORT_HOST, ALE_VLAN, slave->port_vlan, 0);
	cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
			   HOST_PORT_NUM, ALE_VLAN |
			   ALE_SECURE, slave->port_vlan);
	cpsw_ale_control_set(cpsw->ale, slave_port,
			     ALE_PORT_DROP_UNKNOWN_VLAN, 1);
}

static void soft_reset_slave(struct cpsw_slave *slave)
{
	char name[32];

	snprintf(name, sizeof(name), "slave-%d", slave->slave_num);
	soft_reset(name, &slave->sliver->soft_reset);
}

static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	u32 slave_port;
	struct phy_device *phy;
	struct cpsw_common *cpsw = priv->cpsw;

	soft_reset_slave(slave);

	/* setup priority mapping */
	writel_relaxed(RX_PRIORITY_MAPPING, &slave->sliver->rx_pri_map);

	switch (cpsw->version) {
	case CPSW_VERSION_1:
		slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP);
		/* Increase RX FIFO size to 5 for supporting full-duplex
		 * flow control mode
		 */
		slave_write(slave,
			    (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
			    CPSW_MAX_BLKS_RX, CPSW1_MAX_BLKS);
		break;
	case CPSW_VERSION_2:
	case CPSW_VERSION_3:
	case CPSW_VERSION_4:
		slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP);
		/* Increase RX FIFO size to 5 for supporting full-duplex
		 * flow control mode
		 */
		slave_write(slave,
			    (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
			    CPSW_MAX_BLKS_RX, CPSW2_MAX_BLKS);
		break;
	}

	/* setup max packet size, and mac address */
	writel_relaxed(cpsw->rx_packet_max, &slave->sliver->rx_maxlen);
	cpsw_set_slave_mac(slave, priv);

	slave->mac_control = 0;	/* no link yet */

	slave_port = cpsw_get_slave_port(slave->slave_num);

	if (cpsw->data.dual_emac)
		cpsw_add_dual_emac_def_ale_entries(priv, slave, slave_port);
	else
		cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
				   1 << slave_port, 0, 0, ALE_MCAST_FWD_2);

	if (slave->data->phy_node) {
		phy = of_phy_connect(priv->ndev, slave->data->phy_node,
				 &cpsw_adjust_link, 0, slave->data->phy_if);
		if (!phy) {
			dev_err(priv->dev, "phy \"%pOF\" not found on slave %d\n",
				slave->data->phy_node,
				slave->slave_num);
			return;
		}
	} else {
		phy = phy_connect(priv->ndev, slave->data->phy_id,
				 &cpsw_adjust_link, slave->data->phy_if);
		if (IS_ERR(phy)) {
			dev_err(priv->dev,
				"phy \"%s\" not found on slave %d, err %ld\n",
				slave->data->phy_id, slave->slave_num,
				PTR_ERR(phy));
			return;
		}
	}

	slave->phy = phy;

	phy_attached_info(slave->phy);

	phy_start(slave->phy);

	/* Configure GMII_SEL register */
	if (!IS_ERR(slave->data->ifphy))
		phy_set_mode_ext(slave->data->ifphy, PHY_MODE_ETHERNET,
				 slave->data->phy_if);
	else
		cpsw_phy_sel(cpsw->dev, slave->phy->interface,
			     slave->slave_num);
}

static inline void cpsw_add_default_vlan(struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	const int vlan = cpsw->data.default_vlan;
	u32 reg;
	int i;
	int unreg_mcast_mask;

	reg = (cpsw->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN :
	       CPSW2_PORT_VLAN;

	writel(vlan, &cpsw->host_port_regs->port_vlan);

	for (i = 0; i < cpsw->data.slaves; i++)
		slave_write(cpsw->slaves + i, vlan, reg);

	if (priv->ndev->flags & IFF_ALLMULTI)
		unreg_mcast_mask = ALE_ALL_PORTS;
	else
		unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2;

	cpsw_ale_add_vlan(cpsw->ale, vlan, ALE_ALL_PORTS,
			  ALE_ALL_PORTS, ALE_ALL_PORTS,
			  unreg_mcast_mask);
}

static void cpsw_init_host_port(struct cpsw_priv *priv)
{
	u32 fifo_mode;
	u32 control_reg;
	struct cpsw_common *cpsw = priv->cpsw;

	/* soft reset the controller and initialize ale */
	soft_reset("cpsw", &cpsw->regs->soft_reset);
	cpsw_ale_start(cpsw->ale);

	/* switch to vlan aware mode */
	cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_VLAN_AWARE,
			     CPSW_ALE_VLAN_AWARE);
	control_reg = readl(&cpsw->regs->control);
	control_reg |= CPSW_VLAN_AWARE | CPSW_RX_VLAN_ENCAP;
	writel(control_reg, &cpsw->regs->control);
	fifo_mode = (cpsw->data.dual_emac) ? CPSW_FIFO_DUAL_MAC_MODE :
		     CPSW_FIFO_NORMAL_MODE;
	writel(fifo_mode, &cpsw->host_port_regs->tx_in_ctl);

	/* setup host port priority mapping */
	writel_relaxed(CPDMA_TX_PRIORITY_MAP,
		       &cpsw->host_port_regs->cpdma_tx_pri_map);
	writel_relaxed(0, &cpsw->host_port_regs->cpdma_rx_chan_map);

	cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM,
			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);

	if (!cpsw->data.dual_emac) {
		cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM,
				   0, 0);
		cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
				   ALE_PORT_HOST, 0, 0, ALE_MCAST_FWD_2);
	}
}

static int cpsw_fill_rx_channels(struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct sk_buff *skb;
	int ch_buf_num;
	int ch, i, ret;

	for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
		ch_buf_num = cpdma_chan_get_rx_buf_num(cpsw->rxv[ch].ch);
		for (i = 0; i < ch_buf_num; i++) {
			skb = __netdev_alloc_skb_ip_align(priv->ndev,
							  cpsw->rx_packet_max,
							  GFP_KERNEL);
			if (!skb) {
				cpsw_err(priv, ifup, "cannot allocate skb\n");
				return -ENOMEM;
			}

			skb_set_queue_mapping(skb, ch);
			ret = cpdma_chan_submit(cpsw->rxv[ch].ch, skb,
						skb->data, skb_tailroom(skb),
						0);
			if (ret < 0) {
				cpsw_err(priv, ifup,
					 "cannot submit skb to channel %d rx, error %d\n",
					 ch, ret);
				kfree_skb(skb);
				return ret;
			}
			kmemleak_not_leak(skb);
		}

		cpsw_info(priv, ifup, "ch %d rx, submitted %d descriptors\n",
			  ch, ch_buf_num);
	}

	return 0;
}

static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_common *cpsw)
{
	u32 slave_port;

	slave_port = cpsw_get_slave_port(slave->slave_num);

	if (!slave->phy)
		return;
	phy_stop(slave->phy);
	phy_disconnect(slave->phy);
	slave->phy = NULL;
	cpsw_ale_control_set(cpsw->ale, slave_port,
			     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
	soft_reset_slave(slave);
}

static int cpsw_tc_to_fifo(int tc, int num_tc)
{
	if (tc == num_tc - 1)
		return 0;

	return CPSW_FIFO_SHAPERS_NUM - tc;
}
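/* e.g. with num_tc = CPSW_TC_NUM = 4: tc0 -> FIFO 3, tc1 -> FIFO 2,
 * tc2 -> FIFO 1 and the last tc3 -> FIFO 0, which is never shaped.
 */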

static int cpsw_set_fifo_bw(struct cpsw_priv *priv, int fifo, int bw)
{
	struct cpsw_common *cpsw = priv->cpsw;
	u32 val = 0, send_pct, shift;
	struct cpsw_slave *slave;
	int pct = 0, i;

	if (bw > priv->shp_cfg_speed * 1000)
		goto err;

	/* Shaping has to stay enabled contiguously from the highest FIFO
	 * down, and a FIFO's bandwidth can be no more than the interface
	 * speed allows.
	 */
	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	send_pct = slave_read(slave, SEND_PERCENT);
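	/* Each FIFO owns an 8-bit percentage field in SEND_PERCENT; walk
	 * the shaper FIFOs from highest to lowest, keeping the configured
	 * percentages summing to less than 100.
	 */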
	for (i = CPSW_FIFO_SHAPERS_NUM; i > 0; i--) {
		if (!bw) {
			if (i >= fifo || !priv->fifo_bw[i])
				continue;

			dev_warn(priv->dev, "Prev FIFO%d is shaped", i);
			continue;
		}

		if (!priv->fifo_bw[i] && i > fifo) {
			dev_err(priv->dev, "Upper FIFO%d is not shaped", i);
			return -EINVAL;
		}

		shift = (i - 1) * 8;
		if (i == fifo) {
			send_pct &= ~(CPSW_PCT_MASK << shift);
			val = DIV_ROUND_UP(bw, priv->shp_cfg_speed * 10);
			if (!val)
				val = 1;

			send_pct |= val << shift;
			pct += val;
			continue;
		}

		if (priv->fifo_bw[i])
			pct += (send_pct >> shift) & CPSW_PCT_MASK;
	}

	if (pct >= 100)
		goto err;

	slave_write(slave, send_pct, SEND_PERCENT);
	priv->fifo_bw[fifo] = bw;

	dev_warn(priv->dev, "set FIFO%d bw = %d\n", fifo,
		 DIV_ROUND_CLOSEST(val * priv->shp_cfg_speed, 100));

	return 0;
err:
	dev_err(priv->dev, "Bandwidth doesn't fit in tc configuration");
	return -EINVAL;
}

static int cpsw_set_fifo_rlimit(struct cpsw_priv *priv, int fifo, int bw)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	u32 tx_in_ctl_rg, val;
	int ret;

	ret = cpsw_set_fifo_bw(priv, fifo, bw);
	if (ret)
		return ret;

	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	tx_in_ctl_rg = cpsw->version == CPSW_VERSION_1 ?
		       CPSW1_TX_IN_CTL : CPSW2_TX_IN_CTL;

	if (!bw)
		cpsw_fifo_shp_on(priv, fifo, bw);

	val = slave_read(slave, tx_in_ctl_rg);
	if (cpsw_shp_is_off(priv)) {
		/* disable FIFOs rate limited queues */
		val &= ~(0xf << CPSW_FIFO_RATE_EN_SHIFT);

		/* set type of FIFO queues to normal priority mode */
		val &= ~(3 << CPSW_FIFO_QUEUE_TYPE_SHIFT);

		/* set type of FIFO queues to be rate limited */
		if (bw)
			val |= 2 << CPSW_FIFO_QUEUE_TYPE_SHIFT;
		else
			priv->shp_cfg_speed = 0;
	}

	/* toggle a FIFO rate limited queue */
	if (bw)
		val |= BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT);
	else
		val &= ~BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT);
	slave_write(slave, val, tx_in_ctl_rg);

	/* FIFO transmit shape enable */
	cpsw_fifo_shp_on(priv, fifo, bw);
	return 0;
}

/* Defaults:
 * class A - prio 3
 * class B - prio 2
 * shaping for class A should be set first
 */
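/* A minimal user-space configuration sketch (interface name and the
 * slope/credit numbers are illustrative only):
 *   tc qdisc replace dev eth0 handle 100: parent root mqprio num_tc 3 \
 *      map 2 2 1 0 2 2 2 2 queues 1@0 1@1 2@2 hw 1
 *   tc qdisc replace dev eth0 parent 100:1 cbs locredit -1440 \
 *      hicredit 60 sendslope -960000 idleslope 40000 offload 1
 */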
static int cpsw_set_cbs(struct net_device *ndev,
			struct tc_cbs_qopt_offload *qopt)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	int prev_speed = 0;
	int tc, ret, fifo;
	u32 bw = 0;

	tc = netdev_txq_to_tc(priv->ndev, qopt->queue);

	/* Enable channels in backward order: the highest FIFOs must be rate
	 * limited first, for consistency with the CPDMA rate-limited
	 * channels, which are also used in backward order. FIFO0 cannot be
	 * rate limited.
	 */
	fifo = cpsw_tc_to_fifo(tc, ndev->num_tc);
	if (!fifo) {
		dev_err(priv->dev, "Last tc%d can't be rate limited", tc);
		return -EINVAL;
	}

	/* do nothing, it's disabled anyway */
	if (!qopt->enable && !priv->fifo_bw[fifo])
		return 0;

	/* shapers can be set if link speed is known */
	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	if (slave->phy && slave->phy->link) {
		if (priv->shp_cfg_speed &&
		    priv->shp_cfg_speed != slave->phy->speed)
			prev_speed = priv->shp_cfg_speed;

		priv->shp_cfg_speed = slave->phy->speed;
	}

	if (!priv->shp_cfg_speed) {
		dev_err(priv->dev, "Link speed is not known");
		return -EINVAL;
	}

	ret = pm_runtime_get_sync(cpsw->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(cpsw->dev);
		return ret;
	}

	bw = qopt->enable ? qopt->idleslope : 0;
	ret = cpsw_set_fifo_rlimit(priv, fifo, bw);
	if (ret) {
		priv->shp_cfg_speed = prev_speed;
		prev_speed = 0;
	}

	if (bw && prev_speed)
		dev_warn(priv->dev,
			 "Speed was changed, CBS shaper speeds are changed!");

	pm_runtime_put_sync(cpsw->dev);
	return ret;
}

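/* Re-apply previously configured CBS shapers after a port reset,
 * highest FIFO first, as cpsw_set_fifo_bw() expects.
 */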
static void cpsw_cbs_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	int fifo, bw;

	for (fifo = CPSW_FIFO_SHAPERS_NUM; fifo > 0; fifo--) {
		bw = priv->fifo_bw[fifo];
		if (!bw)
			continue;

		cpsw_set_fifo_rlimit(priv, fifo, bw);
	}
}

static void cpsw_mqprio_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	u32 tx_prio_map = 0;
	int i, tc, fifo;
	u32 tx_prio_rg;

	if (!priv->mqprio_hw)
		return;

	for (i = 0; i < 8; i++) {
		tc = netdev_get_prio_tc_map(priv->ndev, i);
		fifo = CPSW_FIFO_SHAPERS_NUM - tc;
		tx_prio_map |= fifo << (4 * i);
	}

	tx_prio_rg = cpsw->version == CPSW_VERSION_1 ?
		     CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP;

	slave_write(slave, tx_prio_map, tx_prio_rg);
}

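/* vlan_for_each() callback: re-install one VID into the ALE table via
 * cpsw_ndo_vlan_rx_add_vid().
 */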
static int cpsw_restore_vlans(struct net_device *vdev, int vid, void *arg)
{
	struct cpsw_priv *priv = arg;

	if (!vdev)
		return 0;

	cpsw_ndo_vlan_rx_add_vid(priv->ndev, 0, vid);
	return 0;
}

/* restore resources after port reset */
static void cpsw_restore(struct cpsw_priv *priv)
{
	/* restore vlan configurations */
	vlan_for_each(priv->ndev, cpsw_restore_vlans, priv);

	/* restore MQPRIO offload */
	for_each_slave(priv, cpsw_mqprio_resume, priv);

	/* restore CBS offload */
	for_each_slave(priv, cpsw_cbs_resume, priv);
}

static int cpsw_ndo_open(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int ret;
	u32 reg;

	ret = pm_runtime_get_sync(cpsw->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(cpsw->dev);
		return ret;
	}

	netif_carrier_off(ndev);

	/* Notify the stack of the actual queue counts. */
	ret = netif_set_real_num_tx_queues(ndev, cpsw->tx_ch_num);
	if (ret) {
		dev_err(priv->dev, "cannot set real number of tx queues\n");
		goto err_cleanup;
	}

	ret = netif_set_real_num_rx_queues(ndev, cpsw->rx_ch_num);
	if (ret) {
		dev_err(priv->dev, "cannot set real number of rx queues\n");
		goto err_cleanup;
	}

	reg = cpsw->version;

	dev_info(priv->dev, "initializing cpsw version %d.%d (%d)\n",
		 CPSW_MAJOR_VERSION(reg), CPSW_MINOR_VERSION(reg),
		 CPSW_RTL_VERSION(reg));

	/* Initialize host and slave ports */
	if (!cpsw->usage_count)
		cpsw_init_host_port(priv);
	for_each_slave(priv, cpsw_slave_open, priv);

	/* Add default VLAN */
	if (!cpsw->data.dual_emac)
		cpsw_add_default_vlan(priv);
	else
		cpsw_ale_add_vlan(cpsw->ale, cpsw->data.default_vlan,
				  ALE_ALL_PORTS, ALE_ALL_PORTS, 0, 0);

	/* initialize shared resources for every ndev */
	if (!cpsw->usage_count) {
		/* disable priority elevation */
		writel_relaxed(0, &cpsw->regs->ptype);

		/* enable statistics collection on all ports */
		writel_relaxed(0x7, &cpsw->regs->stat_port_en);

		/* Enable internal fifo flow control */
		writel(0x7, &cpsw->regs->flow_control);

		napi_enable(&cpsw->napi_rx);
		napi_enable(&cpsw->napi_tx);

		if (cpsw->tx_irq_disabled) {
			cpsw->tx_irq_disabled = false;
			enable_irq(cpsw->irqs_table[1]);
		}

		if (cpsw->rx_irq_disabled) {
			cpsw->rx_irq_disabled = false;
			enable_irq(cpsw->irqs_table[0]);
		}

		ret = cpsw_fill_rx_channels(priv);
		if (ret < 0)
			goto err_cleanup;

		if (cpts_register(cpsw->cpts))
			dev_err(priv->dev, "error registering cpts device\n");

	}

	cpsw_restore(priv);

	/* Enable Interrupt pacing if configured */
	if (cpsw->coal_intvl != 0) {
		struct ethtool_coalesce coal;

		coal.rx_coalesce_usecs = cpsw->coal_intvl;
		cpsw_set_coalesce(ndev, &coal);
	}

	cpdma_ctlr_start(cpsw->dma);
	cpsw_intr_enable(cpsw);
	cpsw->usage_count++;

	return 0;

err_cleanup:
	cpdma_ctlr_stop(cpsw->dma);
	for_each_slave(priv, cpsw_slave_stop, cpsw);
	pm_runtime_put_sync(cpsw->dev);
	netif_carrier_off(priv->ndev);
	return ret;
}

static int cpsw_ndo_stop(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;

	cpsw_info(priv, ifdown, "shutting down cpsw device\n");
	__hw_addr_ref_unsync_dev(&ndev->mc, ndev, cpsw_purge_all_mc);
	netif_tx_stop_all_queues(priv->ndev);
	netif_carrier_off(priv->ndev);

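	/* Shared switch resources are torn down only when the last user
	 * interface goes down.
	 */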
	if (cpsw->usage_count <= 1) {
		napi_disable(&cpsw->napi_rx);
		napi_disable(&cpsw->napi_tx);
		cpts_unregister(cpsw->cpts);
		cpsw_intr_disable(cpsw);
		cpdma_ctlr_stop(cpsw->dma);
		cpsw_ale_stop(cpsw->ale);
	}
	for_each_slave(priv, cpsw_slave_stop, cpsw);

	if (cpsw_need_resplit(cpsw))
		cpsw_split_res(ndev);

	cpsw->usage_count--;
	pm_runtime_put_sync(cpsw->dev);
	return 0;
}

static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
				       struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpts *cpts = cpsw->cpts;
	struct netdev_queue *txq;
	struct cpdma_chan *txch;
	int ret, q_idx;

	if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) {
		cpsw_err(priv, tx_err, "packet pad failed\n");
		ndev->stats.tx_dropped++;
		return NET_XMIT_DROP;
	}

	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
	    priv->tx_ts_enabled && cpts_can_timestamp(cpts, skb))
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

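	/* The stack may hand us a queue index beyond the number of
	 * currently active TX channels; fold it back into range.
	 */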
	q_idx = skb_get_queue_mapping(skb);
	if (q_idx >= cpsw->tx_ch_num)
		q_idx = q_idx % cpsw->tx_ch_num;

	txch = cpsw->txv[q_idx].ch;
	txq = netdev_get_tx_queue(ndev, q_idx);
	ret = cpsw_tx_packet_submit(priv, skb, txch);
	if (unlikely(ret != 0)) {
		cpsw_err(priv, tx_err, "desc submit failed\n");
		goto fail;
	}

	/* If there is no more tx desc left free then we need to
	 * tell the kernel to stop sending us tx frames.
	 */
	if (unlikely(!cpdma_check_free_tx_desc(txch))) {
		netif_tx_stop_queue(txq);

		/* Barrier, so that stop_queue visible to other cpus */
		smp_mb__after_atomic();

		if (cpdma_check_free_tx_desc(txch))
			netif_tx_wake_queue(txq);
	}

	return NETDEV_TX_OK;
fail:
	ndev->stats.tx_dropped++;
	netif_tx_stop_queue(txq);

	/* Barrier, so that stop_queue visible to other cpus */
	smp_mb__after_atomic();

	if (cpdma_check_free_tx_desc(txch))
		netif_tx_wake_queue(txq);

	return NETDEV_TX_BUSY;
}

#if IS_ENABLED(CONFIG_TI_CPTS)

static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave = &cpsw->slaves[cpsw->data.active_slave];
	u32 ts_en, seq_id;

	if (!priv->tx_ts_enabled && !priv->rx_ts_enabled) {
		slave_write(slave, 0, CPSW1_TS_CTL);
		return;
	}

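	/* Match PTP event messages: the sequence ID sits at byte offset 30
	 * of the PTP header, and the matched LTYPE is ETH_P_1588.
	 */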
	seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
	ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS;

	if (priv->tx_ts_enabled)
		ts_en |= CPSW_V1_TS_TX_EN;

	if (priv->rx_ts_enabled)
		ts_en |= CPSW_V1_TS_RX_EN;

	slave_write(slave, ts_en, CPSW1_TS_CTL);
	slave_write(slave, seq_id, CPSW1_TS_SEQ_LTYPE);
}

static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
{
	struct cpsw_slave *slave;
	struct cpsw_common *cpsw = priv->cpsw;
	u32 ctrl, mtype;

	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];

	ctrl = slave_read(slave, CPSW2_CONTROL);
	switch (cpsw->version) {
	case CPSW_VERSION_2:
		ctrl &= ~CTRL_V2_ALL_TS_MASK;

		if (priv->tx_ts_enabled)
			ctrl |= CTRL_V2_TX_TS_BITS;

		if (priv->rx_ts_enabled)
			ctrl |= CTRL_V2_RX_TS_BITS;
		break;
	case CPSW_VERSION_3:
	default:
		ctrl &= ~CTRL_V3_ALL_TS_MASK;

		if (priv->tx_ts_enabled)
			ctrl |= CTRL_V3_TX_TS_BITS;

		if (priv->rx_ts_enabled)
			ctrl |= CTRL_V3_RX_TS_BITS;
		break;
	}

	mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS;

	slave_write(slave, mtype, CPSW2_TS_SEQ_MTYPE);
	slave_write(slave, ctrl, CPSW2_CONTROL);
	writel_relaxed(ETH_P_1588, &cpsw->regs->ts_ltype);
	writel_relaxed(ETH_P_8021Q, &cpsw->regs->vlan_ltype);
}

static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	struct cpsw_priv *priv = netdev_priv(dev);
	struct hwtstamp_config cfg;
	struct cpsw_common *cpsw = priv->cpsw;

	if (cpsw->version != CPSW_VERSION_1 &&
	    cpsw->version != CPSW_VERSION_2 &&
	    cpsw->version != CPSW_VERSION_3)
		return -EOPNOTSUPP;

	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;

	/* reserved for future extensions */
	if (cfg.flags)
		return -EINVAL;

	if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	switch (cfg.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		priv->rx_ts_enabled = 0;
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_NTP_ALL:
		return -ERANGE;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V2_EVENT;
		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		break;
	default:
		return -ERANGE;
	}

	priv->tx_ts_enabled = cfg.tx_type == HWTSTAMP_TX_ON;

	switch (cpsw->version) {
	case CPSW_VERSION_1:
		cpsw_hwtstamp_v1(priv);
		break;
	case CPSW_VERSION_2:
	case CPSW_VERSION_3:
		cpsw_hwtstamp_v2(priv);
		break;
	default:
		WARN_ON(1);
	}

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}

static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(dev);
	struct cpsw_priv *priv = netdev_priv(dev);
	struct hwtstamp_config cfg;

	if (cpsw->version != CPSW_VERSION_1 &&
	    cpsw->version != CPSW_VERSION_2 &&
	    cpsw->version != CPSW_VERSION_3)
		return -EOPNOTSUPP;

	cfg.flags = 0;
	cfg.tx_type = priv->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
	cfg.rx_filter = priv->rx_ts_enabled;

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
#else
static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
	return -EOPNOTSUPP;
}

static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	return -EOPNOTSUPP;
}
#endif /*CONFIG_TI_CPTS*/

static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	struct cpsw_priv *priv = netdev_priv(dev);
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);

	if (!netif_running(dev))
		return -EINVAL;

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return cpsw_hwtstamp_set(dev, req);
	case SIOCGHWTSTAMP:
		return cpsw_hwtstamp_get(dev, req);
	}

	if (!cpsw->slaves[slave_no].phy)
		return -EOPNOTSUPP;
	return phy_mii_ioctl(cpsw->slaves[slave_no].phy, req, cmd);
}

static void cpsw_ndo_tx_timeout(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int ch;

	cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n");
	ndev->stats.tx_errors++;
	cpsw_intr_disable(cpsw);
	for (ch = 0; ch < cpsw->tx_ch_num; ch++) {
		cpdma_chan_stop(cpsw->txv[ch].ch);
		cpdma_chan_start(cpsw->txv[ch].ch);
	}

	cpsw_intr_enable(cpsw);
	netif_trans_update(ndev);
	netif_tx_wake_all_queues(ndev);
}

static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct sockaddr *addr = (struct sockaddr *)p;
	struct cpsw_common *cpsw = priv->cpsw;
	int flags = 0;
	u16 vid = 0;
	int ret;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	ret = pm_runtime_get_sync(cpsw->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(cpsw->dev);
		return ret;
	}

	if (cpsw->data.dual_emac) {
		vid = cpsw->slaves[priv->emac_port].port_vlan;
		flags = ALE_VLAN;
	}

	cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM,
			   flags, vid);
	cpsw_ale_add_ucast(cpsw->ale, addr->sa_data, HOST_PORT_NUM,
			   flags, vid);

	memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
	memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
	for_each_slave(priv, cpsw_set_slave_mac, priv);

	pm_runtime_put(cpsw->dev);

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void cpsw_ndo_poll_controller(struct net_device *ndev)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	cpsw_intr_disable(cpsw);
	cpsw_rx_interrupt(cpsw->irqs_table[0], cpsw);
	cpsw_tx_interrupt(cpsw->irqs_table[1], cpsw);
	cpsw_intr_enable(cpsw);
}
#endif

static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
				unsigned short vid)
{
	int ret;
	int unreg_mcast_mask = 0;
	int mcast_mask;
	u32 port_mask;
	struct cpsw_common *cpsw = priv->cpsw;

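	/* unreg_mcast_mask selects the ports that may receive unregistered
	 * multicast on this VLAN; with IFF_ALLMULTI set it is opened up to
	 * the host port as well.
	 */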
	if (cpsw->data.dual_emac) {
		port_mask = (1 << (priv->emac_port + 1)) | ALE_PORT_HOST;

		mcast_mask = ALE_PORT_HOST;
		if (priv->ndev->flags & IFF_ALLMULTI)
			unreg_mcast_mask = mcast_mask;
	} else {
		port_mask = ALE_ALL_PORTS;
		mcast_mask = port_mask;

		if (priv->ndev->flags & IFF_ALLMULTI)
			unreg_mcast_mask = ALE_ALL_PORTS;
		else
			unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2;
	}

	ret = cpsw_ale_add_vlan(cpsw->ale, vid, port_mask, 0, port_mask,
				unreg_mcast_mask);
	if (ret != 0)
		return ret;

	ret = cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
				 HOST_PORT_NUM, ALE_VLAN, vid);
	if (ret != 0)
		goto clean_vid;

	ret = cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
				 mcast_mask, ALE_VLAN, vid, 0);
	if (ret != 0)
		goto clean_vlan_ucast;
	return 0;

clean_vlan_ucast:
	cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
			   HOST_PORT_NUM, ALE_VLAN, vid);
clean_vid:
	cpsw_ale_del_vlan(cpsw->ale, vid, 0);
	return ret;
}

static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
				    __be16 proto, u16 vid)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int ret;

	if (vid == cpsw->data.default_vlan)
		return 0;

	ret = pm_runtime_get_sync(cpsw->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(cpsw->dev);
		return ret;
	}

	if (cpsw->data.dual_emac) {
		/* In dual EMAC, reserved VLAN id should not be used for
		 * creating VLAN interfaces as this can break the dual
		 * EMAC port separation
		 */
		int i;

		for (i = 0; i < cpsw->data.slaves; i++) {
			if (vid == cpsw->slaves[i].port_vlan) {
				ret = -EINVAL;
				goto err;
			}
		}
	}

	dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
	ret = cpsw_add_vlan_ale_entry(priv, vid);
err:
	pm_runtime_put(cpsw->dev);
	return ret;
}

static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
				     __be16 proto, u16 vid)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int ret;

	if (vid == cpsw->data.default_vlan)
		return 0;

	ret = pm_runtime_get_sync(cpsw->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(cpsw->dev);
		return ret;
	}

	if (cpsw->data.dual_emac) {
		int i;

		for (i = 0; i < cpsw->data.slaves; i++) {
			if (vid == cpsw->slaves[i].port_vlan)
				goto err;
		}
	}

	dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid);
	ret = cpsw_ale_del_vlan(cpsw->ale, vid, 0);
	ret |= cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
				  HOST_PORT_NUM, ALE_VLAN, vid);
	ret |= cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
				  0, ALE_VLAN, vid);
	ret |= cpsw_ale_flush_multicast(cpsw->ale, 0, vid);
err:
	pm_runtime_put(cpsw->dev);
	return ret;
}

static int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	u32 min_rate;
	u32 ch_rate;
	int i, ret;

	ch_rate = netdev_get_tx_queue(ndev, queue)->tx_maxrate;
	if (ch_rate == rate)
		return 0;

	ch_rate = rate * 1000;
	min_rate = cpdma_chan_get_min_rate(cpsw->dma);
	if (ch_rate < min_rate && ch_rate) {
		dev_err(priv->dev, "The channel rate cannot be less than %dMbps",
			min_rate);
		return -EINVAL;
	}

	if (rate > cpsw->speed) {
		dev_err(priv->dev, "The channel rate cannot be more than %dMbps",
			cpsw->speed);
		return -EINVAL;
	}

	ret = pm_runtime_get_sync(cpsw->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(cpsw->dev);
		return ret;
	}

	ret = cpdma_chan_set_rate(cpsw->txv[queue].ch, ch_rate);
	pm_runtime_put(cpsw->dev);

	if (ret)
		return ret;

	/* update rates for slaves tx queues */
	for (i = 0; i < cpsw->data.slaves; i++) {
		slave = &cpsw->slaves[i];
		if (!slave->ndev)
			continue;

		netdev_get_tx_queue(slave->ndev, queue)->tx_maxrate = rate;
	}

	cpsw_split_res(ndev);
	return ret;
}

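/* Program the per-slave TX priority-to-FIFO map for an mqprio offload
 * request; with qopt.hw == 0 the default mapping is restored.
 */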
static int cpsw_set_mqprio(struct net_device *ndev, void *type_data)
{
	struct tc_mqprio_qopt_offload *mqprio = type_data;
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int fifo, num_tc, count, offset;
	struct cpsw_slave *slave;
	u32 tx_prio_map = 0;
	int i, tc, ret;

	num_tc = mqprio->qopt.num_tc;
	if (num_tc > CPSW_TC_NUM)
		return -EINVAL;

	if (mqprio->mode != TC_MQPRIO_MODE_DCB)
		return -EINVAL;

	ret = pm_runtime_get_sync(cpsw->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(cpsw->dev);
		return ret;
	}

	if (num_tc) {
		for (i = 0; i < 8; i++) {
			tc = mqprio->qopt.prio_tc_map[i];
			fifo = cpsw_tc_to_fifo(tc, num_tc);
			tx_prio_map |= fifo << (4 * i);
		}

		netdev_set_num_tc(ndev, num_tc);
		for (i = 0; i < num_tc; i++) {
			count = mqprio->qopt.count[i];
			offset = mqprio->qopt.offset[i];
			netdev_set_tc_queue(ndev, i, count, offset);
		}
	}

	if (!mqprio->qopt.hw) {
		/* restore default configuration */
		netdev_reset_tc(ndev);
		tx_prio_map = TX_PRIORITY_MAPPING;
	}

	priv->mqprio_hw = mqprio->qopt.hw;

	offset = cpsw->version == CPSW_VERSION_1 ?
		 CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP;

	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	slave_write(slave, tx_prio_map, offset);

	pm_runtime_put_sync(cpsw->dev);

	return 0;
}

static int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
			     void *type_data)
{
	switch (type) {
	case TC_SETUP_QDISC_CBS:
		return cpsw_set_cbs(ndev, type_data);

	case TC_SETUP_QDISC_MQPRIO:
		return cpsw_set_mqprio(ndev, type_data);

	default:
		return -EOPNOTSUPP;
	}
}

static const struct net_device_ops cpsw_netdev_ops = {
	.ndo_open		= cpsw_ndo_open,
	.ndo_stop		= cpsw_ndo_stop,
	.ndo_start_xmit		= cpsw_ndo_start_xmit,
	.ndo_set_mac_address	= cpsw_ndo_set_mac_address,
	.ndo_do_ioctl		= cpsw_ndo_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= cpsw_ndo_tx_timeout,
	.ndo_set_rx_mode	= cpsw_ndo_set_rx_mode,
	.ndo_set_tx_maxrate	= cpsw_ndo_set_tx_maxrate,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cpsw_ndo_poll_controller,
#endif
	.ndo_vlan_rx_add_vid	= cpsw_ndo_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= cpsw_ndo_vlan_rx_kill_vid,
	.ndo_setup_tc           = cpsw_ndo_setup_tc,
};

static int cpsw_get_regs_len(struct net_device *ndev)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	return cpsw->data.ale_entries * ALE_ENTRY_WORDS * sizeof(u32);
}

static void cpsw_get_regs(struct net_device *ndev,
			  struct ethtool_regs *regs, void *p)
{
	u32 *reg = p;
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	/* update CPSW IP version */
	regs->version = cpsw->version;

	cpsw_ale_dump(cpsw->ale, reg);
}

static void cpsw_get_drvinfo(struct net_device *ndev,
			     struct ethtool_drvinfo *info)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	struct platform_device	*pdev = to_platform_device(cpsw->dev);

	strlcpy(info->driver, "cpsw", sizeof(info->driver));
	strlcpy(info->version, "1.0", sizeof(info->version));
	strlcpy(info->bus_info, pdev->name, sizeof(info->bus_info));
}

static u32 cpsw_get_msglevel(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);

	return priv->msg_enable;
}

static void cpsw_set_msglevel(struct net_device *ndev, u32 value)
{
	struct cpsw_priv *priv = netdev_priv(ndev);

	priv->msg_enable = value;
}

#if IS_ENABLED(CONFIG_TI_CPTS)
static int cpsw_get_ts_info(struct net_device *ndev,
			    struct ethtool_ts_info *info)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	info->so_timestamping =
		SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_TX_SOFTWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RX_SOFTWARE |
		SOF_TIMESTAMPING_SOFTWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE;
	info->phc_index = cpsw->cpts->phc_index;
	info->tx_types =
		(1 << HWTSTAMP_TX_OFF) |
		(1 << HWTSTAMP_TX_ON);
	info->rx_filters =
		(1 << HWTSTAMP_FILTER_NONE) |
		(1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
		(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
	return 0;
}
#else
static int cpsw_get_ts_info(struct net_device *ndev,
			    struct ethtool_ts_info *info)
{
	info->so_timestamping =
		SOF_TIMESTAMPING_TX_SOFTWARE |
		SOF_TIMESTAMPING_RX_SOFTWARE |
		SOF_TIMESTAMPING_SOFTWARE;
	info->phc_index = -1;
	info->tx_types = 0;
	info->rx_filters = 0;
	return 0;
}
#endif

static int cpsw_get_link_ksettings(struct net_device *ndev,
				   struct ethtool_link_ksettings *ecmd)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);

	if (!cpsw->slaves[slave_no].phy)
		return -EOPNOTSUPP;

	phy_ethtool_ksettings_get(cpsw->slaves[slave_no].phy, ecmd);
	return 0;
}

static int cpsw_set_link_ksettings(struct net_device *ndev,
				   const struct ethtool_link_ksettings *ecmd)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);

	if (cpsw->slaves[slave_no].phy)
		return phy_ethtool_ksettings_set(cpsw->slaves[slave_no].phy,
						 ecmd);
	else
		return -EOPNOTSUPP;
}

static void cpsw_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);

	wol->supported = 0;
	wol->wolopts = 0;

	if (cpsw->slaves[slave_no].phy)
		phy_ethtool_get_wol(cpsw->slaves[slave_no].phy, wol);
}

static int cpsw_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);

	if (cpsw->slaves[slave_no].phy)
		return phy_ethtool_set_wol(cpsw->slaves[slave_no].phy, wol);
	else
		return -EOPNOTSUPP;
}

static void cpsw_get_pauseparam(struct net_device *ndev,
				struct ethtool_pauseparam *pause)
{
	struct cpsw_priv *priv = netdev_priv(ndev);

	pause->autoneg = AUTONEG_DISABLE;
	pause->rx_pause = priv->rx_pause ? true : false;
	pause->tx_pause = priv->tx_pause ? true : false;
}

static int cpsw_set_pauseparam(struct net_device *ndev,
			       struct ethtool_pauseparam *pause)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	bool link;

	priv->rx_pause = pause->rx_pause ? true : false;
	priv->tx_pause = pause->tx_pause ? true : false;

	for_each_slave(priv, _cpsw_adjust_link, priv, &link);
	return 0;
}

static int cpsw_ethtool_op_begin(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int ret;

	ret = pm_runtime_get_sync(cpsw->dev);
	if (ret < 0) {
		cpsw_err(priv, drv, "ethtool begin failed %d\n", ret);
		pm_runtime_put_noidle(cpsw->dev);
	}

	return ret;
}

static void cpsw_ethtool_op_complete(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	int ret;

	ret = pm_runtime_put(priv->cpsw->dev);
	if (ret < 0)
		cpsw_err(priv, drv, "ethtool complete failed %d\n", ret);
}

static void cpsw_get_channels(struct net_device *ndev,
			      struct ethtool_channels *ch)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	ch->max_rx = cpsw->quirk_irq ? 1 : CPSW_MAX_QUEUES;
	ch->max_tx = cpsw->quirk_irq ? 1 : CPSW_MAX_QUEUES;
	ch->max_combined = 0;
	ch->max_other = 0;
	ch->other_count = 0;
	ch->rx_count = cpsw->rx_ch_num;
	ch->tx_count = cpsw->tx_ch_num;
	ch->combined_count = 0;
}

static int cpsw_check_ch_settings(struct cpsw_common *cpsw,
				  struct ethtool_channels *ch)
{
	if (cpsw->quirk_irq) {
		dev_err(cpsw->dev, "Maximum one tx/rx queue is allowed");
		return -EOPNOTSUPP;
	}

	if (ch->combined_count)
		return -EINVAL;

	/* verify we have at least one channel in each direction */
	if (!ch->rx_count || !ch->tx_count)
		return -EINVAL;

	if (ch->rx_count > cpsw->data.channels ||
	    ch->tx_count > cpsw->data.channels)
		return -EINVAL;

	return 0;
}

static int cpsw_update_channels_res(struct cpsw_priv *priv, int ch_num, int rx)
{
	struct cpsw_common *cpsw = priv->cpsw;
	void (*handler)(void *, int, int);
	struct netdev_queue *queue;
	struct cpsw_vector *vec;
	int ret, *ch, vch;

	if (rx) {
		ch = &cpsw->rx_ch_num;
		vec = cpsw->rxv;
		handler = cpsw_rx_handler;
	} else {
		ch = &cpsw->tx_ch_num;
		vec = cpsw->txv;
		handler = cpsw_tx_handler;
	}

	while (*ch < ch_num) {
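		/* TX channels are taken from the top CPDMA channel (7)
		 * downwards, so rate-limited channels keep their priority;
		 * RX channels are created in natural order.
		 */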
		vch = rx ? *ch : 7 - *ch;
		vec[*ch].ch = cpdma_chan_create(cpsw->dma, vch, handler, rx);
		queue = netdev_get_tx_queue(priv->ndev, *ch);
		queue->tx_maxrate = 0;

		if (IS_ERR(vec[*ch].ch))
			return PTR_ERR(vec[*ch].ch);

		if (!vec[*ch].ch)
			return -EINVAL;

		cpsw_info(priv, ifup, "created new %d %s channel\n", *ch,
			  (rx ? "rx" : "tx"));
		(*ch)++;
	}

	while (*ch > ch_num) {
		(*ch)--;

		ret = cpdma_chan_destroy(vec[*ch].ch);
		if (ret)
			return ret;

		cpsw_info(priv, ifup, "destroyed %d %s channel\n", *ch,
			  (rx ? "rx" : "tx"));
	}

	return 0;
}

static int cpsw_update_channels(struct cpsw_priv *priv,
				struct ethtool_channels *ch)
{
	int ret;

	ret = cpsw_update_channels_res(priv, ch->rx_count, 1);
	if (ret)
		return ret;

	ret = cpsw_update_channels_res(priv, ch->tx_count, 0);
	if (ret)
		return ret;

	return 0;
}

static void cpsw_suspend_data_pass(struct net_device *ndev)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	struct cpsw_slave *slave;
	int i;

	/* Disable NAPI scheduling */
	cpsw_intr_disable(cpsw);

	/* Stop all transmit queues for every network device.
	 * Disable re-using rx descriptors with dormant_on.
	 */
	for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) {
		if (!(slave->ndev && netif_running(slave->ndev)))
			continue;

		netif_tx_stop_all_queues(slave->ndev);
		netif_dormant_on(slave->ndev);
	}

	/* Handle rest of tx packets and stop cpdma channels */
	cpdma_ctlr_stop(cpsw->dma);
}

static int cpsw_resume_data_pass(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	int i, ret;

	/* Allow rx packets handling */
	for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++)
		if (slave->ndev && netif_running(slave->ndev))
			netif_dormant_off(slave->ndev);

	/* After this receive is started */
	if (cpsw->usage_count) {
		ret = cpsw_fill_rx_channels(priv);
		if (ret)
			return ret;

		cpdma_ctlr_start(cpsw->dma);
		cpsw_intr_enable(cpsw);
	}

	/* Resume transmit for every affected interface */
	for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++)
		if (slave->ndev && netif_running(slave->ndev))
			netif_tx_start_all_queues(slave->ndev);

	return 0;
}

static int cpsw_set_channels(struct net_device *ndev,
			     struct ethtool_channels *chs)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	int i, ret;

	ret = cpsw_check_ch_settings(cpsw, chs);
	if (ret < 0)
		return ret;

	cpsw_suspend_data_pass(ndev);
	ret = cpsw_update_channels(priv, chs);
	if (ret)
		goto err;

	for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) {
		if (!(slave->ndev && netif_running(slave->ndev)))
			continue;

		/* Inform stack about new count of queues */
		ret = netif_set_real_num_tx_queues(slave->ndev,
						   cpsw->tx_ch_num);
		if (ret) {
			dev_err(priv->dev, "cannot set real number of tx queues\n");
			goto err;
		}

		ret = netif_set_real_num_rx_queues(slave->ndev,
						   cpsw->rx_ch_num);
		if (ret) {
			dev_err(priv->dev, "cannot set real number of rx queues\n");
			goto err;
		}
	}

	if (cpsw->usage_count)
		cpsw_split_res(ndev);

	ret = cpsw_resume_data_pass(ndev);
	if (!ret)
		return 0;
err:
	dev_err(priv->dev, "cannot update channels number, closing device\n");
	dev_close(ndev);
	return ret;
}

static int cpsw_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);

	if (cpsw->slaves[slave_no].phy)
		return phy_ethtool_get_eee(cpsw->slaves[slave_no].phy, edata);
	else
		return -EOPNOTSUPP;
}

static int cpsw_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);

	if (cpsw->slaves[slave_no].phy)
		return phy_ethtool_set_eee(cpsw->slaves[slave_no].phy, edata);
	else
		return -EOPNOTSUPP;
}

static int cpsw_nway_reset(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);

	if (cpsw->slaves[slave_no].phy)
		return genphy_restart_aneg(cpsw->slaves[slave_no].phy);
	else
		return -EOPNOTSUPP;
}

static void cpsw_get_ringparam(struct net_device *ndev,
			       struct ethtool_ringparam *ering)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;

	/* not supported */
	ering->tx_max_pending = 0;
	ering->tx_pending = cpdma_get_num_tx_descs(cpsw->dma);
	ering->rx_max_pending = descs_pool_size - CPSW_MAX_QUEUES;
	ering->rx_pending = cpdma_get_num_rx_descs(cpsw->dma);
}

static int cpsw_set_ringparam(struct net_device *ndev,
			      struct ethtool_ringparam *ering)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int ret;

	/* ignore ering->tx_pending - only rx_pending adjustment is supported */

	if (ering->rx_mini_pending || ering->rx_jumbo_pending ||
	    ering->rx_pending < CPSW_MAX_QUEUES ||
	    ering->rx_pending > (descs_pool_size - CPSW_MAX_QUEUES))
		return -EINVAL;
		return -EINVAL;

	if (ering->rx_pending == cpdma_get_num_rx_descs(cpsw->dma))
		return 0;

	cpsw_suspend_data_pass(ndev);

	cpdma_set_num_rx_descs(cpsw->dma, ering->rx_pending);

	if (cpsw->usage_count)
		cpdma_chan_split_pool(cpsw->dma);

	ret = cpsw_resume_data_pass(ndev);
	if (!ret)
		return 0;

	dev_err(&ndev->dev, "cannot set ring params, closing device\n");
	dev_close(ndev);
	return ret;
}

static const struct ethtool_ops cpsw_ethtool_ops = {
	.get_drvinfo	= cpsw_get_drvinfo,
	.get_msglevel	= cpsw_get_msglevel,
	.set_msglevel	= cpsw_set_msglevel,
	.get_link	= ethtool_op_get_link,
	.get_ts_info	= cpsw_get_ts_info,
	.get_coalesce	= cpsw_get_coalesce,
	.set_coalesce	= cpsw_set_coalesce,
	.get_sset_count		= cpsw_get_sset_count,
	.get_strings		= cpsw_get_strings,
	.get_ethtool_stats	= cpsw_get_ethtool_stats,
	.get_pauseparam		= cpsw_get_pauseparam,
	.set_pauseparam		= cpsw_set_pauseparam,
	.get_wol	= cpsw_get_wol,
	.set_wol	= cpsw_set_wol,
	.get_regs_len	= cpsw_get_regs_len,
	.get_regs	= cpsw_get_regs,
	.begin		= cpsw_ethtool_op_begin,
	.complete	= cpsw_ethtool_op_complete,
	.get_channels	= cpsw_get_channels,
	.set_channels	= cpsw_set_channels,
	.get_link_ksettings	= cpsw_get_link_ksettings,
	.set_link_ksettings	= cpsw_set_link_ksettings,
	.get_eee	= cpsw_get_eee,
	.set_eee	= cpsw_set_eee,
	.nway_reset	= cpsw_nway_reset,
	.get_ringparam = cpsw_get_ringparam,
	.set_ringparam = cpsw_set_ringparam,
};

static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_common *cpsw,
			    u32 slave_reg_ofs, u32 sliver_reg_ofs)
{
	void __iomem		*regs = cpsw->regs;
	int			slave_num = slave->slave_num;
	struct cpsw_slave_data	*data = cpsw->data.slave_data + slave_num;

	slave->data	= data;
	slave->regs	= regs + slave_reg_ofs;
	slave->sliver	= regs + sliver_reg_ofs;
	slave->port_vlan = data->dual_emac_res_vlan;
}

static int cpsw_probe_dt(struct cpsw_platform_data *data,
			 struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct device_node *slave_node;
	int i = 0, ret;
	u32 prop;

	if (!node)
		return -EINVAL;

	if (of_property_read_u32(node, "slaves", &prop)) {
		dev_err(&pdev->dev, "Missing slaves property in the DT.\n");
		return -EINVAL;
	}
	data->slaves = prop;

	if (of_property_read_u32(node, "active_slave", &prop)) {
		dev_err(&pdev->dev, "Missing active_slave property in the DT.\n");
		return -EINVAL;
	}
	data->active_slave = prop;

	data->slave_data = devm_kcalloc(&pdev->dev,
					data->slaves,
					sizeof(struct cpsw_slave_data),
					GFP_KERNEL);
	if (!data->slave_data)
		return -ENOMEM;

	if (of_property_read_u32(node, "cpdma_channels", &prop)) {
		dev_err(&pdev->dev, "Missing cpdma_channels property in the DT.\n");
		return -EINVAL;
	}
	data->channels = prop;

	if (of_property_read_u32(node, "ale_entries", &prop)) {
		dev_err(&pdev->dev, "Missing ale_entries property in the DT.\n");
		return -EINVAL;
	}
	data->ale_entries = prop;

	if (of_property_read_u32(node, "bd_ram_size", &prop)) {
		dev_err(&pdev->dev, "Missing bd_ram_size property in the DT.\n");
		return -EINVAL;
	}
	data->bd_ram_size = prop;

	if (of_property_read_u32(node, "mac_control", &prop)) {
		dev_err(&pdev->dev, "Missing mac_control property in the DT.\n");
		return -EINVAL;
	}
	data->mac_control = prop;

	if (of_property_read_bool(node, "dual_emac"))
		data->dual_emac = 1;

	/*
	 * Populate all the child nodes here...
	 */
	ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
	/* We do not want to force this, as some devices may not have any
	 * child nodes.
	 */
	if (ret)
		dev_warn(&pdev->dev, "Doesn't have any child node\n");

	for_each_available_child_of_node(node, slave_node) {
		struct cpsw_slave_data *slave_data = data->slave_data + i;
		const void *mac_addr = NULL;
		int lenp;
		const __be32 *parp;

		/* Not a slave child node, continue */
		if (!of_node_name_eq(slave_node, "slave"))
			continue;

		slave_data->ifphy = devm_of_phy_get(&pdev->dev, slave_node,
						    NULL);
		if (!IS_ENABLED(CONFIG_TI_CPSW_PHY_SEL) &&
		    IS_ERR(slave_data->ifphy)) {
			ret = PTR_ERR(slave_data->ifphy);
			dev_err(&pdev->dev,
				"%d: Error retrieving port phy: %d\n", i, ret);
			return ret;
		}

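		/* PHY binding precedence: "phy-handle", then a fixed-link
		 * subnode, then the legacy "phy_id" property.
		 */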
		slave_data->phy_node = of_parse_phandle(slave_node,
							"phy-handle", 0);
		parp = of_get_property(slave_node, "phy_id", &lenp);
		if (slave_data->phy_node) {
			dev_dbg(&pdev->dev,
				"slave[%d] using phy-handle=\"%pOF\"\n",
				i, slave_data->phy_node);
		} else if (of_phy_is_fixed_link(slave_node)) {
			/* In the case of a fixed PHY, the DT node associated
			 * to the PHY is the Ethernet MAC DT node.
			 */
			ret = of_phy_register_fixed_link(slave_node);
			if (ret) {
				if (ret != -EPROBE_DEFER)
					dev_err(&pdev->dev, "failed to register fixed-link phy: %d\n", ret);
				return ret;
			}
			slave_data->phy_node = of_node_get(slave_node);
		} else if (parp) {
			u32 phyid;
			struct device_node *mdio_node;
			struct platform_device *mdio;

			if (lenp != (sizeof(__be32) * 2)) {
				dev_err(&pdev->dev, "Invalid slave[%d] phy_id property\n", i);
				goto no_phy_slave;
			}
			mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
			phyid = be32_to_cpup(parp+1);
			mdio = of_find_device_by_node(mdio_node);
			of_node_put(mdio_node);
			if (!mdio) {
				dev_err(&pdev->dev, "Missing mdio platform device\n");
				return -EINVAL;
			}
			snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
				 PHY_ID_FMT, mdio->name, phyid);
			put_device(&mdio->dev);
		} else {
			dev_err(&pdev->dev,
				"No slave[%d] phy_id, phy-handle, or fixed-link property\n",
				i);
			goto no_phy_slave;
		}
		slave_data->phy_if = of_get_phy_mode(slave_node);
		if (slave_data->phy_if < 0) {
			dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n",
				i);
			return slave_data->phy_if;
		}

no_phy_slave:
		mac_addr = of_get_mac_address(slave_node);
		if (mac_addr) {
			memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);
		} else {
			ret = ti_cm_get_macid(&pdev->dev, i,
					      slave_data->mac_addr);
			if (ret)
				return ret;
		}
		if (data->dual_emac) {
			if (of_property_read_u32(slave_node, "dual_emac_res_vlan",
						 &prop)) {
				dev_err(&pdev->dev, "Missing dual_emac_res_vlan in DT.\n");
				slave_data->dual_emac_res_vlan = i+1;
				dev_err(&pdev->dev, "Using %d as Reserved VLAN for %d slave\n",
					slave_data->dual_emac_res_vlan, i);
			} else {
				slave_data->dual_emac_res_vlan = prop;
			}
		}

		i++;
		if (i == data->slaves)
			break;
	}

	return 0;
}

static void cpsw_remove_dt(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	struct cpsw_platform_data *data = &cpsw->data;
	struct device_node *node = pdev->dev.of_node;
	struct device_node *slave_node;
	int i = 0;

	for_each_available_child_of_node(node, slave_node) {
		struct cpsw_slave_data *slave_data = &data->slave_data[i];

		if (!of_node_name_eq(slave_node, "slave"))
			continue;

		if (of_phy_is_fixed_link(slave_node))
			of_phy_deregister_fixed_link(slave_node);

		of_node_put(slave_data->phy_node);

		i++;
		if (i == data->slaves)
			break;
	}

	of_platform_depopulate(&pdev->dev);
}

static int cpsw_probe_dual_emac(struct cpsw_priv *priv)
{
	struct cpsw_common		*cpsw = priv->cpsw;
	struct cpsw_platform_data	*data = &cpsw->data;
	struct net_device		*ndev;
	struct cpsw_priv		*priv_sl2;
	int ret = 0;

	ndev = alloc_etherdev_mq(sizeof(struct cpsw_priv), CPSW_MAX_QUEUES);
	if (!ndev) {
		dev_err(cpsw->dev, "cpsw: error allocating net_device\n");
		return -ENOMEM;
	}

	priv_sl2 = netdev_priv(ndev);
	priv_sl2->cpsw = cpsw;
	priv_sl2->ndev = ndev;
	priv_sl2->dev  = &ndev->dev;
	priv_sl2->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);

	if (is_valid_ether_addr(data->slave_data[1].mac_addr)) {
		memcpy(priv_sl2->mac_addr, data->slave_data[1].mac_addr,
			ETH_ALEN);
		dev_info(cpsw->dev, "cpsw: Detected MACID = %pM\n",
			 priv_sl2->mac_addr);
	} else {
		eth_random_addr(priv_sl2->mac_addr);
		dev_info(cpsw->dev, "cpsw: Random MACID = %pM\n",
			 priv_sl2->mac_addr);
	}
	memcpy(ndev->dev_addr, priv_sl2->mac_addr, ETH_ALEN);

	priv_sl2->emac_port = 1;
	cpsw->slaves[1].ndev = ndev;
	ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;

	ndev->netdev_ops = &cpsw_netdev_ops;
	ndev->ethtool_ops = &cpsw_ethtool_ops;

	/* register the network device */
	SET_NETDEV_DEV(ndev, cpsw->dev);
	ret = register_netdev(ndev);
	if (ret) {
		dev_err(cpsw->dev, "cpsw: error registering net device\n");
		free_netdev(ndev);
		ret = -ENODEV;
	}

	return ret;
}

static const struct of_device_id cpsw_of_mtable[] = {
	{ .compatible = "ti,cpsw"},
	{ .compatible = "ti,am335x-cpsw"},
	{ .compatible = "ti,am4372-cpsw"},
	{ .compatible = "ti,dra7-cpsw"},
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, cpsw_of_mtable);

static const struct soc_device_attribute cpsw_soc_devices[] = {
	{ .family = "AM33xx", .revision = "ES1.0"},
	{ /* sentinel */ }
};

static int cpsw_probe(struct platform_device *pdev)
{
	struct clk			*clk;
	struct cpsw_platform_data	*data;
	struct net_device		*ndev;
	struct cpsw_priv		*priv;
	struct cpdma_params		dma_params;
	struct cpsw_ale_params		ale_params;
	void __iomem			*ss_regs;
	void __iomem			*cpts_regs;
	struct resource			*res, *ss_res;
	struct gpio_descs		*mode;
	u32 slave_offset, sliver_offset, slave_size;
	const struct soc_device_attribute *soc;
	struct cpsw_common		*cpsw;
	int ret = 0, i, ch;
	int irq;

	cpsw = devm_kzalloc(&pdev->dev, sizeof(struct cpsw_common), GFP_KERNEL);
	if (!cpsw)
		return -ENOMEM;

	cpsw->dev = &pdev->dev;

	ndev = alloc_etherdev_mq(sizeof(struct cpsw_priv), CPSW_MAX_QUEUES);
	if (!ndev) {
		dev_err(&pdev->dev, "error allocating net_device\n");
		return -ENOMEM;
	}

	platform_set_drvdata(pdev, ndev);
	priv = netdev_priv(ndev);
	priv->cpsw = cpsw;
	priv->ndev = ndev;
	priv->dev  = &ndev->dev;
	priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
	cpsw->rx_packet_max = max(rx_packet_max, 128);

	mode = devm_gpiod_get_array_optional(&pdev->dev, "mode", GPIOD_OUT_LOW);
	if (IS_ERR(mode)) {
		ret = PTR_ERR(mode);
		dev_err(&pdev->dev, "gpio request failed, ret %d\n", ret);
		goto clean_ndev_ret;
	}

	/*
	 * This may be required here for child devices.
	 */
	pm_runtime_enable(&pdev->dev);

	/* Select default pin state */
	pinctrl_pm_select_default_state(&pdev->dev);

	/* Need to enable clocks with runtime PM api to access module
	 * registers
	 */
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(&pdev->dev);
		goto clean_runtime_disable_ret;
	}

	ret = cpsw_probe_dt(&cpsw->data, pdev);
	if (ret)
		goto clean_dt_ret;

	data = &cpsw->data;
	cpsw->rx_ch_num = 1;
	cpsw->tx_ch_num = 1;

	if (is_valid_ether_addr(data->slave_data[0].mac_addr)) {
		memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN);
		dev_info(&pdev->dev, "Detected MACID = %pM\n", priv->mac_addr);
	} else {
		eth_random_addr(priv->mac_addr);
		dev_info(&pdev->dev, "Random MACID = %pM\n", priv->mac_addr);
	}

	memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);

	cpsw->slaves = devm_kcalloc(&pdev->dev,
				    data->slaves, sizeof(struct cpsw_slave),
				    GFP_KERNEL);
	if (!cpsw->slaves) {
		ret = -ENOMEM;
		goto clean_dt_ret;
	}
	for (i = 0; i < data->slaves; i++)
		cpsw->slaves[i].slave_num = i;

	cpsw->slaves[0].ndev = ndev;
	priv->emac_port = 0;

	clk = devm_clk_get(&pdev->dev, "fck");
	if (IS_ERR(clk)) {
		dev_err(priv->dev, "fck is not found\n");
		ret = -ENODEV;
		goto clean_dt_ret;
	}
	cpsw->bus_freq_mhz = clk_get_rate(clk) / 1000000;

	ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ss_regs = devm_ioremap_resource(&pdev->dev, ss_res);
	if (IS_ERR(ss_regs)) {
		ret = PTR_ERR(ss_regs);
		goto clean_dt_ret;
	}
	cpsw->regs = ss_regs;

	cpsw->version = readl(&cpsw->regs->id_ver);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	cpsw->wr_regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(cpsw->wr_regs)) {
		ret = PTR_ERR(cpsw->wr_regs);
		goto clean_dt_ret;
	}

	memset(&dma_params, 0, sizeof(dma_params));
	memset(&ale_params, 0, sizeof(ale_params));

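	/* The subsystem register layout differs between CPSW v1 and v2+:
	 * pick per-version offsets for host port, CPTS, stats, DMA and ALE.
	 */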
	switch (cpsw->version) {
	case CPSW_VERSION_1:
		cpsw->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET;
		cpts_regs		= ss_regs + CPSW1_CPTS_OFFSET;
		cpsw->hw_stats	     = ss_regs + CPSW1_HW_STATS;
		dma_params.dmaregs   = ss_regs + CPSW1_CPDMA_OFFSET;
		dma_params.txhdp     = ss_regs + CPSW1_STATERAM_OFFSET;
		ale_params.ale_regs  = ss_regs + CPSW1_ALE_OFFSET;
		slave_offset         = CPSW1_SLAVE_OFFSET;
		slave_size           = CPSW1_SLAVE_SIZE;
		sliver_offset        = CPSW1_SLIVER_OFFSET;
		dma_params.desc_mem_phys = 0;
		break;
	case CPSW_VERSION_2:
	case CPSW_VERSION_3:
	case CPSW_VERSION_4:
		cpsw->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET;
		cpts_regs		= ss_regs + CPSW2_CPTS_OFFSET;
		cpsw->hw_stats	     = ss_regs + CPSW2_HW_STATS;
		dma_params.dmaregs   = ss_regs + CPSW2_CPDMA_OFFSET;
		dma_params.txhdp     = ss_regs + CPSW2_STATERAM_OFFSET;
		ale_params.ale_regs  = ss_regs + CPSW2_ALE_OFFSET;
		slave_offset         = CPSW2_SLAVE_OFFSET;
		slave_size           = CPSW2_SLAVE_SIZE;
		sliver_offset        = CPSW2_SLIVER_OFFSET;
		dma_params.desc_mem_phys =
			(u32 __force) ss_res->start + CPSW2_BD_OFFSET;
		break;
	default:
		dev_err(priv->dev, "unknown version 0x%08x\n", cpsw->version);
		ret = -ENODEV;
		goto clean_dt_ret;
	}
	for (i = 0; i < cpsw->data.slaves; i++) {
		struct cpsw_slave *slave = &cpsw->slaves[i];

		cpsw_slave_init(slave, cpsw, slave_offset, sliver_offset);
		slave_offset  += slave_size;
		sliver_offset += SLIVER_SIZE;
	}

	dma_params.dev		= &pdev->dev;
	dma_params.rxthresh	= dma_params.dmaregs + CPDMA_RXTHRESH;
	dma_params.rxfree	= dma_params.dmaregs + CPDMA_RXFREE;
	dma_params.rxhdp	= dma_params.txhdp + CPDMA_RXHDP;
	dma_params.txcp		= dma_params.txhdp + CPDMA_TXCP;
	dma_params.rxcp		= dma_params.txhdp + CPDMA_RXCP;

	dma_params.num_chan		= data->channels;
	dma_params.has_soft_reset	= true;
	dma_params.min_packet_size	= CPSW_MIN_PACKET_SIZE;
	dma_params.desc_mem_size	= data->bd_ram_size;
	dma_params.desc_align		= 16;
	dma_params.has_ext_regs		= true;
	dma_params.desc_hw_addr         = dma_params.desc_mem_phys;
	dma_params.bus_freq_mhz		= cpsw->bus_freq_mhz;
	dma_params.descs_pool_size	= descs_pool_size;

	cpsw->dma = cpdma_ctlr_create(&dma_params);
	if (!cpsw->dma) {
		dev_err(priv->dev, "error initializing dma\n");
		ret = -ENOMEM;
		goto clean_dt_ret;
	}

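	/* SoCs matched by cpsw_soc_devices have an RX/TX interrupt quirk;
	 * quirk_irq makes the driver fall back to the single-queue NAPI
	 * handlers and TX channel 0 (instead of 7) below.
	 */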
	soc = soc_device_match(cpsw_soc_devices);
	if (soc)
		cpsw->quirk_irq = 1;

	ch = cpsw->quirk_irq ? 0 : 7;
	cpsw->txv[0].ch = cpdma_chan_create(cpsw->dma, ch, cpsw_tx_handler, 0);
	if (IS_ERR(cpsw->txv[0].ch)) {
		dev_err(priv->dev, "error initializing tx dma channel\n");
		ret = PTR_ERR(cpsw->txv[0].ch);
		goto clean_dma_ret;
	}

	cpsw->rxv[0].ch = cpdma_chan_create(cpsw->dma, 0, cpsw_rx_handler, 1);
	if (IS_ERR(cpsw->rxv[0].ch)) {
		dev_err(priv->dev, "error initializing rx dma channel\n");
		ret = PTR_ERR(cpsw->rxv[0].ch);
		goto clean_dma_ret;
	}

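	/* The ALE (address lookup engine) makes the switch's L2 forwarding
	 * decisions; its table size and ageout interval are configurable.
	 */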
	ale_params.dev			= &pdev->dev;
	ale_params.ale_ageout		= ale_ageout;
	ale_params.ale_entries		= data->ale_entries;
	ale_params.ale_ports		= CPSW_ALE_PORTS_NUM;

	cpsw->ale = cpsw_ale_create(&ale_params);
	if (!cpsw->ale) {
		dev_err(priv->dev, "error initializing ale engine\n");
		ret = -ENODEV;
		goto clean_dma_ret;
	}

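	/* CPTS is the Common Platform Time Sync module, used for hardware
	 * packet timestamping (e.g. PTP).
	 */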
	cpsw->cpts = cpts_create(cpsw->dev, cpts_regs, cpsw->dev->of_node);
	if (IS_ERR(cpsw->cpts)) {
		ret = PTR_ERR(cpsw->cpts);
		goto clean_dma_ret;
	}

	ndev->irq = platform_get_irq(pdev, 1);
	if (ndev->irq < 0) {
		dev_err(priv->dev, "error getting irq resource\n");
		ret = ndev->irq;
		goto clean_dma_ret;
	}

	ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;

	ndev->netdev_ops = &cpsw_netdev_ops;
	ndev->ethtool_ops = &cpsw_ethtool_ops;
	netif_napi_add(ndev, &cpsw->napi_rx,
		       cpsw->quirk_irq ? cpsw_rx_poll : cpsw_rx_mq_poll,
		       CPSW_POLL_WEIGHT);
	netif_tx_napi_add(ndev, &cpsw->napi_tx,
			  cpsw->quirk_irq ? cpsw_tx_poll : cpsw_tx_mq_poll,
			  CPSW_POLL_WEIGHT);
	cpsw_split_res(ndev);

	/* register the network device */
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ret = register_netdev(ndev);
	if (ret) {
		dev_err(priv->dev, "error registering net device\n");
		ret = -ENODEV;
		goto clean_dma_ret;
	}
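
	/* The netdev is live from here on, so any failure below must unwind
	 * through clean_unregister_netdev_ret.
	 */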

	if (cpsw->data.dual_emac) {
		ret = cpsw_probe_dual_emac(priv);
		if (ret) {
			cpsw_err(priv, probe, "error probing slave 2 emac interface\n");
			goto clean_unregister_netdev_ret;
		}
	}

	/* Grab RX and TX IRQs. Note that we also have RX_THRESHOLD and
	 * MISC IRQs which are always kept disabled with this driver so
	 * we will not request them.
	 *
	 * If anyone wants to implement support for those, make sure to
	 * first request and append them to irqs_table array.
	 */

	/* RX IRQ */
	irq = platform_get_irq(pdev, 1);
	if (irq < 0) {
		ret = irq;
		goto clean_unregister_netdev_ret;
	}

	cpsw->irqs_table[0] = irq;
	ret = devm_request_irq(&pdev->dev, irq, cpsw_rx_interrupt,
			       0, dev_name(&pdev->dev), cpsw);
	if (ret < 0) {
		dev_err(priv->dev, "error attaching irq (%d)\n", ret);
		goto clean_unregister_netdev_ret;
	}

	/* TX IRQ */
	irq = platform_get_irq(pdev, 2);
	if (irq < 0) {
		ret = irq;
		goto clean_unregister_netdev_ret;
	}

	cpsw->irqs_table[1] = irq;
	ret = devm_request_irq(&pdev->dev, irq, cpsw_tx_interrupt,
			       0, dev_name(&pdev->dev), cpsw);
	if (ret < 0) {
		dev_err(priv->dev, "error attaching irq (%d)\n", ret);
		goto clean_unregister_netdev_ret;
	}

	cpsw_notice(priv, probe,
		    "initialized device (regs %pa, irq %d, pool size %d)\n",
		    &ss_res->start, ndev->irq, dma_params.descs_pool_size);

	pm_runtime_put(&pdev->dev);

	return 0;

clean_unregister_netdev_ret:
	unregister_netdev(ndev);
clean_dma_ret:
	cpdma_ctlr_destroy(cpsw->dma);
clean_dt_ret:
	cpsw_remove_dt(pdev);
	pm_runtime_put_sync(&pdev->dev);
clean_runtime_disable_ret:
	pm_runtime_disable(&pdev->dev);
clean_ndev_ret:
	free_netdev(priv->ndev);
	return ret;
}

static int cpsw_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	int ret;

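	/* Resume the hardware before tearing it down so the register
	 * accesses in cpts_release() and cpdma_ctlr_destroy() are safe.
	 */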
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(&pdev->dev);
		return ret;
	}

	if (cpsw->data.dual_emac)
		unregister_netdev(cpsw->slaves[1].ndev);
	unregister_netdev(ndev);

	cpts_release(cpsw->cpts);
	cpdma_ctlr_destroy(cpsw->dma);
	cpsw_remove_dt(pdev);
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	if (cpsw->data.dual_emac)
		free_netdev(cpsw->slaves[1].ndev);
	free_netdev(ndev);
	return 0;
}

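/* System sleep: close every interface that is up on suspend and re-open it
 * on resume; the pinctrl calls move the pin group between its sleep and
 * default states.
 */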
#ifdef CONFIG_PM_SLEEP
static int cpsw_suspend(struct device *dev)
{
	struct net_device	*ndev = dev_get_drvdata(dev);
	struct cpsw_common	*cpsw = ndev_to_cpsw(ndev);

	if (cpsw->data.dual_emac) {
		int i;

		for (i = 0; i < cpsw->data.slaves; i++) {
			if (netif_running(cpsw->slaves[i].ndev))
				cpsw_ndo_stop(cpsw->slaves[i].ndev);
		}
	} else {
		if (netif_running(ndev))
			cpsw_ndo_stop(ndev);
	}

	/* Select sleep pin state */
	pinctrl_pm_select_sleep_state(dev);

	return 0;
}

static int cpsw_resume(struct device *dev)
{
	struct net_device	*ndev = dev_get_drvdata(dev);
	struct cpsw_common	*cpsw = ndev_to_cpsw(ndev);

	/* Select default pin state */
	pinctrl_pm_select_default_state(dev);

	/* shut up ASSERT_RTNL() warning in netif_set_real_num_tx/rx_queues */
	rtnl_lock();
	if (cpsw->data.dual_emac) {
		int i;

		for (i = 0; i < cpsw->data.slaves; i++) {
			if (netif_running(cpsw->slaves[i].ndev))
				cpsw_ndo_open(cpsw->slaves[i].ndev);
		}
	} else {
		if (netif_running(ndev))
			cpsw_ndo_open(ndev);
	}
	rtnl_unlock();

	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(cpsw_pm_ops, cpsw_suspend, cpsw_resume);

static struct platform_driver cpsw_driver = {
	.driver = {
		.name	 = "cpsw",
		.pm	 = &cpsw_pm_ops,
		.of_match_table = cpsw_of_mtable,
	},
	.probe = cpsw_probe,
	.remove = cpsw_remove,
};

module_platform_driver(cpsw_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cyril Chemparathy <cyril@ti.com>");
MODULE_AUTHOR("Mugunthan V N <mugunthanvnm@ti.com>");
MODULE_DESCRIPTION("TI CPSW Ethernet driver");