/*
 * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs.
 *
 * Copyright (C) 2012 Marvell
 *
 * Rami Rosen <rosenr@marvell.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/inetdevice.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/phy.h>
#include <linux/phylink.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <net/hwbm.h>
#include "mvneta_bm.h"
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tso.h>

/* Registers */
#define MVNETA_RXQ_CONFIG_REG(q)                (0x1400 + ((q) << 2))
#define      MVNETA_RXQ_HW_BUF_ALLOC            BIT(0)
#define      MVNETA_RXQ_SHORT_POOL_ID_SHIFT	4
#define      MVNETA_RXQ_SHORT_POOL_ID_MASK	0x30
#define      MVNETA_RXQ_LONG_POOL_ID_SHIFT	6
#define      MVNETA_RXQ_LONG_POOL_ID_MASK	0xc0
#define      MVNETA_RXQ_PKT_OFFSET_ALL_MASK     (0xf    << 8)
#define      MVNETA_RXQ_PKT_OFFSET_MASK(offs)   ((offs) << 8)
#define MVNETA_RXQ_THRESHOLD_REG(q)             (0x14c0 + ((q) << 2))
#define      MVNETA_RXQ_NON_OCCUPIED(v)         ((v) << 16)
#define MVNETA_RXQ_BASE_ADDR_REG(q)             (0x1480 + ((q) << 2))
#define MVNETA_RXQ_SIZE_REG(q)                  (0x14a0 + ((q) << 2))
#define      MVNETA_RXQ_BUF_SIZE_SHIFT          19
#define      MVNETA_RXQ_BUF_SIZE_MASK           (0x1fff << 19)
#define MVNETA_RXQ_STATUS_REG(q)                (0x14e0 + ((q) << 2))
#define      MVNETA_RXQ_OCCUPIED_ALL_MASK       0x3fff
#define MVNETA_RXQ_STATUS_UPDATE_REG(q)         (0x1500 + ((q) << 2))
#define      MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT  16
#define      MVNETA_RXQ_ADD_NON_OCCUPIED_MAX    255
#define MVNETA_PORT_POOL_BUFFER_SZ_REG(pool)	(0x1700 + ((pool) << 2))
#define      MVNETA_PORT_POOL_BUFFER_SZ_SHIFT	3
#define      MVNETA_PORT_POOL_BUFFER_SZ_MASK	0xfff8
#define MVNETA_PORT_RX_RESET                    0x1cc0
#define      MVNETA_PORT_RX_DMA_RESET           BIT(0)
#define MVNETA_PHY_ADDR                         0x2000
#define      MVNETA_PHY_ADDR_MASK               0x1f
#define MVNETA_MBUS_RETRY                       0x2010
#define MVNETA_UNIT_INTR_CAUSE                  0x2080
#define MVNETA_UNIT_CONTROL                     0x20B0
#define      MVNETA_PHY_POLLING_ENABLE          BIT(1)
#define MVNETA_WIN_BASE(w)                      (0x2200 + ((w) << 3))
#define MVNETA_WIN_SIZE(w)                      (0x2204 + ((w) << 3))
#define MVNETA_WIN_REMAP(w)                     (0x2280 + ((w) << 2))
#define MVNETA_BASE_ADDR_ENABLE                 0x2290
#define MVNETA_ACCESS_PROTECT_ENABLE            0x2294
#define MVNETA_PORT_CONFIG                      0x2400
#define      MVNETA_UNI_PROMISC_MODE            BIT(0)
#define      MVNETA_DEF_RXQ(q)                  ((q) << 1)
#define      MVNETA_DEF_RXQ_ARP(q)              ((q) << 4)
#define      MVNETA_TX_UNSET_ERR_SUM            BIT(12)
#define      MVNETA_DEF_RXQ_TCP(q)              ((q) << 16)
#define      MVNETA_DEF_RXQ_UDP(q)              ((q) << 19)
#define      MVNETA_DEF_RXQ_BPDU(q)             ((q) << 22)
#define      MVNETA_RX_CSUM_WITH_PSEUDO_HDR     BIT(25)
#define      MVNETA_PORT_CONFIG_DEFL_VALUE(q)   (MVNETA_DEF_RXQ(q)       | \
						 MVNETA_DEF_RXQ_ARP(q)	 | \
						 MVNETA_DEF_RXQ_TCP(q)	 | \
						 MVNETA_DEF_RXQ_UDP(q)	 | \
						 MVNETA_DEF_RXQ_BPDU(q)	 | \
						 MVNETA_TX_UNSET_ERR_SUM | \
						 MVNETA_RX_CSUM_WITH_PSEUDO_HDR)
#define MVNETA_PORT_CONFIG_EXTEND                0x2404
#define MVNETA_MAC_ADDR_LOW                      0x2414
#define MVNETA_MAC_ADDR_HIGH                     0x2418
#define MVNETA_SDMA_CONFIG                       0x241c
#define      MVNETA_SDMA_BRST_SIZE_16            4
#define      MVNETA_RX_BRST_SZ_MASK(burst)       ((burst) << 1)
#define      MVNETA_RX_NO_DATA_SWAP              BIT(4)
#define      MVNETA_TX_NO_DATA_SWAP              BIT(5)
#define      MVNETA_DESC_SWAP                    BIT(6)
#define      MVNETA_TX_BRST_SZ_MASK(burst)       ((burst) << 22)
#define MVNETA_PORT_STATUS                       0x2444
#define      MVNETA_TX_IN_PRGRS                  BIT(1)
#define      MVNETA_TX_FIFO_EMPTY                BIT(8)
#define MVNETA_RX_MIN_FRAME_SIZE                 0x247c
#define MVNETA_SERDES_CFG			 0x24A0
#define      MVNETA_SGMII_SERDES_PROTO		 0x0cc7
#define      MVNETA_QSGMII_SERDES_PROTO		 0x0667
#define MVNETA_TYPE_PRIO                         0x24bc
#define      MVNETA_FORCE_UNI                    BIT(21)
#define MVNETA_TXQ_CMD_1                         0x24e4
#define MVNETA_TXQ_CMD                           0x2448
#define      MVNETA_TXQ_DISABLE_SHIFT            8
#define      MVNETA_TXQ_ENABLE_MASK              0x000000ff
#define MVNETA_RX_DISCARD_FRAME_COUNT		 0x2484
#define MVNETA_OVERRUN_FRAME_COUNT		 0x2488
#define MVNETA_GMAC_CLOCK_DIVIDER                0x24f4
#define      MVNETA_GMAC_1MS_CLOCK_ENABLE        BIT(31)
#define MVNETA_ACC_MODE                          0x2500
#define MVNETA_BM_ADDRESS                        0x2504
#define MVNETA_CPU_MAP(cpu)                      (0x2540 + ((cpu) << 2))
#define      MVNETA_CPU_RXQ_ACCESS_ALL_MASK      0x000000ff
#define      MVNETA_CPU_TXQ_ACCESS_ALL_MASK      0x0000ff00
#define      MVNETA_CPU_RXQ_ACCESS(rxq)		 BIT(rxq)
#define      MVNETA_CPU_TXQ_ACCESS(txq)		 BIT(txq + 8)
#define MVNETA_RXQ_TIME_COAL_REG(q)              (0x2580 + ((q) << 2))

/* Exception Interrupt Port/Queue Cause register
 *
 * Their behavior depends on the mapping done using the PCPX2Q
 * registers. For a given CPU, if the bit associated with a queue is not
 * set, then a read of the register from this CPU will always return
 * 0 and a write won't do anything.
 */

#define MVNETA_INTR_NEW_CAUSE                    0x25a0
#define MVNETA_INTR_NEW_MASK                     0x25a4

/* bits  0..7  = TXQ SENT, one bit per queue.
 * bits  8..15 = RXQ OCCUP, one bit per queue.
 * bits 16..23 = RXQ FREE, one bit per queue.
 * bit  29 = OLD_REG_SUM, see old reg ?
 * bit  30 = TX_ERR_SUM, one bit for 4 ports
 * bit  31 = MISC_SUM,   one bit for 4 ports
 */
#define      MVNETA_TX_INTR_MASK(nr_txqs)        (((1 << nr_txqs) - 1) << 0)
#define      MVNETA_TX_INTR_MASK_ALL             (0xff << 0)
#define      MVNETA_RX_INTR_MASK(nr_rxqs)        (((1 << nr_rxqs) - 1) << 8)
#define      MVNETA_RX_INTR_MASK_ALL             (0xff << 8)
#define      MVNETA_MISCINTR_INTR_MASK           BIT(31)

#define MVNETA_INTR_OLD_CAUSE                    0x25a8
#define MVNETA_INTR_OLD_MASK                     0x25ac

/* Data Path Port/Queue Cause Register */
#define MVNETA_INTR_MISC_CAUSE                   0x25b0
#define MVNETA_INTR_MISC_MASK                    0x25b4

#define      MVNETA_CAUSE_PHY_STATUS_CHANGE      BIT(0)
#define      MVNETA_CAUSE_LINK_CHANGE            BIT(1)
#define      MVNETA_CAUSE_PTP                    BIT(4)

#define      MVNETA_CAUSE_INTERNAL_ADDR_ERR      BIT(7)
#define      MVNETA_CAUSE_RX_OVERRUN             BIT(8)
#define      MVNETA_CAUSE_RX_CRC_ERROR           BIT(9)
#define      MVNETA_CAUSE_RX_LARGE_PKT           BIT(10)
#define      MVNETA_CAUSE_TX_UNDERUN             BIT(11)
#define      MVNETA_CAUSE_PRBS_ERR               BIT(12)
#define      MVNETA_CAUSE_PSC_SYNC_CHANGE        BIT(13)
#define      MVNETA_CAUSE_SERDES_SYNC_ERR        BIT(14)

#define      MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT    16
#define      MVNETA_CAUSE_BMU_ALLOC_ERR_ALL_MASK   (0xF << MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT)
#define      MVNETA_CAUSE_BMU_ALLOC_ERR_MASK(pool) (1 << (MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT + (pool)))

#define      MVNETA_CAUSE_TXQ_ERROR_SHIFT        24
#define      MVNETA_CAUSE_TXQ_ERROR_ALL_MASK     (0xFF << MVNETA_CAUSE_TXQ_ERROR_SHIFT)
#define      MVNETA_CAUSE_TXQ_ERROR_MASK(q)      (1 << (MVNETA_CAUSE_TXQ_ERROR_SHIFT + (q)))

#define MVNETA_INTR_ENABLE                       0x25b8
#define      MVNETA_TXQ_INTR_ENABLE_ALL_MASK     0x0000ff00
#define      MVNETA_RXQ_INTR_ENABLE_ALL_MASK     0x000000ff

#define MVNETA_RXQ_CMD                           0x2680
#define      MVNETA_RXQ_DISABLE_SHIFT            8
#define      MVNETA_RXQ_ENABLE_MASK              0x000000ff
#define MVETH_TXQ_TOKEN_COUNT_REG(q)             (0x2700 + ((q) << 4))
#define MVETH_TXQ_TOKEN_CFG_REG(q)               (0x2704 + ((q) << 4))
#define MVNETA_GMAC_CTRL_0                       0x2c00
#define      MVNETA_GMAC_MAX_RX_SIZE_SHIFT       2
#define      MVNETA_GMAC_MAX_RX_SIZE_MASK        0x7ffc
#define      MVNETA_GMAC0_PORT_ENABLE            BIT(0)
#define MVNETA_GMAC_CTRL_2                       0x2c08
#define      MVNETA_GMAC2_INBAND_AN_ENABLE       BIT(0)
#define      MVNETA_GMAC2_PCS_ENABLE             BIT(3)
#define      MVNETA_GMAC2_PORT_RGMII             BIT(4)
#define      MVNETA_GMAC2_PORT_RESET             BIT(6)
#define MVNETA_GMAC_STATUS                       0x2c10
#define      MVNETA_GMAC_LINK_UP                 BIT(0)
#define      MVNETA_GMAC_SPEED_1000              BIT(1)
#define      MVNETA_GMAC_SPEED_100               BIT(2)
#define      MVNETA_GMAC_FULL_DUPLEX             BIT(3)
#define      MVNETA_GMAC_RX_FLOW_CTRL_ENABLE     BIT(4)
#define      MVNETA_GMAC_TX_FLOW_CTRL_ENABLE     BIT(5)
#define      MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE     BIT(6)
#define      MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE     BIT(7)
#define      MVNETA_GMAC_AN_COMPLETE             BIT(11)
#define      MVNETA_GMAC_SYNC_OK                 BIT(14)
#define MVNETA_GMAC_AUTONEG_CONFIG               0x2c0c
#define      MVNETA_GMAC_FORCE_LINK_DOWN         BIT(0)
#define      MVNETA_GMAC_FORCE_LINK_PASS         BIT(1)
#define      MVNETA_GMAC_INBAND_AN_ENABLE        BIT(2)
#define      MVNETA_GMAC_CONFIG_MII_SPEED        BIT(5)
#define      MVNETA_GMAC_CONFIG_GMII_SPEED       BIT(6)
#define      MVNETA_GMAC_AN_SPEED_EN             BIT(7)
#define      MVNETA_GMAC_AN_FLOW_CTRL_EN         BIT(11)
#define      MVNETA_GMAC_CONFIG_FULL_DUPLEX      BIT(12)
#define      MVNETA_GMAC_AN_DUPLEX_EN            BIT(13)
#define MVNETA_MIB_COUNTERS_BASE                 0x3000
#define      MVNETA_MIB_LATE_COLLISION           0x7c
#define MVNETA_DA_FILT_SPEC_MCAST                0x3400
#define MVNETA_DA_FILT_OTH_MCAST                 0x3500
#define MVNETA_DA_FILT_UCAST_BASE                0x3600
#define MVNETA_TXQ_BASE_ADDR_REG(q)              (0x3c00 + ((q) << 2))
#define MVNETA_TXQ_SIZE_REG(q)                   (0x3c20 + ((q) << 2))
#define      MVNETA_TXQ_SENT_THRESH_ALL_MASK     0x3fff0000
#define      MVNETA_TXQ_SENT_THRESH_MASK(coal)   ((coal) << 16)
#define MVNETA_TXQ_UPDATE_REG(q)                 (0x3c60 + ((q) << 2))
#define      MVNETA_TXQ_DEC_SENT_SHIFT           16
#define      MVNETA_TXQ_DEC_SENT_MASK            0xff
#define MVNETA_TXQ_STATUS_REG(q)                 (0x3c40 + ((q) << 2))
#define      MVNETA_TXQ_SENT_DESC_SHIFT          16
#define      MVNETA_TXQ_SENT_DESC_MASK           0x3fff0000
#define MVNETA_PORT_TX_RESET                     0x3cf0
#define      MVNETA_PORT_TX_DMA_RESET            BIT(0)
#define MVNETA_TX_MTU                            0x3e0c
#define MVNETA_TX_TOKEN_SIZE                     0x3e14
#define      MVNETA_TX_TOKEN_SIZE_MAX            0xffffffff
#define MVNETA_TXQ_TOKEN_SIZE_REG(q)             (0x3e40 + ((q) << 2))
#define      MVNETA_TXQ_TOKEN_SIZE_MAX           0x7fffffff

#define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK	 0xff

/* Descriptor ring Macros */
#define MVNETA_QUEUE_NEXT_DESC(q, index)	\
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)

/* Various constants */

/* Coalescing */
#define MVNETA_TXDONE_COAL_PKTS		0	/* interrupt per packet */
#define MVNETA_RX_COAL_PKTS		32
#define MVNETA_RX_COAL_USEC		100

/* The two-byte Marvell header. It either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver) or is automatically filled with zeroes on
 * the RX side. Since those two bytes sit at the front of the Ethernet
 * header, they automatically leave the IP header aligned on a 4-byte
 * boundary: the hardware skips those two bytes on its own.
 */
#define MVNETA_MH_SIZE			2

#define MVNETA_VLAN_TAG_LEN             4

#define MVNETA_TX_CSUM_DEF_SIZE		1600
#define MVNETA_TX_CSUM_MAX_SIZE		9800
#define MVNETA_ACC_MODE_EXT1		1
#define MVNETA_ACC_MODE_EXT2		2

#define MVNETA_MAX_DECODE_WIN		6

/* Timeout constants */
#define MVNETA_TX_DISABLE_TIMEOUT_MSEC	1000
#define MVNETA_RX_DISABLE_TIMEOUT_MSEC	1000
#define MVNETA_TX_FIFO_EMPTY_TIMEOUT	10000

#define MVNETA_TX_MTU_MAX		0x3ffff

/* The RSS lookup table actually has 256 entries but we do not use
 * them yet
 */
#define MVNETA_RSS_LU_TABLE_SIZE	1

/* Max number of Rx descriptors */
#define MVNETA_MAX_RXD 128

/* Max number of Tx descriptors */
#define MVNETA_MAX_TXD 532

/* Max number of allowed TCP segments for software TSO */
#define MVNETA_MAX_TSO_SEGS 100

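/* Rough worst-case descriptor budget for one TSO skb: each segment uses
 * one descriptor for its rebuilt header and at least one for payload
 * data, hence the factor of two, plus headroom for the original
 * fragments.
 */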
#define MVNETA_MAX_SKB_DESCS (MVNETA_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)

/* descriptor aligned size */
#define MVNETA_DESC_ALIGNED_SIZE	32

/* Number of bytes to be taken into account by HW when putting incoming data
 * to the buffers. It is needed in case NET_SKB_PAD exceeds maximum packet
 * offset supported in MVNETA_RXQ_CONFIG_REG(q) registers.
 */
#define MVNETA_RX_PKT_OFFSET_CORRECTION		64

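/* Illustration: with the default 1500 byte MTU this works out to
 * 1500 + 2 (Marvell header) + 4 (VLAN tag) + 14 (Ethernet header) +
 * 4 (FCS) = 1524 bytes, rounded up to the cache line size
 * (1536 bytes on a system with 64 byte cache lines).
 */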
#define MVNETA_RX_PKT_SIZE(mtu) \
	ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
	      ETH_HLEN + ETH_FCS_LEN,			     \
	      cache_line_size())

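/* Used on the TX completion path: buffers whose DMA address falls inside
 * the queue's TSO header region were not mapped with dma_map_single()
 * and therefore must not be unmapped when their descriptors complete.
 */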
#define IS_TSO_HEADER(txq, addr) \
	((addr >= txq->tso_hdrs_phys) && \
	 (addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE))

#define MVNETA_RX_GET_BM_POOL_ID(rxd) \
	(((rxd)->status & MVNETA_RXD_BM_POOL_MASK) >> MVNETA_RXD_BM_POOL_SHIFT)

struct mvneta_statistic {
	unsigned short offset;
	unsigned short type;
	const char name[ETH_GSTRING_LEN];
};

#define T_REG_32	32
#define T_REG_64	64

static const struct mvneta_statistic mvneta_statistics[] = {
	{ 0x3000, T_REG_64, "good_octets_received", },
	{ 0x3010, T_REG_32, "good_frames_received", },
	{ 0x3008, T_REG_32, "bad_octets_received", },
	{ 0x3014, T_REG_32, "bad_frames_received", },
	{ 0x3018, T_REG_32, "broadcast_frames_received", },
	{ 0x301c, T_REG_32, "multicast_frames_received", },
	{ 0x3050, T_REG_32, "unrec_mac_control_received", },
	{ 0x3058, T_REG_32, "good_fc_received", },
	{ 0x305c, T_REG_32, "bad_fc_received", },
	{ 0x3060, T_REG_32, "undersize_received", },
	{ 0x3064, T_REG_32, "fragments_received", },
	{ 0x3068, T_REG_32, "oversize_received", },
	{ 0x306c, T_REG_32, "jabber_received", },
	{ 0x3070, T_REG_32, "mac_receive_error", },
	{ 0x3074, T_REG_32, "bad_crc_event", },
	{ 0x3078, T_REG_32, "collision", },
	{ 0x307c, T_REG_32, "late_collision", },
	{ 0x2484, T_REG_32, "rx_discard", },
	{ 0x2488, T_REG_32, "rx_overrun", },
	{ 0x3020, T_REG_32, "frames_64_octets", },
	{ 0x3024, T_REG_32, "frames_65_to_127_octets", },
	{ 0x3028, T_REG_32, "frames_128_to_255_octets", },
	{ 0x302c, T_REG_32, "frames_256_to_511_octets", },
	{ 0x3030, T_REG_32, "frames_512_to_1023_octets", },
	{ 0x3034, T_REG_32, "frames_1024_to_max_octets", },
	{ 0x3038, T_REG_64, "good_octets_sent", },
	{ 0x3040, T_REG_32, "good_frames_sent", },
	{ 0x3044, T_REG_32, "excessive_collision", },
	{ 0x3048, T_REG_32, "multicast_frames_sent", },
	{ 0x304c, T_REG_32, "broadcast_frames_sent", },
	{ 0x3054, T_REG_32, "fc_sent", },
	{ 0x300c, T_REG_32, "internal_mac_transmit_err", },
};

struct mvneta_pcpu_stats {
	struct	u64_stats_sync syncp;
	u64	rx_packets;
	u64	rx_bytes;
	u64	tx_packets;
	u64	tx_bytes;
};

struct mvneta_pcpu_port {
	/* Pointer to the shared port */
	struct mvneta_port	*pp;

	/* Pointer to the CPU-local NAPI struct */
	struct napi_struct	napi;

	/* Cause of the previous interrupt */
	u32			cause_rx_tx;
};

struct mvneta_port {
	u8 id;
	struct mvneta_pcpu_port __percpu	*ports;
	struct mvneta_pcpu_stats __percpu	*stats;

	int pkt_size;
	unsigned int frag_size;
	void __iomem *base;
	struct mvneta_rx_queue *rxqs;
	struct mvneta_tx_queue *txqs;
	struct net_device *dev;
	struct hlist_node node_online;
	struct hlist_node node_dead;
	int rxq_def;
	/* Protect the access to the percpu interrupt registers,
	 * ensuring that the configuration remains coherent.
	 */
	spinlock_t lock;
	bool is_stopped;

	u32 cause_rx_tx;
	struct napi_struct napi;

	/* Core clock */
	struct clk *clk;
	/* AXI clock */
	struct clk *clk_bus;
	u8 mcast_count[256];
	u16 tx_ring_size;
	u16 rx_ring_size;

	phy_interface_t phy_interface;
	struct device_node *dn;
	unsigned int tx_csum_limit;
	struct phylink *phylink;

	struct mvneta_bm *bm_priv;
	struct mvneta_bm_pool *pool_long;
	struct mvneta_bm_pool *pool_short;
	int bm_win_id;

	u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)];

	u32 indir[MVNETA_RSS_LU_TABLE_SIZE];

	/* Flags for special SoC configurations */
	bool neta_armada3700;
	u16 rx_offset_correction;
	const struct mbus_dram_target_info *dram_target_info;
};

/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design
 */

#define MVNETA_TX_L3_OFF_SHIFT	0
#define MVNETA_TX_IP_HLEN_SHIFT	8
#define MVNETA_TX_L4_UDP	BIT(16)
#define MVNETA_TX_L3_IP6	BIT(17)
#define MVNETA_TXD_IP_CSUM	BIT(18)
#define MVNETA_TXD_Z_PAD	BIT(19)
#define MVNETA_TXD_L_DESC	BIT(20)
#define MVNETA_TXD_F_DESC	BIT(21)
#define MVNETA_TXD_FLZ_DESC	(MVNETA_TXD_Z_PAD  | \
				 MVNETA_TXD_L_DESC | \
				 MVNETA_TXD_F_DESC)
#define MVNETA_TX_L4_CSUM_FULL	BIT(30)
#define MVNETA_TX_L4_CSUM_NOT	BIT(31)

#define MVNETA_RXD_ERR_CRC		0x0
#define MVNETA_RXD_BM_POOL_SHIFT	13
#define MVNETA_RXD_BM_POOL_MASK		(BIT(13) | BIT(14))
#define MVNETA_RXD_ERR_SUMMARY		BIT(16)
#define MVNETA_RXD_ERR_OVERRUN		BIT(17)
#define MVNETA_RXD_ERR_LEN		BIT(18)
#define MVNETA_RXD_ERR_RESOURCE		(BIT(17) | BIT(18))
#define MVNETA_RXD_ERR_CODE_MASK	(BIT(17) | BIT(18))
#define MVNETA_RXD_L3_IP4		BIT(25)
#define MVNETA_RXD_FIRST_LAST_DESC	(BIT(26) | BIT(27))
#define MVNETA_RXD_L4_CSUM_OK		BIT(30)

#if defined(__LITTLE_ENDIAN)
struct mvneta_tx_desc {
	u32  command;		/* Options used by HW for packet transmitting.*/
	u16  reserverd1;	/* csum_l4 (for future use)		*/
	u16  data_size;		/* Data size of transmitted packet in bytes */
	u32  buf_phys_addr;	/* Physical addr of transmitted buffer	*/
	u32  reserved2;		/* hw_cmd - (for future use, PMT)	*/
	u32  reserved3[4];	/* Reserved - (for future use)		*/
};

struct mvneta_rx_desc {
	u32  status;		/* Info about received packet		*/
	u16  reserved1;		/* pnc_info - (for future use, PnC)	*/
	u16  data_size;		/* Size of received packet in bytes	*/

	u32  buf_phys_addr;	/* Physical address of the buffer	*/
	u32  reserved2;		/* pnc_flow_id  (for future use, PnC)	*/

	u32  buf_cookie;	/* cookie for access to RX buffer in rx path */
	u16  reserved3;		/* prefetch_cmd, for future use		*/
	u16  reserved4;		/* csum_l4 - (for future use, PnC)	*/

	u32  reserved5;		/* pnc_extra PnC (for future use, PnC)	*/
	u32  reserved6;		/* hw_cmd (for future use, PnC and HWF)	*/
};
#else
struct mvneta_tx_desc {
	u16  data_size;		/* Data size of transmitted packet in bytes */
	u16  reserverd1;	/* csum_l4 (for future use)		*/
	u32  command;		/* Options used by HW for packet transmitting.*/
	u32  reserved2;		/* hw_cmd - (for future use, PMT)	*/
	u32  buf_phys_addr;	/* Physical addr of transmitted buffer	*/
	u32  reserved3[4];	/* Reserved - (for future use)		*/
};

struct mvneta_rx_desc {
	u16  data_size;		/* Size of received packet in bytes	*/
	u16  reserved1;		/* pnc_info - (for future use, PnC)	*/
	u32  status;		/* Info about received packet		*/

	u32  reserved2;		/* pnc_flow_id  (for future use, PnC)	*/
	u32  buf_phys_addr;	/* Physical address of the buffer	*/

	u16  reserved4;		/* csum_l4 - (for future use, PnC)	*/
	u16  reserved3;		/* prefetch_cmd, for future use		*/
	u32  buf_cookie;	/* cookie for access to RX buffer in rx path */

	u32  reserved5;		/* pnc_extra PnC (for future use, PnC)	*/
	u32  reserved6;		/* hw_cmd (for future use, PnC and HWF)	*/
};
#endif

struct mvneta_tx_queue {
	/* Number of this TX queue, in the range 0-7 */
	u8 id;

	/* Number of TX DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used TX DMA descriptor in the
	 * descriptor ring
	 */
	int count;
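	/* Number of descriptors filled but not yet handed to the
	 * hardware (flushed by the next pending-descriptor update)
	 */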
	int pending;
	int tx_stop_threshold;
	int tx_wake_threshold;

	/* Array of transmitted skb */
	struct sk_buff **tx_skb;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;

	u32 done_pkts_coal;

	/* Virtual address of the TX DMA descriptors array */
	struct mvneta_tx_desc *descs;

	/* DMA address of the TX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last TX DMA descriptor */
	int last_desc;

	/* Index of the next TX DMA descriptor to process */
	int next_desc_to_proc;

	/* DMA buffers for TSO headers */
	char *tso_hdrs;

	/* DMA address of TSO headers */
	dma_addr_t tso_hdrs_phys;

	/* Affinity mask for CPUs*/
	cpumask_t affinity_mask;
};

struct mvneta_rx_queue {
	/* rx queue number, in the range 0-7 */
	u8 id;

	/* num of rx descriptors in the rx descriptor ring */
	int size;

	/* counter of times when mvneta_refill() failed */
	int missed;

	u32 pkts_coal;
	u32 time_coal;

	/* Virtual address of the RX buffer */
	void  **buf_virt_addr;

	/* Virtual address of the RX DMA descriptors array */
	struct mvneta_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;
};

static enum cpuhp_state online_hpstate;
/* The hardware supports eight (8) rx queues; the driver allocates all
 * of them, but traffic is only steered to the default queue (rxq_def)
 * unless RSS is configured.
 */
static int rxq_number = 8;
static int txq_number = 8;

static int rxq_def;

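/* Frames shorter than this many bytes are copied into a freshly
 * allocated skb on receive so that the original RX buffer can be
 * reused right away.
 */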
static int rx_copybreak __read_mostly = 256;

/* HW BM requires each port to be identified by a unique ID */
static int global_port_id;

#define MVNETA_DRIVER_NAME "mvneta"
#define MVNETA_DRIVER_VERSION "1.0"

/* Utility/helper methods */

/* Write helper method */
static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
{
	writel(data, pp->base + offset);
}

/* Read helper method */
static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
{
	return readl(pp->base + offset);
}

/* Increment txq get counter */
static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq)
{
	txq->txq_get_index++;
	if (txq->txq_get_index == txq->size)
		txq->txq_get_index = 0;
}

/* Increment txq put counter */
static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)
{
	txq->txq_put_index++;
	if (txq->txq_put_index == txq->size)
		txq->txq_put_index = 0;
}


/* Clear all MIB counters */
static void mvneta_mib_counters_clear(struct mvneta_port *pp)
{
	int i;
	u32 dummy;

	/* Perform dummy reads from MIB counters */
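	/* The MIB counters are clear-on-read, so one pass of reads is
	 * enough to zero the whole block.
	 */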
	for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
		dummy = mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
	dummy = mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT);
	dummy = mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT);
}

/* Get System Network Statistics */
static void
mvneta_get_stats64(struct net_device *dev,
		   struct rtnl_link_stats64 *stats)
{
	struct mvneta_port *pp = netdev_priv(dev);
	unsigned int start;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct mvneta_pcpu_stats *cpu_stats;
		u64 rx_packets;
		u64 rx_bytes;
		u64 tx_packets;
		u64 tx_bytes;

		cpu_stats = per_cpu_ptr(pp->stats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			rx_packets = cpu_stats->rx_packets;
			rx_bytes   = cpu_stats->rx_bytes;
			tx_packets = cpu_stats->tx_packets;
			tx_bytes   = cpu_stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes   += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes   += tx_bytes;
	}

	stats->rx_errors	= dev->stats.rx_errors;
	stats->rx_dropped	= dev->stats.rx_dropped;

	stats->tx_dropped	= dev->stats.tx_dropped;
}

/* Rx descriptors helper methods */

/* Checks whether the RX descriptor having this status is both the first
 * and the last descriptor for the RX packet. Each RX packet is currently
 * received through a single RX descriptor, so not having each RX
 * descriptor with its first and last bits set is an error
 */
static int mvneta_rxq_desc_is_first_last(u32 status)
{
	return (status & MVNETA_RXD_FIRST_LAST_DESC) ==
		MVNETA_RXD_FIRST_LAST_DESC;
}

/* Add number of descriptors ready to receive new packets */
static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
					  struct mvneta_rx_queue *rxq,
					  int ndescs)
{
	/* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can
	 * be added at once
	 */
	while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
			    (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
			     MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
		ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
	}

	mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
		    (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
}

/* Get number of RX descriptors occupied by received packets */
static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
					struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
	return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;
}

/* Update num of rx desc called upon return from rx path or
 * from mvneta_rxq_drop_pkts().
 */
static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
				       struct mvneta_rx_queue *rxq,
				       int rx_done, int rx_filled)
{
	u32 val;

	if ((rx_done <= 0xff) && (rx_filled <= 0xff)) {
		val = rx_done |
		  (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
		return;
	}

	/* Only 255 descriptors can be added at once */
	while ((rx_done > 0) || (rx_filled > 0)) {
		if (rx_done <= 0xff) {
			val = rx_done;
			rx_done = 0;
		} else {
			val = 0xff;
			rx_done -= 0xff;
		}
		if (rx_filled <= 0xff) {
			val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
			rx_filled = 0;
		} else {
			val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
			rx_filled -= 0xff;
		}
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
	}
}

/* Get pointer to next RX descriptor to be processed by SW */
static struct mvneta_rx_desc *
mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
{
	int rx_desc = rxq->next_desc_to_proc;

	rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
	prefetch(rxq->descs + rxq->next_desc_to_proc);
	return rxq->descs + rx_desc;
}

/* Change maximum receive size of the port. */
static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size)
{
	u32 val;

	val =  mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val &= ~MVNETA_GMAC_MAX_RX_SIZE_MASK;
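	/* The field is written in units of two bytes (hence the division
	 * by two) and does not include the two-byte Marvell header.
	 */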
	val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) <<
		MVNETA_GMAC_MAX_RX_SIZE_SHIFT;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}


/* Set rx queue offset */
static void mvneta_rxq_offset_set(struct mvneta_port *pp,
				  struct mvneta_rx_queue *rxq,
				  int offset)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK;

	/* The offset field is programmed in units of 8 bytes */
	val |= MVNETA_RXQ_PKT_OFFSET_MASK(offset >> 3);
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}


/* Tx descriptors helper methods */

/* Update HW with number of TX descriptors to be sent */
static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq,
				     int pend_desc)
{
	u32 val;

	pend_desc += txq->pending;

	/* Only 255 Tx descriptors can be added at once */
	do {
		val = min(pend_desc, 255);
		mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
		pend_desc -= val;
	} while (pend_desc > 0);
	txq->pending = 0;
}

/* Get pointer to next TX descriptor to be processed (send) by HW */
static struct mvneta_tx_desc *
mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
{
	int tx_desc = txq->next_desc_to_proc;

	txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
	return txq->descs + tx_desc;
}

/* Release the last allocated TX descriptor. Useful to handle DMA
 * mapping failures in the TX path.
 */
static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq)
{
	if (txq->next_desc_to_proc == 0)
		txq->next_desc_to_proc = txq->last_desc - 1;
	else
		txq->next_desc_to_proc--;
}

/* Set rxq buf size */
static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq,
				    int buf_size)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));

	val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
	val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);

	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
}

/* Disable buffer management (BM) */
static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
				  struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_HW_BUF_ALLOC;
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Enable buffer management (BM) */
static void mvneta_rxq_bm_enable(struct mvneta_port *pp,
				 struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val |= MVNETA_RXQ_HW_BUF_ALLOC;
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Notify HW about port's assignment of pool for bigger packets */
static void mvneta_rxq_long_pool_set(struct mvneta_port *pp,
				     struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_LONG_POOL_ID_MASK;
	val |= (pp->pool_long->id << MVNETA_RXQ_LONG_POOL_ID_SHIFT);

	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Notify HW about port's assignment of pool for smaller packets */
static void mvneta_rxq_short_pool_set(struct mvneta_port *pp,
				      struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_SHORT_POOL_ID_MASK;
	val |= (pp->pool_short->id << MVNETA_RXQ_SHORT_POOL_ID_SHIFT);

	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Set port's receive buffer size for assigned BM pool */
static inline void mvneta_bm_pool_bufsize_set(struct mvneta_port *pp,
					      int buf_size,
					      u8 pool_id)
{
	u32 val;

	if (!IS_ALIGNED(buf_size, 8)) {
		dev_warn(pp->dev->dev.parent,
			 "illegal buf_size value %d, round to %d\n",
			 buf_size, ALIGN(buf_size, 8));
		buf_size = ALIGN(buf_size, 8);
	}

	val = mvreg_read(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id));
	val |= buf_size & MVNETA_PORT_POOL_BUFFER_SZ_MASK;
	mvreg_write(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id), val);
}

/* Configure MBUS window in order to enable access BM internal SRAM */
static int mvneta_mbus_io_win_set(struct mvneta_port *pp, u32 base, u32 wsize,
				  u8 target, u8 attr)
{
	u32 win_enable, win_protect;
	int i;

	win_enable = mvreg_read(pp, MVNETA_BASE_ADDR_ENABLE);
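	/* The enable bits are active low: a set bit marks a window that
	 * is still unused, and clearing it later activates the window.
	 */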

	if (pp->bm_win_id < 0) {
		/* Find first not occupied window */
		for (i = 0; i < MVNETA_MAX_DECODE_WIN; i++) {
			if (win_enable & (1 << i)) {
				pp->bm_win_id = i;
				break;
			}
		}
		if (i == MVNETA_MAX_DECODE_WIN)
			return -ENOMEM;
	} else {
		i = pp->bm_win_id;
	}

	mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
	mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);

	if (i < 4)
		mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);

	mvreg_write(pp, MVNETA_WIN_BASE(i), (base & 0xffff0000) |
		    (attr << 8) | target);

	mvreg_write(pp, MVNETA_WIN_SIZE(i), (wsize - 1) & 0xffff0000);

	win_protect = mvreg_read(pp, MVNETA_ACCESS_PROTECT_ENABLE);
	win_protect |= 3 << (2 * i);
	mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);

	win_enable &= ~(1 << i);
	mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);

	return 0;
}

static  int mvneta_bm_port_mbus_init(struct mvneta_port *pp)
{
	u32 wsize;
	u8 target, attr;
	int err;

	/* Get BM window information */
	err = mvebu_mbus_get_io_win_info(pp->bm_priv->bppi_phys_addr, &wsize,
					 &target, &attr);
	if (err < 0)
		return err;

	pp->bm_win_id = -1;

	/* Open NETA -> BM window */
	err = mvneta_mbus_io_win_set(pp, pp->bm_priv->bppi_phys_addr, wsize,
				     target, attr);
	if (err < 0) {
		netdev_info(pp->dev, "fail to configure mbus window to BM\n");
		return err;
	}
	return 0;
}

/* Assign and initialize pools for port. In case of failure,
 * the buffer manager will remain disabled for the current port.
 */
static int mvneta_bm_port_init(struct platform_device *pdev,
			       struct mvneta_port *pp)
{
	struct device_node *dn = pdev->dev.of_node;
	u32 long_pool_id, short_pool_id;

	if (!pp->neta_armada3700) {
		int ret;

		ret = mvneta_bm_port_mbus_init(pp);
		if (ret)
			return ret;
	}

	if (of_property_read_u32(dn, "bm,pool-long", &long_pool_id)) {
		netdev_info(pp->dev, "missing long pool id\n");
		return -EINVAL;
	}

	/* Create port's long pool depending on mtu */
	pp->pool_long = mvneta_bm_pool_use(pp->bm_priv, long_pool_id,
					   MVNETA_BM_LONG, pp->id,
					   MVNETA_RX_PKT_SIZE(pp->dev->mtu));
	if (!pp->pool_long) {
		netdev_info(pp->dev, "fail to obtain long pool for port\n");
		return -ENOMEM;
	}

	pp->pool_long->port_map |= 1 << pp->id;

	mvneta_bm_pool_bufsize_set(pp, pp->pool_long->buf_size,
				   pp->pool_long->id);

	/* If short pool id is not defined, assume using single pool */
	if (of_property_read_u32(dn, "bm,pool-short", &short_pool_id))
		short_pool_id = long_pool_id;

	/* Create port's short pool */
	pp->pool_short = mvneta_bm_pool_use(pp->bm_priv, short_pool_id,
					    MVNETA_BM_SHORT, pp->id,
					    MVNETA_BM_SHORT_PKT_SIZE);
	if (!pp->pool_short) {
		netdev_info(pp->dev, "fail to obtain short pool for port\n");
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
		return -ENOMEM;
	}

	if (short_pool_id != long_pool_id) {
		pp->pool_short->port_map |= 1 << pp->id;
		mvneta_bm_pool_bufsize_set(pp, pp->pool_short->buf_size,
					   pp->pool_short->id);
	}

	return 0;
}

/* Update settings of a pool for bigger packets */
static void mvneta_bm_update_mtu(struct mvneta_port *pp, int mtu)
{
	struct mvneta_bm_pool *bm_pool = pp->pool_long;
	struct hwbm_pool *hwbm_pool = &bm_pool->hwbm_pool;
	int num;

	/* Release all buffers from long pool */
	mvneta_bm_bufs_free(pp->bm_priv, bm_pool, 1 << pp->id);
	if (hwbm_pool->buf_num) {
		WARN(1, "cannot free all buffers in pool %d\n",
		     bm_pool->id);
		goto bm_mtu_err;
	}

	bm_pool->pkt_size = MVNETA_RX_PKT_SIZE(mtu);
	bm_pool->buf_size = MVNETA_RX_BUF_SIZE(bm_pool->pkt_size);
	hwbm_pool->frag_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
			SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(bm_pool->pkt_size));

	/* Fill entire long pool */
	num = hwbm_pool_add(hwbm_pool, hwbm_pool->size, GFP_ATOMIC);
	if (num != hwbm_pool->size) {
		WARN(1, "pool %d: %d of %d allocated\n",
		     bm_pool->id, num, hwbm_pool->size);
		goto bm_mtu_err;
	}
	mvneta_bm_pool_bufsize_set(pp, bm_pool->buf_size, bm_pool->id);

	return;

bm_mtu_err:
	mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
	mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, 1 << pp->id);

	pp->bm_priv = NULL;
	mvreg_write(pp, MVNETA_ACC_MODE, MVNETA_ACC_MODE_EXT1);
	netdev_info(pp->dev, "fail to update MTU, fall back to software BM\n");
}

/* Start the Ethernet port RX and TX activity */
static void mvneta_port_up(struct mvneta_port *pp)
{
	int queue;
	u32 q_map;

	/* Enable all initialized TXs. */
	q_map = 0;
	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];
		if (txq->descs)
			q_map |= (1 << queue);
	}
	mvreg_write(pp, MVNETA_TXQ_CMD, q_map);

	/* Enable all initialized RXQs. */
	q_map = 0;
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];

		if (rxq->descs)
			q_map |= (1 << queue);
	}
	mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
}

/* Stop the Ethernet port activity */
static void mvneta_port_down(struct mvneta_port *pp)
{
	u32 val;
	int count;

	/* Stop Rx port activity. Check port Rx activity. */
	val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;

	/* Issue stop command for active channels only */
	if (val != 0)
		mvreg_write(pp, MVNETA_RXQ_CMD,
			    val << MVNETA_RXQ_DISABLE_SHIFT);

	/* Wait for all Rx activity to terminate. */
	count = 0;
	do {
		if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(pp->dev,
				    "TIMEOUT for RX stopped ! rx_queue_cmd: 0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		val = mvreg_read(pp, MVNETA_RXQ_CMD);
	} while (val & MVNETA_RXQ_ENABLE_MASK);

	/* Stop Tx port activity. Check port Tx activity. Issue stop
	 * command for active channels only
	 */
	val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;

	if (val != 0)
		mvreg_write(pp, MVNETA_TXQ_CMD,
			    (val << MVNETA_TXQ_DISABLE_SHIFT));

	/* Wait for all Tx activity to terminate. */
	count = 0;
	do {
		if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(pp->dev,
				    "TIMEOUT for TX stopped status=0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		/* Check TX Command reg that all Txqs are stopped */
		val = mvreg_read(pp, MVNETA_TXQ_CMD);

	} while (val & MVNETA_TXQ_ENABLE_MASK);

	/* Double check to verify that TX FIFO is empty */
	count = 0;
	do {
		if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) {
			netdev_warn(pp->dev,
				    "TX FIFO empty timeout status=0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		val = mvreg_read(pp, MVNETA_PORT_STATUS);
	} while (!(val & MVNETA_TX_FIFO_EMPTY) &&
		 (val & MVNETA_TX_IN_PRGRS));

	udelay(200);
}

/* Enable the port by setting the port enable bit of the MAC control register */
static void mvneta_port_enable(struct mvneta_port *pp)
{
	u32 val;

	/* Enable port */
	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val |= MVNETA_GMAC0_PORT_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}

/* Disable the port and wait for about 200 usec before returning */
static void mvneta_port_disable(struct mvneta_port *pp)
{
	u32 val;

	/* Reset the Enable bit in the Serial Control Register */
	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val &= ~MVNETA_GMAC0_PORT_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);

	udelay(200);
}

/* Multicast tables methods */

/* Set all entries in Unicast MAC Table; queue==-1 means reject all */
static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		val = 0;
	} else {
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
}

/* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		val = 0;
	} else {
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xfc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);

}

/* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
		val = 0;
	} else {
		memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xfc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
}

static void mvneta_percpu_unmask_interrupt(void *arg)
{
	struct mvneta_port *pp = arg;

	/* All the queue are unmasked, but actually only the ones
	 * mapped to this CPU will be unmasked
	 */
	mvreg_write(pp, MVNETA_INTR_NEW_MASK,
		    MVNETA_RX_INTR_MASK_ALL |
		    MVNETA_TX_INTR_MASK_ALL |
		    MVNETA_MISCINTR_INTR_MASK);
}

static void mvneta_percpu_mask_interrupt(void *arg)
{
	struct mvneta_port *pp = arg;

	/* All the queue are masked, but actually only the ones
	 * mapped to this CPU will be masked
	 */
	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
}

static void mvneta_percpu_clear_intr_cause(void *arg)
{
	struct mvneta_port *pp = arg;

	/* All the queue are cleared, but actually only the ones
	 * mapped to this CPU will be cleared
	 */
	mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
}

/* This method sets defaults to the NETA port:
 *	Clears interrupt Cause and Mask registers.
 *	Clears all MAC tables.
 *	Sets defaults to all registers.
 *	Resets RX and TX descriptor rings.
 *	Resets PHY.
 * This method can be called after mvneta_port_down() to return the port
 *	settings to defaults.
 */
static void mvneta_defaults_set(struct mvneta_port *pp)
{
	int cpu;
	int queue;
	u32 val;
	int max_cpu = num_present_cpus();

	/* Clear all Cause registers */
	on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);

	/* Mask all interrupts */
	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
	mvreg_write(pp, MVNETA_INTR_ENABLE, 0);

	/* Enable MBUS Retry bit16 */
	mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);

	/* Set CPU queue access map. CPUs are assigned to the RX and
	 * TX queues modulo their number. If there is only one TX
	 * queue then it is assigned to the CPU associated to the
	 * default RX queue.
	 */
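	/* For example, with two present CPUs and eight queues of each
	 * kind, CPU 0 handles queues 0, 2, 4 and 6 while CPU 1 handles
	 * queues 1, 3, 5 and 7.
	 */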
	for_each_present_cpu(cpu) {
		int rxq_map = 0, txq_map = 0;
		int rxq, txq;
		if (!pp->neta_armada3700) {
			for (rxq = 0; rxq < rxq_number; rxq++)
				if ((rxq % max_cpu) == cpu)
					rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);

			for (txq = 0; txq < txq_number; txq++)
				if ((txq % max_cpu) == cpu)
					txq_map |= MVNETA_CPU_TXQ_ACCESS(txq);

			/* With only one TX queue we configure a special case
			 * which will allow to get all the irq on a single
			 * CPU
			 */
			if (txq_number == 1)
				txq_map = (cpu == pp->rxq_def) ?
					MVNETA_CPU_TXQ_ACCESS(1) : 0;

		} else {
			txq_map = MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
			rxq_map = MVNETA_CPU_RXQ_ACCESS_ALL_MASK;
		}

		mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
	}

	/* Reset RX and TX DMAs */
	mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
	mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);

	/* Disable Legacy WRR, Disable EJP, Release from reset */
	mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
	for (queue = 0; queue < txq_number; queue++) {
		mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
		mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
	}

	mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
	mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);

	/* Set Port Acceleration Mode */
	if (pp->bm_priv)
		/* HW buffer management + legacy parser */
		val = MVNETA_ACC_MODE_EXT2;
	else
		/* SW buffer management + legacy parser */
		val = MVNETA_ACC_MODE_EXT1;
	mvreg_write(pp, MVNETA_ACC_MODE, val);

	if (pp->bm_priv)
		mvreg_write(pp, MVNETA_BM_ADDRESS, pp->bm_priv->bppi_phys_addr);

	/* Update val of portCfg register according to all RxQueue types */
	val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
	mvreg_write(pp, MVNETA_PORT_CONFIG, val);

	val = 0;
	mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
	mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);

	/* Build PORT_SDMA_CONFIG_REG */
	val = 0;

	/* Default burst size */
	val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
	val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
	val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP;

#if defined(__BIG_ENDIAN)
	val |= MVNETA_DESC_SWAP;
#endif

	/* Assign port SDMA configuration */
	mvreg_write(pp, MVNETA_SDMA_CONFIG, val);

	/* Disable PHY polling in hardware, since we're using the
	 * kernel phylib to do this.
	 */
	val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
	val &= ~MVNETA_PHY_POLLING_ENABLE;
	mvreg_write(pp, MVNETA_UNIT_CONTROL, val);

	mvneta_set_ucast_table(pp, -1);
	mvneta_set_special_mcast_table(pp, -1);
	mvneta_set_other_mcast_table(pp, -1);

	/* Set port interrupt enable register - default enable all */
	mvreg_write(pp, MVNETA_INTR_ENABLE,
		    (MVNETA_RXQ_INTR_ENABLE_ALL_MASK
		     | MVNETA_TXQ_INTR_ENABLE_ALL_MASK));

	mvneta_mib_counters_clear(pp);
}

/* Set max sizes for tx queues */
static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size)
{
	u32 val, size, mtu;
	int queue;

	mtu = max_tx_size * 8;
	if (mtu > MVNETA_TX_MTU_MAX)
		mtu = MVNETA_TX_MTU_MAX;

	/* Set MTU */
	val = mvreg_read(pp, MVNETA_TX_MTU);
	val &= ~MVNETA_TX_MTU_MAX;
	val |= mtu;
	mvreg_write(pp, MVNETA_TX_MTU, val);

	/* TX token size and all TXQs token size must be larger than the MTU */
	val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE);

	size = val & MVNETA_TX_TOKEN_SIZE_MAX;
	if (size < mtu) {
		size = mtu;
		val &= ~MVNETA_TX_TOKEN_SIZE_MAX;
		val |= size;
		mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val);
	}
	for (queue = 0; queue < txq_number; queue++) {
		val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue));

		size = val & MVNETA_TXQ_TOKEN_SIZE_MAX;
		if (size < mtu) {
			size = mtu;
			val &= ~MVNETA_TXQ_TOKEN_SIZE_MAX;
			val |= size;
			mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val);
		}
	}
}

/* Set unicast address */
static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
				  int queue)
{
	unsigned int unicast_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	/* Locate the Unicast table entry */
	last_nibble = (0xf & last_nibble);

	/* offset from unicast tbl base */
	tbl_offset = (last_nibble / 4) * 4;

	/* offset within the above reg  */
	reg_offset = last_nibble % 4;

	unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset));

	if (queue == -1) {
		/* Clear accepts frame bit at specified unicast DA tbl entry */
		unicast_reg &= ~(0xff << (8 * reg_offset));
	} else {
		unicast_reg &= ~(0xff << (8 * reg_offset));
		unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
	}

	mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg);
}

/* Set mac address */
static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr,
				int queue)
{
	unsigned int mac_h;
	unsigned int mac_l;

	if (queue != -1) {
		mac_l = (addr[4] << 8) | (addr[5]);
		mac_h = (addr[0] << 24) | (addr[1] << 16) |
			(addr[2] << 8) | (addr[3] << 0);

		mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
		mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
	}

	/* Accept frames of this address */
	mvneta_set_ucast_addr(pp, addr[5], queue);
}

/* Set the number of packets that will be received before RX interrupt
 * will be generated by HW.
 */
static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq, u32 value)
{
	mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id),
		    value | MVNETA_RXQ_NON_OCCUPIED(0));
	rxq->pkts_coal = value;
}

/* Set the time delay in usec before RX interrupt will be generated by
 * HW.
 */
static void mvneta_rx_time_coal_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq, u32 value)
{
	u32 val;
	unsigned long clk_rate;

	clk_rate = clk_get_rate(pp->clk);
	val = (clk_rate / 1000000) * value;

	mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
	rxq->time_coal = value;
}

/* Set threshold for TX_DONE pkts coalescing */
static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,
					 struct mvneta_tx_queue *txq, u32 value)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id));

	val &= ~MVNETA_TXQ_SENT_THRESH_ALL_MASK;
	val |= MVNETA_TXQ_SENT_THRESH_MASK(value);

	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val);

	txq->done_pkts_coal = value;
}

/* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
				u32 phys_addr, void *virt_addr,
				struct mvneta_rx_queue *rxq)
{
	int i;

	rx_desc->buf_phys_addr = phys_addr;
	i = rx_desc - rxq->descs;
	rxq->buf_virt_addr[i] = virt_addr;
}

/* Decrement sent descriptors counter */
static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq,
				     int sent_desc)
{
	u32 val;

	/* Only 255 TX descriptors can be updated at once */
	while (sent_desc > 0xff) {
		val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT;
		mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
		sent_desc = sent_desc - 0xff;
	}

	val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT;
	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
}

/* Get number of TX descriptors already sent by HW */
static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
					struct mvneta_tx_queue *txq)
{
	u32 val;
	int sent_desc;

	val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
	sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >>
		MVNETA_TXQ_SENT_DESC_SHIFT;

	return sent_desc;
}

/* Get number of sent descriptors and decrement counter.
 *  The number of sent descriptors is returned.
 */
static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq)
{
	int sent_desc;

	/* Get number of sent descriptors */
	sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);

	/* Decrement sent descriptors counter */
	if (sent_desc)
		mvneta_txq_sent_desc_dec(pp, txq, sent_desc);

	return sent_desc;
}

/* Set TXQ descriptors fields relevant for CSUM calculation */
static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
				int ip_hdr_len, int l4_proto)
{
	u32 command;

	/* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
	 * G_L4_chk, L4_type; required only for checksum
	 * calculation
	 */
	command =  l3_offs    << MVNETA_TX_L3_OFF_SHIFT;
	command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;

	if (l3_proto == htons(ETH_P_IP))
		command |= MVNETA_TXD_IP_CSUM;
	else
		command |= MVNETA_TX_L3_IP6;

	if (l4_proto == IPPROTO_TCP)
		command |=  MVNETA_TX_L4_CSUM_FULL;
	else if (l4_proto == IPPROTO_UDP)
		command |= MVNETA_TX_L4_UDP | MVNETA_TX_L4_CSUM_FULL;
	else
		command |= MVNETA_TX_L4_CSUM_NOT;

	return command;
}


/* Display more error info */
static void mvneta_rx_error(struct mvneta_port *pp,
			    struct mvneta_rx_desc *rx_desc)
{
	u32 status = rx_desc->status;

	if (!mvneta_rxq_desc_is_first_last(status)) {
		netdev_err(pp->dev,
			   "bad rx status %08x (buffer oversize), size=%d\n",
			   status, rx_desc->data_size);
		return;
	}

	switch (status & MVNETA_RXD_ERR_CODE_MASK) {
	case MVNETA_RXD_ERR_CRC:
		netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_OVERRUN:
		netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_LEN:
		netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_RESOURCE:
		netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	}
}

/* Handle RX checksum offload based on the descriptor's status */
static void mvneta_rx_csum(struct mvneta_port *pp, u32 status,
			   struct sk_buff *skb)
{
	if ((status & MVNETA_RXD_L3_IP4) &&
	    (status & MVNETA_RXD_L4_CSUM_OK)) {
		skb->csum = 0;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		return;
	}

	skb->ip_summed = CHECKSUM_NONE;
}

/* Return the tx queue pointer (find last set bit) according to <cause>
 * returned from the tx_done register. <cause> must not be zero; the queue
 * returned always matches the highest set bit in <cause>.
 */
static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
						     u32 cause)
{
	int queue = fls(cause) - 1;
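	/* e.g. cause = 0x06 (txqs 1 and 2 pending) selects txq 2 */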

	return &pp->txqs[queue];
}

/* Free tx queue skbuffs */
static void mvneta_txq_bufs_free(struct mvneta_port *pp,
				 struct mvneta_tx_queue *txq, int num,
				 struct netdev_queue *nq)
{
	unsigned int bytes_compl = 0, pkts_compl = 0;
	int i;

	for (i = 0; i < num; i++) {
		struct mvneta_tx_desc *tx_desc = txq->descs +
			txq->txq_get_index;
		struct sk_buff *skb = txq->tx_skb[txq->txq_get_index];

		if (skb) {
			bytes_compl += skb->len;
			pkts_compl++;
		}

		mvneta_txq_inc_get(txq);

		if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
			dma_unmap_single(pp->dev->dev.parent,
					 tx_desc->buf_phys_addr,
					 tx_desc->data_size, DMA_TO_DEVICE);
		if (!skb)
			continue;
		dev_kfree_skb_any(skb);
	}

	netdev_tx_completed_queue(nq, pkts_compl, bytes_compl);
}

/* Handle end of transmission */
static void mvneta_txq_done(struct mvneta_port *pp,
			   struct mvneta_tx_queue *txq)
{
	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
	int tx_done;

	tx_done = mvneta_txq_sent_desc_proc(pp, txq);
	if (!tx_done)
		return;

	mvneta_txq_bufs_free(pp, txq, tx_done, nq);

	txq->count -= tx_done;

	if (netif_tx_queue_stopped(nq)) {
		if (txq->count <= txq->tx_wake_threshold)
			netif_tx_wake_queue(nq);
	}
}

void *mvneta_frag_alloc(unsigned int frag_size)
{
	if (likely(frag_size <= PAGE_SIZE))
		return netdev_alloc_frag(frag_size);
	else
		return kmalloc(frag_size, GFP_ATOMIC);
}
EXPORT_SYMBOL_GPL(mvneta_frag_alloc);

void mvneta_frag_free(unsigned int frag_size, void *data)
{
	if (likely(frag_size <= PAGE_SIZE))
		skb_free_frag(data);
	else
		kfree(data);
}
EXPORT_SYMBOL_GPL(mvneta_frag_free);

/* Refill processing for SW buffer management */
static int mvneta_rx_refill(struct mvneta_port *pp,
			    struct mvneta_rx_desc *rx_desc,
			    struct mvneta_rx_queue *rxq)

{
	dma_addr_t phys_addr;
	void *data;

	data = mvneta_frag_alloc(pp->frag_size);
	if (!data)
		return -ENOMEM;

	phys_addr = dma_map_single(pp->dev->dev.parent, data,
				   MVNETA_RX_BUF_SIZE(pp->pkt_size),
				   DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
		mvneta_frag_free(pp->frag_size, data);
		return -ENOMEM;
	}

	phys_addr += pp->rx_offset_correction;
	mvneta_rx_desc_fill(rx_desc, phys_addr, data, rxq);
	return 0;
}

/* Handle tx checksum */
static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		int ip_hdr_len = 0;
		__be16 l3_proto = vlan_get_protocol(skb);
		u8 l4_proto;

		if (l3_proto == htons(ETH_P_IP)) {
			struct iphdr *ip4h = ip_hdr(skb);

			/* Calculate IPv4 checksum and L4 checksum */
			ip_hdr_len = ip4h->ihl;
			l4_proto = ip4h->protocol;
		} else if (l3_proto == htons(ETH_P_IPV6)) {
			struct ipv6hdr *ip6h = ipv6_hdr(skb);

			/* Read l4_protocol from one of IPv6 extra headers */
			if (skb_network_header_len(skb) > 0)
				ip_hdr_len = (skb_network_header_len(skb) >> 2);
			l4_proto = ip6h->nexthdr;
		} else
			return MVNETA_TX_L4_CSUM_NOT;

		return mvneta_txq_desc_csum(skb_network_offset(skb),
					    l3_proto, ip_hdr_len, l4_proto);
	}

	return MVNETA_TX_L4_CSUM_NOT;
}

/* Drop packets received by the RXQ and free buffers */
static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
				 struct mvneta_rx_queue *rxq)
{
	int rx_done, i;

	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
	if (rx_done)
		mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);

	if (pp->bm_priv) {
		for (i = 0; i < rx_done; i++) {
			struct mvneta_rx_desc *rx_desc =
						  mvneta_rxq_next_desc_get(rxq);
			u8 pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc);
			struct mvneta_bm_pool *bm_pool;

			bm_pool = &pp->bm_priv->bm_pools[pool_id];
			/* Return dropped buffer to the pool */
			mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
					      rx_desc->buf_phys_addr);
		}
		return;
	}

	for (i = 0; i < rxq->size; i++) {
		struct mvneta_rx_desc *rx_desc = rxq->descs + i;
		void *data = rxq->buf_virt_addr[i];

		dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
				 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
		mvneta_frag_free(pp->frag_size, data);
	}
}

/* Main rx processing when using software buffer management */
static int mvneta_rx_swbm(struct mvneta_port *pp, int rx_todo,
			  struct mvneta_rx_queue *rxq)
{
	struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
	struct net_device *dev = pp->dev;
	int rx_done;
	u32 rcvd_pkts = 0;
	u32 rcvd_bytes = 0;

	/* Get number of received packets */
	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);

	if (rx_todo > rx_done)
		rx_todo = rx_done;

	rx_done = 0;

	/* Fairness NAPI loop */
	while (rx_done < rx_todo) {
		struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
		struct sk_buff *skb;
		unsigned char *data;
		dma_addr_t phys_addr;
		u32 rx_status, frag_size;
		int rx_bytes, err, index;

		rx_done++;
		rx_status = rx_desc->status;
		rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
		index = rx_desc - rxq->descs;
		data = rxq->buf_virt_addr[index];
		phys_addr = rx_desc->buf_phys_addr;

		if (!mvneta_rxq_desc_is_first_last(rx_status) ||
		    (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
			mvneta_rx_error(pp, rx_desc);
err_drop_frame:
			dev->stats.rx_errors++;
			/* leave the descriptor untouched */
			continue;
		}

		if (rx_bytes <= rx_copybreak) {
			/* better copy a small frame and not unmap the DMA region */
			skb = netdev_alloc_skb_ip_align(dev, rx_bytes);
			if (unlikely(!skb))
				goto err_drop_frame;

			dma_sync_single_range_for_cpu(dev->dev.parent,
						      phys_addr,
						      MVNETA_MH_SIZE + NET_SKB_PAD,
						      rx_bytes,
						      DMA_FROM_DEVICE);
			skb_put_data(skb, data + MVNETA_MH_SIZE + NET_SKB_PAD,
				     rx_bytes);

			skb->protocol = eth_type_trans(skb, dev);
			mvneta_rx_csum(pp, rx_status, skb);
			napi_gro_receive(&port->napi, skb);

			rcvd_pkts++;
			rcvd_bytes += rx_bytes;

			/* leave the descriptor and buffer untouched */
			continue;
		}

		/* Refill processing */
		err = mvneta_rx_refill(pp, rx_desc, rxq);
		if (err) {
			netdev_err(dev, "Linux processing - Can't refill\n");
			rxq->missed++;
			goto err_drop_frame;
		}

		frag_size = pp->frag_size;

		skb = build_skb(data, frag_size > PAGE_SIZE ? 0 : frag_size);

		/* After refill, the old buffer has to be unmapped regardless
		 * of whether the skb was successfully built or not.
		 */
		dma_unmap_single(dev->dev.parent, phys_addr,
				 MVNETA_RX_BUF_SIZE(pp->pkt_size),
				 DMA_FROM_DEVICE);

		if (!skb)
			goto err_drop_frame;

		rcvd_pkts++;
		rcvd_bytes += rx_bytes;

		/* Linux processing */
		skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD);
		skb_put(skb, rx_bytes);

		skb->protocol = eth_type_trans(skb, dev);

		mvneta_rx_csum(pp, rx_status, skb);

		napi_gro_receive(&port->napi, skb);
	}

	if (rcvd_pkts) {
		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);

		u64_stats_update_begin(&stats->syncp);
		stats->rx_packets += rcvd_pkts;
		stats->rx_bytes   += rcvd_bytes;
		u64_stats_update_end(&stats->syncp);
	}

	/* Update rxq management counters */
	mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);

	return rx_done;
}

/* Main rx processing when using hardware buffer management */
static int mvneta_rx_hwbm(struct mvneta_port *pp, int rx_todo,
			  struct mvneta_rx_queue *rxq)
{
	struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
	struct net_device *dev = pp->dev;
	int rx_done;
	u32 rcvd_pkts = 0;
	u32 rcvd_bytes = 0;

	/* Get number of received packets */
	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);

	if (rx_todo > rx_done)
		rx_todo = rx_done;

	rx_done = 0;

	/* Fairness NAPI loop */
	while (rx_done < rx_todo) {
		struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
		struct mvneta_bm_pool *bm_pool = NULL;
		struct sk_buff *skb;
		unsigned char *data;
		dma_addr_t phys_addr;
		u32 rx_status, frag_size;
		int rx_bytes, err;
		u8 pool_id;

		rx_done++;
		rx_status = rx_desc->status;
		rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
		data = (u8 *)(uintptr_t)rx_desc->buf_cookie;
		phys_addr = rx_desc->buf_phys_addr;
		pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc);
		bm_pool = &pp->bm_priv->bm_pools[pool_id];

		if (!mvneta_rxq_desc_is_first_last(rx_status) ||
		    (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
err_drop_frame_ret_pool:
			/* Return the buffer to the pool */
			mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
					      rx_desc->buf_phys_addr);
err_drop_frame:
			dev->stats.rx_errors++;
			mvneta_rx_error(pp, rx_desc);
			/* leave the descriptor untouched */
			continue;
		}

		if (rx_bytes <= rx_copybreak) {
			/* better copy a small frame and not unmap the DMA region */
			skb = netdev_alloc_skb_ip_align(dev, rx_bytes);
			if (unlikely(!skb))
				goto err_drop_frame_ret_pool;

			dma_sync_single_range_for_cpu(dev->dev.parent,
			                              rx_desc->buf_phys_addr,
			                              MVNETA_MH_SIZE + NET_SKB_PAD,
			                              rx_bytes,
			                              DMA_FROM_DEVICE);
			skb_put_data(skb, data + MVNETA_MH_SIZE + NET_SKB_PAD,
				     rx_bytes);

			skb->protocol = eth_type_trans(skb, dev);
			mvneta_rx_csum(pp, rx_status, skb);
			napi_gro_receive(&port->napi, skb);

			rcvd_pkts++;
			rcvd_bytes += rx_bytes;

			/* Return the buffer to the pool */
			mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
					      rx_desc->buf_phys_addr);

			/* leave the descriptor and buffer untouched */
			continue;
		}

		/* Refill processing */
		err = hwbm_pool_refill(&bm_pool->hwbm_pool, GFP_ATOMIC);
		if (err) {
			netdev_err(dev, "Linux processing - Can't refill\n");
			rxq->missed++;
			goto err_drop_frame_ret_pool;
		}

		frag_size = bm_pool->hwbm_pool.frag_size;

		skb = build_skb(data, frag_size > PAGE_SIZE ? 0 : frag_size);

		/* After refill, the old buffer has to be unmapped regardless
		 * of whether the skb was successfully built or not.
		 */
		dma_unmap_single(&pp->bm_priv->pdev->dev, phys_addr,
				 bm_pool->buf_size, DMA_FROM_DEVICE);
		if (!skb)
			goto err_drop_frame;

		rcvd_pkts++;
		rcvd_bytes += rx_bytes;

		/* Linux processing */
		skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD);
		skb_put(skb, rx_bytes);

		skb->protocol = eth_type_trans(skb, dev);

		mvneta_rx_csum(pp, rx_status, skb);

		napi_gro_receive(&port->napi, skb);
	}

	if (rcvd_pkts) {
		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);

		u64_stats_update_begin(&stats->syncp);
		stats->rx_packets += rcvd_pkts;
		stats->rx_bytes   += rcvd_bytes;
		u64_stats_update_end(&stats->syncp);
	}

	/* Update rxq management counters */
	mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);

	return rx_done;
}

static inline void
mvneta_tso_put_hdr(struct sk_buff *skb,
		   struct mvneta_port *pp, struct mvneta_tx_queue *txq)
{
	struct mvneta_tx_desc *tx_desc;
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);

	txq->tx_skb[txq->txq_put_index] = NULL;
	tx_desc = mvneta_txq_next_desc_get(txq);
	tx_desc->data_size = hdr_len;
	tx_desc->command = mvneta_skb_tx_csum(pp, skb);
	tx_desc->command |= MVNETA_TXD_F_DESC;
	tx_desc->buf_phys_addr = txq->tso_hdrs_phys +
				 txq->txq_put_index * TSO_HEADER_SIZE;
	mvneta_txq_inc_put(txq);
}

static inline int
mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
		    struct sk_buff *skb, char *data, int size,
		    bool last_tcp, bool is_last)
{
	struct mvneta_tx_desc *tx_desc;

	tx_desc = mvneta_txq_next_desc_get(txq);
	tx_desc->data_size = size;
	tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, data,
						size, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent,
		     tx_desc->buf_phys_addr))) {
		mvneta_txq_desc_put(txq);
		return -ENOMEM;
	}

	tx_desc->command = 0;
	txq->tx_skb[txq->txq_put_index] = NULL;

	if (last_tcp) {
		/* last descriptor in the TCP packet */
		tx_desc->command = MVNETA_TXD_L_DESC;

		/* last descriptor in SKB */
		if (is_last)
			txq->tx_skb[txq->txq_put_index] = skb;
	}
	mvneta_txq_inc_put(txq);
	return 0;
}

static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
			 struct mvneta_tx_queue *txq)
{
	int total_len, data_left;
	int desc_count = 0;
	struct mvneta_port *pp = netdev_priv(dev);
	struct tso_t tso;
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int i;

	/* Count needed descriptors */
	if ((txq->count + tso_count_descs(skb)) >= txq->size)
		return 0;

	if (skb_headlen(skb) < (skb_transport_offset(skb) + tcp_hdrlen(skb))) {
		pr_info("*** Is this even  possible???!?!?\n");
		return 0;
	}

	/* Initialize the TSO handler, and prepare the first payload */
	tso_start(skb, &tso);

	total_len = skb->len - hdr_len;
	while (total_len > 0) {
		char *hdr;

		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
		total_len -= data_left;
		desc_count++;

		/* prepare packet headers: MAC + IP + TCP */
		hdr = txq->tso_hdrs + txq->txq_put_index * TSO_HEADER_SIZE;
		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);

		mvneta_tso_put_hdr(skb, pp, txq);

		while (data_left > 0) {
			int size;
			desc_count++;

			size = min_t(int, tso.size, data_left);

			if (mvneta_tso_put_data(dev, txq, skb,
						 tso.data, size,
						 size == data_left,
						 total_len == 0))
				goto err_release;
			data_left -= size;

			tso_build_data(skb, &tso, size);
		}
	}

	return desc_count;

err_release:
	/* Release all used data descriptors; header descriptors must not
	 * be DMA-unmapped.
	 */
	for (i = desc_count - 1; i >= 0; i--) {
		struct mvneta_tx_desc *tx_desc = txq->descs + i;
		if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
			dma_unmap_single(pp->dev->dev.parent,
					 tx_desc->buf_phys_addr,
					 tx_desc->data_size,
					 DMA_TO_DEVICE);
		mvneta_txq_desc_put(txq);
	}
	return 0;
}

/* Handle tx fragmentation processing */
static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
				  struct mvneta_tx_queue *txq)
{
	struct mvneta_tx_desc *tx_desc;
	int i, nr_frags = skb_shinfo(skb)->nr_frags;

	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		void *addr = page_address(frag->page.p) + frag->page_offset;

		tx_desc = mvneta_txq_next_desc_get(txq);
		tx_desc->data_size = frag->size;

		tx_desc->buf_phys_addr =
			dma_map_single(pp->dev->dev.parent, addr,
				       tx_desc->data_size, DMA_TO_DEVICE);

		if (dma_mapping_error(pp->dev->dev.parent,
				      tx_desc->buf_phys_addr)) {
			mvneta_txq_desc_put(txq);
			goto error;
		}

		if (i == nr_frags - 1) {
			/* Last descriptor */
			tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
			txq->tx_skb[txq->txq_put_index] = skb;
		} else {
			/* Descriptor in the middle: Not First, Not Last */
			tx_desc->command = 0;
			txq->tx_skb[txq->txq_put_index] = NULL;
		}
		mvneta_txq_inc_put(txq);
	}

	return 0;

error:
	/* Release all descriptors that were used to map fragments of
	 * this packet, as well as the corresponding DMA mappings
	 */
	for (i = i - 1; i >= 0; i--) {
		tx_desc = txq->descs + i;
		dma_unmap_single(pp->dev->dev.parent,
				 tx_desc->buf_phys_addr,
				 tx_desc->data_size,
				 DMA_TO_DEVICE);
		mvneta_txq_desc_put(txq);
	}

	return -ENOMEM;
}

/* Main tx processing */
static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);
	u16 txq_id = skb_get_queue_mapping(skb);
	struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
	struct mvneta_tx_desc *tx_desc;
	int len = skb->len;
	int frags = 0;
	u32 tx_cmd;

	if (!netif_running(dev))
		goto out;

	if (skb_is_gso(skb)) {
		frags = mvneta_tx_tso(skb, dev, txq);
		goto out;
	}

	frags = skb_shinfo(skb)->nr_frags + 1;

	/* Get a descriptor for the first part of the packet */
	tx_desc = mvneta_txq_next_desc_get(txq);

	tx_cmd = mvneta_skb_tx_csum(pp, skb);

	tx_desc->data_size = skb_headlen(skb);

	tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
						tx_desc->data_size,
						DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent,
				       tx_desc->buf_phys_addr))) {
		mvneta_txq_desc_put(txq);
		frags = 0;
		goto out;
	}

	if (frags == 1) {
		/* First and Last descriptor */
		tx_cmd |= MVNETA_TXD_FLZ_DESC;
		tx_desc->command = tx_cmd;
		txq->tx_skb[txq->txq_put_index] = skb;
		mvneta_txq_inc_put(txq);
	} else {
		/* First but not Last */
		tx_cmd |= MVNETA_TXD_F_DESC;
		txq->tx_skb[txq->txq_put_index] = NULL;
		mvneta_txq_inc_put(txq);
		tx_desc->command = tx_cmd;
		/* Continue with other skb fragments */
		if (mvneta_tx_frag_process(pp, skb, txq)) {
			dma_unmap_single(dev->dev.parent,
					 tx_desc->buf_phys_addr,
					 tx_desc->data_size,
					 DMA_TO_DEVICE);
			mvneta_txq_desc_put(txq);
			frags = 0;
			goto out;
		}
	}

out:
	if (frags > 0) {
		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
		struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);

		netdev_tx_sent_queue(nq, len);

		txq->count += frags;
		if (txq->count >= txq->tx_stop_threshold)
			netif_tx_stop_queue(nq);

		if (!skb->xmit_more || netif_xmit_stopped(nq) ||
		    txq->pending + frags > MVNETA_TXQ_DEC_SENT_MASK)
			mvneta_txq_pend_desc_add(pp, txq, frags);
		else
			txq->pending += frags;

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes  += len;
		u64_stats_update_end(&stats->syncp);
	} else {
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
	}

	return NETDEV_TX_OK;
}


/* Free tx resources, when resetting a port */
static void mvneta_txq_done_force(struct mvneta_port *pp,
				  struct mvneta_tx_queue *txq)

{
	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
	int tx_done = txq->count;

	mvneta_txq_bufs_free(pp, txq, tx_done, nq);

	/* reset txq */
	txq->count = 0;
	txq->txq_put_index = 0;
	txq->txq_get_index = 0;
}

/* Handle tx done - called in softirq context. The <cause_tx_done> argument
 * must be a valid cause according to MVNETA_TXQ_INTR_MASK_ALL.
 */
static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done)
{
	struct mvneta_tx_queue *txq;
	struct netdev_queue *nq;

	while (cause_tx_done) {
		txq = mvneta_tx_done_policy(pp, cause_tx_done);

		nq = netdev_get_tx_queue(pp->dev, txq->id);
		__netif_tx_lock(nq, smp_processor_id());

		if (txq->count)
			mvneta_txq_done(pp, txq);

		__netif_tx_unlock(nq);
		cause_tx_done &= ~((1 << txq->id));
	}
}

/* Compute the CRC8 of the specified address, using an algorithm unique to
 * this hardware (per the HW spec) rather than the generic CRC8 algorithm.
 */
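/* The loop below implements a bitwise CRC-8, MSB first, with polynomial
 * 0x107 (x^8 + x^2 + x + 1), computed over the six MAC address bytes.
 */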
static int mvneta_addr_crc(unsigned char *addr)
{
	int crc = 0;
	int i;

	for (i = 0; i < ETH_ALEN; i++) {
		int j;

		crc = (crc ^ addr[i]) << 8;
		for (j = 7; j >= 0; j--) {
			if (crc & (0x100 << j))
				crc ^= 0x107 << j;
		}
	}

	return crc;
}

/* This method controls the net device special MAC multicast support.
 * The Special Multicast Table for MAC addresses supports MAC of the form
 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
 * Table entries in the DA-Filter table. This method sets the appropriate
 * Special Multicast Table entry.
 */
static void mvneta_set_special_mcast_addr(struct mvneta_port *pp,
					  unsigned char last_byte,
					  int queue)
{
	unsigned int smc_table_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	/* Register offset from SMC table base    */
	tbl_offset = (last_byte / 4);
	/* Entry offset within the above reg */
	reg_offset = last_byte % 4;
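	/* e.g. last_byte 0x0b selects table register 2, byte lane 3 */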

	smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST
					+ tbl_offset * 4));

	if (queue == -1)
		smc_table_reg &= ~(0xff << (8 * reg_offset));
	else {
		smc_table_reg &= ~(0xff << (8 * reg_offset));
		smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
	}

	mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4,
		    smc_table_reg);
}

/* This method controls the network device Other MAC multicast support.
 * The Other Multicast Table is used for multicast of another type.
 * A CRC-8 is used as an index to the Other Multicast Table entries
 * in the DA-Filter table.
 * The method gets the CRC-8 value from the calling routine and
 * sets the Other Multicast Table appropriate entry according to the
 * specified CRC-8 .
 */
static void mvneta_set_other_mcast_addr(struct mvneta_port *pp,
					unsigned char crc8,
					int queue)
{
	unsigned int omc_table_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	tbl_offset = (crc8 / 4) * 4; /* Register offset from OMC table base */
	reg_offset = crc8 % 4;	     /* Entry offset within the above reg   */

	omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset);

	if (queue == -1) {
		/* Clear accepts frame bit at specified Other DA table entry */
		omc_table_reg &= ~(0xff << (8 * reg_offset));
	} else {
		omc_table_reg &= ~(0xff << (8 * reg_offset));
		omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
	}

	mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg);
}

/* The network device supports multicast using two tables:
 *    1) Special Multicast Table for MAC addresses of the form
 *       0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
 *       The MAC DA[7:0] bits are used as a pointer to the Special Multicast
 *       Table entries in the DA-Filter table.
 *    2) Other Multicast Table for multicast of another type. A CRC-8 value
 *       is used as an index to the Other Multicast Table entries in the
 *       DA-Filter table.
 */
static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr,
				 int queue)
{
	unsigned char crc_result = 0;

	if (memcmp(p_addr, "\x01\x00\x5e\x00\x00", 5) == 0) {
		mvneta_set_special_mcast_addr(pp, p_addr[5], queue);
		return 0;
	}

	crc_result = mvneta_addr_crc(p_addr);
	if (queue == -1) {
		if (pp->mcast_count[crc_result] == 0) {
			netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n",
				    crc_result);
			return -EINVAL;
		}

		pp->mcast_count[crc_result]--;
		if (pp->mcast_count[crc_result] != 0) {
			netdev_info(pp->dev,
				    "After delete there are %d valid Mcast for crc8=0x%02x\n",
				    pp->mcast_count[crc_result], crc_result);
			return -EINVAL;
		}
	} else
		pp->mcast_count[crc_result]++;

	mvneta_set_other_mcast_addr(pp, crc_result, queue);

	return 0;
}

/* Configure the filtering mode of the Ethernet port */
static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp,
					  int is_promisc)
{
	u32 port_cfg_reg, val;

	port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG);

	val = mvreg_read(pp, MVNETA_TYPE_PRIO);

	/* Set / Clear UPM bit in port configuration register */
	if (is_promisc) {
		/* Accept all Unicast addresses */
		port_cfg_reg |= MVNETA_UNI_PROMISC_MODE;
		val |= MVNETA_FORCE_UNI;
		mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff);
		mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff);
	} else {
		/* Reject all Unicast addresses */
		port_cfg_reg &= ~MVNETA_UNI_PROMISC_MODE;
		val &= ~MVNETA_FORCE_UNI;
	}

	mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg);
	mvreg_write(pp, MVNETA_TYPE_PRIO, val);
}

/* register unicast and multicast addresses */
static void mvneta_set_rx_mode(struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);
	struct netdev_hw_addr *ha;

	if (dev->flags & IFF_PROMISC) {
		/* Accept all: Multicast + Unicast */
		mvneta_rx_unicast_promisc_set(pp, 1);
		mvneta_set_ucast_table(pp, pp->rxq_def);
		mvneta_set_special_mcast_table(pp, pp->rxq_def);
		mvneta_set_other_mcast_table(pp, pp->rxq_def);
	} else {
		/* Accept single Unicast */
		mvneta_rx_unicast_promisc_set(pp, 0);
		mvneta_set_ucast_table(pp, -1);
		mvneta_mac_addr_set(pp, dev->dev_addr, pp->rxq_def);

		if (dev->flags & IFF_ALLMULTI) {
			/* Accept all multicast */
			mvneta_set_special_mcast_table(pp, pp->rxq_def);
			mvneta_set_other_mcast_table(pp, pp->rxq_def);
		} else {
			/* Accept only initialized multicast */
			mvneta_set_special_mcast_table(pp, -1);
			mvneta_set_other_mcast_table(pp, -1);

			if (!netdev_mc_empty(dev)) {
				netdev_for_each_mc_addr(ha, dev) {
					mvneta_mcast_addr_set(pp, ha->addr,
							      pp->rxq_def);
				}
			}
		}
	}
}

/* Interrupt handling - the callback for request_irq() */
static irqreturn_t mvneta_isr(int irq, void *dev_id)
{
	struct mvneta_port *pp = (struct mvneta_port *)dev_id;

	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
	napi_schedule(&pp->napi);

	return IRQ_HANDLED;
}

/* Interrupt handling - the callback for request_percpu_irq() */
static irqreturn_t mvneta_percpu_isr(int irq, void *dev_id)
{
	struct mvneta_pcpu_port *port = (struct mvneta_pcpu_port *)dev_id;

	disable_percpu_irq(port->pp->dev->irq);
	napi_schedule(&port->napi);

	return IRQ_HANDLED;
}

static void mvneta_link_change(struct mvneta_port *pp)
{
	u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);

	phylink_mac_change(pp->phylink, !!(gmac_stat & MVNETA_GMAC_LINK_UP));
}

/* NAPI handler
 * Bits 0-7 of the causeRxTx register indicate that packets were
 * transmitted on the corresponding TXQ (bit 0 is for TX queue 1).
 * Bits 8-15 of the causeRxTx register indicate that packets were
 * received on the corresponding RXQ (bit 8 is for RX queue 0).
 * Each CPU has its own causeRxTx register
 */
static int mvneta_poll(struct napi_struct *napi, int budget)
{
	int rx_done = 0;
	u32 cause_rx_tx;
	int rx_queue;
	struct mvneta_port *pp = netdev_priv(napi->dev);
	struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);

	if (!netif_running(pp->dev)) {
		napi_complete(napi);
		return rx_done;
	}

	/* Read cause register */
	cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE);
	if (cause_rx_tx & MVNETA_MISCINTR_INTR_MASK) {
		u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE);

		mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);

		if (cause_misc & (MVNETA_CAUSE_PHY_STATUS_CHANGE |
				  MVNETA_CAUSE_LINK_CHANGE |
				  MVNETA_CAUSE_PSC_SYNC_CHANGE))
			mvneta_link_change(pp);
	}

	/* Release Tx descriptors */
	if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) {
		mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL));
		cause_rx_tx &= ~MVNETA_TX_INTR_MASK_ALL;
	}

	/* For the case where the last mvneta_poll did not process all
	 * RX packets
	 */
	rx_queue = fls(((cause_rx_tx >> 8) & 0xff));

	cause_rx_tx |= pp->neta_armada3700 ? pp->cause_rx_tx :
		port->cause_rx_tx;

	if (rx_queue) {
		rx_queue = rx_queue - 1;
		if (pp->bm_priv)
			rx_done = mvneta_rx_hwbm(pp, budget, &pp->rxqs[rx_queue]);
		else
			rx_done = mvneta_rx_swbm(pp, budget, &pp->rxqs[rx_queue]);
	}

	if (rx_done < budget) {
		cause_rx_tx = 0;
		napi_complete_done(napi, rx_done);

		if (pp->neta_armada3700) {
			unsigned long flags;

			local_irq_save(flags);
			mvreg_write(pp, MVNETA_INTR_NEW_MASK,
				    MVNETA_RX_INTR_MASK(rxq_number) |
				    MVNETA_TX_INTR_MASK(txq_number) |
				    MVNETA_MISCINTR_INTR_MASK);
			local_irq_restore(flags);
		} else {
			enable_percpu_irq(pp->dev->irq, 0);
		}
	}

	if (pp->neta_armada3700)
		pp->cause_rx_tx = cause_rx_tx;
	else
		port->cause_rx_tx = cause_rx_tx;

	return rx_done;
}

/* Handle rxq fill: allocates rxq skbs; called when initializing a port */
static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
			   int num)
{
	int i;

	for (i = 0; i < num; i++) {
		memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));
		if (mvneta_rx_refill(pp, rxq->descs + i, rxq) != 0) {
			netdev_err(pp->dev, "%s:rxq %d, %d of %d buffs  filled\n",
				__func__, rxq->id, i, num);
			break;
		}
	}

	/* Add this number of RX descriptors as non occupied (ready to
	 * get packets)
	 */
	mvneta_rxq_non_occup_desc_add(pp, rxq, i);

	return i;
}

/* Free all packets pending transmit from all TXQs and reset TX port */
static void mvneta_tx_reset(struct mvneta_port *pp)
{
	int queue;

	/* free the skb's in the tx ring */
	for (queue = 0; queue < txq_number; queue++)
		mvneta_txq_done_force(pp, &pp->txqs[queue]);

	mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
	mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
}

static void mvneta_rx_reset(struct mvneta_port *pp)
{
	mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
	mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
}

/* Rx/Tx queue initialization/cleanup methods */

/* Create a specified RX queue */
static int mvneta_rxq_init(struct mvneta_port *pp,
			   struct mvneta_rx_queue *rxq)

{
	rxq->size = pp->rx_ring_size;

	/* Allocate memory for RX descriptors */
	rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
					rxq->size * MVNETA_DESC_ALIGNED_SIZE,
					&rxq->descs_phys, GFP_KERNEL);
	if (!rxq->descs)
		return -ENOMEM;

	rxq->last_desc = rxq->size - 1;

	/* Set Rx descriptors queue starting address */
	mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);

	/* Set Offset */
	mvneta_rxq_offset_set(pp, rxq, NET_SKB_PAD - pp->rx_offset_correction);

	/* Set coalescing pkts and time */
	mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
	mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);

	if (!pp->bm_priv) {
		/* Fill RXQ with buffers from RX pool */
		mvneta_rxq_buf_size_set(pp, rxq,
					MVNETA_RX_BUF_SIZE(pp->pkt_size));
		mvneta_rxq_bm_disable(pp, rxq);
		mvneta_rxq_fill(pp, rxq, rxq->size);
	} else {
		mvneta_rxq_bm_enable(pp, rxq);
		mvneta_rxq_long_pool_set(pp, rxq);
		mvneta_rxq_short_pool_set(pp, rxq);
		mvneta_rxq_non_occup_desc_add(pp, rxq, rxq->size);
	}

	return 0;
}

/* Cleanup Rx queue */
static void mvneta_rxq_deinit(struct mvneta_port *pp,
			      struct mvneta_rx_queue *rxq)
{
	mvneta_rxq_drop_pkts(pp, rxq);

	if (rxq->descs)
		dma_free_coherent(pp->dev->dev.parent,
				  rxq->size * MVNETA_DESC_ALIGNED_SIZE,
				  rxq->descs,
				  rxq->descs_phys);

	rxq->descs             = NULL;
	rxq->last_desc         = 0;
	rxq->next_desc_to_proc = 0;
	rxq->descs_phys        = 0;
}

/* Create and initialize a tx queue */
static int mvneta_txq_init(struct mvneta_port *pp,
			   struct mvneta_tx_queue *txq)
{
	int cpu;

	txq->size = pp->tx_ring_size;

	/* A queue must always have room for at least one skb.
	 * Therefore, stop the queue when the number of free entries reaches
	 * the maximum number of descriptors per skb.
	 */
	txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS;
	txq->tx_wake_threshold = txq->tx_stop_threshold / 2;


	/* Allocate memory for TX descriptors */
	txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
					txq->size * MVNETA_DESC_ALIGNED_SIZE,
					&txq->descs_phys, GFP_KERNEL);
	if (!txq->descs)
		return -ENOMEM;

	txq->last_desc = txq->size - 1;

	/* Set maximum bandwidth for enabled TXQs */
	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);

	/* Set Tx descriptors queue starting address */
	mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);

	txq->tx_skb = kmalloc_array(txq->size, sizeof(*txq->tx_skb),
				    GFP_KERNEL);
	if (!txq->tx_skb) {
		dma_free_coherent(pp->dev->dev.parent,
				  txq->size * MVNETA_DESC_ALIGNED_SIZE,
				  txq->descs, txq->descs_phys);
		return -ENOMEM;
	}

	/* Allocate DMA buffers for TSO MAC/IP/TCP headers */
	txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent,
					   txq->size * TSO_HEADER_SIZE,
					   &txq->tso_hdrs_phys, GFP_KERNEL);
	if (!txq->tso_hdrs) {
		kfree(txq->tx_skb);
		dma_free_coherent(pp->dev->dev.parent,
				  txq->size * MVNETA_DESC_ALIGNED_SIZE,
				  txq->descs, txq->descs_phys);
		return -ENOMEM;
	}
	mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);

	/* Setup XPS mapping */
	if (txq_number > 1)
		cpu = txq->id % num_present_cpus();
	else
		cpu = pp->rxq_def % num_present_cpus();
	cpumask_set_cpu(cpu, &txq->affinity_mask);
	netif_set_xps_queue(pp->dev, &txq->affinity_mask, txq->id);

	return 0;
}

/* Free all resources allocated for a Tx queue; also used when
 * mvneta_txq_init() fails to allocate memory.
 */
static void mvneta_txq_deinit(struct mvneta_port *pp,
			      struct mvneta_tx_queue *txq)
{
	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);

	kfree(txq->tx_skb);

	if (txq->tso_hdrs)
		dma_free_coherent(pp->dev->dev.parent,
				  txq->size * TSO_HEADER_SIZE,
				  txq->tso_hdrs, txq->tso_hdrs_phys);
	if (txq->descs)
		dma_free_coherent(pp->dev->dev.parent,
				  txq->size * MVNETA_DESC_ALIGNED_SIZE,
				  txq->descs, txq->descs_phys);

	netdev_tx_reset_queue(nq);

	txq->descs             = NULL;
	txq->last_desc         = 0;
	txq->next_desc_to_proc = 0;
	txq->descs_phys        = 0;

	/* Set minimum bandwidth for disabled TXQs */
	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);

	/* Set Tx descriptors queue starting address and size */
	mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0);
	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
}

/* Cleanup all Tx queues */
static void mvneta_cleanup_txqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < txq_number; queue++)
		mvneta_txq_deinit(pp, &pp->txqs[queue]);
}

/* Cleanup all Rx queues */
static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < rxq_number; queue++)
		mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
}


/* Init all Rx queues */
static int mvneta_setup_rxqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < rxq_number; queue++) {
		int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);

		if (err) {
			netdev_err(pp->dev, "%s: can't create rxq=%d\n",
				   __func__, queue);
			mvneta_cleanup_rxqs(pp);
			return err;
		}
	}

	return 0;
}

/* Init all tx queues */
static int mvneta_setup_txqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < txq_number; queue++) {
		int err = mvneta_txq_init(pp, &pp->txqs[queue]);
		if (err) {
			netdev_err(pp->dev, "%s: can't create txq=%d\n",
				   __func__, queue);
			mvneta_cleanup_txqs(pp);
			return err;
		}
	}

	return 0;
}

static void mvneta_start_dev(struct mvneta_port *pp)
{
	int cpu;

	mvneta_max_rx_size_set(pp, pp->pkt_size);
	mvneta_txq_max_tx_size_set(pp, pp->pkt_size);

	/* start the Rx/Tx activity */
	mvneta_port_enable(pp);

	if (!pp->neta_armada3700) {
		/* Enable polling on the port */
		for_each_online_cpu(cpu) {
			struct mvneta_pcpu_port *port =
				per_cpu_ptr(pp->ports, cpu);

			napi_enable(&port->napi);
		}
	} else {
		napi_enable(&pp->napi);
	}

	/* Unmask interrupts. It has to be done from each CPU */
	on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);

	mvreg_write(pp, MVNETA_INTR_MISC_MASK,
		    MVNETA_CAUSE_PHY_STATUS_CHANGE |
		    MVNETA_CAUSE_LINK_CHANGE |
		    MVNETA_CAUSE_PSC_SYNC_CHANGE);
	phylink_start(pp->phylink);
	netif_tx_start_all_queues(pp->dev);
}

static void mvneta_stop_dev(struct mvneta_port *pp)
{
	unsigned int cpu;

	phylink_stop(pp->phylink);

	if (!pp->neta_armada3700) {
		for_each_online_cpu(cpu) {
			struct mvneta_pcpu_port *port =
				per_cpu_ptr(pp->ports, cpu);
			napi_disable(&port->napi);
		}
	} else {
		napi_disable(&pp->napi);
	}

	netif_carrier_off(pp->dev);

	mvneta_port_down(pp);
	netif_tx_stop_all_queues(pp->dev);

	/* Stop the port activity */
	mvneta_port_disable(pp);

	/* Clear all ethernet port interrupts */
	on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);

	/* Mask all ethernet port interrupts */
	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);

	mvneta_tx_reset(pp);
	mvneta_rx_reset(pp);
}

static void mvneta_percpu_enable(void *arg)
{
	struct mvneta_port *pp = arg;

	enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE);
}

static void mvneta_percpu_disable(void *arg)
{
	struct mvneta_port *pp = arg;

	disable_percpu_irq(pp->dev->irq);
}

/* Change the device mtu */
static int mvneta_change_mtu(struct net_device *dev, int mtu)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int ret;

	if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) {
		netdev_info(dev, "Illegal MTU value %d, rounding to %d\n",
			    mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8));
		mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8);
	}

	dev->mtu = mtu;

	if (!netif_running(dev)) {
		if (pp->bm_priv)
			mvneta_bm_update_mtu(pp, mtu);

		netdev_update_features(dev);
		return 0;
	}

	/* The interface is running, so we have to force a
	 * reallocation of the queues
	 */
	mvneta_stop_dev(pp);
	on_each_cpu(mvneta_percpu_disable, pp, true);

	mvneta_cleanup_txqs(pp);
	mvneta_cleanup_rxqs(pp);

	if (pp->bm_priv)
		mvneta_bm_update_mtu(pp, mtu);

	pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu);
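	/* frag_size must also leave room for the skb_shared_info that
	 * build_skb() expects at the end of the buffer.
	 */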
	pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
	                SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	ret = mvneta_setup_rxqs(pp);
	if (ret) {
		netdev_err(dev, "unable to setup rxqs after MTU change\n");
		return ret;
	}

	ret = mvneta_setup_txqs(pp);
	if (ret) {
		netdev_err(dev, "unable to setup txqs after MTU change\n");
		return ret;
	}

	on_each_cpu(mvneta_percpu_enable, pp, true);
	mvneta_start_dev(pp);
	mvneta_port_up(pp);

	netdev_update_features(dev);

	return 0;
}

static netdev_features_t mvneta_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct mvneta_port *pp = netdev_priv(dev);

	if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) {
		features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
		netdev_info(dev,
			    "Disable IP checksum for MTU greater than %dB\n",
			    pp->tx_csum_limit);
	}

	return features;
}

/* Get mac address */
static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
{
	u32 mac_addr_l, mac_addr_h;

	mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW);
	mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH);
	addr[0] = (mac_addr_h >> 24) & 0xFF;
	addr[1] = (mac_addr_h >> 16) & 0xFF;
	addr[2] = (mac_addr_h >> 8) & 0xFF;
	addr[3] = mac_addr_h & 0xFF;
	addr[4] = (mac_addr_l >> 8) & 0xFF;
	addr[5] = mac_addr_l & 0xFF;
}

/* Handle setting mac address */
static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
{
	struct mvneta_port *pp = netdev_priv(dev);
	struct sockaddr *sockaddr = addr;
	int ret;

	ret = eth_prepare_mac_addr_change(dev, addr);
	if (ret < 0)
		return ret;
	/* Remove previous address table entry */
	mvneta_mac_addr_set(pp, dev->dev_addr, -1);

	/* Set new addr in hw */
	mvneta_mac_addr_set(pp, sockaddr->sa_data, pp->rxq_def);

	eth_commit_mac_addr_change(dev, addr);
	return 0;
}

static void mvneta_validate(struct net_device *ndev, unsigned long *supported,
			    struct phylink_link_state *state)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	/* We only support QSGMII, SGMII and RGMII modes */
	if (state->interface != PHY_INTERFACE_MODE_NA &&
	    state->interface != PHY_INTERFACE_MODE_QSGMII &&
	    state->interface != PHY_INTERFACE_MODE_SGMII &&
	    !phy_interface_mode_is_rgmii(state->interface)) {
		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
		return;
	}

	/* Allow all the expected bits */
	phylink_set(mask, Autoneg);
	phylink_set_port_modes(mask);

	/* Half-duplex at speeds higher than 100Mbit is unsupported */
	phylink_set(mask, 1000baseT_Full);
	phylink_set(mask, 1000baseX_Full);
	phylink_set(mask, 10baseT_Half);
	phylink_set(mask, 10baseT_Full);
	phylink_set(mask, 100baseT_Half);
	phylink_set(mask, 100baseT_Full);

	bitmap_and(supported, supported, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_and(state->advertising, state->advertising, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
}

static int mvneta_mac_link_state(struct net_device *ndev,
				 struct phylink_link_state *state)
{
	struct mvneta_port *pp = netdev_priv(ndev);
	u32 gmac_stat;

	gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);

	if (gmac_stat & MVNETA_GMAC_SPEED_1000)
		state->speed = SPEED_1000;
	else if (gmac_stat & MVNETA_GMAC_SPEED_100)
		state->speed = SPEED_100;
	else
		state->speed = SPEED_10;
	state->an_complete = !!(gmac_stat & MVNETA_GMAC_AN_COMPLETE);
	state->link = !!(gmac_stat & MVNETA_GMAC_LINK_UP);
	state->duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX);
	state->pause = 0;

	return 1;
}

static void mvneta_mac_config(struct net_device *ndev, unsigned int mode,
	const struct phylink_link_state *state)
{
	struct mvneta_port *pp = netdev_priv(ndev);
	u32 new_ctrl2, gmac_ctrl2 = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
	u32 new_clk, gmac_clk = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
	u32 new_an, gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);

	new_ctrl2 = gmac_ctrl2 & ~MVNETA_GMAC2_INBAND_AN_ENABLE;
	new_clk = gmac_clk & ~MVNETA_GMAC_1MS_CLOCK_ENABLE;
	new_an = gmac_an & ~(MVNETA_GMAC_INBAND_AN_ENABLE |
			     MVNETA_GMAC_INBAND_RESTART_AN |
			     MVNETA_GMAC_CONFIG_MII_SPEED |
			     MVNETA_GMAC_CONFIG_GMII_SPEED |
			     MVNETA_GMAC_AN_SPEED_EN |
			     MVNETA_GMAC_AN_FLOW_CTRL_EN |
			     MVNETA_GMAC_CONFIG_FULL_DUPLEX |
			     MVNETA_GMAC_AN_DUPLEX_EN);

	if (!phylink_autoneg_inband(mode)) {
		/* Phy or fixed speed */
		if (state->duplex)
			new_an |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;

		if (state->speed == SPEED_1000)
			new_an |= MVNETA_GMAC_CONFIG_GMII_SPEED;
		else if (state->speed == SPEED_100)
			new_an |= MVNETA_GMAC_CONFIG_MII_SPEED;
	} else {
		/* SGMII mode receives the state from the PHY */
		new_ctrl2 |= MVNETA_GMAC2_INBAND_AN_ENABLE;
		new_clk |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
		new_an = (new_an & ~(MVNETA_GMAC_FORCE_LINK_DOWN |
				     MVNETA_GMAC_FORCE_LINK_PASS)) |
			 MVNETA_GMAC_INBAND_AN_ENABLE |
			 MVNETA_GMAC_AN_SPEED_EN |
			 MVNETA_GMAC_AN_DUPLEX_EN;
	}
	/* Armada 370 documentation says we can only change the port mode
	 * and in-band enable when the link is down, so force it down
	 * while making these changes. We also do this for GMAC_CTRL2 */
	if ((new_ctrl2 ^ gmac_ctrl2) & MVNETA_GMAC2_INBAND_AN_ENABLE ||
	    (new_an  ^ gmac_an) & MVNETA_GMAC_INBAND_AN_ENABLE) {
		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
			    (gmac_an & ~MVNETA_GMAC_FORCE_LINK_PASS) |
			    MVNETA_GMAC_FORCE_LINK_DOWN);
	}

	if (new_ctrl2 != gmac_ctrl2)
		mvreg_write(pp, MVNETA_GMAC_CTRL_2, new_ctrl2);
	if (new_clk != gmac_clk)
		mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, new_clk);
	if (new_an != gmac_an)
		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, new_an);
}

static void mvneta_mac_link_down(struct net_device *ndev, unsigned int mode)
{
	struct mvneta_port *pp = netdev_priv(ndev);
	u32 val;

	mvneta_port_down(pp);

	if (!phylink_autoneg_inband(mode)) {
		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
		val &= ~MVNETA_GMAC_FORCE_LINK_PASS;
		val |= MVNETA_GMAC_FORCE_LINK_DOWN;
		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
	}
}

static void mvneta_mac_link_up(struct net_device *ndev, unsigned int mode,
			       struct phy_device *phy)
{
	struct mvneta_port *pp = netdev_priv(ndev);
	u32 val;

	if (!phylink_autoneg_inband(mode)) {
3325 3326 3327 3328
		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
		val &= ~MVNETA_GMAC_FORCE_LINK_DOWN;
		val |= MVNETA_GMAC_FORCE_LINK_PASS;
		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
	}

	mvneta_port_up(pp);
}

static const struct phylink_mac_ops mvneta_phylink_ops = {
	.validate = mvneta_validate,
	.mac_link_state = mvneta_mac_link_state,
	.mac_config = mvneta_mac_config,
	.mac_link_down = mvneta_mac_link_down,
	.mac_link_up = mvneta_mac_link_up,
};

static int mvneta_mdio_probe(struct mvneta_port *pp)
{
	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
	int err = phylink_of_phy_connect(pp->phylink, pp->dn, 0);
	if (err)
		netdev_err(pp->dev, "could not attach PHY: %d\n", err);
	phylink_ethtool_get_wol(pp->phylink, &wol);
	device_set_wakeup_capable(&pp->dev->dev, !!wol.supported);

	return err;
}

static void mvneta_mdio_remove(struct mvneta_port *pp)
{
	phylink_disconnect_phy(pp->phylink);
}

/* Electing a CPU must be done in an atomic way: it should be done
 * after or before the removal/insertion of a CPU and this function is
 * not reentrant.
 */
static void mvneta_percpu_elect(struct mvneta_port *pp)
{
	int elected_cpu = 0, max_cpu, cpu, i = 0;

	/* Use the cpu associated to the rxq when it is online, in all
	 * the other cases, use the cpu 0 which can't be offline.
	 */
	if (cpu_online(pp->rxq_def))
		elected_cpu = pp->rxq_def;

	max_cpu = num_present_cpus();

	for_each_online_cpu(cpu) {
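		/* Compute this CPU's RX/TX queue access map: RX queues are
		 * spread round-robin over the present CPUs, e.g. with 4 CPUs
		 * and 8 RXQs, CPU 1 is given queues 1 and 5.
		 */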
		int rxq_map = 0, txq_map = 0;
		int rxq;

		for (rxq = 0; rxq < rxq_number; rxq++)
			if ((rxq % max_cpu) == cpu)
				rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);

		if (cpu == elected_cpu)
			/* Map the default receive queue to the
			 * elected CPU
			 */
			rxq_map |= MVNETA_CPU_RXQ_ACCESS(pp->rxq_def);

		/* We update the TX queue map only if we have one
		 * queue. In this case we associate the TX queue to
		 * the CPU bound to the default RX queue
		 */
		if (txq_number == 1)
			txq_map = (cpu == elected_cpu) ?
				MVNETA_CPU_TXQ_ACCESS(1) : 0;
		else
			txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) &
				MVNETA_CPU_TXQ_ACCESS_ALL_MASK;

		mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);

		/* Update the interrupt mask on each CPU according the
		 * new mapping
		 */
		smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt,
					 pp, true);
3409
		i++;
3410

3411 3412 3413
	}
};

3414
static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	int other_cpu;
	struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
						  node_online);
	struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);

	spin_lock(&pp->lock);
	/*
	 * Configuring the driver for a new CPU while the driver is
	 * stopping is racy, so just avoid it.
	 */
	if (pp->is_stopped) {
		spin_unlock(&pp->lock);
		return 0;
	}
	netif_tx_stop_all_queues(pp->dev);

	/*
	 * We have to synchronise on the napi of each CPU except the one
	 * just being woken up
	 */
	for_each_online_cpu(other_cpu) {
		if (other_cpu != cpu) {
			struct mvneta_pcpu_port *other_port =
				per_cpu_ptr(pp->ports, other_cpu);

			napi_synchronize(&other_port->napi);
		}
	}

	/* Mask all ethernet port interrupts */
	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
	napi_enable(&port->napi);

	/*
	 * Enable per-CPU interrupts on the CPU that is
	 * brought up.
	 */
	mvneta_percpu_enable(pp);

	/*
	 * Elect the CPU handling the default RX queue, and update the
	 * RX/TX queue mappings for all CPUs accordingly.
	 */
	mvneta_percpu_elect(pp);

	/* Unmask all ethernet port interrupts */
	on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
	mvreg_write(pp, MVNETA_INTR_MISC_MASK,
		    MVNETA_CAUSE_PHY_STATUS_CHANGE |
		    MVNETA_CAUSE_LINK_CHANGE |
		    MVNETA_CAUSE_PSC_SYNC_CHANGE);
	netif_tx_start_all_queues(pp->dev);
	spin_unlock(&pp->lock);
	return 0;
}

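/* CPU hotplug "down prepare" callback: quiesce and disable the per-CPU
 * NAPI context and interrupts of the CPU about to go offline.
 */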
static int mvneta_cpu_down_prepare(unsigned int cpu, struct hlist_node *node)
{
	struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
						  node_online);
	struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);

	/*
	 * Thanks to this lock we are sure that any pending cpu election is
	 * done.
	 */
	spin_lock(&pp->lock);
	/* Mask all ethernet port interrupts */
	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
	spin_unlock(&pp->lock);

	napi_synchronize(&port->napi);
	napi_disable(&port->napi);
	/* Disable per-CPU interrupts on the CPU that is brought down. */
	mvneta_percpu_disable(pp);
	return 0;
}

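/* CPU hotplug "dead" callback: re-elect the CPU handling the default RX
 * queue and restore the interrupt masks once the CPU is gone.
 */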
static int mvneta_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
	struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
						  node_dead);

	/* Check if a new CPU must be elected now that this one is down */
	spin_lock(&pp->lock);
	mvneta_percpu_elect(pp);
	spin_unlock(&pp->lock);
	/* Unmask all ethernet port interrupts */
	on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
	mvreg_write(pp, MVNETA_INTR_MISC_MASK,
		    MVNETA_CAUSE_PHY_STATUS_CHANGE |
		    MVNETA_CAUSE_LINK_CHANGE |
		    MVNETA_CAUSE_PSC_SYNC_CHANGE);
	netif_tx_start_all_queues(pp->dev);
	return 0;
}

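/* Open the port: allocate the RX/TX queues, request the port interrupt(s),
 * register the CPU hotplug callbacks (not used on Armada 3700), attach the
 * PHY through phylink and start the device.
 */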
static int mvneta_open(struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int ret;

	pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
	pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
	                SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	ret = mvneta_setup_rxqs(pp);
	if (ret)
		return ret;

	ret = mvneta_setup_txqs(pp);
	if (ret)
		goto err_cleanup_rxqs;

	/* Connect to port interrupt line */
	if (pp->neta_armada3700)
		ret = request_irq(pp->dev->irq, mvneta_isr, 0,
				  dev->name, pp);
	else
		ret = request_percpu_irq(pp->dev->irq, mvneta_percpu_isr,
					 dev->name, pp->ports);
	if (ret) {
		netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
		goto err_cleanup_txqs;
	}

	if (!pp->neta_armada3700) {
		/* Enable per-CPU interrupt on all the CPU to handle our RX
		 * queue interrupts
		 */
		on_each_cpu(mvneta_percpu_enable, pp, true);

		pp->is_stopped = false;
		/* Register a CPU notifier to handle the case where our CPU
		 * might be taken offline.
		 */
		ret = cpuhp_state_add_instance_nocalls(online_hpstate,
						       &pp->node_online);
		if (ret)
			goto err_free_irq;

		ret = cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
						       &pp->node_dead);
		if (ret)
			goto err_free_online_hp;
	}

	/* By default the link is down */
	netif_carrier_off(pp->dev);

	ret = mvneta_mdio_probe(pp);
	if (ret < 0) {
		netdev_err(dev, "cannot probe MDIO bus\n");
		goto err_free_dead_hp;
	}

	mvneta_start_dev(pp);

	return 0;

err_free_dead_hp:
	if (!pp->neta_armada3700)
		cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
						    &pp->node_dead);
err_free_online_hp:
	if (!pp->neta_armada3700)
		cpuhp_state_remove_instance_nocalls(online_hpstate,
						    &pp->node_online);
err_free_irq:
	if (pp->neta_armada3700) {
		free_irq(pp->dev->irq, pp);
	} else {
		on_each_cpu(mvneta_percpu_disable, pp, true);
		free_percpu_irq(pp->dev->irq, pp->ports);
	}
err_cleanup_txqs:
	mvneta_cleanup_txqs(pp);
err_cleanup_rxqs:
	mvneta_cleanup_rxqs(pp);
	return ret;
}

/* Stop the port, free port interrupt line */
static int mvneta_stop(struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);

	if (!pp->neta_armada3700) {
		/* Inform that we are stopping, so we don't want to set up
		 * the driver for new CPUs in the notifiers. The code of the
		 * online-CPU notifier is protected by the same spinlock, so
		 * once we hold the lock, the notifier work is done.
		 */
		spin_lock(&pp->lock);
		pp->is_stopped = true;
		spin_unlock(&pp->lock);

		mvneta_stop_dev(pp);
		mvneta_mdio_remove(pp);

		cpuhp_state_remove_instance_nocalls(online_hpstate,
						    &pp->node_online);
		cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
						    &pp->node_dead);
		on_each_cpu(mvneta_percpu_disable, pp, true);
		free_percpu_irq(dev->irq, pp->ports);
	} else {
		mvneta_stop_dev(pp);
		mvneta_mdio_remove(pp);
		free_irq(dev->irq, pp);
	}

	mvneta_cleanup_rxqs(pp);
	mvneta_cleanup_txqs(pp);

	return 0;
}

static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mvneta_port *pp = netdev_priv(dev);

	return phylink_mii_ioctl(pp->phylink, ifr, cmd);
}

/* Ethtool methods */

/* Set link ksettings (phy address, speed) for ethtool */
static int
mvneta_ethtool_set_link_ksettings(struct net_device *ndev,
				  const struct ethtool_link_ksettings *cmd)
{
	struct mvneta_port *pp = netdev_priv(ndev);

	return phylink_ethtool_ksettings_set(pp->phylink, cmd);
}

/* Get link ksettings for ethtool */
static int
mvneta_ethtool_get_link_ksettings(struct net_device *ndev,
				  struct ethtool_link_ksettings *cmd)
{
	struct mvneta_port *pp = netdev_priv(ndev);

	return phylink_ethtool_ksettings_get(pp->phylink, cmd);
}

static int mvneta_ethtool_nway_reset(struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);

	return phylink_ethtool_nway_reset(pp->phylink);
}

/* Set interrupt coalescing for ethtool */
static int mvneta_ethtool_set_coalesce(struct net_device *dev,
				       struct ethtool_coalesce *c)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int queue;

	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
		rxq->time_coal = c->rx_coalesce_usecs;
		rxq->pkts_coal = c->rx_max_coalesced_frames;
		mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
		mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
	}

	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];
		txq->done_pkts_coal = c->tx_max_coalesced_frames;
		mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
	}

	return 0;
}

/* Get interrupt coalescing for ethtool */
static int mvneta_ethtool_get_coalesce(struct net_device *dev,
				       struct ethtool_coalesce *c)
{
	struct mvneta_port *pp = netdev_priv(dev);

	c->rx_coalesce_usecs        = pp->rxqs[0].time_coal;
	c->rx_max_coalesced_frames  = pp->rxqs[0].pkts_coal;

	c->tx_max_coalesced_frames =  pp->txqs[0].done_pkts_coal;
	return 0;
}

static void mvneta_ethtool_get_drvinfo(struct net_device *dev,
				    struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, MVNETA_DRIVER_NAME,
		sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, MVNETA_DRIVER_VERSION,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
		sizeof(drvinfo->bus_info));
}

static void mvneta_ethtool_get_ringparam(struct net_device *netdev,
					 struct ethtool_ringparam *ring)
{
	struct mvneta_port *pp = netdev_priv(netdev);

	ring->rx_max_pending = MVNETA_MAX_RXD;
	ring->tx_max_pending = MVNETA_MAX_TXD;
	ring->rx_pending = pp->rx_ring_size;
	ring->tx_pending = pp->tx_ring_size;
}

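/* Set ring sizes for ethtool; the port is restarted if it is running */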
static int mvneta_ethtool_set_ringparam(struct net_device *dev,
					struct ethtool_ringparam *ring)
{
	struct mvneta_port *pp = netdev_priv(dev);

	if ((ring->rx_pending == 0) || (ring->tx_pending == 0))
		return -EINVAL;
	pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ?
		ring->rx_pending : MVNETA_MAX_RXD;

	pp->tx_ring_size = clamp_t(u16, ring->tx_pending,
				   MVNETA_MAX_SKB_DESCS * 2, MVNETA_MAX_TXD);
	if (pp->tx_ring_size != ring->tx_pending)
		netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
			    pp->tx_ring_size, ring->tx_pending);

	if (netif_running(dev)) {
		mvneta_stop(dev);
		if (mvneta_open(dev)) {
			netdev_err(dev,
				   "error on opening device after ring param change\n");
			return -ENOMEM;
		}
	}

	return 0;
}

static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
				       u8 *data)
{
	if (sset == ETH_SS_STATS) {
		int i;

		for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       mvneta_statistics[i].name, ETH_GSTRING_LEN);
	}
}

static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
{
	const struct mvneta_statistic *s;
	void __iomem *base = pp->base;
	u32 high, low, val;
	u64 val64;
	int i;

	for (i = 0, s = mvneta_statistics;
	     s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics);
	     s++, i++) {
		switch (s->type) {
		case T_REG_32:
			val = readl_relaxed(base + s->offset);
			pp->ethtool_stats[i] += val;
			break;
		case T_REG_64:
			/* Docs say to read low 32-bit then high */
			low = readl_relaxed(base + s->offset);
			high = readl_relaxed(base + s->offset + 4);
			val64 = (u64)high << 32 | low;
			pp->ethtool_stats[i] += val64;
			break;
		}
	}
}

static void mvneta_ethtool_get_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int i;

	mvneta_ethtool_update_stats(pp);

	for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
		*data++ = pp->ethtool_stats[i];
}

static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset)
{
	if (sset == ETH_SS_STATS)
		return ARRAY_SIZE(mvneta_statistics);
	return -EOPNOTSUPP;
}

static u32 mvneta_ethtool_get_rxfh_indir_size(struct net_device *dev)
{
	return MVNETA_RSS_LU_TABLE_SIZE;
}

static int mvneta_ethtool_get_rxnfc(struct net_device *dev,
				    struct ethtool_rxnfc *info,
				    u32 *rules __always_unused)
{
	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data =  rxq_number;
		return 0;
	case ETHTOOL_GRXFH:
		return -EOPNOTSUPP;
	default:
		return -EOPNOTSUPP;
	}
}

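/* Apply a new RSS indirection table: quiesce every per-CPU NAPI context,
 * take the new default RX queue from indir[0], update the unicast mapping
 * and port configuration, then re-elect the CPU handling that queue.
 */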
static int  mvneta_config_rss(struct mvneta_port *pp)
{
	int cpu;
	u32 val;

	netif_tx_stop_all_queues(pp->dev);

	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);

	/* We have to synchronise on the napi of each CPU */
	for_each_online_cpu(cpu) {
		struct mvneta_pcpu_port *pcpu_port =
			per_cpu_ptr(pp->ports, cpu);

		napi_synchronize(&pcpu_port->napi);
		napi_disable(&pcpu_port->napi);
	}

	pp->rxq_def = pp->indir[0];

	/* Update unicast mapping */
	mvneta_set_rx_mode(pp->dev);

	/* Update val of portCfg register accordingly with all RxQueue types */
	val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
	mvreg_write(pp, MVNETA_PORT_CONFIG, val);

	/* Update the elected CPU matching the new rxq_def */
	spin_lock(&pp->lock);
	mvneta_percpu_elect(pp);
	spin_unlock(&pp->lock);

	/* We have to synchronise on the napi of each CPU */
	for_each_online_cpu(cpu) {
		struct mvneta_pcpu_port *pcpu_port =
			per_cpu_ptr(pp->ports, cpu);

		napi_enable(&pcpu_port->napi);
	}

	netif_tx_start_all_queues(pp->dev);

	return 0;
}

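/* Set the RSS indirection table from ethtool; only the default (Toeplitz)
 * hash function without a key is supported.
 */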
static int mvneta_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
				   const u8 *key, const u8 hfunc)
{
	struct mvneta_port *pp = netdev_priv(dev);

	/* Current code for Armada 3700 doesn't support RSS features yet */
	if (pp->neta_armada3700)
		return -EOPNOTSUPP;

	/* We require at least one supported parameter to be changed
	 * and no change in any of the unsupported parameters
	 */
	if (key ||
	    (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
		return -EOPNOTSUPP;

	if (!indir)
		return 0;

	memcpy(pp->indir, indir, MVNETA_RSS_LU_TABLE_SIZE);

	return mvneta_config_rss(pp);
}

static int mvneta_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
				   u8 *hfunc)
{
	struct mvneta_port *pp = netdev_priv(dev);

	/* Current code for Armada 3700 doesn't support RSS features yet */
	if (pp->neta_armada3700)
		return -EOPNOTSUPP;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	if (!indir)
		return 0;

	memcpy(indir, pp->indir, MVNETA_RSS_LU_TABLE_SIZE);

	return 0;
}

static void mvneta_ethtool_get_wol(struct net_device *dev,
				   struct ethtool_wolinfo *wol)
{
	struct mvneta_port *pp = netdev_priv(dev);

	phylink_ethtool_get_wol(pp->phylink, wol);
}

static int mvneta_ethtool_set_wol(struct net_device *dev,
				  struct ethtool_wolinfo *wol)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int ret;

	ret = phylink_ethtool_set_wol(pp->phylink, wol);
	if (!ret)
		device_set_wakeup_enable(&dev->dev, !!wol->wolopts);

	return ret;
}

static const struct net_device_ops mvneta_netdev_ops = {
	.ndo_open            = mvneta_open,
	.ndo_stop            = mvneta_stop,
	.ndo_start_xmit      = mvneta_tx,
	.ndo_set_rx_mode     = mvneta_set_rx_mode,
	.ndo_set_mac_address = mvneta_set_mac_addr,
	.ndo_change_mtu      = mvneta_change_mtu,
	.ndo_fix_features    = mvneta_fix_features,
	.ndo_get_stats64     = mvneta_get_stats64,
	.ndo_do_ioctl        = mvneta_ioctl,
};

static const struct ethtool_ops mvneta_eth_tool_ops = {
	.nway_reset	= mvneta_ethtool_nway_reset,
	.get_link       = ethtool_op_get_link,
	.set_coalesce   = mvneta_ethtool_set_coalesce,
	.get_coalesce   = mvneta_ethtool_get_coalesce,
	.get_drvinfo    = mvneta_ethtool_get_drvinfo,
	.get_ringparam  = mvneta_ethtool_get_ringparam,
	.set_ringparam	= mvneta_ethtool_set_ringparam,
	.get_strings	= mvneta_ethtool_get_strings,
	.get_ethtool_stats = mvneta_ethtool_get_stats,
	.get_sset_count	= mvneta_ethtool_get_sset_count,
	.get_rxfh_indir_size = mvneta_ethtool_get_rxfh_indir_size,
	.get_rxnfc	= mvneta_ethtool_get_rxnfc,
	.get_rxfh	= mvneta_ethtool_get_rxfh,
	.set_rxfh	= mvneta_ethtool_set_rxfh,
	.get_link_ksettings = mvneta_ethtool_get_link_ksettings,
	.set_link_ksettings = mvneta_ethtool_set_link_ksettings,
	.get_wol        = mvneta_ethtool_get_wol,
	.set_wol        = mvneta_ethtool_set_wol,
};

/* Initialize hw */
static int mvneta_init(struct device *dev, struct mvneta_port *pp)
{
	int queue;

	/* Disable port */
	mvneta_port_disable(pp);

	/* Set port default values */
	mvneta_defaults_set(pp);

	pp->txqs = devm_kcalloc(dev, txq_number, sizeof(*pp->txqs), GFP_KERNEL);
	if (!pp->txqs)
		return -ENOMEM;

	/* Initialize TX descriptor rings */
	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];
		txq->id = queue;
		txq->size = pp->tx_ring_size;
		txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS;
	}

	pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*pp->rxqs), GFP_KERNEL);
	if (!pp->rxqs)
		return -ENOMEM;

	/* Create Rx descriptor rings */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
		rxq->id = queue;
		rxq->size = pp->rx_ring_size;
		rxq->pkts_coal = MVNETA_RX_COAL_PKTS;
		rxq->time_coal = MVNETA_RX_COAL_USEC;
		rxq->buf_virt_addr
			= devm_kmalloc_array(pp->dev->dev.parent,
					     rxq->size,
					     sizeof(*rxq->buf_virt_addr),
					     GFP_KERNEL);
		if (!rxq->buf_virt_addr)
			return -ENOMEM;
	}

	return 0;
}

/* platform glue : initialize decoding windows */
static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
				     const struct mbus_dram_target_info *dram)
{
	u32 win_enable;
	u32 win_protect;
	int i;

	for (i = 0; i < 6; i++) {
		mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
		mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);

		if (i < 4)
			mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
	}

	win_enable = 0x3f;
	win_protect = 0;

	if (dram) {
		for (i = 0; i < dram->num_cs; i++) {
			const struct mbus_dram_window *cs = dram->cs + i;

			mvreg_write(pp, MVNETA_WIN_BASE(i),
				    (cs->base & 0xffff0000) |
				    (cs->mbus_attr << 8) |
				    dram->mbus_dram_target_id);

			mvreg_write(pp, MVNETA_WIN_SIZE(i),
				    (cs->size - 1) & 0xffff0000);

			win_enable &= ~(1 << i);
			win_protect |= 3 << (2 * i);
		}
	} else {
		/* For Armada3700 open default 4GB Mbus window, leaving
		 * arbitration of target/attribute to a different layer
		 * of configuration.
		 */
		mvreg_write(pp, MVNETA_WIN_SIZE(0), 0xffff0000);
		win_enable &= ~BIT(0);
		win_protect = 3;
	}

	mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
	mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);
}

/* Power up the port */
static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
{
	u32 ctrl;

	/* MAC Cause register should be cleared */
	mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);

	ctrl = mvreg_read(pp, MVNETA_GMAC_CTRL_2);

	/* Even though it might look weird, when we're configured in
	 * SGMII or QSGMII mode, the RGMII bit needs to be set.
	 */
	switch (phy_mode) {
	case PHY_INTERFACE_MODE_QSGMII:
		mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO);
		ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
		break;
	case PHY_INTERFACE_MODE_SGMII:
		mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
		ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
		break;
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		ctrl |= MVNETA_GMAC2_PORT_RGMII;
		break;
	default:
		return -EINVAL;
	}

	/* Cancel Port Reset */
	ctrl &= ~MVNETA_GMAC2_PORT_RESET;
	mvreg_write(pp, MVNETA_GMAC_CTRL_2, ctrl);

	while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
		MVNETA_GMAC2_PORT_RESET) != 0)
		continue;

	return 0;
}

/* Device initialization routine */
static int mvneta_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct device_node *dn = pdev->dev.of_node;
	struct device_node *bm_node;
	struct mvneta_port *pp;
	struct net_device *dev;
	struct phylink *phylink;
	const char *dt_mac_addr;
	char hw_mac_addr[ETH_ALEN];
	const char *mac_from;
	int tx_csum_limit;
	int phy_mode;
	int err;
	int cpu;

	dev = alloc_etherdev_mqs(sizeof(struct mvneta_port), txq_number,
				 rxq_number);
	if (!dev)
		return -ENOMEM;

	dev->irq = irq_of_parse_and_map(dn, 0);
	if (dev->irq == 0) {
		err = -EINVAL;
		goto err_free_netdev;
	}

	phy_mode = of_get_phy_mode(dn);
	if (phy_mode < 0) {
		dev_err(&pdev->dev, "incorrect phy-mode\n");
		err = -EINVAL;
		goto err_free_irq;
	}

	phylink = phylink_create(dev, pdev->dev.fwnode, phy_mode,
				 &mvneta_phylink_ops);
	if (IS_ERR(phylink)) {
		err = PTR_ERR(phylink);
		goto err_free_irq;
	}

	dev->tx_queue_len = MVNETA_MAX_TXD;
	dev->watchdog_timeo = 5 * HZ;
	dev->netdev_ops = &mvneta_netdev_ops;

	dev->ethtool_ops = &mvneta_eth_tool_ops;

	pp = netdev_priv(dev);
	spin_lock_init(&pp->lock);
	pp->phylink = phylink;
	pp->phy_interface = phy_mode;
	pp->dn = dn;

	pp->rxq_def = rxq_def;

	/* Set RX packet offset correction for platforms whose NET_SKB_PAD
	 * exceeds 64B. It should be 64B for 64-bit platforms and 0B for
	 * 32-bit ones.
	 */
	pp->rx_offset_correction =
		max(0, NET_SKB_PAD - MVNETA_RX_PKT_OFFSET_CORRECTION);

	pp->indir[0] = rxq_def;

	/* Get special SoC configurations */
	if (of_device_is_compatible(dn, "marvell,armada-3700-neta"))
		pp->neta_armada3700 = true;

	pp->clk = devm_clk_get(&pdev->dev, "core");
	if (IS_ERR(pp->clk))
		pp->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(pp->clk)) {
		err = PTR_ERR(pp->clk);
		goto err_free_phylink;
	}

	clk_prepare_enable(pp->clk);

	pp->clk_bus = devm_clk_get(&pdev->dev, "bus");
	if (!IS_ERR(pp->clk_bus))
		clk_prepare_enable(pp->clk_bus);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pp->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(pp->base)) {
		err = PTR_ERR(pp->base);
		goto err_clk;
	}

	/* Alloc per-cpu port structure */
	pp->ports = alloc_percpu(struct mvneta_pcpu_port);
	if (!pp->ports) {
		err = -ENOMEM;
		goto err_clk;
	}

	/* Alloc per-cpu stats */
	pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats);
	if (!pp->stats) {
		err = -ENOMEM;
		goto err_free_ports;
	}

	dt_mac_addr = of_get_mac_address(dn);
	if (dt_mac_addr) {
		mac_from = "device tree";
		memcpy(dev->dev_addr, dt_mac_addr, ETH_ALEN);
	} else {
		mvneta_get_mac_addr(pp, hw_mac_addr);
		if (is_valid_ether_addr(hw_mac_addr)) {
			mac_from = "hardware";
			memcpy(dev->dev_addr, hw_mac_addr, ETH_ALEN);
		} else {
			mac_from = "random";
			eth_hw_addr_random(dev);
		}
	}

	if (!of_property_read_u32(dn, "tx-csum-limit", &tx_csum_limit)) {
		if (tx_csum_limit < 0 ||
		    tx_csum_limit > MVNETA_TX_CSUM_MAX_SIZE) {
			tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
			dev_info(&pdev->dev,
				 "Wrong TX csum limit in DT, set to %dB\n",
				 MVNETA_TX_CSUM_DEF_SIZE);
		}
	} else if (of_device_is_compatible(dn, "marvell,armada-370-neta")) {
		tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
	} else {
		tx_csum_limit = MVNETA_TX_CSUM_MAX_SIZE;
	}

	pp->tx_csum_limit = tx_csum_limit;

	pp->dram_target_info = mv_mbus_dram_info();
	/* Armada3700 requires setting a default configuration of Mbus
	 * windows, however without using a filled mbus_dram_target_info
	 * structure.
	 */
	if (pp->dram_target_info || pp->neta_armada3700)
		mvneta_conf_mbus_windows(pp, pp->dram_target_info);

	pp->tx_ring_size = MVNETA_MAX_TXD;
	pp->rx_ring_size = MVNETA_MAX_RXD;

	pp->dev = dev;
	SET_NETDEV_DEV(dev, &pdev->dev);

	pp->id = global_port_id++;

	/* Obtain access to BM resources if enabled and already initialized */
	bm_node = of_parse_phandle(dn, "buffer-manager", 0);
	if (bm_node && bm_node->data) {
		pp->bm_priv = bm_node->data;
		err = mvneta_bm_port_init(pdev, pp);
		if (err < 0) {
			dev_info(&pdev->dev, "use SW buffer management\n");
			pp->bm_priv = NULL;
		}
	}
	of_node_put(bm_node);

	err = mvneta_init(&pdev->dev, pp);
	if (err < 0)
		goto err_netdev;

	err = mvneta_port_power_up(pp, phy_mode);
	if (err < 0) {
		dev_err(&pdev->dev, "can't power up port\n");
		goto err_netdev;
	}

	/* Armada3700 network controller does not support per-cpu
	 * operation, so only single NAPI should be initialized.
	 */
	if (pp->neta_armada3700) {
		netif_napi_add(dev, &pp->napi, mvneta_poll, NAPI_POLL_WEIGHT);
	} else {
		for_each_present_cpu(cpu) {
			struct mvneta_pcpu_port *port =
				per_cpu_ptr(pp->ports, cpu);

			netif_napi_add(dev, &port->napi, mvneta_poll,
				       NAPI_POLL_WEIGHT);
			port->pp = pp;
		}
	}

	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_TSO;
	dev->hw_features |= dev->features;
	dev->vlan_features |= dev->features;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	dev->gso_max_segs = MVNETA_MAX_TSO_SEGS;

	/* MTU range: 68 - 9676 */
	dev->min_mtu = ETH_MIN_MTU;
	/* 9676 == 9700 - 20 and rounding to 8 */
	dev->max_mtu = 9676;

	err = register_netdev(dev);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register\n");
		goto err_free_stats;
	}

	netdev_info(dev, "Using %s mac address %pM\n", mac_from,
		    dev->dev_addr);

	platform_set_drvdata(pdev, pp->dev);

	return 0;

err_netdev:
	unregister_netdev(dev);
	if (pp->bm_priv) {
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
				       1 << pp->id);
	}
err_free_stats:
	free_percpu(pp->stats);
err_free_ports:
	free_percpu(pp->ports);
err_clk:
	clk_disable_unprepare(pp->clk_bus);
	clk_disable_unprepare(pp->clk);
err_free_phylink:
	if (pp->phylink)
		phylink_destroy(pp->phylink);
err_free_irq:
	irq_dispose_mapping(dev->irq);
err_free_netdev:
	free_netdev(dev);
	return err;
}

/* Device removal routine */
static int mvneta_remove(struct platform_device *pdev)
{
	struct net_device  *dev = platform_get_drvdata(pdev);
	struct mvneta_port *pp = netdev_priv(dev);

	unregister_netdev(dev);
	clk_disable_unprepare(pp->clk_bus);
	clk_disable_unprepare(pp->clk);
	free_percpu(pp->ports);
	free_percpu(pp->stats);
	irq_dispose_mapping(dev->irq);
	phylink_destroy(pp->phylink);
	/* pp lives inside the netdev's private area, so release the BM
	 * pools before free_netdev() frees it.
	 */
	if (pp->bm_priv) {
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
				       1 << pp->id);
	}

	free_netdev(dev);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
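/* System suspend: stop the port and gate the clocks. On resume the clocks,
 * Mbus windows and BM pools are restored and the port is reopened if it
 * was running.
 */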
static int mvneta_suspend(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct mvneta_port *pp = netdev_priv(dev);

	rtnl_lock();
	if (netif_running(dev))
		mvneta_stop(dev);
	rtnl_unlock();
	netif_device_detach(dev);
	clk_disable_unprepare(pp->clk_bus);
	clk_disable_unprepare(pp->clk);
	return 0;
}

static int mvneta_resume(struct device *device)
{
	struct platform_device *pdev = to_platform_device(device);
	struct net_device *dev = dev_get_drvdata(device);
	struct mvneta_port *pp = netdev_priv(dev);
	int err;

	clk_prepare_enable(pp->clk);
	if (!IS_ERR(pp->clk_bus))
		clk_prepare_enable(pp->clk_bus);
	if (pp->dram_target_info || pp->neta_armada3700)
		mvneta_conf_mbus_windows(pp, pp->dram_target_info);
	if (pp->bm_priv) {
		err = mvneta_bm_port_init(pdev, pp);
		if (err < 0) {
			dev_info(&pdev->dev, "use SW buffer management\n");
			pp->bm_priv = NULL;
		}
	}
	mvneta_defaults_set(pp);
	err = mvneta_port_power_up(pp, pp->phy_interface);
	if (err < 0) {
		dev_err(device, "can't power up port\n");
		return err;
	}

	netif_device_attach(dev);
	rtnl_lock();
	if (netif_running(dev)) {
		mvneta_open(dev);
		mvneta_set_rx_mode(dev);
	}
	rtnl_unlock();

	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(mvneta_pm_ops, mvneta_suspend, mvneta_resume);

static const struct of_device_id mvneta_match[] = {
	{ .compatible = "marvell,armada-370-neta" },
	{ .compatible = "marvell,armada-xp-neta" },
	{ .compatible = "marvell,armada-3700-neta" },
	{ }
};
MODULE_DEVICE_TABLE(of, mvneta_match);

static struct platform_driver mvneta_driver = {
	.probe = mvneta_probe,
	.remove = mvneta_remove,
	.driver = {
		.name = MVNETA_DRIVER_NAME,
		.of_match_table = mvneta_match,
		.pm = &mvneta_pm_ops,
	},
};

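/* Set up the CPU hotplug states used by mvneta_open() before registering
 * the platform driver.
 */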
static int __init mvneta_driver_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "net/mvmeta:online",
				      mvneta_cpu_online,
				      mvneta_cpu_down_prepare);
	if (ret < 0)
		goto out;
	online_hpstate = ret;
	ret = cpuhp_setup_state_multi(CPUHP_NET_MVNETA_DEAD, "net/mvneta:dead",
				      NULL, mvneta_cpu_dead);
	if (ret)
		goto err_dead;

	ret = platform_driver_register(&mvneta_driver);
	if (ret)
		goto err;
	return 0;

err:
	cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD);
err_dead:
	cpuhp_remove_multi_state(online_hpstate);
out:
	return ret;
}
module_init(mvneta_driver_init);

static void __exit mvneta_driver_exit(void)
{
	platform_driver_unregister(&mvneta_driver);
	cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD);
	cpuhp_remove_multi_state(online_hpstate);
}
module_exit(mvneta_driver_exit);

MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
MODULE_LICENSE("GPL");

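/* Module parameters (the variables are defined earlier in this file):
 * number of RX/TX queues, default RX queue and the rx_copybreak threshold.
 */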
module_param(rxq_number, int, S_IRUGO);
module_param(txq_number, int, S_IRUGO);

module_param(rxq_def, int, S_IRUGO);
module_param(rx_copybreak, int, S_IRUGO | S_IWUSR);