mvneta.c 148.3 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13
/*
 * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs.
 *
 * Copyright (C) 2012 Marvell
 *
 * Rami Rosen <rosenr@marvell.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

14 15
#include <linux/clk.h>
#include <linux/cpu.h>
16
#include <linux/etherdevice.h>
17
#include <linux/if_vlan.h>
18 19
#include <linux/inetdevice.h>
#include <linux/interrupt.h>
20
#include <linux/io.h>
21 22 23 24
#include <linux/kernel.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/netdevice.h>
25
#include <linux/of.h>
26
#include <linux/of_address.h>
27 28 29
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
30
#include <linux/phy/phy.h>
31
#include <linux/phy.h>
R
Russell King 已提交
32
#include <linux/phylink.h>
33 34
#include <linux/platform_device.h>
#include <linux/skbuff.h>
35
#include <net/hwbm.h>
36
#include "mvneta_bm.h"
37 38 39
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tso.h>
40
#include <net/page_pool.h>
41
#include <linux/bpf_trace.h>
42 43 44

/* Registers */
#define MVNETA_RXQ_CONFIG_REG(q)                (0x1400 + ((q) << 2))
45
#define      MVNETA_RXQ_HW_BUF_ALLOC            BIT(0)
46 47 48 49
#define      MVNETA_RXQ_SHORT_POOL_ID_SHIFT	4
#define      MVNETA_RXQ_SHORT_POOL_ID_MASK	0x30
#define      MVNETA_RXQ_LONG_POOL_ID_SHIFT	6
#define      MVNETA_RXQ_LONG_POOL_ID_MASK	0xc0
50 51 52 53 54 55 56 57 58 59 60 61 62
#define      MVNETA_RXQ_PKT_OFFSET_ALL_MASK     (0xf    << 8)
#define      MVNETA_RXQ_PKT_OFFSET_MASK(offs)   ((offs) << 8)
#define MVNETA_RXQ_THRESHOLD_REG(q)             (0x14c0 + ((q) << 2))
#define      MVNETA_RXQ_NON_OCCUPIED(v)         ((v) << 16)
#define MVNETA_RXQ_BASE_ADDR_REG(q)             (0x1480 + ((q) << 2))
#define MVNETA_RXQ_SIZE_REG(q)                  (0x14a0 + ((q) << 2))
#define      MVNETA_RXQ_BUF_SIZE_SHIFT          19
#define      MVNETA_RXQ_BUF_SIZE_MASK           (0x1fff << 19)
#define MVNETA_RXQ_STATUS_REG(q)                (0x14e0 + ((q) << 2))
#define      MVNETA_RXQ_OCCUPIED_ALL_MASK       0x3fff
#define MVNETA_RXQ_STATUS_UPDATE_REG(q)         (0x1500 + ((q) << 2))
#define      MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT  16
#define      MVNETA_RXQ_ADD_NON_OCCUPIED_MAX    255
63 64 65
#define MVNETA_PORT_POOL_BUFFER_SZ_REG(pool)	(0x1700 + ((pool) << 2))
#define      MVNETA_PORT_POOL_BUFFER_SZ_SHIFT	3
#define      MVNETA_PORT_POOL_BUFFER_SZ_MASK	0xfff8
66 67 68 69 70 71 72 73 74 75 76 77
#define MVNETA_PORT_RX_RESET                    0x1cc0
#define      MVNETA_PORT_RX_DMA_RESET           BIT(0)
#define MVNETA_PHY_ADDR                         0x2000
#define      MVNETA_PHY_ADDR_MASK               0x1f
#define MVNETA_MBUS_RETRY                       0x2010
#define MVNETA_UNIT_INTR_CAUSE                  0x2080
#define MVNETA_UNIT_CONTROL                     0x20B0
#define      MVNETA_PHY_POLLING_ENABLE          BIT(1)
#define MVNETA_WIN_BASE(w)                      (0x2200 + ((w) << 3))
#define MVNETA_WIN_SIZE(w)                      (0x2204 + ((w) << 3))
#define MVNETA_WIN_REMAP(w)                     (0x2280 + ((w) << 2))
#define MVNETA_BASE_ADDR_ENABLE                 0x2290
78
#define MVNETA_ACCESS_PROTECT_ENABLE            0x2294
79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102
#define MVNETA_PORT_CONFIG                      0x2400
#define      MVNETA_UNI_PROMISC_MODE            BIT(0)
#define      MVNETA_DEF_RXQ(q)                  ((q) << 1)
#define      MVNETA_DEF_RXQ_ARP(q)              ((q) << 4)
#define      MVNETA_TX_UNSET_ERR_SUM            BIT(12)
#define      MVNETA_DEF_RXQ_TCP(q)              ((q) << 16)
#define      MVNETA_DEF_RXQ_UDP(q)              ((q) << 19)
#define      MVNETA_DEF_RXQ_BPDU(q)             ((q) << 22)
#define      MVNETA_RX_CSUM_WITH_PSEUDO_HDR     BIT(25)
#define      MVNETA_PORT_CONFIG_DEFL_VALUE(q)   (MVNETA_DEF_RXQ(q)       | \
						 MVNETA_DEF_RXQ_ARP(q)	 | \
						 MVNETA_DEF_RXQ_TCP(q)	 | \
						 MVNETA_DEF_RXQ_UDP(q)	 | \
						 MVNETA_DEF_RXQ_BPDU(q)	 | \
						 MVNETA_TX_UNSET_ERR_SUM | \
						 MVNETA_RX_CSUM_WITH_PSEUDO_HDR)
#define MVNETA_PORT_CONFIG_EXTEND                0x2404
#define MVNETA_MAC_ADDR_LOW                      0x2414
#define MVNETA_MAC_ADDR_HIGH                     0x2418
#define MVNETA_SDMA_CONFIG                       0x241c
#define      MVNETA_SDMA_BRST_SIZE_16            4
#define      MVNETA_RX_BRST_SZ_MASK(burst)       ((burst) << 1)
#define      MVNETA_RX_NO_DATA_SWAP              BIT(4)
#define      MVNETA_TX_NO_DATA_SWAP              BIT(5)
103
#define      MVNETA_DESC_SWAP                    BIT(6)
104 105
#define      MVNETA_TX_BRST_SZ_MASK(burst)       ((burst) << 22)
#define MVNETA_PORT_STATUS                       0x2444
106
#define      MVNETA_TX_IN_PRGRS                  BIT(0)
107 108
#define      MVNETA_TX_FIFO_EMPTY                BIT(8)
#define MVNETA_RX_MIN_FRAME_SIZE                 0x247c
109
/* Only exists on Armada XP and Armada 370 */
110
#define MVNETA_SERDES_CFG			 0x24A0
111
#define      MVNETA_SGMII_SERDES_PROTO		 0x0cc7
112
#define      MVNETA_QSGMII_SERDES_PROTO		 0x0667
113
#define      MVNETA_HSGMII_SERDES_PROTO		 0x1107
114 115 116 117 118 119
#define MVNETA_TYPE_PRIO                         0x24bc
#define      MVNETA_FORCE_UNI                    BIT(21)
#define MVNETA_TXQ_CMD_1                         0x24e4
#define MVNETA_TXQ_CMD                           0x2448
#define      MVNETA_TXQ_DISABLE_SHIFT            8
#define      MVNETA_TXQ_ENABLE_MASK              0x000000ff
120 121
#define MVNETA_RX_DISCARD_FRAME_COUNT		 0x2484
#define MVNETA_OVERRUN_FRAME_COUNT		 0x2488
122 123
#define MVNETA_GMAC_CLOCK_DIVIDER                0x24f4
#define      MVNETA_GMAC_1MS_CLOCK_ENABLE        BIT(31)
124
#define MVNETA_ACC_MODE                          0x2500
125
#define MVNETA_BM_ADDRESS                        0x2504
126 127 128
#define MVNETA_CPU_MAP(cpu)                      (0x2540 + ((cpu) << 2))
#define      MVNETA_CPU_RXQ_ACCESS_ALL_MASK      0x000000ff
#define      MVNETA_CPU_TXQ_ACCESS_ALL_MASK      0x0000ff00
129
#define      MVNETA_CPU_RXQ_ACCESS(rxq)		 BIT(rxq)
130
#define      MVNETA_CPU_TXQ_ACCESS(txq)		 BIT(txq + 8)
131
#define MVNETA_RXQ_TIME_COAL_REG(q)              (0x2580 + ((q) << 2))
132

133 134 135 136 137 138 139
/* Exception Interrupt Port/Queue Cause register
 *
 * Their behavior depend of the mapping done using the PCPX2Q
 * registers. For a given CPU if the bit associated to a queue is not
 * set, then for the register a read from this CPU will always return
 * 0 and a write won't do anything
 */
140

141 142
#define MVNETA_INTR_NEW_CAUSE                    0x25a0
#define MVNETA_INTR_NEW_MASK                     0x25a4
143 144 145 146 147 148 149 150 151 152 153 154

/* bits  0..7  = TXQ SENT, one bit per queue.
 * bits  8..15 = RXQ OCCUP, one bit per queue.
 * bits 16..23 = RXQ FREE, one bit per queue.
 * bit  29 = OLD_REG_SUM, see old reg ?
 * bit  30 = TX_ERR_SUM, one bit for 4 ports
 * bit  31 = MISC_SUM,   one bit for 4 ports
 */
#define      MVNETA_TX_INTR_MASK(nr_txqs)        (((1 << nr_txqs) - 1) << 0)
#define      MVNETA_TX_INTR_MASK_ALL             (0xff << 0)
#define      MVNETA_RX_INTR_MASK(nr_rxqs)        (((1 << nr_rxqs) - 1) << 8)
#define      MVNETA_RX_INTR_MASK_ALL             (0xff << 8)
155
#define      MVNETA_MISCINTR_INTR_MASK           BIT(31)
156

157 158
#define MVNETA_INTR_OLD_CAUSE                    0x25a8
#define MVNETA_INTR_OLD_MASK                     0x25ac
159 160

/* Data Path Port/Queue Cause Register */
161 162
#define MVNETA_INTR_MISC_CAUSE                   0x25b0
#define MVNETA_INTR_MISC_MASK                    0x25b4
163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184

#define      MVNETA_CAUSE_PHY_STATUS_CHANGE      BIT(0)
#define      MVNETA_CAUSE_LINK_CHANGE            BIT(1)
#define      MVNETA_CAUSE_PTP                    BIT(4)

#define      MVNETA_CAUSE_INTERNAL_ADDR_ERR      BIT(7)
#define      MVNETA_CAUSE_RX_OVERRUN             BIT(8)
#define      MVNETA_CAUSE_RX_CRC_ERROR           BIT(9)
#define      MVNETA_CAUSE_RX_LARGE_PKT           BIT(10)
#define      MVNETA_CAUSE_TX_UNDERUN             BIT(11)
#define      MVNETA_CAUSE_PRBS_ERR               BIT(12)
#define      MVNETA_CAUSE_PSC_SYNC_CHANGE        BIT(13)
#define      MVNETA_CAUSE_SERDES_SYNC_ERR        BIT(14)

#define      MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT    16
#define      MVNETA_CAUSE_BMU_ALLOC_ERR_ALL_MASK   (0xF << MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT)
#define      MVNETA_CAUSE_BMU_ALLOC_ERR_MASK(pool) (1 << (MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT + (pool)))

#define      MVNETA_CAUSE_TXQ_ERROR_SHIFT        24
#define      MVNETA_CAUSE_TXQ_ERROR_ALL_MASK     (0xFF << MVNETA_CAUSE_TXQ_ERROR_SHIFT)
#define      MVNETA_CAUSE_TXQ_ERROR_MASK(q)      (1 << (MVNETA_CAUSE_TXQ_ERROR_SHIFT + (q)))

185 186
#define MVNETA_INTR_ENABLE                       0x25b8
#define      MVNETA_TXQ_INTR_ENABLE_ALL_MASK     0x0000ff00
187
#define      MVNETA_RXQ_INTR_ENABLE_ALL_MASK     0x000000ff
188

189 190 191 192 193 194 195 196
#define MVNETA_RXQ_CMD                           0x2680
#define      MVNETA_RXQ_DISABLE_SHIFT            8
#define      MVNETA_RXQ_ENABLE_MASK              0x000000ff
#define MVETH_TXQ_TOKEN_COUNT_REG(q)             (0x2700 + ((q) << 4))
#define MVETH_TXQ_TOKEN_CFG_REG(q)               (0x2704 + ((q) << 4))
#define MVNETA_GMAC_CTRL_0                       0x2c00
#define      MVNETA_GMAC_MAX_RX_SIZE_SHIFT       2
#define      MVNETA_GMAC_MAX_RX_SIZE_MASK        0x7ffc
197
#define      MVNETA_GMAC0_PORT_1000BASE_X        BIT(1)
198 199
#define      MVNETA_GMAC0_PORT_ENABLE            BIT(0)
#define MVNETA_GMAC_CTRL_2                       0x2c08
200
#define      MVNETA_GMAC2_INBAND_AN_ENABLE       BIT(0)
201
#define      MVNETA_GMAC2_PCS_ENABLE             BIT(3)
202 203 204 205 206 207 208 209 210 211 212
#define      MVNETA_GMAC2_PORT_RGMII             BIT(4)
#define      MVNETA_GMAC2_PORT_RESET             BIT(6)
#define MVNETA_GMAC_STATUS                       0x2c10
#define      MVNETA_GMAC_LINK_UP                 BIT(0)
#define      MVNETA_GMAC_SPEED_1000              BIT(1)
#define      MVNETA_GMAC_SPEED_100               BIT(2)
#define      MVNETA_GMAC_FULL_DUPLEX             BIT(3)
#define      MVNETA_GMAC_RX_FLOW_CTRL_ENABLE     BIT(4)
#define      MVNETA_GMAC_TX_FLOW_CTRL_ENABLE     BIT(5)
#define      MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE     BIT(6)
#define      MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE     BIT(7)
R
Russell King 已提交
213 214
#define      MVNETA_GMAC_AN_COMPLETE             BIT(11)
#define      MVNETA_GMAC_SYNC_OK                 BIT(14)
215 216 217
#define MVNETA_GMAC_AUTONEG_CONFIG               0x2c0c
#define      MVNETA_GMAC_FORCE_LINK_DOWN         BIT(0)
#define      MVNETA_GMAC_FORCE_LINK_PASS         BIT(1)
218
#define      MVNETA_GMAC_INBAND_AN_ENABLE        BIT(2)
219 220
#define      MVNETA_GMAC_AN_BYPASS_ENABLE        BIT(3)
#define      MVNETA_GMAC_INBAND_RESTART_AN       BIT(4)
221 222
#define      MVNETA_GMAC_CONFIG_MII_SPEED        BIT(5)
#define      MVNETA_GMAC_CONFIG_GMII_SPEED       BIT(6)
223
#define      MVNETA_GMAC_AN_SPEED_EN             BIT(7)
224 225
#define      MVNETA_GMAC_CONFIG_FLOW_CTRL        BIT(8)
#define      MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL    BIT(9)
226
#define      MVNETA_GMAC_AN_FLOW_CTRL_EN         BIT(11)
227
#define      MVNETA_GMAC_CONFIG_FULL_DUPLEX      BIT(12)
228
#define      MVNETA_GMAC_AN_DUPLEX_EN            BIT(13)
229 230
#define MVNETA_GMAC_CTRL_4                       0x2c90
#define      MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE  BIT(1)
231
#define MVNETA_MIB_COUNTERS_BASE                 0x3000
232 233 234 235 236 237 238 239 240 241
#define      MVNETA_MIB_LATE_COLLISION           0x7c
#define MVNETA_DA_FILT_SPEC_MCAST                0x3400
#define MVNETA_DA_FILT_OTH_MCAST                 0x3500
#define MVNETA_DA_FILT_UCAST_BASE                0x3600
#define MVNETA_TXQ_BASE_ADDR_REG(q)              (0x3c00 + ((q) << 2))
#define MVNETA_TXQ_SIZE_REG(q)                   (0x3c20 + ((q) << 2))
#define      MVNETA_TXQ_SENT_THRESH_ALL_MASK     0x3fff0000
#define      MVNETA_TXQ_SENT_THRESH_MASK(coal)   ((coal) << 16)
#define MVNETA_TXQ_UPDATE_REG(q)                 (0x3c60 + ((q) << 2))
#define      MVNETA_TXQ_DEC_SENT_SHIFT           16
242
#define      MVNETA_TXQ_DEC_SENT_MASK            0xff
243 244 245 246 247 248 249 250 251 252 253
#define MVNETA_TXQ_STATUS_REG(q)                 (0x3c40 + ((q) << 2))
#define      MVNETA_TXQ_SENT_DESC_SHIFT          16
#define      MVNETA_TXQ_SENT_DESC_MASK           0x3fff0000
#define MVNETA_PORT_TX_RESET                     0x3cf0
#define      MVNETA_PORT_TX_DMA_RESET            BIT(0)
#define MVNETA_TX_MTU                            0x3e0c
#define MVNETA_TX_TOKEN_SIZE                     0x3e14
#define      MVNETA_TX_TOKEN_SIZE_MAX            0xffffffff
#define MVNETA_TXQ_TOKEN_SIZE_REG(q)             (0x3e40 + ((q) << 2))
#define      MVNETA_TXQ_TOKEN_SIZE_MAX           0x7fffffff

R
Russell King 已提交
254 255 256 257 258 259
#define MVNETA_LPI_CTRL_0                        0x2cc0
#define MVNETA_LPI_CTRL_1                        0x2cc4
#define      MVNETA_LPI_REQUEST_ENABLE           BIT(0)
#define MVNETA_LPI_CTRL_2                        0x2cc8
#define MVNETA_LPI_STATUS                        0x2ccc

260 261 262 263 264 265 266 267 268
#define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK	 0xff

/* Descriptor ring Macros */
#define MVNETA_QUEUE_NEXT_DESC(q, index)	\
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)

/* Various constants */

/* Coalescing */
269
#define MVNETA_TXDONE_COAL_PKTS		0	/* interrupt per packet */
270 271 272
#define MVNETA_RX_COAL_PKTS		32
#define MVNETA_RX_COAL_USEC		100

273
/* The two bytes Marvell header. Either contains a special value used
274 275 276 277 278 279 280 281 282 283 284
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver) or is filled automatically by zeroes on
 * the RX side. Those two bytes being at the front of the Ethernet
 * header, they allow to have the IP header aligned on a 4 bytes
 * boundary automatically: the hardware skips those two bytes on its
 * own.
 */
#define MVNETA_MH_SIZE			2

#define MVNETA_VLAN_TAG_LEN             4

285
#define MVNETA_TX_CSUM_DEF_SIZE		1600
286
#define MVNETA_TX_CSUM_MAX_SIZE		9800
287 288 289 290
#define MVNETA_ACC_MODE_EXT1		1
#define MVNETA_ACC_MODE_EXT2		2

#define MVNETA_MAX_DECODE_WIN		6
291 292 293 294 295 296 297 298

/* Timeout constants */
#define MVNETA_TX_DISABLE_TIMEOUT_MSEC	1000
#define MVNETA_RX_DISABLE_TIMEOUT_MSEC	1000
#define MVNETA_TX_FIFO_EMPTY_TIMEOUT	10000

#define MVNETA_TX_MTU_MAX		0x3ffff

299 300 301 302 303
/* The RSS lookup table actually has 256 entries but we do not use
 * them yet
 */
#define MVNETA_RSS_LU_TABLE_SIZE	1

304
/* Max number of Rx descriptors */
305
#define MVNETA_MAX_RXD 512
306 307

/* Max number of Tx descriptors */
308
#define MVNETA_MAX_TXD 1024
309

310 311 312 313 314
/* Max number of allowed TCP segments for software TSO */
#define MVNETA_MAX_TSO_SEGS 100

#define MVNETA_MAX_SKB_DESCS (MVNETA_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)

315 316 317
/* descriptor aligned size */
#define MVNETA_DESC_ALIGNED_SIZE	32

318 319 320 321 322 323
/* Number of bytes to be taken into account by HW when putting incoming data
 * to the buffers. It is needed in case NET_SKB_PAD exceeds maximum packet
 * offset supported in MVNETA_RXQ_CONFIG_REG(q) registers.
 */
#define MVNETA_RX_PKT_OFFSET_CORRECTION		64

324 325 326
#define MVNETA_RX_PKT_SIZE(mtu) \
	ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
	      ETH_HLEN + ETH_FCS_LEN,			     \
327
	      cache_line_size())
328

329
/* Driver assumes that the last 3 bits are 0 */
330
#define MVNETA_SKB_HEADROOM	ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8)
331
#define MVNETA_SKB_PAD	(SKB_DATA_ALIGN(sizeof(struct skb_shared_info) + \
332
			 MVNETA_SKB_HEADROOM))
333 334
#define MVNETA_MAX_RX_BUF_SIZE	(PAGE_SIZE - MVNETA_SKB_PAD)

335 336 337 338
#define IS_TSO_HEADER(txq, addr) \
	((addr >= txq->tso_hdrs_phys) && \
	 (addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE))

339 340
#define MVNETA_RX_GET_BM_POOL_ID(rxd) \
	(((rxd)->status & MVNETA_RXD_BM_POOL_MASK) >> MVNETA_RXD_BM_POOL_SHIFT)
341

R
Russell King 已提交
342 343
enum {
	ETHTOOL_STAT_EEE_WAKEUP,
344 345
	ETHTOOL_STAT_SKB_ALLOC_ERR,
	ETHTOOL_STAT_REFILL_ERR,
346 347 348 349
	ETHTOOL_XDP_REDIRECT,
	ETHTOOL_XDP_PASS,
	ETHTOOL_XDP_DROP,
	ETHTOOL_XDP_TX,
350 351 352
	ETHTOOL_XDP_TX_ERR,
	ETHTOOL_XDP_XMIT,
	ETHTOOL_XDP_XMIT_ERR,
R
Russell King 已提交
353 354 355
	ETHTOOL_MAX_STATS,
};

R
Russell King 已提交
356 357 358 359 360 361 362 363
struct mvneta_statistic {
	unsigned short offset;
	unsigned short type;
	const char name[ETH_GSTRING_LEN];
};

#define T_REG_32	32
#define T_REG_64	64
R
Russell King 已提交
364
#define T_SW		1
R
Russell King 已提交
365

366 367 368 369
#define MVNETA_XDP_PASS		0
#define MVNETA_XDP_DROPPED	BIT(0)
#define MVNETA_XDP_TX		BIT(1)
#define MVNETA_XDP_REDIR	BIT(2)
370

R
Russell King 已提交
371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403
static const struct mvneta_statistic mvneta_statistics[] = {
	{ 0x3000, T_REG_64, "good_octets_received", },
	{ 0x3010, T_REG_32, "good_frames_received", },
	{ 0x3008, T_REG_32, "bad_octets_received", },
	{ 0x3014, T_REG_32, "bad_frames_received", },
	{ 0x3018, T_REG_32, "broadcast_frames_received", },
	{ 0x301c, T_REG_32, "multicast_frames_received", },
	{ 0x3050, T_REG_32, "unrec_mac_control_received", },
	{ 0x3058, T_REG_32, "good_fc_received", },
	{ 0x305c, T_REG_32, "bad_fc_received", },
	{ 0x3060, T_REG_32, "undersize_received", },
	{ 0x3064, T_REG_32, "fragments_received", },
	{ 0x3068, T_REG_32, "oversize_received", },
	{ 0x306c, T_REG_32, "jabber_received", },
	{ 0x3070, T_REG_32, "mac_receive_error", },
	{ 0x3074, T_REG_32, "bad_crc_event", },
	{ 0x3078, T_REG_32, "collision", },
	{ 0x307c, T_REG_32, "late_collision", },
	{ 0x2484, T_REG_32, "rx_discard", },
	{ 0x2488, T_REG_32, "rx_overrun", },
	{ 0x3020, T_REG_32, "frames_64_octets", },
	{ 0x3024, T_REG_32, "frames_65_to_127_octets", },
	{ 0x3028, T_REG_32, "frames_128_to_255_octets", },
	{ 0x302c, T_REG_32, "frames_256_to_511_octets", },
	{ 0x3030, T_REG_32, "frames_512_to_1023_octets", },
	{ 0x3034, T_REG_32, "frames_1024_to_max_octets", },
	{ 0x3038, T_REG_64, "good_octets_sent", },
	{ 0x3040, T_REG_32, "good_frames_sent", },
	{ 0x3044, T_REG_32, "excessive_collision", },
	{ 0x3048, T_REG_32, "multicast_frames_sent", },
	{ 0x304c, T_REG_32, "broadcast_frames_sent", },
	{ 0x3054, T_REG_32, "fc_sent", },
	{ 0x300c, T_REG_32, "internal_mac_transmit_err", },
R
Russell King 已提交
404
	{ ETHTOOL_STAT_EEE_WAKEUP, T_SW, "eee_wakeup_errors", },
405 406
	{ ETHTOOL_STAT_SKB_ALLOC_ERR, T_SW, "skb_alloc_errors", },
	{ ETHTOOL_STAT_REFILL_ERR, T_SW, "refill_errors", },
407 408 409 410
	{ ETHTOOL_XDP_REDIRECT, T_SW, "rx_xdp_redirect", },
	{ ETHTOOL_XDP_PASS, T_SW, "rx_xdp_pass", },
	{ ETHTOOL_XDP_DROP, T_SW, "rx_xdp_drop", },
	{ ETHTOOL_XDP_TX, T_SW, "rx_xdp_tx", },
411
	{ ETHTOOL_XDP_TX_ERR, T_SW, "rx_xdp_tx_errors", },
412
	{ ETHTOOL_XDP_XMIT, T_SW, "tx_xdp_xmit", },
413
	{ ETHTOOL_XDP_XMIT_ERR, T_SW, "tx_xdp_xmit_errors", },
R
Russell King 已提交
414 415
};

416 417 418 419 420
struct mvneta_stats {
	u64	rx_packets;
	u64	rx_bytes;
	u64	tx_packets;
	u64	tx_bytes;
421 422 423 424
	/* xdp */
	u64	xdp_redirect;
	u64	xdp_pass;
	u64	xdp_drop;
425
	u64	xdp_xmit;
426
	u64	xdp_xmit_err;
427
	u64	xdp_tx;
428
	u64	xdp_tx_err;
429 430
};

431
struct mvneta_ethtool_stats {
432
	struct mvneta_stats ps;
433 434 435 436
	u64	skb_alloc_error;
	u64	refill_error;
};

437
struct mvneta_pcpu_stats {
438 439 440
	struct u64_stats_sync syncp;

	struct mvneta_ethtool_stats es;
441 442
	u64	rx_dropped;
	u64	rx_errors;
443 444
};

445 446 447 448 449 450 451 452 453 454 455
struct mvneta_pcpu_port {
	/* Pointer to the shared port */
	struct mvneta_port	*pp;

	/* Pointer to the CPU-local NAPI struct */
	struct napi_struct	napi;

	/* Cause of the previous interrupt */
	u32			cause_rx_tx;
};

456 457 458 459
enum {
	__MVNETA_DOWN,
};

460
struct mvneta_port {
461
	u8 id;
462 463 464
	struct mvneta_pcpu_port __percpu	*ports;
	struct mvneta_pcpu_stats __percpu	*stats;

465 466
	unsigned long state;

467 468 469 470 471
	int pkt_size;
	void __iomem *base;
	struct mvneta_rx_queue *rxqs;
	struct mvneta_tx_queue *txqs;
	struct net_device *dev;
472 473
	struct hlist_node node_online;
	struct hlist_node node_dead;
474
	int rxq_def;
475 476 477 478
	/* Protect the access to the percpu interrupt registers,
	 * ensuring that the configuration remains coherent.
	 */
	spinlock_t lock;
479
	bool is_stopped;
480

481 482 483
	u32 cause_rx_tx;
	struct napi_struct napi;

484 485
	struct bpf_prog *xdp_prog;

486
	/* Core clock */
T
Thomas Petazzoni 已提交
487
	struct clk *clk;
488 489
	/* AXI clock */
	struct clk *clk_bus;
490 491 492 493 494
	u8 mcast_count[256];
	u16 tx_ring_size;
	u16 rx_ring_size;

	phy_interface_t phy_interface;
R
Russell King 已提交
495
	struct device_node *dn;
496
	unsigned int tx_csum_limit;
R
Russell King 已提交
497
	struct phylink *phylink;
498
	struct phylink_config phylink_config;
499
	struct phy *comphy;
R
Russell King 已提交
500

501 502 503 504 505
	struct mvneta_bm *bm_priv;
	struct mvneta_bm_pool *pool_long;
	struct mvneta_bm_pool *pool_short;
	int bm_win_id;

R
Russell King 已提交
506 507 508 509
	bool eee_enabled;
	bool eee_active;
	bool tx_lpi_enabled;

R
Russell King 已提交
510
	u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)];
511 512

	u32 indir[MVNETA_RSS_LU_TABLE_SIZE];
513 514 515

	/* Flags for special SoC configurations */
	bool neta_armada3700;
516
	u16 rx_offset_correction;
517
	const struct mbus_dram_target_info *dram_target_info;
518 519
};

520
/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
521 522 523
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design
 */
524

525 526 527 528 529 530 531 532 533 534 535 536 537 538 539
#define MVNETA_TX_L3_OFF_SHIFT	0
#define MVNETA_TX_IP_HLEN_SHIFT	8
#define MVNETA_TX_L4_UDP	BIT(16)
#define MVNETA_TX_L3_IP6	BIT(17)
#define MVNETA_TXD_IP_CSUM	BIT(18)
#define MVNETA_TXD_Z_PAD	BIT(19)
#define MVNETA_TXD_L_DESC	BIT(20)
#define MVNETA_TXD_F_DESC	BIT(21)
#define MVNETA_TXD_FLZ_DESC	(MVNETA_TXD_Z_PAD  | \
				 MVNETA_TXD_L_DESC | \
				 MVNETA_TXD_F_DESC)
#define MVNETA_TX_L4_CSUM_FULL	BIT(30)
#define MVNETA_TX_L4_CSUM_NOT	BIT(31)

#define MVNETA_RXD_ERR_CRC		0x0
540 541
#define MVNETA_RXD_BM_POOL_SHIFT	13
#define MVNETA_RXD_BM_POOL_MASK		(BIT(13) | BIT(14))
542 543 544 545 546 547
#define MVNETA_RXD_ERR_SUMMARY		BIT(16)
#define MVNETA_RXD_ERR_OVERRUN		BIT(17)
#define MVNETA_RXD_ERR_LEN		BIT(18)
#define MVNETA_RXD_ERR_RESOURCE		(BIT(17) | BIT(18))
#define MVNETA_RXD_ERR_CODE_MASK	(BIT(17) | BIT(18))
#define MVNETA_RXD_L3_IP4		BIT(25)
548 549 550 551
#define MVNETA_RXD_LAST_DESC		BIT(26)
#define MVNETA_RXD_FIRST_DESC		BIT(27)
#define MVNETA_RXD_FIRST_LAST_DESC	(MVNETA_RXD_FIRST_DESC | \
					 MVNETA_RXD_LAST_DESC)
552 553
#define MVNETA_RXD_L4_CSUM_OK		BIT(30)

554
#if defined(__LITTLE_ENDIAN)
555 556
struct mvneta_tx_desc {
	u32  command;		/* Options used by HW for packet transmitting.*/
A
Alexandre Belloni 已提交
557
	u16  reserved1;		/* csum_l4 (for future use)		*/
558 559 560 561 562 563 564 565
	u16  data_size;		/* Data size of transmitted packet in bytes */
	u32  buf_phys_addr;	/* Physical addr of transmitted buffer	*/
	u32  reserved2;		/* hw_cmd - (for future use, PMT)	*/
	u32  reserved3[4];	/* Reserved - (for future use)		*/
};

struct mvneta_rx_desc {
	u32  status;		/* Info about received packet		*/
566 567
	u16  reserved1;		/* pnc_info - (for future use, PnC)	*/
	u16  data_size;		/* Size of received packet in bytes	*/
568

569 570
	u32  buf_phys_addr;	/* Physical address of the buffer	*/
	u32  reserved2;		/* pnc_flow_id  (for future use, PnC)	*/
571

572 573 574
	u32  buf_cookie;	/* cookie for access to RX buffer in rx path */
	u16  reserved3;		/* prefetch_cmd, for future use		*/
	u16  reserved4;		/* csum_l4 - (for future use, PnC)	*/
575

576 577 578
	u32  reserved5;		/* pnc_extra PnC (for future use, PnC)	*/
	u32  reserved6;		/* hw_cmd (for future use, PnC and HWF)	*/
};
579 580 581
#else
struct mvneta_tx_desc {
	u16  data_size;		/* Data size of transmitted packet in bytes */
A
Alexandre Belloni 已提交
582
	u16  reserved1;		/* csum_l4 (for future use)		*/
583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604
	u32  command;		/* Options used by HW for packet transmitting.*/
	u32  reserved2;		/* hw_cmd - (for future use, PMT)	*/
	u32  buf_phys_addr;	/* Physical addr of transmitted buffer	*/
	u32  reserved3[4];	/* Reserved - (for future use)		*/
};

struct mvneta_rx_desc {
	u16  data_size;		/* Size of received packet in bytes	*/
	u16  reserved1;		/* pnc_info - (for future use, PnC)	*/
	u32  status;		/* Info about received packet		*/

	u32  reserved2;		/* pnc_flow_id  (for future use, PnC)	*/
	u32  buf_phys_addr;	/* Physical address of the buffer	*/

	u16  reserved4;		/* csum_l4 - (for future use, PnC)	*/
	u16  reserved3;		/* prefetch_cmd, for future use		*/
	u32  buf_cookie;	/* cookie for access to RX buffer in rx path */

	u32  reserved5;		/* pnc_extra PnC (for future use, PnC)	*/
	u32  reserved6;		/* hw_cmd (for future use, PnC and HWF)	*/
};
#endif
605

606 607 608 609 610 611 612 613 614 615 616 617 618 619
/* Origin of the buffer attached to a TX descriptor; selects which
 * union member of struct mvneta_tx_buf is valid.
 */
enum mvneta_tx_buf_type {
	MVNETA_TYPE_SKB,
	MVNETA_TYPE_XDP_TX,
	MVNETA_TYPE_XDP_NDO,
};

/* Per-descriptor bookkeeping for a transmitted buffer. */
struct mvneta_tx_buf {
	/* Discriminant for the anonymous union below */
	enum mvneta_tx_buf_type type;
	union {
		struct xdp_frame *xdpf;	/* valid for the XDP_TX/XDP_NDO types */
		struct sk_buff *skb;	/* valid for MVNETA_TYPE_SKB */
	};
};

620 621 622 623 624 625 626 627
struct mvneta_tx_queue {
	/* Number of this TX queue, in the range 0-7 */
	u8 id;

	/* Number of TX DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used TX DMA descriptor in the
628 629
	 * descriptor ring
	 */
630
	int count;
631
	int pending;
632 633
	int tx_stop_threshold;
	int tx_wake_threshold;
634

635 636
	/* Array of transmitted buffers */
	struct mvneta_tx_buf *buf;
637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;

	u32 done_pkts_coal;

	/* Virtual address of the TX DMA descriptors array */
	struct mvneta_tx_desc *descs;

	/* DMA address of the TX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last TX DMA descriptor */
	int last_desc;

	/* Index of the next TX DMA descriptor to process */
	int next_desc_to_proc;
657 658 659 660 661 662

	/* DMA buffers for TSO headers */
	char *tso_hdrs;

	/* DMA address of TSO headers */
	dma_addr_t tso_hdrs_phys;
663 664 665

	/* Affinity mask for CPUs*/
	cpumask_t affinity_mask;
666 667 668 669 670 671 672 673 674 675 676 677
};

struct mvneta_rx_queue {
	/* rx queue number, in the range 0-7 */
	u8 id;

	/* num of rx descriptors in the rx descriptor ring */
	int size;

	u32 pkts_coal;
	u32 time_coal;

678 679 680 681
	/* page_pool */
	struct page_pool *page_pool;
	struct xdp_rxq_info xdp_rxq;

682 683 684
	/* Virtual address of the RX buffer */
	void  **buf_virt_addr;

685 686 687 688 689 690 691 692 693 694 695
	/* Virtual address of the RX DMA descriptors array */
	struct mvneta_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;
696

697 698 699
	/* Index of first RX DMA descriptor to refill */
	int first_to_refill;
	u32 refill_num;
700 701
};

702
static enum cpuhp_state online_hpstate;
703 704 705
/* The hardware supports eight (8) rx queues, but we are only allowing
 * the first one to be used. Therefore, let's just allocate one queue.
 */
706
static int rxq_number = 8;
707 708 709 710
static int txq_number = 8;

static int rxq_def;

711 712
static int rx_copybreak __read_mostly = 256;

713 714 715
/* HW BM need that each port be identify by a unique ID */
static int global_port_id;

716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756
#define MVNETA_DRIVER_NAME "mvneta"
#define MVNETA_DRIVER_VERSION "1.0"

/* Utility/helper methods */

/* Write helper method: 32-bit MMIO write of @data to the port
 * register at byte @offset from pp->base.
 */
static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
{
	writel(data, pp->base + offset);
}

/* Read helper method: 32-bit MMIO read of the port register at byte
 * @offset from pp->base.
 */
static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
{
	return readl(pp->base + offset);
}

/* Advance the TXQ "get" (cleanup) index, wrapping around at the end
 * of the descriptor ring.
 */
static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq)
{
	if (++txq->txq_get_index == txq->size)
		txq->txq_get_index = 0;
}

/* Advance the TXQ "put" (insertion) index, wrapping around at the end
 * of the descriptor ring.
 */
static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)
{
	if (++txq->txq_put_index == txq->size)
		txq->txq_put_index = 0;
}


/* Clear all MIB counters */
static void mvneta_mib_counters_clear(struct mvneta_port *pp)
{
	int i;

	/* Perform dummy reads from MIB counters */
	for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
757 758 759
		mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
	mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT);
	mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT);
760 761 762
}

/* Get System Network Statistics */
763
static void
764 765
mvneta_get_stats64(struct net_device *dev,
		   struct rtnl_link_stats64 *stats)
766 767 768
{
	struct mvneta_port *pp = netdev_priv(dev);
	unsigned int start;
769
	int cpu;
770

771 772 773 774
	for_each_possible_cpu(cpu) {
		struct mvneta_pcpu_stats *cpu_stats;
		u64 rx_packets;
		u64 rx_bytes;
775 776
		u64 rx_dropped;
		u64 rx_errors;
777 778
		u64 tx_packets;
		u64 tx_bytes;
779

780 781
		cpu_stats = per_cpu_ptr(pp->stats, cpu);
		do {
782
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
783 784
			rx_packets = cpu_stats->es.ps.rx_packets;
			rx_bytes   = cpu_stats->es.ps.rx_bytes;
785 786
			rx_dropped = cpu_stats->rx_dropped;
			rx_errors  = cpu_stats->rx_errors;
787 788
			tx_packets = cpu_stats->es.ps.tx_packets;
			tx_bytes   = cpu_stats->es.ps.tx_bytes;
789
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
790

791 792
		stats->rx_packets += rx_packets;
		stats->rx_bytes   += rx_bytes;
793 794
		stats->rx_dropped += rx_dropped;
		stats->rx_errors  += rx_errors;
795 796 797
		stats->tx_packets += tx_packets;
		stats->tx_bytes   += tx_bytes;
	}
798 799 800 801 802 803

	stats->tx_dropped	= dev->stats.tx_dropped;
}

/* Rx descriptors helper methods */

804 805
/* Checks whether the RX descriptor having this status is both the first
 * and the last descriptor for the RX packet. Each RX packet is currently
806 807 808
 * received through a single RX descriptor, so not having each RX
 * descriptor with its first and last bits set is an error
 */
809
static int mvneta_rxq_desc_is_first_last(u32 status)
810
{
811
	return (status & MVNETA_RXD_FIRST_LAST_DESC) ==
812 813 814 815 816 817 818 819 820
		MVNETA_RXD_FIRST_LAST_DESC;
}

/* Add number of descriptors ready to receive new packets */
static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
					  struct mvneta_rx_queue *rxq,
					  int ndescs)
{
	/* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can
821 822
	 * be added at once
	 */
823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843
	while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
			    (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
			     MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
		ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
	}

	mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
		    (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
}

/* Get number of RX descriptors occupied by received packets */
static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
					struct mvneta_rx_queue *rxq)
{
	u32 status = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));

	return status & MVNETA_RXQ_OCCUPIED_ALL_MASK;
}

844
/* Update num of rx desc called upon return from rx path or
845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886
 * from mvneta_rxq_drop_pkts().
 */
static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
				       struct mvneta_rx_queue *rxq,
				       int rx_done, int rx_filled)
{
	u32 val;

	if ((rx_done <= 0xff) && (rx_filled <= 0xff)) {
		val = rx_done |
		  (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
		return;
	}

	/* Only 255 descriptors can be added at once */
	while ((rx_done > 0) || (rx_filled > 0)) {
		if (rx_done <= 0xff) {
			val = rx_done;
			rx_done = 0;
		} else {
			val = 0xff;
			rx_done -= 0xff;
		}
		if (rx_filled <= 0xff) {
			val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
			rx_filled = 0;
		} else {
			val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
			rx_filled -= 0xff;
		}
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
	}
}

/* Get pointer to next RX descriptor to be processed by SW */
static struct mvneta_rx_desc *
mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
{
	int rx_desc = rxq->next_desc_to_proc;

	rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
887
	prefetch(rxq->descs + rxq->next_desc_to_proc);
888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928
	return rxq->descs + rx_desc;
}

/* Change maximum receive size of the port. */
static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size)
{
	u32 val;

	val =  mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val &= ~MVNETA_GMAC_MAX_RX_SIZE_MASK;
	val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) <<
		MVNETA_GMAC_MAX_RX_SIZE_SHIFT;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}


/* Set rx queue offset */
static void mvneta_rxq_offset_set(struct mvneta_port *pp,
				  struct mvneta_rx_queue *rxq,
				  int offset)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK;

	/* Offset is in */
	val |= MVNETA_RXQ_PKT_OFFSET_MASK(offset >> 3);
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}


/* Tx descriptors helper methods */

/* Update HW with number of TX descriptors to be sent */
static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq,
				     int pend_desc)
{
	u32 val;

929 930 931 932 933 934 935 936
	pend_desc += txq->pending;

	/* Only 255 Tx descriptors can be added at once */
	do {
		val = min(pend_desc, 255);
		mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
		pend_desc -= val;
	} while (pend_desc > 0);
937
	txq->pending = 0;
938 939 940 941 942 943 944 945 946 947 948 949 950
}

/* Get pointer to next TX descriptor to be processed (send) by HW */
static struct mvneta_tx_desc *
mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
{
	int tx_desc = txq->next_desc_to_proc;

	txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
	return txq->descs + tx_desc;
}

/* Release the last allocated TX descriptor. Useful to handle DMA
951 952
 * mapping failures in the TX path.
 */
953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986
static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq)
{
	if (txq->next_desc_to_proc == 0)
		txq->next_desc_to_proc = txq->last_desc - 1;
	else
		txq->next_desc_to_proc--;
}

/* Set rxq buf size */
static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq,
				    int buf_size)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));

	val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
	val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);

	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
}

/* Disable buffer management (BM) */
static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
				  struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_HW_BUF_ALLOC;
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086
/* Enable buffer management (BM) */
static void mvneta_rxq_bm_enable(struct mvneta_port *pp,
				 struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val |= MVNETA_RXQ_HW_BUF_ALLOC;
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Notify HW about port's assignment of pool for bigger packets */
static void mvneta_rxq_long_pool_set(struct mvneta_port *pp,
				     struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_LONG_POOL_ID_MASK;
	val |= (pp->pool_long->id << MVNETA_RXQ_LONG_POOL_ID_SHIFT);

	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Notify HW about port's assignment of pool for smaller packets */
static void mvneta_rxq_short_pool_set(struct mvneta_port *pp,
				      struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_SHORT_POOL_ID_MASK;
	val |= (pp->pool_short->id << MVNETA_RXQ_SHORT_POOL_ID_SHIFT);

	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Set port's receive buffer size for assigned BM pool */
static inline void mvneta_bm_pool_bufsize_set(struct mvneta_port *pp,
					      int buf_size,
					      u8 pool_id)
{
	u32 val;

	if (!IS_ALIGNED(buf_size, 8)) {
		dev_warn(pp->dev->dev.parent,
			 "illegal buf_size value %d, round to %d\n",
			 buf_size, ALIGN(buf_size, 8));
		buf_size = ALIGN(buf_size, 8);
	}

	val = mvreg_read(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id));
	val |= buf_size & MVNETA_PORT_POOL_BUFFER_SZ_MASK;
	mvreg_write(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id), val);
}

/* Configure MBUS window in order to enable access BM internal SRAM */
static int mvneta_mbus_io_win_set(struct mvneta_port *pp, u32 base, u32 wsize,
				  u8 target, u8 attr)
{
	u32 win_enable, win_protect;
	int i;

	win_enable = mvreg_read(pp, MVNETA_BASE_ADDR_ENABLE);

	if (pp->bm_win_id < 0) {
		/* Find first not occupied window */
		for (i = 0; i < MVNETA_MAX_DECODE_WIN; i++) {
			if (win_enable & (1 << i)) {
				pp->bm_win_id = i;
				break;
			}
		}
		if (i == MVNETA_MAX_DECODE_WIN)
			return -ENOMEM;
	} else {
		i = pp->bm_win_id;
	}

	mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
	mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);

	if (i < 4)
		mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);

	mvreg_write(pp, MVNETA_WIN_BASE(i), (base & 0xffff0000) |
		    (attr << 8) | target);

	mvreg_write(pp, MVNETA_WIN_SIZE(i), (wsize - 1) & 0xffff0000);

	win_protect = mvreg_read(pp, MVNETA_ACCESS_PROTECT_ENABLE);
	win_protect |= 3 << (2 * i);
	mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);

	win_enable &= ~(1 << i);
	mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);

	return 0;
}

1087
static  int mvneta_bm_port_mbus_init(struct mvneta_port *pp)
1088
{
1089
	u32 wsize;
1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107
	u8 target, attr;
	int err;

	/* Get BM window information */
	err = mvebu_mbus_get_io_win_info(pp->bm_priv->bppi_phys_addr, &wsize,
					 &target, &attr);
	if (err < 0)
		return err;

	pp->bm_win_id = -1;

	/* Open NETA -> BM window */
	err = mvneta_mbus_io_win_set(pp, pp->bm_priv->bppi_phys_addr, wsize,
				     target, attr);
	if (err < 0) {
		netdev_info(pp->dev, "fail to configure mbus window to BM\n");
		return err;
	}
1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126
	return 0;
}

/* Assign and initialize pools for port. In case of fail
 * buffer manager will remain disabled for current port.
 */
static int mvneta_bm_port_init(struct platform_device *pdev,
			       struct mvneta_port *pp)
{
	struct device_node *dn = pdev->dev.of_node;
	u32 long_pool_id, short_pool_id;

	if (!pp->neta_armada3700) {
		int ret;

		ret = mvneta_bm_port_mbus_init(pp);
		if (ret)
			return ret;
	}
1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173

	if (of_property_read_u32(dn, "bm,pool-long", &long_pool_id)) {
		netdev_info(pp->dev, "missing long pool id\n");
		return -EINVAL;
	}

	/* Create port's long pool depending on mtu */
	pp->pool_long = mvneta_bm_pool_use(pp->bm_priv, long_pool_id,
					   MVNETA_BM_LONG, pp->id,
					   MVNETA_RX_PKT_SIZE(pp->dev->mtu));
	if (!pp->pool_long) {
		netdev_info(pp->dev, "fail to obtain long pool for port\n");
		return -ENOMEM;
	}

	pp->pool_long->port_map |= 1 << pp->id;

	mvneta_bm_pool_bufsize_set(pp, pp->pool_long->buf_size,
				   pp->pool_long->id);

	/* If short pool id is not defined, assume using single pool */
	if (of_property_read_u32(dn, "bm,pool-short", &short_pool_id))
		short_pool_id = long_pool_id;

	/* Create port's short pool */
	pp->pool_short = mvneta_bm_pool_use(pp->bm_priv, short_pool_id,
					    MVNETA_BM_SHORT, pp->id,
					    MVNETA_BM_SHORT_PKT_SIZE);
	if (!pp->pool_short) {
		netdev_info(pp->dev, "fail to obtain short pool for port\n");
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
		return -ENOMEM;
	}

	if (short_pool_id != long_pool_id) {
		pp->pool_short->port_map |= 1 << pp->id;
		mvneta_bm_pool_bufsize_set(pp, pp->pool_short->buf_size,
					   pp->pool_short->id);
	}

	return 0;
}

/* Update settings of a pool for bigger packets */
static void mvneta_bm_update_mtu(struct mvneta_port *pp, int mtu)
{
	struct mvneta_bm_pool *bm_pool = pp->pool_long;
1174
	struct hwbm_pool *hwbm_pool = &bm_pool->hwbm_pool;
1175 1176 1177 1178
	int num;

	/* Release all buffers from long pool */
	mvneta_bm_bufs_free(pp->bm_priv, bm_pool, 1 << pp->id);
1179
	if (hwbm_pool->buf_num) {
1180 1181 1182 1183 1184 1185 1186
		WARN(1, "cannot free all buffers in pool %d\n",
		     bm_pool->id);
		goto bm_mtu_err;
	}

	bm_pool->pkt_size = MVNETA_RX_PKT_SIZE(mtu);
	bm_pool->buf_size = MVNETA_RX_BUF_SIZE(bm_pool->pkt_size);
1187 1188
	hwbm_pool->frag_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
			SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(bm_pool->pkt_size));
1189 1190

	/* Fill entire long pool */
1191
	num = hwbm_pool_add(hwbm_pool, hwbm_pool->size);
1192
	if (num != hwbm_pool->size) {
1193
		WARN(1, "pool %d: %d of %d allocated\n",
1194
		     bm_pool->id, num, hwbm_pool->size);
1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205
		goto bm_mtu_err;
	}
	mvneta_bm_pool_bufsize_set(pp, bm_pool->buf_size, bm_pool->id);

	return;

bm_mtu_err:
	mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
	mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, 1 << pp->id);

	pp->bm_priv = NULL;
1206
	pp->rx_offset_correction = MVNETA_SKB_HEADROOM;
1207 1208 1209 1210
	mvreg_write(pp, MVNETA_ACC_MODE, MVNETA_ACC_MODE_EXT1);
	netdev_info(pp->dev, "fail to update MTU, fall back to software BM\n");
}

1211 1212 1213 1214 1215 1216 1217 1218 1219 1220
/* Start the Ethernet port RX and TX activity */
static void mvneta_port_up(struct mvneta_port *pp)
{
	int queue;
	u32 q_map;

	/* Enable all initialized TXs. */
	q_map = 0;
	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];
1221
		if (txq->descs)
1222 1223 1224 1225
			q_map |= (1 << queue);
	}
	mvreg_write(pp, MVNETA_TXQ_CMD, q_map);

1226
	q_map = 0;
1227
	/* Enable all initialized RXQs. */
1228 1229 1230
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];

1231
		if (rxq->descs)
1232 1233 1234
			q_map |= (1 << queue);
	}
	mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
1235 1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252 1253 1254 1255
}

/* Stop the Ethernet port activity */
static void mvneta_port_down(struct mvneta_port *pp)
{
	u32 val;
	int count;

	/* Stop Rx port activity. Check port Rx activity. */
	val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;

	/* Issue stop command for active channels only */
	if (val != 0)
		mvreg_write(pp, MVNETA_RXQ_CMD,
			    val << MVNETA_RXQ_DISABLE_SHIFT);

	/* Wait for all Rx activity to terminate. */
	count = 0;
	do {
		if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(pp->dev,
1256
				    "TIMEOUT for RX stopped ! rx_queue_cmd: 0x%08x\n",
1257 1258 1259 1260 1261 1262
				    val);
			break;
		}
		mdelay(1);

		val = mvreg_read(pp, MVNETA_RXQ_CMD);
1263
	} while (val & MVNETA_RXQ_ENABLE_MASK);
1264 1265

	/* Stop Tx port activity. Check port Tx activity. Issue stop
1266 1267
	 * command for active channels only
	 */
1268 1269 1270 1271 1272 1273 1274 1275 1276 1277 1278 1279 1280 1281 1282 1283 1284 1285 1286 1287
	val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;

	if (val != 0)
		mvreg_write(pp, MVNETA_TXQ_CMD,
			    (val << MVNETA_TXQ_DISABLE_SHIFT));

	/* Wait for all Tx activity to terminate. */
	count = 0;
	do {
		if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(pp->dev,
				    "TIMEOUT for TX stopped status=0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		/* Check TX Command reg that all Txqs are stopped */
		val = mvreg_read(pp, MVNETA_TXQ_CMD);

1288
	} while (val & MVNETA_TXQ_ENABLE_MASK);
1289 1290 1291 1292 1293 1294

	/* Double check to verify that TX FIFO is empty */
	count = 0;
	do {
		if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) {
			netdev_warn(pp->dev,
1295
				    "TX FIFO empty timeout status=0x%08x\n",
1296 1297 1298 1299 1300 1301 1302 1303 1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317 1318 1319 1320 1321 1322 1323 1324 1325 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387
				    val);
			break;
		}
		mdelay(1);

		val = mvreg_read(pp, MVNETA_PORT_STATUS);
	} while (!(val & MVNETA_TX_FIFO_EMPTY) &&
		 (val & MVNETA_TX_IN_PRGRS));

	udelay(200);
}

/* Enable the port by setting the port enable bit of the MAC control register */
static void mvneta_port_enable(struct mvneta_port *pp)
{
	u32 val;

	/* Enable port */
	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val |= MVNETA_GMAC0_PORT_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}

/* Disable the port and wait for about 200 usec before retuning */
static void mvneta_port_disable(struct mvneta_port *pp)
{
	u32 val;

	/* Reset the Enable bit in the Serial Control Register */
	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val &= ~MVNETA_GMAC0_PORT_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);

	udelay(200);
}

/* Multicast tables methods */

/* Set all entries in Unicast MAC Table; queue==-1 means reject all */
static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		val = 0;
	} else {
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
}

/* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		val = 0;
	} else {
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xfc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);

}

/* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
		val = 0;
	} else {
		memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xfc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
}

1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404 1405 1406 1407 1408 1409 1410 1411 1412 1413 1414 1415 1416 1417 1418 1419 1420 1421 1422 1423 1424
static void mvneta_percpu_unmask_interrupt(void *arg)
{
	struct mvneta_port *pp = arg;

	/* All the queue are unmasked, but actually only the ones
	 * mapped to this CPU will be unmasked
	 */
	mvreg_write(pp, MVNETA_INTR_NEW_MASK,
		    MVNETA_RX_INTR_MASK_ALL |
		    MVNETA_TX_INTR_MASK_ALL |
		    MVNETA_MISCINTR_INTR_MASK);
}

static void mvneta_percpu_mask_interrupt(void *arg)
{
	struct mvneta_port *pp = arg;

	/* All the queue are masked, but actually only the ones
	 * mapped to this CPU will be masked
	 */
	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
}

static void mvneta_percpu_clear_intr_cause(void *arg)
{
	struct mvneta_port *pp = arg;

	/* All the queue are cleared, but actually only the ones
	 * mapped to this CPU will be cleared
	 */
	mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
}

1425 1426 1427 1428 1429 1430 1431 1432 1433 1434 1435 1436 1437 1438
/* This method sets defaults to the NETA port:
 *	Clears interrupt Cause and Mask registers.
 *	Clears all MAC tables.
 *	Sets defaults to all registers.
 *	Resets RX and TX descriptor rings.
 *	Resets PHY.
 * This method can be called after mvneta_port_down() to return the port
 *	settings to defaults.
 */
static void mvneta_defaults_set(struct mvneta_port *pp)
{
	int cpu;
	int queue;
	u32 val;
1439
	int max_cpu = num_present_cpus();
1440 1441

	/* Clear all Cause registers */
1442
	on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);
1443 1444

	/* Mask all interrupts */
1445
	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
1446 1447 1448 1449 1450
	mvreg_write(pp, MVNETA_INTR_ENABLE, 0);

	/* Enable MBUS Retry bit16 */
	mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);

1451 1452 1453 1454
	/* Set CPU queue access map. CPUs are assigned to the RX and
	 * TX queues modulo their number. If there is only one TX
	 * queue then it is assigned to the CPU associated to the
	 * default RX queue.
1455
	 */
1456 1457
	for_each_present_cpu(cpu) {
		int rxq_map = 0, txq_map = 0;
1458
		int rxq, txq;
1459 1460 1461 1462 1463 1464 1465 1466 1467 1468 1469 1470 1471 1472 1473 1474
		if (!pp->neta_armada3700) {
			for (rxq = 0; rxq < rxq_number; rxq++)
				if ((rxq % max_cpu) == cpu)
					rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);

			for (txq = 0; txq < txq_number; txq++)
				if ((txq % max_cpu) == cpu)
					txq_map |= MVNETA_CPU_TXQ_ACCESS(txq);

			/* With only one TX queue we configure a special case
			 * which will allow to get all the irq on a single
			 * CPU
			 */
			if (txq_number == 1)
				txq_map = (cpu == pp->rxq_def) ?
					MVNETA_CPU_TXQ_ACCESS(1) : 0;
1475

1476 1477 1478 1479
		} else {
			txq_map = MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
			rxq_map = MVNETA_CPU_RXQ_ACCESS_ALL_MASK;
		}
1480 1481 1482

		mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
	}
1483 1484 1485 1486 1487 1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498

	/* Reset RX and TX DMAs */
	mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
	mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);

	/* Disable Legacy WRR, Disable EJP, Release from reset */
	mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
	for (queue = 0; queue < txq_number; queue++) {
		mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
		mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
	}

	mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
	mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);

	/* Set Port Acceleration Mode */
1499 1500 1501 1502 1503 1504
	if (pp->bm_priv)
		/* HW buffer management + legacy parser */
		val = MVNETA_ACC_MODE_EXT2;
	else
		/* SW buffer management + legacy parser */
		val = MVNETA_ACC_MODE_EXT1;
1505 1506
	mvreg_write(pp, MVNETA_ACC_MODE, val);

1507 1508 1509
	if (pp->bm_priv)
		mvreg_write(pp, MVNETA_BM_ADDRESS, pp->bm_priv->bppi_phys_addr);

1510
	/* Update val of portCfg register accordingly with all RxQueue types */
1511
	val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523
	mvreg_write(pp, MVNETA_PORT_CONFIG, val);

	val = 0;
	mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
	mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);

	/* Build PORT_SDMA_CONFIG_REG */
	val = 0;

	/* Default burst size */
	val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
	val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
1524
	val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP;
1525

1526 1527 1528
#if defined(__BIG_ENDIAN)
	val |= MVNETA_DESC_SWAP;
#endif
1529 1530 1531 1532

	/* Assign port SDMA configuration */
	mvreg_write(pp, MVNETA_SDMA_CONFIG, val);

1533 1534 1535 1536 1537 1538 1539
	/* Disable PHY polling in hardware, since we're using the
	 * kernel phylib to do this.
	 */
	val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
	val &= ~MVNETA_PHY_POLLING_ENABLE;
	mvreg_write(pp, MVNETA_UNIT_CONTROL, val);

1540 1541 1542 1543 1544 1545 1546 1547
	mvneta_set_ucast_table(pp, -1);
	mvneta_set_special_mcast_table(pp, -1);
	mvneta_set_other_mcast_table(pp, -1);

	/* Set port interrupt enable register - default enable all */
	mvreg_write(pp, MVNETA_INTR_ENABLE,
		    (MVNETA_RXQ_INTR_ENABLE_ALL_MASK
		     | MVNETA_TXQ_INTR_ENABLE_ALL_MASK));
1548 1549

	mvneta_mib_counters_clear(pp);
1550 1551 1552 1553 1554 1555 1556 1557 1558 1559 1560 1561 1562 1563 1564 1565 1566 1567 1568 1569 1570 1571 1572 1573 1574 1575 1576 1577 1578 1579 1580 1581 1582 1583 1584 1585 1586 1587 1588 1589 1590 1591 1592 1593 1594 1595 1596 1597 1598 1599 1600 1601 1602 1603 1604 1605 1606 1607 1608 1609 1610 1611 1612 1613 1614 1615 1616 1617 1618 1619 1620 1621 1622 1623 1624 1625 1626 1627 1628 1629 1630 1631 1632 1633 1634 1635 1636 1637 1638 1639 1640 1641
}

/* Set max sizes for tx queues */
static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size)

{
	u32 val, size, mtu;
	int queue;

	mtu = max_tx_size * 8;
	if (mtu > MVNETA_TX_MTU_MAX)
		mtu = MVNETA_TX_MTU_MAX;

	/* Set MTU */
	val = mvreg_read(pp, MVNETA_TX_MTU);
	val &= ~MVNETA_TX_MTU_MAX;
	val |= mtu;
	mvreg_write(pp, MVNETA_TX_MTU, val);

	/* TX token size and all TXQs token size must be larger that MTU */
	val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE);

	size = val & MVNETA_TX_TOKEN_SIZE_MAX;
	if (size < mtu) {
		size = mtu;
		val &= ~MVNETA_TX_TOKEN_SIZE_MAX;
		val |= size;
		mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val);
	}
	for (queue = 0; queue < txq_number; queue++) {
		val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue));

		size = val & MVNETA_TXQ_TOKEN_SIZE_MAX;
		if (size < mtu) {
			size = mtu;
			val &= ~MVNETA_TXQ_TOKEN_SIZE_MAX;
			val |= size;
			mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val);
		}
	}
}

/* Set unicast address */
static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
				  int queue)
{
	unsigned int unicast_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	/* Locate the Unicast table entry */
	last_nibble = (0xf & last_nibble);

	/* offset from unicast tbl base */
	tbl_offset = (last_nibble / 4) * 4;

	/* offset within the above reg  */
	reg_offset = last_nibble % 4;

	unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset));

	if (queue == -1) {
		/* Clear accepts frame bit at specified unicast DA tbl entry */
		unicast_reg &= ~(0xff << (8 * reg_offset));
	} else {
		unicast_reg &= ~(0xff << (8 * reg_offset));
		unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
	}

	mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg);
}

/* Set mac address */
static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr,
				int queue)
{
	unsigned int mac_h;
	unsigned int mac_l;

	if (queue != -1) {
		mac_l = (addr[4] << 8) | (addr[5]);
		mac_h = (addr[0] << 24) | (addr[1] << 16) |
			(addr[2] << 8) | (addr[3] << 0);

		mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
		mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
	}

	/* Accept frames of this address */
	mvneta_set_ucast_addr(pp, addr[5], queue);
}

1642 1643
/* Set the number of packets that will be received before RX interrupt
 * will be generated by HW.
1644 1645 1646 1647 1648 1649 1650 1651
 */
static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq, u32 value)
{
	mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id),
		    value | MVNETA_RXQ_NON_OCCUPIED(0));
}

1652 1653
/* Set the time delay in usec before RX interrupt will be generated by
 * HW.
1654 1655 1656 1657
 */
static void mvneta_rx_time_coal_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq, u32 value)
{
T
Thomas Petazzoni 已提交
1658 1659 1660 1661 1662
	u32 val;
	unsigned long clk_rate;

	clk_rate = clk_get_rate(pp->clk);
	val = (clk_rate / 1000000) * value;
1663 1664 1665 1666 1667 1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682

	mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
}

/* Set threshold for TX_DONE pkts coalescing */
static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,
					 struct mvneta_tx_queue *txq, u32 value)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id));

	val &= ~MVNETA_TXQ_SENT_THRESH_ALL_MASK;
	val |= MVNETA_TXQ_SENT_THRESH_MASK(value);

	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val);
}

/* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
1683 1684
				u32 phys_addr, void *virt_addr,
				struct mvneta_rx_queue *rxq)
1685
{
1686 1687
	int i;

1688
	rx_desc->buf_phys_addr = phys_addr;
1689 1690
	i = rx_desc - rxq->descs;
	rxq->buf_virt_addr[i] = virt_addr;
1691 1692 1693 1694 1695 1696 1697 1698 1699 1700 1701 1702 1703 1704 1705 1706 1707 1708 1709 1710 1711 1712 1713 1714 1715 1716 1717 1718 1719 1720 1721 1722 1723 1724
}

/* Decrement sent descriptors counter */
static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq,
				     int sent_desc)
{
	u32 val;

	/* Only 255 TX descriptors can be updated at once */
	while (sent_desc > 0xff) {
		val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT;
		mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
		sent_desc = sent_desc - 0xff;
	}

	val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT;
	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
}

/* Get number of TX descriptors already sent by HW */
static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
					struct mvneta_tx_queue *txq)
{
	u32 val;
	int sent_desc;

	val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
	sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >>
		MVNETA_TXQ_SENT_DESC_SHIFT;

	return sent_desc;
}

1725
/* Get number of sent descriptors and decrement counter.
1726 1727 1728 1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 1749
 *  The number of sent descriptors is returned.
 */
static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq)
{
	int sent_desc;

	/* Get number of sent descriptors */
	sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);

	/* Decrement sent descriptors counter */
	if (sent_desc)
		mvneta_txq_sent_desc_dec(pp, txq, sent_desc);

	return sent_desc;
}

/* Set TXQ descriptors fields relevant for CSUM calculation */
static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
				int ip_hdr_len, int l4_proto)
{
	u32 command;

	/* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
1750 1751 1752
	 * G_L4_chk, L4_type; required only for checksum
	 * calculation
	 */
1753 1754 1755
	command =  l3_offs    << MVNETA_TX_L3_OFF_SHIFT;
	command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;

1756
	if (l3_proto == htons(ETH_P_IP))
1757 1758 1759 1760 1761 1762 1763 1764 1765 1766 1767 1768 1769 1770 1771 1772 1773 1774 1775
		command |= MVNETA_TXD_IP_CSUM;
	else
		command |= MVNETA_TX_L3_IP6;

	if (l4_proto == IPPROTO_TCP)
		command |=  MVNETA_TX_L4_CSUM_FULL;
	else if (l4_proto == IPPROTO_UDP)
		command |= MVNETA_TX_L4_UDP | MVNETA_TX_L4_CSUM_FULL;
	else
		command |= MVNETA_TX_L4_CSUM_NOT;

	return command;
}


/* Display more error info */
static void mvneta_rx_error(struct mvneta_port *pp,
			    struct mvneta_rx_desc *rx_desc)
{
1776
	struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
1777 1778
	u32 status = rx_desc->status;

1779 1780 1781 1782 1783
	/* update per-cpu counter */
	u64_stats_update_begin(&stats->syncp);
	stats->rx_errors++;
	u64_stats_update_end(&stats->syncp);

1784 1785 1786 1787 1788 1789 1790 1791 1792 1793 1794 1795 1796 1797 1798 1799 1800 1801 1802 1803
	switch (status & MVNETA_RXD_ERR_CODE_MASK) {
	case MVNETA_RXD_ERR_CRC:
		netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_OVERRUN:
		netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_LEN:
		netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_RESOURCE:
		netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	}
}

1804 1805
/* Handle RX checksum offload based on the descriptor's status */
static void mvneta_rx_csum(struct mvneta_port *pp, u32 status,
1806 1807
			   struct sk_buff *skb)
{
1808 1809
	if ((pp->dev->features & NETIF_F_RXCSUM) &&
	    (status & MVNETA_RXD_L3_IP4) &&
1810
	    (status & MVNETA_RXD_L4_CSUM_OK)) {
1811 1812 1813 1814 1815 1816 1817 1818
		skb->csum = 0;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		return;
	}

	skb->ip_summed = CHECKSUM_NONE;
}

1819 1820 1821 1822
/* Return tx queue pointer (find last set bit) according to <cause> returned
 * form tx_done reg. <cause> must not be null. The return value is always a
 * valid queue for matching the first one found in <cause>.
 */
1823 1824 1825 1826 1827
static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
						     u32 cause)
{
	int queue = fls(cause) - 1;

1828
	return &pp->txqs[queue];
1829 1830 1831 1832
}

/* Free tx queue skbuffs */
static void mvneta_txq_bufs_free(struct mvneta_port *pp,
M
Marcin Wojtas 已提交
1833
				 struct mvneta_tx_queue *txq, int num,
1834
				 struct netdev_queue *nq, bool napi)
1835
{
M
Marcin Wojtas 已提交
1836
	unsigned int bytes_compl = 0, pkts_compl = 0;
1837 1838 1839
	int i;

	for (i = 0; i < num; i++) {
1840
		struct mvneta_tx_buf *buf = &txq->buf[txq->txq_get_index];
1841 1842
		struct mvneta_tx_desc *tx_desc = txq->descs +
			txq->txq_get_index;
M
Marcin Wojtas 已提交
1843

1844 1845
		mvneta_txq_inc_get(txq);

L
Lorenzo Bianconi 已提交
1846 1847
		if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr) &&
		    buf->type != MVNETA_TYPE_XDP_TX)
1848 1849 1850
			dma_unmap_single(pp->dev->dev.parent,
					 tx_desc->buf_phys_addr,
					 tx_desc->data_size, DMA_TO_DEVICE);
L
Lorenzo Bianconi 已提交
1851 1852 1853 1854 1855 1856
		if (buf->type == MVNETA_TYPE_SKB && buf->skb) {
			bytes_compl += buf->skb->len;
			pkts_compl++;
			dev_kfree_skb_any(buf->skb);
		} else if (buf->type == MVNETA_TYPE_XDP_TX ||
			   buf->type == MVNETA_TYPE_XDP_NDO) {
1857 1858 1859 1860
			if (napi && buf->type == MVNETA_TYPE_XDP_TX)
				xdp_return_frame_rx_napi(buf->xdpf);
			else
				xdp_return_frame(buf->xdpf);
L
Lorenzo Bianconi 已提交
1861
		}
1862
	}
M
Marcin Wojtas 已提交
1863 1864

	netdev_tx_completed_queue(nq, pkts_compl, bytes_compl);
1865 1866 1867
}

/* Handle end of transmission */
1868
static void mvneta_txq_done(struct mvneta_port *pp,
1869 1870 1871 1872 1873 1874
			   struct mvneta_tx_queue *txq)
{
	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
	int tx_done;

	tx_done = mvneta_txq_sent_desc_proc(pp, txq);
1875 1876 1877
	if (!tx_done)
		return;

1878
	mvneta_txq_bufs_free(pp, txq, tx_done, nq, true);
1879 1880 1881 1882

	txq->count -= tx_done;

	if (netif_tx_queue_stopped(nq)) {
1883
		if (txq->count <= txq->tx_wake_threshold)
1884 1885 1886 1887
			netif_tx_wake_queue(nq);
	}
}

1888
/* Refill processing for SW buffer management */
1889
/* Allocate page per descriptor */
1890
static int mvneta_rx_refill(struct mvneta_port *pp,
1891
			    struct mvneta_rx_desc *rx_desc,
1892 1893
			    struct mvneta_rx_queue *rxq,
			    gfp_t gfp_mask)
1894 1895
{
	dma_addr_t phys_addr;
1896
	struct page *page;
1897

1898 1899
	page = page_pool_alloc_pages(rxq->page_pool,
				     gfp_mask | __GFP_NOWARN);
1900
	if (!page)
1901 1902
		return -ENOMEM;

1903
	phys_addr = page_pool_get_dma_addr(page) + pp->rx_offset_correction;
1904
	mvneta_rx_desc_fill(rx_desc, phys_addr, page, rxq);
1905

1906 1907 1908 1909 1910 1911 1912 1913
	return 0;
}

/* Handle tx checksum */
static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		int ip_hdr_len = 0;
1914
		__be16 l3_proto = vlan_get_protocol(skb);
1915 1916
		u8 l4_proto;

1917
		if (l3_proto == htons(ETH_P_IP)) {
1918 1919 1920 1921 1922
			struct iphdr *ip4h = ip_hdr(skb);

			/* Calculate IPv4 checksum and L4 checksum */
			ip_hdr_len = ip4h->ihl;
			l4_proto = ip4h->protocol;
1923
		} else if (l3_proto == htons(ETH_P_IPV6)) {
1924 1925 1926 1927 1928 1929 1930 1931 1932 1933
			struct ipv6hdr *ip6h = ipv6_hdr(skb);

			/* Read l4_protocol from one of IPv6 extra headers */
			if (skb_network_header_len(skb) > 0)
				ip_hdr_len = (skb_network_header_len(skb) >> 2);
			l4_proto = ip6h->nexthdr;
		} else
			return MVNETA_TX_L4_CSUM_NOT;

		return mvneta_txq_desc_csum(skb_network_offset(skb),
1934
					    l3_proto, ip_hdr_len, l4_proto);
1935 1936 1937 1938 1939 1940 1941 1942 1943 1944 1945 1946
	}

	return MVNETA_TX_L4_CSUM_NOT;
}

/* Drop packets received by the RXQ and free buffers */
static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
				 struct mvneta_rx_queue *rxq)
{
	int rx_done, i;

	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
1947 1948 1949 1950 1951 1952 1953 1954 1955 1956 1957 1958 1959 1960 1961 1962 1963 1964
	if (rx_done)
		mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);

	if (pp->bm_priv) {
		for (i = 0; i < rx_done; i++) {
			struct mvneta_rx_desc *rx_desc =
						  mvneta_rxq_next_desc_get(rxq);
			u8 pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc);
			struct mvneta_bm_pool *bm_pool;

			bm_pool = &pp->bm_priv->bm_pools[pool_id];
			/* Return dropped buffer to the pool */
			mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
					      rx_desc->buf_phys_addr);
		}
		return;
	}

1965 1966
	for (i = 0; i < rxq->size; i++) {
		struct mvneta_rx_desc *rx_desc = rxq->descs + i;
1967
		void *data = rxq->buf_virt_addr[i];
1968 1969
		if (!data || !(rx_desc->buf_phys_addr))
			continue;
1970

1971
		page_pool_put_full_page(rxq->page_pool, data, false);
1972
	}
1973 1974 1975 1976
	if (xdp_rxq_info_is_reg(&rxq->xdp_rxq))
		xdp_rxq_info_unreg(&rxq->xdp_rxq);
	page_pool_destroy(rxq->page_pool);
	rxq->page_pool = NULL;
1977
}
1978

1979
static void
1980 1981
mvneta_update_stats(struct mvneta_port *pp,
		    struct mvneta_stats *ps)
1982 1983 1984 1985
{
	struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);

	u64_stats_update_begin(&stats->syncp);
1986 1987
	stats->es.ps.rx_packets += ps->rx_packets;
	stats->es.ps.rx_bytes += ps->rx_bytes;
1988 1989 1990 1991
	/* xdp */
	stats->es.ps.xdp_redirect += ps->xdp_redirect;
	stats->es.ps.xdp_pass += ps->xdp_pass;
	stats->es.ps.xdp_drop += ps->xdp_drop;
1992 1993 1994
	u64_stats_update_end(&stats->syncp);
}

1995 1996 1997 1998 1999 2000 2001 2002 2003 2004 2005
static inline
int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq)
{
	struct mvneta_rx_desc *rx_desc;
	int curr_desc = rxq->first_to_refill;
	int i;

	for (i = 0; (i < rxq->refill_num) && (i < 64); i++) {
		rx_desc = rxq->descs + curr_desc;
		if (!(rx_desc->buf_phys_addr)) {
			if (mvneta_rx_refill(pp, rx_desc, rxq, GFP_ATOMIC)) {
2006 2007
				struct mvneta_pcpu_stats *stats;

2008 2009
				pr_err("Can't refill queue %d. Done %d from %d\n",
				       rxq->id, i, rxq->refill_num);
2010 2011 2012 2013 2014

				stats = this_cpu_ptr(pp->stats);
				u64_stats_update_begin(&stats->syncp);
				stats->es.refill_error++;
				u64_stats_update_end(&stats->syncp);
2015 2016 2017 2018 2019 2020 2021 2022 2023 2024 2025
				break;
			}
		}
		curr_desc = MVNETA_QUEUE_NEXT_DESC(rxq, curr_desc);
	}
	rxq->refill_num -= i;
	rxq->first_to_refill = curr_desc;

	return i;
}

2026 2027 2028 2029 2030 2031 2032 2033 2034 2035
/* Return every page backing an xdp_buff to the page_pool: all fragment
 * pages first, then the head page (synced for <sync_len> bytes).
 */
static void
mvneta_xdp_put_buff(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
		    struct xdp_buff *xdp, int sync_len, bool napi)
{
	struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp);
	int idx = 0;

	while (idx < shinfo->nr_frags) {
		page_pool_put_full_page(rxq->page_pool,
					skb_frag_page(&shinfo->frags[idx]),
					napi);
		idx++;
	}
	page_pool_put_page(rxq->page_pool, virt_to_head_page(xdp->data),
			   sync_len, napi);
}

L
Lorenzo Bianconi 已提交
2040 2041 2042 2043 2044 2045 2046 2047 2048 2049 2050 2051 2052 2053 2054 2055 2056 2057 2058 2059 2060 2061 2062 2063 2064 2065 2066 2067 2068 2069 2070 2071 2072 2073 2074 2075 2076 2077 2078 2079 2080 2081 2082 2083 2084 2085 2086 2087
/* Queue one XDP frame on <txq>. For ndo_xdp_xmit (<dma_map> true) the
 * foreign buffer is DMA-mapped; for XDP_TX the page already belongs to
 * our page_pool and only needs a device sync. Returns MVNETA_XDP_TX on
 * success, MVNETA_XDP_DROPPED when the queue is full or mapping fails.
 */
static int
mvneta_xdp_submit_frame(struct mvneta_port *pp, struct mvneta_tx_queue *txq,
			struct xdp_frame *xdpf, bool dma_map)
{
	struct mvneta_tx_desc *tx_desc;
	struct mvneta_tx_buf *buf;
	dma_addr_t dma;

	if (txq->count >= txq->tx_stop_threshold)
		return MVNETA_XDP_DROPPED;

	tx_desc = mvneta_txq_next_desc_get(txq);
	buf = &txq->buf[txq->txq_put_index];

	if (!dma_map) {
		/* XDP_TX: buffer sits in a page_pool page */
		struct page *page = virt_to_page(xdpf->data);

		dma = page_pool_get_dma_addr(page) +
		      sizeof(*xdpf) + xdpf->headroom;
		dma_sync_single_for_device(pp->dev->dev.parent, dma,
					   xdpf->len, DMA_BIDIRECTIONAL);
		buf->type = MVNETA_TYPE_XDP_TX;
	} else {
		/* ndo_xdp_xmit: map the caller-owned frame */
		dma = dma_map_single(pp->dev->dev.parent, xdpf->data,
				     xdpf->len, DMA_TO_DEVICE);
		if (dma_mapping_error(pp->dev->dev.parent, dma)) {
			mvneta_txq_desc_put(txq);
			return MVNETA_XDP_DROPPED;
		}
		buf->type = MVNETA_TYPE_XDP_NDO;
	}
	buf->xdpf = xdpf;

	tx_desc->command = MVNETA_TXD_FLZ_DESC;
	tx_desc->buf_phys_addr = dma;
	tx_desc->data_size = xdpf->len;

	mvneta_txq_inc_put(txq);
	txq->pending++;
	txq->count++;

	return MVNETA_XDP_TX;
}

static int
mvneta_xdp_xmit_back(struct mvneta_port *pp, struct xdp_buff *xdp)
{
2088
	struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
L
Lorenzo Bianconi 已提交
2089 2090 2091 2092 2093 2094
	struct mvneta_tx_queue *txq;
	struct netdev_queue *nq;
	struct xdp_frame *xdpf;
	int cpu;
	u32 ret;

2095
	xdpf = xdp_convert_buff_to_frame(xdp);
L
Lorenzo Bianconi 已提交
2096 2097 2098 2099 2100 2101 2102 2103 2104
	if (unlikely(!xdpf))
		return MVNETA_XDP_DROPPED;

	cpu = smp_processor_id();
	txq = &pp->txqs[cpu % txq_number];
	nq = netdev_get_tx_queue(pp->dev, txq->id);

	__netif_tx_lock(nq, cpu);
	ret = mvneta_xdp_submit_frame(pp, txq, xdpf, false);
2105 2106 2107 2108 2109 2110 2111
	if (ret == MVNETA_XDP_TX) {
		u64_stats_update_begin(&stats->syncp);
		stats->es.ps.tx_bytes += xdpf->len;
		stats->es.ps.tx_packets++;
		stats->es.ps.xdp_tx++;
		u64_stats_update_end(&stats->syncp);

L
Lorenzo Bianconi 已提交
2112
		mvneta_txq_pend_desc_add(pp, txq, 0);
2113 2114 2115 2116
	} else {
		u64_stats_update_begin(&stats->syncp);
		stats->es.ps.xdp_tx_err++;
		u64_stats_update_end(&stats->syncp);
2117
	}
L
Lorenzo Bianconi 已提交
2118 2119 2120 2121 2122 2123 2124 2125 2126 2127
	__netif_tx_unlock(nq);

	return ret;
}

/* ndo_xdp_xmit callback: transmit a batch of XDP frames on this CPU's
 * tx queue. Frames that cannot be queued are returned to their owner.
 * Returns the number of frames accepted, or a negative errno.
 */
static int
mvneta_xdp_xmit(struct net_device *dev, int num_frame,
		struct xdp_frame **frames, u32 flags)
{
	struct mvneta_port *pp = netdev_priv(dev);
	struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
	int i, sent_bytes = 0, sent = num_frame;
	int cpu = smp_processor_id();
	struct mvneta_tx_queue *txq;
	struct netdev_queue *nq;
	u32 ret;

	if (unlikely(test_bit(__MVNETA_DOWN, &pp->state)))
		return -ENETDOWN;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	txq = &pp->txqs[cpu % txq_number];
	nq = netdev_get_tx_queue(pp->dev, txq->id);

	__netif_tx_lock(nq, cpu);
	for (i = 0; i < num_frame; i++) {
		ret = mvneta_xdp_submit_frame(pp, txq, frames[i], true);
		if (ret != MVNETA_XDP_TX) {
			/* Rejected frame goes back to the caller's pool */
			xdp_return_frame_rx_napi(frames[i]);
			sent--;
		} else {
			sent_bytes += frames[i]->len;
		}
	}

	if (unlikely(flags & XDP_XMIT_FLUSH))
		mvneta_txq_pend_desc_add(pp, txq, 0);
	__netif_tx_unlock(nq);

	u64_stats_update_begin(&stats->syncp);
	stats->es.ps.tx_bytes += sent_bytes;
	stats->es.ps.tx_packets += sent;
	stats->es.ps.xdp_xmit += sent;
	stats->es.ps.xdp_xmit_err += num_frame - sent;
	u64_stats_update_end(&stats->syncp);

	return sent;
}

2169 2170
static int
mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
2171
	       struct bpf_prog *prog, struct xdp_buff *xdp,
2172
	       u32 frame_sz, struct mvneta_stats *stats)
2173
{
2174
	unsigned int len, data_len, sync;
2175 2176 2177
	u32 ret, act;

	len = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction;
2178
	data_len = xdp->data_end - xdp->data;
2179
	act = bpf_prog_run_xdp(prog, xdp);
2180

2181 2182 2183 2184
	/* Due xdp_adjust_tail: DMA sync for_device cover max len CPU touch */
	sync = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction;
	sync = max(sync, len);

2185 2186
	switch (act) {
	case XDP_PASS:
2187
		stats->xdp_pass++;
2188
		return MVNETA_XDP_PASS;
2189 2190 2191 2192
	case XDP_REDIRECT: {
		int err;

		err = xdp_do_redirect(pp->dev, xdp, prog);
2193
		if (unlikely(err)) {
2194
			mvneta_xdp_put_buff(pp, rxq, xdp, sync, true);
2195 2196 2197
			ret = MVNETA_XDP_DROPPED;
		} else {
			ret = MVNETA_XDP_REDIR;
2198
			stats->xdp_redirect++;
2199 2200 2201
		}
		break;
	}
L
Lorenzo Bianconi 已提交
2202 2203
	case XDP_TX:
		ret = mvneta_xdp_xmit_back(pp, xdp);
2204 2205
		if (ret != MVNETA_XDP_TX)
			mvneta_xdp_put_buff(pp, rxq, xdp, sync, true);
L
Lorenzo Bianconi 已提交
2206
		break;
2207 2208
	default:
		bpf_warn_invalid_xdp_action(act);
2209
		fallthrough;
2210 2211
	case XDP_ABORTED:
		trace_xdp_exception(pp->dev, prog, act);
2212
		fallthrough;
2213
	case XDP_DROP:
2214
		mvneta_xdp_put_buff(pp, rxq, xdp, sync, true);
2215
		ret = MVNETA_XDP_DROPPED;
2216
		stats->xdp_drop++;
2217 2218 2219
		break;
	}

2220
	stats->rx_bytes += frame_sz + xdp->data_end - xdp->data - data_len;
2221 2222
	stats->rx_packets++;

2223 2224 2225
	return ret;
}

2226
static void
2227 2228 2229
mvneta_swbm_rx_frame(struct mvneta_port *pp,
		     struct mvneta_rx_desc *rx_desc,
		     struct mvneta_rx_queue *rxq,
2230
		     struct xdp_buff *xdp, int *size,
2231
		     struct page *page)
2232 2233 2234 2235 2236
{
	unsigned char *data = page_address(page);
	int data_len = -MVNETA_MH_SIZE, len;
	struct net_device *dev = pp->dev;
	enum dma_data_direction dma_dir;
2237
	struct skb_shared_info *sinfo;
2238

2239
	if (*size > MVNETA_MAX_RX_BUF_SIZE) {
2240 2241 2242
		len = MVNETA_MAX_RX_BUF_SIZE;
		data_len += len;
	} else {
2243
		len = *size;
2244 2245
		data_len += len - ETH_FCS_LEN;
	}
2246
	*size = *size - len;
2247 2248 2249 2250 2251 2252

	dma_dir = page_pool_get_dma_dir(rxq->page_pool);
	dma_sync_single_for_cpu(dev->dev.parent,
				rx_desc->buf_phys_addr,
				len, dma_dir);

2253 2254
	rx_desc->buf_phys_addr = 0;

2255 2256 2257
	/* Prefetch header */
	prefetch(data);

2258
	xdp->data_hard_start = data;
2259
	xdp->data = data + pp->rx_offset_correction + MVNETA_MH_SIZE;
2260 2261 2262
	xdp->data_end = xdp->data + data_len;
	xdp_set_data_meta_invalid(xdp);

2263 2264
	sinfo = xdp_get_shared_info_from_buff(xdp);
	sinfo->nr_frags = 0;
2265 2266 2267 2268 2269 2270
}

static void
mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
			    struct mvneta_rx_desc *rx_desc,
			    struct mvneta_rx_queue *rxq,
2271
			    struct xdp_buff *xdp, int *size,
2272 2273
			    struct page *page)
{
2274
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
2275 2276 2277 2278
	struct net_device *dev = pp->dev;
	enum dma_data_direction dma_dir;
	int data_len, len;

2279
	if (*size > MVNETA_MAX_RX_BUF_SIZE) {
2280 2281 2282
		len = MVNETA_MAX_RX_BUF_SIZE;
		data_len = len;
	} else {
2283
		len = *size;
2284 2285 2286 2287 2288 2289
		data_len = len - ETH_FCS_LEN;
	}
	dma_dir = page_pool_get_dma_dir(rxq->page_pool);
	dma_sync_single_for_cpu(dev->dev.parent,
				rx_desc->buf_phys_addr,
				len, dma_dir);
2290
	rx_desc->buf_phys_addr = 0;
2291 2292 2293 2294 2295 2296 2297 2298

	if (data_len > 0 && sinfo->nr_frags < MAX_SKB_FRAGS) {
		skb_frag_t *frag = &sinfo->frags[sinfo->nr_frags];

		skb_frag_off_set(frag, pp->rx_offset_correction);
		skb_frag_size_set(frag, data_len);
		__skb_frag_set_page(frag, page);
		sinfo->nr_frags++;
2299 2300
	} else {
		page_pool_put_full_page(rxq->page_pool, page, true);
2301
	}
2302
	*size -= len;
2303 2304
}

2305
static struct sk_buff *
M
Matteo Croce 已提交
2306
mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool,
2307 2308 2309 2310 2311 2312 2313 2314 2315 2316
		      struct xdp_buff *xdp, u32 desc_status)
{
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
	int i, num_frags = sinfo->nr_frags;
	struct sk_buff *skb;

	skb = build_skb(xdp->data_hard_start, PAGE_SIZE);
	if (!skb)
		return ERR_PTR(-ENOMEM);

2317
	skb_mark_for_recycle(skb);
2318 2319 2320 2321 2322 2323

	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	skb_put(skb, xdp->data_end - xdp->data);
	mvneta_rx_csum(pp, desc_status, skb);

	for (i = 0; i < num_frags; i++) {
2324
		skb_frag_t *frag = &sinfo->frags[i];
2325 2326

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
2327 2328
				skb_frag_page(frag), skb_frag_off(frag),
				skb_frag_size(frag), PAGE_SIZE);
2329 2330 2331 2332 2333
	}

	return skb;
}

2334
/* Main rx processing when using software buffer management */
static int mvneta_rx_swbm(struct napi_struct *napi,
			  struct mvneta_port *pp, int budget,
			  struct mvneta_rx_queue *rxq)
{
	int rx_proc = 0, rx_todo, refill, size = 0;
	struct net_device *dev = pp->dev;
	/* One xdp_buff is reused across loop iterations; multi-descriptor
	 * frames accumulate into it until the LAST descriptor is seen.
	 */
	struct xdp_buff xdp_buf = {
		.frame_sz = PAGE_SIZE,
		.rxq = &rxq->xdp_rxq,
	};
	struct mvneta_stats ps = {};
	struct bpf_prog *xdp_prog;
	u32 desc_status, frame_sz;

	/* Get number of received packets */
	rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq);

	rcu_read_lock();
	xdp_prog = READ_ONCE(pp->xdp_prog);

	/* Fairness NAPI loop */
	while (rx_proc < budget && rx_proc < rx_todo) {
		struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
		u32 rx_status, index;
		struct sk_buff *skb;
		struct page *page;

		index = rx_desc - rxq->descs;
		page = (struct page *)rxq->buf_virt_addr[index];

		rx_status = rx_desc->status;
		rx_proc++;
		rxq->refill_num++;

		if (rx_status & MVNETA_RXD_FIRST_DESC) {
			/* Check errors only for FIRST descriptor */
			if (rx_status & MVNETA_RXD_ERR_SUMMARY) {
				mvneta_rx_error(pp, rx_desc);
				goto next;
			}

			size = rx_desc->data_size;
			frame_sz = size - ETH_FCS_LEN;
			desc_status = rx_status;

			mvneta_swbm_rx_frame(pp, rx_desc, rxq, &xdp_buf,
					     &size, page);
		} else {
			/* Continuation descriptor without a started frame
			 * (e.g. after an error): just recycle the page.
			 */
			if (unlikely(!xdp_buf.data_hard_start)) {
				rx_desc->buf_phys_addr = 0;
				page_pool_put_full_page(rxq->page_pool, page,
							true);
				continue;
			}

			mvneta_swbm_add_rx_fragment(pp, rxq->descs == rx_desc ?
						    rx_desc : rx_desc, rxq, &xdp_buf,
						    &size, page);
		} /* Middle or Last descriptor */

		if (!(rx_status & MVNETA_RXD_LAST_DESC))
			/* no last descriptor this time */
			continue;

		/* size != 0 here means bytes were left over: the frame is
		 * truncated/inconsistent, so drop the whole buffer chain.
		 */
		if (size) {
			mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1, true);
			goto next;
		}

		/* Non-zero return: XDP consumed (tx/redirect) or dropped
		 * the frame, nothing left to push up the stack.
		 */
		if (xdp_prog &&
		    mvneta_run_xdp(pp, rxq, xdp_prog, &xdp_buf, frame_sz, &ps))
			goto next;

		skb = mvneta_swbm_build_skb(pp, rxq->page_pool, &xdp_buf, desc_status);
		if (IS_ERR(skb)) {
			struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);

			mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1, true);

			u64_stats_update_begin(&stats->syncp);
			stats->es.skb_alloc_error++;
			stats->rx_dropped++;
			u64_stats_update_end(&stats->syncp);

			goto next;
		}

		ps.rx_bytes += skb->len;
		ps.rx_packets++;

		skb->protocol = eth_type_trans(skb, dev);
		napi_gro_receive(napi, skb);
next:
		/* Mark the xdp_buff free so the next FIRST desc restarts it */
		xdp_buf.data_hard_start = NULL;
	}
	rcu_read_unlock();

	/* An unterminated frame at loop exit must be released */
	if (xdp_buf.data_hard_start)
		mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1, true);

	if (ps.xdp_redirect)
		xdp_do_flush_map();

	if (ps.rx_packets)
		mvneta_update_stats(pp, &ps);

	/* return some buffers to hardware queue, one at a time is too slow */
	refill = mvneta_rx_refill_queue(pp, rxq);

	/* Update rxq management counters */
	mvneta_rxq_desc_num_update(pp, rxq, rx_proc, refill);

	return ps.rx_packets;
}

2449
/* Main rx processing when using hardware buffer management */
static int mvneta_rx_hwbm(struct napi_struct *napi,
			  struct mvneta_port *pp, int rx_todo,
			  struct mvneta_rx_queue *rxq)
{
	struct net_device *dev = pp->dev;
	int rx_done;
	u32 rcvd_pkts = 0;
	u32 rcvd_bytes = 0;

	/* Get number of received packets */
	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);

	/* Never process more than the hardware reports as ready */
	if (rx_todo > rx_done)
		rx_todo = rx_done;

	rx_done = 0;

	/* Fairness NAPI loop */
	while (rx_done < rx_todo) {
		struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
		struct mvneta_bm_pool *bm_pool = NULL;
		struct sk_buff *skb;
		unsigned char *data;
		dma_addr_t phys_addr;
		u32 rx_status, frag_size;
		int rx_bytes, err;
		u8 pool_id;

		rx_done++;
		rx_status = rx_desc->status;
		/* Payload length excludes Marvell header and trailing FCS */
		rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
		data = (u8 *)(uintptr_t)rx_desc->buf_cookie;
		phys_addr = rx_desc->buf_phys_addr;
		pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc);
		bm_pool = &pp->bm_priv->bm_pools[pool_id];

		/* HWBM path only handles single-descriptor frames */
		if (!mvneta_rxq_desc_is_first_last(rx_status) ||
		    (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
err_drop_frame_ret_pool:
			/* Return the buffer to the pool */
			mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
					      rx_desc->buf_phys_addr);
err_drop_frame:
			mvneta_rx_error(pp, rx_desc);
			/* leave the descriptor untouched */
			continue;
		}

		if (rx_bytes <= rx_copybreak) {
			/* better copy a small frame and not unmap the DMA region */
			skb = netdev_alloc_skb_ip_align(dev, rx_bytes);
			if (unlikely(!skb))
				goto err_drop_frame_ret_pool;

			dma_sync_single_range_for_cpu(&pp->bm_priv->pdev->dev,
			                              rx_desc->buf_phys_addr,
			                              MVNETA_MH_SIZE + NET_SKB_PAD,
			                              rx_bytes,
			                              DMA_FROM_DEVICE);
			skb_put_data(skb, data + MVNETA_MH_SIZE + NET_SKB_PAD,
				     rx_bytes);

			skb->protocol = eth_type_trans(skb, dev);
			mvneta_rx_csum(pp, rx_status, skb);
			napi_gro_receive(napi, skb);

			rcvd_pkts++;
			rcvd_bytes += rx_bytes;

			/* Return the buffer to the pool */
			mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
					      rx_desc->buf_phys_addr);

			/* leave the descriptor and buffer untouched */
			continue;
		}

		/* Refill processing: replace the buffer we are about to
		 * hand to the stack before unmapping it.
		 */
		err = hwbm_pool_refill(&bm_pool->hwbm_pool, GFP_ATOMIC);
		if (err) {
			struct mvneta_pcpu_stats *stats;

			netdev_err(dev, "Linux processing - Can't refill\n");

			stats = this_cpu_ptr(pp->stats);
			u64_stats_update_begin(&stats->syncp);
			stats->es.refill_error++;
			u64_stats_update_end(&stats->syncp);

			goto err_drop_frame_ret_pool;
		}

		frag_size = bm_pool->hwbm_pool.frag_size;

		skb = build_skb(data, frag_size > PAGE_SIZE ? 0 : frag_size);

		/* After refill old buffer has to be unmapped regardless
		 * the skb is successfully built or not.
		 */
		dma_unmap_single(&pp->bm_priv->pdev->dev, phys_addr,
				 bm_pool->buf_size, DMA_FROM_DEVICE);
		if (!skb)
			goto err_drop_frame;

		rcvd_pkts++;
		rcvd_bytes += rx_bytes;

		/* Linux processing */
		skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD);
		skb_put(skb, rx_bytes);

		skb->protocol = eth_type_trans(skb, dev);

		mvneta_rx_csum(pp, rx_status, skb);

		napi_gro_receive(napi, skb);
	}

	if (rcvd_pkts) {
		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);

		u64_stats_update_begin(&stats->syncp);
		stats->es.ps.rx_packets += rcvd_pkts;
		stats->es.ps.rx_bytes += rcvd_bytes;
		u64_stats_update_end(&stats->syncp);
	}

	/* Update rxq management counters */
	mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);

	return rx_done;
}

2583 2584 2585 2586 2587
static inline void
mvneta_tso_put_hdr(struct sk_buff *skb,
		   struct mvneta_port *pp, struct mvneta_tx_queue *txq)
{
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2588 2589
	struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
	struct mvneta_tx_desc *tx_desc;
2590 2591 2592 2593 2594 2595 2596

	tx_desc = mvneta_txq_next_desc_get(txq);
	tx_desc->data_size = hdr_len;
	tx_desc->command = mvneta_skb_tx_csum(pp, skb);
	tx_desc->command |= MVNETA_TXD_F_DESC;
	tx_desc->buf_phys_addr = txq->tso_hdrs_phys +
				 txq->txq_put_index * TSO_HEADER_SIZE;
2597 2598 2599
	buf->type = MVNETA_TYPE_SKB;
	buf->skb = NULL;

2600 2601 2602 2603 2604 2605 2606 2607
	mvneta_txq_inc_put(txq);
}

/* Emit one TSO payload descriptor for <size> bytes at <data>.
 * The LAST flag is set on the final descriptor of each TCP segment, and
 * the skb is attached only on the very last descriptor of the skb so it
 * is freed exactly once on completion. Returns 0 or -ENOMEM.
 */
static inline int
mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
		    struct sk_buff *skb, char *data, int size,
		    bool last_tcp, bool is_last)
{
	struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
	struct mvneta_tx_desc *desc = mvneta_txq_next_desc_get(txq);

	desc->data_size = size;
	desc->buf_phys_addr = dma_map_single(dev->dev.parent, data,
					     size, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent,
		     desc->buf_phys_addr))) {
		mvneta_txq_desc_put(txq);
		return -ENOMEM;
	}

	desc->command = 0;
	buf->type = MVNETA_TYPE_SKB;
	buf->skb = NULL;

	if (last_tcp) {
		/* last descriptor in the TCP packet */
		desc->command = MVNETA_TXD_L_DESC;

		/* last descriptor in SKB */
		if (is_last)
			buf->skb = skb;
	}
	mvneta_txq_inc_put(txq);
	return 0;
}

/* Segment a GSO skb into per-MSS header+payload descriptor chains.
 * Returns the number of descriptors consumed, or 0 when the queue is
 * too full / the skb is malformed / a mapping fails (caller drops it).
 */
static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
			 struct mvneta_tx_queue *txq)
{
	int hdr_len, total_len, data_left;
	int desc_count = 0;
	struct mvneta_port *pp = netdev_priv(dev);
	struct tso_t tso;
	int i;

	/* Count needed descriptors */
	if ((txq->count + tso_count_descs(skb)) >= txq->size)
		return 0;

	/* Headers must be entirely in the linear part for tso_build_hdr */
	if (skb_headlen(skb) < (skb_transport_offset(skb) + tcp_hdrlen(skb))) {
		pr_info("*** Is this even  possible???!?!?\n");
		return 0;
	}

	/* Initialize the TSO handler, and prepare the first payload */
	hdr_len = tso_start(skb, &tso);

	total_len = skb->len - hdr_len;
	while (total_len > 0) {
		char *hdr;

		/* One MSS-sized segment per outer iteration */
		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
		total_len -= data_left;
		desc_count++;

		/* prepare packet headers: MAC + IP + TCP */
		hdr = txq->tso_hdrs + txq->txq_put_index * TSO_HEADER_SIZE;
		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);

		mvneta_tso_put_hdr(skb, pp, txq);

		/* Payload may span several source buffers */
		while (data_left > 0) {
			int size;
			desc_count++;

			size = min_t(int, tso.size, data_left);

			if (mvneta_tso_put_data(dev, txq, skb,
						 tso.data, size,
						 size == data_left,
						 total_len == 0))
				goto err_release;
			data_left -= size;

			tso_build_data(skb, &tso, size);
		}
	}

	return desc_count;

err_release:
	/* Release all used data descriptors; header descriptors must not
	 * be DMA-unmapped.
	 */
	for (i = desc_count - 1; i >= 0; i--) {
		struct mvneta_tx_desc *tx_desc = txq->descs + i;

		if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
			dma_unmap_single(pp->dev->dev.parent,
					 tx_desc->buf_phys_addr,
					 tx_desc->data_size,
					 DMA_TO_DEVICE);
		mvneta_txq_desc_put(txq);
	}
	return 0;
}

2707 2708 2709 2710 2711
/* Handle tx fragmentation processing */
static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
				  struct mvneta_tx_queue *txq)
{
	struct mvneta_tx_desc *tx_desc;
2712
	int i, nr_frags = skb_shinfo(skb)->nr_frags;
2713

2714
	for (i = 0; i < nr_frags; i++) {
2715
		struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
2716
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2717
		void *addr = skb_frag_address(frag);
2718 2719

		tx_desc = mvneta_txq_next_desc_get(txq);
2720
		tx_desc->data_size = skb_frag_size(frag);
2721 2722 2723 2724 2725 2726 2727 2728 2729 2730 2731

		tx_desc->buf_phys_addr =
			dma_map_single(pp->dev->dev.parent, addr,
				       tx_desc->data_size, DMA_TO_DEVICE);

		if (dma_mapping_error(pp->dev->dev.parent,
				      tx_desc->buf_phys_addr)) {
			mvneta_txq_desc_put(txq);
			goto error;
		}

2732
		if (i == nr_frags - 1) {
2733 2734
			/* Last descriptor */
			tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
2735
			buf->skb = skb;
2736 2737 2738
		} else {
			/* Descriptor in the middle: Not First, Not Last */
			tx_desc->command = 0;
2739
			buf->skb = NULL;
2740
		}
2741
		buf->type = MVNETA_TYPE_SKB;
2742
		mvneta_txq_inc_put(txq);
2743 2744 2745 2746 2747 2748
	}

	return 0;

error:
	/* Release all descriptors that were used to map fragments of
2749 2750
	 * this packet, as well as the corresponding DMA mappings
	 */
2751 2752 2753 2754 2755 2756 2757 2758 2759 2760 2761 2762 2763
	for (i = i - 1; i >= 0; i--) {
		tx_desc = txq->descs + i;
		dma_unmap_single(pp->dev->dev.parent,
				 tx_desc->buf_phys_addr,
				 tx_desc->data_size,
				 DMA_TO_DEVICE);
		mvneta_txq_desc_put(txq);
	}

	return -ENOMEM;
}

/* Main tx processing */
2764
/* ndo_start_xmit: queue one skb for transmission. Handles the TSO,
 * single-descriptor and multi-fragment cases; on any failure the skb is
 * dropped and counted, and NETDEV_TX_OK is always returned.
 */
static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);
	u16 txq_id = skb_get_queue_mapping(skb);
	struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
	struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
	struct mvneta_tx_desc *tx_desc;
	int len = skb->len;
	int frags = 0;
	u32 tx_cmd;

	if (!netif_running(dev))
		goto out;

	/* GSO skbs take the dedicated TSO path */
	if (skb_is_gso(skb)) {
		frags = mvneta_tx_tso(skb, dev, txq);
		goto out;
	}

	frags = skb_shinfo(skb)->nr_frags + 1;

	/* Get a descriptor for the first part of the packet */
	tx_desc = mvneta_txq_next_desc_get(txq);

	tx_cmd = mvneta_skb_tx_csum(pp, skb);

	tx_desc->data_size = skb_headlen(skb);

	tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
						tx_desc->data_size,
						DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent,
				       tx_desc->buf_phys_addr))) {
		mvneta_txq_desc_put(txq);
		frags = 0;
		goto out;
	}

	buf->type = MVNETA_TYPE_SKB;
	if (frags == 1) {
		/* First and Last descriptor */
		tx_cmd |= MVNETA_TXD_FLZ_DESC;
		tx_desc->command = tx_cmd;
		buf->skb = skb;
		mvneta_txq_inc_put(txq);
	} else {
		/* First but not Last */
		tx_cmd |= MVNETA_TXD_F_DESC;
		buf->skb = NULL;
		mvneta_txq_inc_put(txq);
		tx_desc->command = tx_cmd;
		/* Continue with other skb fragments */
		if (mvneta_tx_frag_process(pp, skb, txq)) {
			/* Fragment mapping failed: undo the head mapping */
			dma_unmap_single(dev->dev.parent,
					 tx_desc->buf_phys_addr,
					 tx_desc->data_size,
					 DMA_TO_DEVICE);
			mvneta_txq_desc_put(txq);
			frags = 0;
			goto out;
		}
	}

out:
	if (frags > 0) {
		struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);

		netdev_tx_sent_queue(nq, len);

		txq->count += frags;
		if (txq->count >= txq->tx_stop_threshold)
			netif_tx_stop_queue(nq);

		/* Batch the doorbell: only kick the hardware when no more
		 * skbs are coming, the queue stalled, or the pending count
		 * would overflow the DEC_SENT field.
		 */
		if (!netdev_xmit_more() || netif_xmit_stopped(nq) ||
		    txq->pending + frags > MVNETA_TXQ_DEC_SENT_MASK)
			mvneta_txq_pend_desc_add(pp, txq, frags);
		else
			txq->pending += frags;

		u64_stats_update_begin(&stats->syncp);
		stats->es.ps.tx_bytes += len;
		stats->es.ps.tx_packets++;
		u64_stats_update_end(&stats->syncp);
	} else {
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
	}

	return NETDEV_TX_OK;
}


/* Free tx resources, when resetting a port */
static void mvneta_txq_done_force(struct mvneta_port *pp,
				  struct mvneta_tx_queue *txq)

{
M
Marcin Wojtas 已提交
2862
	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
2863 2864
	int tx_done = txq->count;

2865
	mvneta_txq_bufs_free(pp, txq, tx_done, nq, false);
2866 2867 2868 2869 2870 2871 2872

	/* reset txq */
	txq->count = 0;
	txq->txq_put_index = 0;
	txq->txq_get_index = 0;
}

2873 2874 2875
/* Handle tx done - called in softirq context. The <cause_tx_done> argument
 * must be a valid cause according to MVNETA_TXQ_INTR_MASK_ALL.
 */
2876
static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done)
2877 2878 2879
{
	struct mvneta_tx_queue *txq;
	struct netdev_queue *nq;
2880
	int cpu = smp_processor_id();
2881

2882
	while (cause_tx_done) {
2883 2884 2885
		txq = mvneta_tx_done_policy(pp, cause_tx_done);

		nq = netdev_get_tx_queue(pp->dev, txq->id);
2886
		__netif_tx_lock(nq, cpu);
2887

2888 2889
		if (txq->count)
			mvneta_txq_done(pp, txq);
2890 2891 2892 2893 2894 2895

		__netif_tx_unlock(nq);
		cause_tx_done &= ~((1 << txq->id));
	}
}

2896
/* Compute crc8 of the specified address, using a unique algorithm ,
 * according to hw spec, different than generic crc8 algorithm
 * (polynomial 0x107, bits reduced from MSB to LSB).
 */
static int mvneta_addr_crc(unsigned char *addr)
{
	int crc = 0;
	int byte = 0;

	while (byte < ETH_ALEN) {
		int bit = 7;

		crc = (crc ^ addr[byte]) << 8;
		/* Reduce high bits in descending order, per the hw spec */
		while (bit >= 0) {
			if (crc & (0x100 << bit))
				crc ^= 0x107 << bit;
			bit--;
		}
		byte++;
	}

	return crc;
}

/* This method controls the net device special MAC multicast support.
 * The Special Multicast Table for MAC addresses supports MAC of the form
 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
 * Table entries in the DA-Filter table. This method set the Special
 * Multicast Table appropriate entry.
 */
static void mvneta_set_special_mcast_addr(struct mvneta_port *pp,
					  unsigned char last_byte,
					  int queue)
{
	unsigned int reg_idx = last_byte / 4;	/* register within the table */
	unsigned int shift = 8 * (last_byte % 4); /* byte lane in register */
	unsigned int val;

	val = mvreg_read(pp, MVNETA_DA_FILT_SPEC_MCAST + reg_idx * 4);

	/* Clear this entry's byte; when enabling (queue != -1), program the
	 * "pass" bit together with the destination queue number.
	 */
	val &= ~(0xff << shift);
	if (queue != -1)
		val |= (0x01 | (queue << 1)) << shift;

	mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + reg_idx * 4, val);
}

/* This method controls the network device Other MAC multicast support.
 * The Other Multicast Table is used for multicast of another type.
 * A CRC-8 is used as an index to the Other Multicast Table entries
 * in the DA-Filter table.
 * The method gets the CRC-8 value from the calling routine and
 * sets the Other Multicast Table appropriate entry according to the
 * specified CRC-8 .
 */
static void mvneta_set_other_mcast_addr(struct mvneta_port *pp,
					unsigned char crc8,
					int queue)
{
	unsigned int reg_off = (crc8 / 4) * 4;	/* register offset in table */
	unsigned int shift = 8 * (crc8 % 4);	/* byte lane in register */
	unsigned int val;

	val = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + reg_off);

	/* Clear this entry's byte; when enabling (queue != -1), program the
	 * "pass" bit together with the destination queue number.
	 */
	val &= ~(0xff << shift);
	if (queue != -1)
		val |= (0x01 | (queue << 1)) << shift;

	mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + reg_off, val);
}

/* The network device supports multicast using two tables:
 *    1) Special Multicast Table for MAC addresses of the form
 *       0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
 *       The MAC DA[7:0] bits are used as a pointer to the Special Multicast
 *       Table entries in the DA-Filter table.
 *    2) Other Multicast Table for multicast of another type. A CRC-8 value
 *       is used as an index to the Other Multicast Table entries in the
 *       DA-Filter table.
 */
static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr,
				 int queue)
{
	unsigned char crc;

	/* Addresses in the 01-00-5E-00-00-XX range go through the Special
	 * Multicast Table, indexed directly by the last address byte.
	 */
	if (!memcmp(p_addr, "\x01\x00\x5e\x00\x00", 5)) {
		mvneta_set_special_mcast_addr(pp, p_addr[5], queue);
		return 0;
	}

	/* Everything else is tracked per CRC-8 bucket with a refcount. */
	crc = mvneta_addr_crc(p_addr);
	if (queue != -1) {
		pp->mcast_count[crc]++;
	} else {
		if (pp->mcast_count[crc] == 0) {
			netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n",
				    crc);
			return -EINVAL;
		}

		pp->mcast_count[crc]--;
		if (pp->mcast_count[crc] != 0) {
			/* Other addresses still share this bucket - keep it */
			netdev_info(pp->dev,
				    "After delete there are %d valid Mcast for crc8=0x%02x\n",
				    pp->mcast_count[crc], crc);
			return -EINVAL;
		}
	}

	mvneta_set_other_mcast_addr(pp, crc, queue);

	return 0;
}

/* Configure Filtering mode of Ethernet port: enable or disable unicast
 * promiscuous mode by toggling the UPM bit and the "force unicast" bit.
 */
static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp,
					  int is_promisc)
{
	u32 port_cfg_reg, val;

	port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG);

	val = mvreg_read(pp, MVNETA_TYPE_PRIO);

	/* Set / Clear UPM bit in port configuration register */
	if (is_promisc) {
		/* Accept all Unicast addresses */
		port_cfg_reg |= MVNETA_UNI_PROMISC_MODE;
		val |= MVNETA_FORCE_UNI;
		/* NOTE(review): writing all-ones to the MAC address registers
		 * presumably makes every destination address match - confirm
		 * against the controller spec.
		 */
		mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff);
		mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff);
	} else {
		/* Reject all Unicast addresses */
		port_cfg_reg &= ~MVNETA_UNI_PROMISC_MODE;
		val &= ~MVNETA_FORCE_UNI;
	}

	mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg);
	mvreg_write(pp, MVNETA_TYPE_PRIO, val);
}

/* register unicast and multicast addresses */
static void mvneta_set_rx_mode(struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);
	struct netdev_hw_addr *ha;

	if (dev->flags & IFF_PROMISC) {
		/* Accept all: Multicast + Unicast */
		mvneta_rx_unicast_promisc_set(pp, 1);
3061 3062 3063
		mvneta_set_ucast_table(pp, pp->rxq_def);
		mvneta_set_special_mcast_table(pp, pp->rxq_def);
		mvneta_set_other_mcast_table(pp, pp->rxq_def);
3064 3065 3066 3067
	} else {
		/* Accept single Unicast */
		mvneta_rx_unicast_promisc_set(pp, 0);
		mvneta_set_ucast_table(pp, -1);
3068
		mvneta_mac_addr_set(pp, dev->dev_addr, pp->rxq_def);
3069 3070 3071

		if (dev->flags & IFF_ALLMULTI) {
			/* Accept all multicast */
3072 3073
			mvneta_set_special_mcast_table(pp, pp->rxq_def);
			mvneta_set_other_mcast_table(pp, pp->rxq_def);
3074 3075 3076 3077 3078 3079 3080 3081
		} else {
			/* Accept only initialized multicast */
			mvneta_set_special_mcast_table(pp, -1);
			mvneta_set_other_mcast_table(pp, -1);

			if (!netdev_mc_empty(dev)) {
				netdev_for_each_mc_addr(ha, dev) {
					mvneta_mcast_addr_set(pp, ha->addr,
3082
							      pp->rxq_def);
3083 3084 3085 3086 3087 3088 3089 3090
				}
			}
		}
	}
}

/* Interrupt handling - the callback for request_irq() */
static irqreturn_t mvneta_isr(int irq, void *dev_id)
3091 3092 3093 3094 3095 3096 3097 3098 3099 3100 3101
{
	struct mvneta_port *pp = (struct mvneta_port *)dev_id;

	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
	napi_schedule(&pp->napi);

	return IRQ_HANDLED;
}

/* Interrupt handling - the callback for request_percpu_irq() */
static irqreturn_t mvneta_percpu_isr(int irq, void *dev_id)
3102
{
3103
	struct mvneta_pcpu_port *port = (struct mvneta_pcpu_port *)dev_id;
3104

3105 3106
	disable_percpu_irq(port->pp->dev->irq);
	napi_schedule(&port->napi);
3107 3108 3109 3110

	return IRQ_HANDLED;
}

R
Russell King 已提交
3111
static void mvneta_link_change(struct mvneta_port *pp)
3112 3113 3114
{
	u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);

R
Russell King 已提交
3115
	phylink_mac_change(pp->phylink, !!(gmac_stat & MVNETA_GMAC_LINK_UP));
3116 3117
}

3118 3119 3120 3121 3122 3123 3124 3125 3126 3127 3128
/* NAPI handler
 * Bits 0 - 7 of the causeRxTx register indicate that are transmitted
 * packets on the corresponding TXQ (Bit 0 is for TX queue 1).
 * Bits 8 -15 of the cause Rx Tx register indicate that are received
 * packets on the corresponding RXQ (Bit 8 is for RX queue 0).
 * Each CPU has its own causeRxTx register
 */
static int mvneta_poll(struct napi_struct *napi, int budget)
{
	int rx_done = 0;
	u32 cause_rx_tx;
3129
	int rx_queue;
3130
	struct mvneta_port *pp = netdev_priv(napi->dev);
3131
	struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
3132 3133

	if (!netif_running(pp->dev)) {
3134
		napi_complete(napi);
3135 3136 3137 3138
		return rx_done;
	}

	/* Read cause register */
3139 3140 3141 3142 3143
	cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE);
	if (cause_rx_tx & MVNETA_MISCINTR_INTR_MASK) {
		u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE);

		mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
R
Russell King 已提交
3144 3145

		if (cause_misc & (MVNETA_CAUSE_PHY_STATUS_CHANGE |
3146
				  MVNETA_CAUSE_LINK_CHANGE))
R
Russell King 已提交
3147
			mvneta_link_change(pp);
3148
	}
3149 3150 3151

	/* Release Tx descriptors */
	if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) {
3152
		mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL));
3153 3154
		cause_rx_tx &= ~MVNETA_TX_INTR_MASK_ALL;
	}
3155

3156
	/* For the case where the last mvneta_poll did not process all
3157 3158
	 * RX packets
	 */
3159 3160
	cause_rx_tx |= pp->neta_armada3700 ? pp->cause_rx_tx :
		port->cause_rx_tx;
3161

3162
	rx_queue = fls(((cause_rx_tx >> 8) & 0xff));
3163 3164
	if (rx_queue) {
		rx_queue = rx_queue - 1;
3165
		if (pp->bm_priv)
3166 3167
			rx_done = mvneta_rx_hwbm(napi, pp, budget,
						 &pp->rxqs[rx_queue]);
3168
		else
3169 3170
			rx_done = mvneta_rx_swbm(napi, pp, budget,
						 &pp->rxqs[rx_queue]);
3171 3172
	}

3173
	if (rx_done < budget) {
3174
		cause_rx_tx = 0;
3175
		napi_complete_done(napi, rx_done);
3176 3177 3178 3179 3180 3181 3182 3183 3184 3185 3186 3187 3188

		if (pp->neta_armada3700) {
			unsigned long flags;

			local_irq_save(flags);
			mvreg_write(pp, MVNETA_INTR_NEW_MASK,
				    MVNETA_RX_INTR_MASK(rxq_number) |
				    MVNETA_TX_INTR_MASK(txq_number) |
				    MVNETA_MISCINTR_INTR_MASK);
			local_irq_restore(flags);
		} else {
			enable_percpu_irq(pp->dev->irq, 0);
		}
3189 3190
	}

3191 3192 3193 3194 3195
	if (pp->neta_armada3700)
		pp->cause_rx_tx = cause_rx_tx;
	else
		port->cause_rx_tx = cause_rx_tx;

3196 3197 3198
	return rx_done;
}

3199 3200 3201
static int mvneta_create_page_pool(struct mvneta_port *pp,
				   struct mvneta_rx_queue *rxq, int size)
{
3202
	struct bpf_prog *xdp_prog = READ_ONCE(pp->xdp_prog);
3203 3204
	struct page_pool_params pp_params = {
		.order = 0,
3205
		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
3206
		.pool_size = size,
3207
		.nid = NUMA_NO_NODE,
3208
		.dev = pp->dev->dev.parent,
3209
		.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
3210 3211
		.offset = pp->rx_offset_correction,
		.max_len = MVNETA_MAX_RX_BUF_SIZE,
3212 3213 3214 3215 3216 3217 3218 3219 3220 3221 3222 3223 3224 3225 3226 3227 3228 3229 3230 3231 3232 3233 3234 3235 3236 3237 3238 3239 3240
	};
	int err;

	rxq->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(rxq->page_pool)) {
		err = PTR_ERR(rxq->page_pool);
		rxq->page_pool = NULL;
		return err;
	}

	err = xdp_rxq_info_reg(&rxq->xdp_rxq, pp->dev, rxq->id);
	if (err < 0)
		goto err_free_pp;

	err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
					 rxq->page_pool);
	if (err)
		goto err_unregister_rxq;

	return 0;

err_unregister_rxq:
	xdp_rxq_info_unreg(&rxq->xdp_rxq);
err_free_pp:
	page_pool_destroy(rxq->page_pool);
	rxq->page_pool = NULL;
	return err;
}

3241 3242 3243 3244
/* Handle rxq fill: allocates rxq skbs; called when initializing a port */
static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
			   int num)
{
3245 3246 3247 3248 3249
	int i, err;

	err = mvneta_create_page_pool(pp, rxq, num);
	if (err < 0)
		return err;
3250 3251

	for (i = 0; i < num; i++) {
3252
		memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));
3253 3254 3255 3256 3257
		if (mvneta_rx_refill(pp, rxq->descs + i, rxq,
				     GFP_KERNEL) != 0) {
			netdev_err(pp->dev,
				   "%s:rxq %d, %d of %d buffs  filled\n",
				   __func__, rxq->id, i, num);
3258 3259 3260 3261 3262
			break;
		}
	}

	/* Add this number of RX descriptors as non occupied (ready to
3263 3264
	 * get packets)
	 */
3265 3266 3267 3268 3269 3270 3271 3272 3273 3274
	mvneta_rxq_non_occup_desc_add(pp, rxq, i);

	return i;
}

/* Free all packets pending transmit from all TXQs and reset TX port */
static void mvneta_tx_reset(struct mvneta_port *pp)
{
	int queue;

3275
	/* free the skb's in the tx ring */
3276 3277 3278 3279 3280 3281 3282 3283 3284 3285 3286 3287 3288 3289 3290
	for (queue = 0; queue < txq_number; queue++)
		mvneta_txq_done_force(pp, &pp->txqs[queue]);

	mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
	mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
}

/* Pulse the RX DMA reset bit to bring the RX engine to a clean state */
static void mvneta_rx_reset(struct mvneta_port *pp)
{
	mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
	mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
}

/* Rx/Tx queue initialization/cleanup methods */

3291 3292
static int mvneta_rxq_sw_init(struct mvneta_port *pp,
			      struct mvneta_rx_queue *rxq)
3293 3294 3295 3296 3297 3298 3299
{
	rxq->size = pp->rx_ring_size;

	/* Allocate memory for RX descriptors */
	rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
					rxq->size * MVNETA_DESC_ALIGNED_SIZE,
					&rxq->descs_phys, GFP_KERNEL);
3300
	if (!rxq->descs)
3301 3302 3303 3304
		return -ENOMEM;

	rxq->last_desc = rxq->size - 1;

3305 3306 3307 3308 3309 3310
	return 0;
}

static void mvneta_rxq_hw_init(struct mvneta_port *pp,
			       struct mvneta_rx_queue *rxq)
{
3311 3312 3313 3314 3315 3316 3317 3318
	/* Set Rx descriptors queue starting address */
	mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);

	/* Set coalescing pkts and time */
	mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
	mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);

3319
	if (!pp->bm_priv) {
3320 3321
		/* Set Offset */
		mvneta_rxq_offset_set(pp, rxq, 0);
3322
		mvneta_rxq_buf_size_set(pp, rxq, PAGE_SIZE < SZ_64K ?
3323
					MVNETA_MAX_RX_BUF_SIZE :
3324
					MVNETA_RX_BUF_SIZE(pp->pkt_size));
3325
		mvneta_rxq_bm_disable(pp, rxq);
3326
		mvneta_rxq_fill(pp, rxq, rxq->size);
3327
	} else {
3328 3329 3330 3331
		/* Set Offset */
		mvneta_rxq_offset_set(pp, rxq,
				      NET_SKB_PAD - pp->rx_offset_correction);

3332
		mvneta_rxq_bm_enable(pp, rxq);
3333
		/* Fill RXQ with buffers from RX pool */
3334 3335
		mvneta_rxq_long_pool_set(pp, rxq);
		mvneta_rxq_short_pool_set(pp, rxq);
3336
		mvneta_rxq_non_occup_desc_add(pp, rxq, rxq->size);
3337
	}
3338 3339 3340 3341 3342 3343 3344 3345 3346 3347 3348 3349 3350 3351
}

/* Create a specified RX queue */
static int mvneta_rxq_init(struct mvneta_port *pp,
			   struct mvneta_rx_queue *rxq)

{
	int ret;

	ret = mvneta_rxq_sw_init(pp, rxq);
	if (ret < 0)
		return ret;

	mvneta_rxq_hw_init(pp, rxq);

	return 0;
}

/* Cleanup Rx queue */
static void mvneta_rxq_deinit(struct mvneta_port *pp,
			      struct mvneta_rx_queue *rxq)
{
	mvneta_rxq_drop_pkts(pp, rxq);

	if (rxq->descs)
		dma_free_coherent(pp->dev->dev.parent,
				  rxq->size * MVNETA_DESC_ALIGNED_SIZE,
				  rxq->descs,
				  rxq->descs_phys);

	/* Reset software state so the queue can be re-initialized later */
	rxq->descs             = NULL;
	rxq->last_desc         = 0;
	rxq->next_desc_to_proc = 0;
	rxq->descs_phys        = 0;
	rxq->first_to_refill   = 0;
	rxq->refill_num        = 0;
}

3376 3377
static int mvneta_txq_sw_init(struct mvneta_port *pp,
			      struct mvneta_tx_queue *txq)
3378
{
3379 3380
	int cpu;

3381 3382
	txq->size = pp->tx_ring_size;

3383 3384 3385 3386 3387 3388 3389
	/* A queue must always have room for at least one skb.
	 * Therefore, stop the queue when the free entries reaches
	 * the maximum number of descriptors per skb.
	 */
	txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS;
	txq->tx_wake_threshold = txq->tx_stop_threshold / 2;

3390 3391 3392 3393
	/* Allocate memory for TX descriptors */
	txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
					txq->size * MVNETA_DESC_ALIGNED_SIZE,
					&txq->descs_phys, GFP_KERNEL);
3394
	if (!txq->descs)
3395 3396 3397 3398
		return -ENOMEM;

	txq->last_desc = txq->size - 1;

3399
	txq->buf = kmalloc_array(txq->size, sizeof(*txq->buf), GFP_KERNEL);
T
Tom Rix 已提交
3400
	if (!txq->buf)
3401
		return -ENOMEM;
3402 3403 3404 3405 3406

	/* Allocate DMA buffers for TSO MAC/IP/TCP headers */
	txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent,
					   txq->size * TSO_HEADER_SIZE,
					   &txq->tso_hdrs_phys, GFP_KERNEL);
T
Tom Rix 已提交
3407
	if (!txq->tso_hdrs)
3408
		return -ENOMEM;
3409

3410
	/* Setup XPS mapping */
3411 3412 3413
	if (pp->neta_armada3700)
		cpu = 0;
	else if (txq_number > 1)
3414 3415 3416 3417 3418 3419
		cpu = txq->id % num_present_cpus();
	else
		cpu = pp->rxq_def % num_present_cpus();
	cpumask_set_cpu(cpu, &txq->affinity_mask);
	netif_set_xps_queue(pp->dev, &txq->affinity_mask, txq->id);

3420 3421 3422
	return 0;
}

3423 3424 3425 3426 3427 3428 3429 3430 3431 3432 3433 3434 3435 3436 3437 3438 3439 3440 3441 3442 3443 3444 3445 3446 3447 3448 3449 3450 3451
static void mvneta_txq_hw_init(struct mvneta_port *pp,
			       struct mvneta_tx_queue *txq)
{
	/* Set maximum bandwidth for enabled TXQs */
	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);

	/* Set Tx descriptors queue starting address */
	mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);

	mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
}

/* Create and initialize a tx queue */
static int mvneta_txq_init(struct mvneta_port *pp,
			   struct mvneta_tx_queue *txq)
{
	int err = mvneta_txq_sw_init(pp, txq);

	if (err < 0)
		return err;

	/* Software resources are in place; program the hardware side */
	mvneta_txq_hw_init(pp, txq);

	return 0;
}

3452
/* Free allocated resources when mvneta_txq_init() fails to allocate memory*/
3453 3454
static void mvneta_txq_sw_deinit(struct mvneta_port *pp,
				 struct mvneta_tx_queue *txq)
3455
{
M
Marcin Wojtas 已提交
3456 3457
	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);

3458
	kfree(txq->buf);
3459

3460 3461 3462 3463
	if (txq->tso_hdrs)
		dma_free_coherent(pp->dev->dev.parent,
				  txq->size * TSO_HEADER_SIZE,
				  txq->tso_hdrs, txq->tso_hdrs_phys);
3464 3465 3466 3467 3468
	if (txq->descs)
		dma_free_coherent(pp->dev->dev.parent,
				  txq->size * MVNETA_DESC_ALIGNED_SIZE,
				  txq->descs, txq->descs_phys);

M
Marcin Wojtas 已提交
3469 3470
	netdev_tx_reset_queue(nq);

3471 3472 3473 3474
	txq->descs             = NULL;
	txq->last_desc         = 0;
	txq->next_desc_to_proc = 0;
	txq->descs_phys        = 0;
3475
}
3476

3477 3478 3479
static void mvneta_txq_hw_deinit(struct mvneta_port *pp,
				 struct mvneta_tx_queue *txq)
{
3480 3481 3482 3483 3484 3485 3486 3487 3488
	/* Set minimum bandwidth for disabled TXQs */
	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);

	/* Set Tx descriptors queue starting address and size */
	mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0);
	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
}

/* Tear down a TX queue: release software resources, then de-program hw */
static void mvneta_txq_deinit(struct mvneta_port *pp,
			      struct mvneta_tx_queue *txq)
{
	mvneta_txq_sw_deinit(pp, txq);
	mvneta_txq_hw_deinit(pp, txq);
}

3496 3497 3498 3499 3500 3501 3502 3503 3504 3505 3506 3507
/* Cleanup all Tx queues */
static void mvneta_cleanup_txqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < txq_number; queue++)
		mvneta_txq_deinit(pp, &pp->txqs[queue]);
}

/* Cleanup all Rx queues */
static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
{
3508 3509
	int queue;

3510
	for (queue = 0; queue < rxq_number; queue++)
3511
		mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
3512 3513 3514 3515 3516 3517
}


/* Init all Rx queues */
static int mvneta_setup_rxqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < rxq_number; queue++) {
		int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);

		if (err) {
			netdev_err(pp->dev, "%s: can't create rxq=%d\n",
				   __func__, queue);
			/* Roll back queues already initialized */
			mvneta_cleanup_rxqs(pp);
			return err;
		}
	}

	return 0;
}

/* Init all tx queues */
static int mvneta_setup_txqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < txq_number; queue++) {
		int err = mvneta_txq_init(pp, &pp->txqs[queue]);

		if (err) {
			netdev_err(pp->dev, "%s: can't create txq=%d\n",
				   __func__, queue);
			/* Roll back queues already initialized */
			mvneta_cleanup_txqs(pp);
			return err;
		}
	}

	return 0;
}

3552
static int mvneta_comphy_init(struct mvneta_port *pp, phy_interface_t interface)
3553 3554 3555
{
	int ret;

3556
	ret = phy_set_mode_ext(pp->comphy, PHY_MODE_ETHERNET, interface);
3557 3558 3559 3560 3561 3562
	if (ret)
		return ret;

	return phy_power_on(pp->comphy);
}

3563 3564 3565 3566 3567 3568 3569 3570 3571 3572 3573 3574 3575 3576 3577 3578 3579 3580 3581 3582 3583 3584 3585
static int mvneta_config_interface(struct mvneta_port *pp,
				   phy_interface_t interface)
{
	int ret = 0;

	if (pp->comphy) {
		if (interface == PHY_INTERFACE_MODE_SGMII ||
		    interface == PHY_INTERFACE_MODE_1000BASEX ||
		    interface == PHY_INTERFACE_MODE_2500BASEX) {
			ret = mvneta_comphy_init(pp, interface);
		}
	} else {
		switch (interface) {
		case PHY_INTERFACE_MODE_QSGMII:
			mvreg_write(pp, MVNETA_SERDES_CFG,
				    MVNETA_QSGMII_SERDES_PROTO);
			break;

		case PHY_INTERFACE_MODE_SGMII:
		case PHY_INTERFACE_MODE_1000BASEX:
			mvreg_write(pp, MVNETA_SERDES_CFG,
				    MVNETA_SGMII_SERDES_PROTO);
			break;
3586 3587 3588 3589 3590

		case PHY_INTERFACE_MODE_2500BASEX:
			mvreg_write(pp, MVNETA_SERDES_CFG,
				    MVNETA_HSGMII_SERDES_PROTO);
			break;
3591
		default:
3592
			break;
3593 3594 3595 3596 3597 3598 3599 3600
		}
	}

	pp->phy_interface = interface;

	return ret;
}

3601 3602
static void mvneta_start_dev(struct mvneta_port *pp)
{
3603
	int cpu;
3604

3605
	WARN_ON(mvneta_config_interface(pp, pp->phy_interface));
3606

3607 3608 3609 3610 3611 3612
	mvneta_max_rx_size_set(pp, pp->pkt_size);
	mvneta_txq_max_tx_size_set(pp, pp->pkt_size);

	/* start the Rx/Tx activity */
	mvneta_port_enable(pp);

3613 3614 3615 3616 3617
	if (!pp->neta_armada3700) {
		/* Enable polling on the port */
		for_each_online_cpu(cpu) {
			struct mvneta_pcpu_port *port =
				per_cpu_ptr(pp->ports, cpu);
3618

3619 3620 3621 3622
			napi_enable(&port->napi);
		}
	} else {
		napi_enable(&pp->napi);
3623
	}
3624

3625
	/* Unmask interrupts. It has to be done from each CPU */
3626 3627
	on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);

3628 3629
	mvreg_write(pp, MVNETA_INTR_MISC_MASK,
		    MVNETA_CAUSE_PHY_STATUS_CHANGE |
3630
		    MVNETA_CAUSE_LINK_CHANGE);
3631

R
Russell King 已提交
3632
	phylink_start(pp->phylink);
3633

3634
	/* We may have called phylink_speed_down before */
3635 3636
	phylink_speed_up(pp->phylink);

3637
	netif_tx_start_all_queues(pp->dev);
3638 3639

	clear_bit(__MVNETA_DOWN, &pp->state);
3640 3641 3642 3643
}

static void mvneta_stop_dev(struct mvneta_port *pp)
{
3644 3645
	unsigned int cpu;

3646 3647
	set_bit(__MVNETA_DOWN, &pp->state);

3648 3649 3650
	if (device_may_wakeup(&pp->dev->dev))
		phylink_speed_down(pp->phylink, false);

R
Russell King 已提交
3651
	phylink_stop(pp->phylink);
3652

3653 3654 3655 3656
	if (!pp->neta_armada3700) {
		for_each_online_cpu(cpu) {
			struct mvneta_pcpu_port *port =
				per_cpu_ptr(pp->ports, cpu);
3657

3658 3659 3660 3661
			napi_disable(&port->napi);
		}
	} else {
		napi_disable(&pp->napi);
3662
	}
3663 3664 3665 3666 3667 3668 3669 3670 3671 3672

	netif_carrier_off(pp->dev);

	mvneta_port_down(pp);
	netif_tx_stop_all_queues(pp->dev);

	/* Stop the port activity */
	mvneta_port_disable(pp);

	/* Clear all ethernet port interrupts */
3673
	on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);
3674 3675

	/* Mask all ethernet port interrupts */
3676
	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
3677 3678 3679

	mvneta_tx_reset(pp);
	mvneta_rx_reset(pp);
3680 3681

	WARN_ON(phy_power_off(pp->comphy));
3682 3683
}

3684 3685 3686 3687 3688 3689 3690 3691 3692 3693 3694 3695 3696 3697
static void mvneta_percpu_enable(void *arg)
{
	struct mvneta_port *pp = arg;

	enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE);
}

/* Disable this CPU's per-CPU port IRQ (run via on_each_cpu, e.g. while
 * reallocating queues during an MTU change).
 */
static void mvneta_percpu_disable(void *arg)
{
	struct mvneta_port *pp = arg;

	disable_percpu_irq(pp->dev->irq);
}

3698 3699 3700 3701 3702 3703
/* Change the device mtu */
static int mvneta_change_mtu(struct net_device *dev, int mtu)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int ret;

3704 3705 3706 3707 3708
	if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) {
		netdev_info(dev, "Illegal MTU value %d, rounding to %d\n",
			    mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8));
		mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8);
	}
3709

3710 3711 3712 3713 3714
	if (pp->xdp_prog && mtu > MVNETA_MAX_RX_BUF_SIZE) {
		netdev_info(dev, "Illegal MTU value %d for XDP mode\n", mtu);
		return -EINVAL;
	}

3715 3716
	dev->mtu = mtu;

3717
	if (!netif_running(dev)) {
3718 3719 3720
		if (pp->bm_priv)
			mvneta_bm_update_mtu(pp, mtu);

3721
		netdev_update_features(dev);
3722
		return 0;
3723
	}
3724

3725
	/* The interface is running, so we have to force a
3726
	 * reallocation of the queues
3727 3728
	 */
	mvneta_stop_dev(pp);
3729
	on_each_cpu(mvneta_percpu_disable, pp, true);
3730 3731 3732 3733

	mvneta_cleanup_txqs(pp);
	mvneta_cleanup_rxqs(pp);

3734 3735 3736
	if (pp->bm_priv)
		mvneta_bm_update_mtu(pp, mtu);

3737
	pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu);
3738 3739 3740

	ret = mvneta_setup_rxqs(pp);
	if (ret) {
3741
		netdev_err(dev, "unable to setup rxqs after MTU change\n");
3742 3743 3744
		return ret;
	}

3745 3746 3747 3748 3749
	ret = mvneta_setup_txqs(pp);
	if (ret) {
		netdev_err(dev, "unable to setup txqs after MTU change\n");
		return ret;
	}
3750

3751
	on_each_cpu(mvneta_percpu_enable, pp, true);
3752 3753
	mvneta_start_dev(pp);

3754 3755
	netdev_update_features(dev);

3756 3757 3758
	return 0;
}

3759 3760 3761 3762 3763 3764 3765 3766 3767 3768 3769 3770 3771 3772 3773
static netdev_features_t mvneta_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct mvneta_port *pp = netdev_priv(dev);

	if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) {
		features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
		netdev_info(dev,
			    "Disable IP checksum for MTU greater than %dB\n",
			    pp->tx_csum_limit);
	}

	return features;
}

3774 3775 3776 3777 3778 3779 3780 3781 3782 3783 3784 3785 3786 3787 3788
/* Get mac address */
static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
{
	u32 mac_addr_l, mac_addr_h;

	mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW);
	mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH);
	addr[0] = (mac_addr_h >> 24) & 0xFF;
	addr[1] = (mac_addr_h >> 16) & 0xFF;
	addr[2] = (mac_addr_h >> 8) & 0xFF;
	addr[3] = mac_addr_h & 0xFF;
	addr[4] = (mac_addr_l >> 8) & 0xFF;
	addr[5] = mac_addr_l & 0xFF;
}

3789 3790 3791 3792
/* Handle setting mac address */
static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
{
	struct mvneta_port *pp = netdev_priv(dev);
3793 3794
	struct sockaddr *sockaddr = addr;
	int ret;
3795

3796 3797 3798
	ret = eth_prepare_mac_addr_change(dev, addr);
	if (ret < 0)
		return ret;
3799 3800 3801 3802
	/* Remove previous address table entry */
	mvneta_mac_addr_set(pp, dev->dev_addr, -1);

	/* Set new addr in hw */
3803
	mvneta_mac_addr_set(pp, sockaddr->sa_data, pp->rxq_def);
3804

3805
	eth_commit_mac_addr_change(dev, addr);
3806 3807 3808
	return 0;
}

3809 3810
static void mvneta_validate(struct phylink_config *config,
			    unsigned long *supported,
R
Russell King 已提交
3811 3812
			    struct phylink_link_state *state)
{
3813
	struct net_device *ndev = to_net_dev(config->dev);
3814
	struct mvneta_port *pp = netdev_priv(ndev);
R
Russell King 已提交
3815 3816
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

3817
	/* We only support QSGMII, SGMII, 802.3z and RGMII modes */
R
Russell King 已提交
3818 3819 3820
	if (state->interface != PHY_INTERFACE_MODE_NA &&
	    state->interface != PHY_INTERFACE_MODE_QSGMII &&
	    state->interface != PHY_INTERFACE_MODE_SGMII &&
3821
	    !phy_interface_mode_is_8023z(state->interface) &&
R
Russell King 已提交
3822 3823 3824 3825 3826 3827 3828 3829 3830
	    !phy_interface_mode_is_rgmii(state->interface)) {
		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
		return;
	}

	/* Allow all the expected bits */
	phylink_set(mask, Autoneg);
	phylink_set_port_modes(mask);

3831 3832
	/* Asymmetric pause is unsupported */
	phylink_set(mask, Pause);
3833

3834
	/* Half-duplex at speeds higher than 100Mbit is unsupported */
3835 3836 3837 3838 3839
	if (pp->comphy || state->interface != PHY_INTERFACE_MODE_2500BASEX) {
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseX_Full);
	}
	if (pp->comphy || state->interface == PHY_INTERFACE_MODE_2500BASEX) {
3840
		phylink_set(mask, 2500baseT_Full);
3841 3842
		phylink_set(mask, 2500baseX_Full);
	}
3843 3844 3845 3846 3847 3848 3849 3850

	if (!phy_interface_mode_is_8023z(state->interface)) {
		/* 10M and 100M are only supported in non-802.3z mode */
		phylink_set(mask, 10baseT_Half);
		phylink_set(mask, 10baseT_Full);
		phylink_set(mask, 100baseT_Half);
		phylink_set(mask, 100baseT_Full);
	}
R
Russell King 已提交
3851 3852 3853 3854 3855

	bitmap_and(supported, supported, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_and(state->advertising, state->advertising, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
3856 3857 3858 3859 3860

	/* We can only operate at 2500BaseX or 1000BaseX.  If requested
	 * to advertise both, only report advertising at 2500BaseX.
	 */
	phylink_helper_basex_speed(state);
R
Russell King 已提交
3861 3862
}

3863 3864
static void mvneta_mac_pcs_get_state(struct phylink_config *config,
				     struct phylink_link_state *state)
3865
{
3866
	struct net_device *ndev = to_net_dev(config->dev);
3867
	struct mvneta_port *pp = netdev_priv(ndev);
R
Russell King 已提交
3868
	u32 gmac_stat;
3869

R
Russell King 已提交
3870
	gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
3871

R
Russell King 已提交
3872
	if (gmac_stat & MVNETA_GMAC_SPEED_1000)
3873 3874 3875
		state->speed =
			state->interface == PHY_INTERFACE_MODE_2500BASEX ?
			SPEED_2500 : SPEED_1000;
R
Russell King 已提交
3876 3877 3878 3879
	else if (gmac_stat & MVNETA_GMAC_SPEED_100)
		state->speed = SPEED_100;
	else
		state->speed = SPEED_10;
3880

R
Russell King 已提交
3881 3882 3883
	state->an_complete = !!(gmac_stat & MVNETA_GMAC_AN_COMPLETE);
	state->link = !!(gmac_stat & MVNETA_GMAC_LINK_UP);
	state->duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX);
3884

R
Russell King 已提交
3885
	state->pause = 0;
3886 3887 3888 3889
	if (gmac_stat & MVNETA_GMAC_RX_FLOW_CTRL_ENABLE)
		state->pause |= MLO_PAUSE_RX;
	if (gmac_stat & MVNETA_GMAC_TX_FLOW_CTRL_ENABLE)
		state->pause |= MLO_PAUSE_TX;
R
Russell King 已提交
3890 3891
}

3892
static void mvneta_mac_an_restart(struct phylink_config *config)
3893
{
3894
	struct net_device *ndev = to_net_dev(config->dev);
3895 3896 3897 3898 3899 3900 3901 3902 3903
	struct mvneta_port *pp = netdev_priv(ndev);
	u32 gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);

	mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
		    gmac_an | MVNETA_GMAC_INBAND_RESTART_AN);
	mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
		    gmac_an & ~MVNETA_GMAC_INBAND_RESTART_AN);
}

3904 3905
static void mvneta_mac_config(struct phylink_config *config, unsigned int mode,
			      const struct phylink_link_state *state)
R
Russell King 已提交
3906
{
3907
	struct net_device *ndev = to_net_dev(config->dev);
R
Russell King 已提交
3908
	struct mvneta_port *pp = netdev_priv(ndev);
3909
	u32 new_ctrl0, gmac_ctrl0 = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
R
Russell King 已提交
3910
	u32 new_ctrl2, gmac_ctrl2 = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
3911
	u32 new_ctrl4, gmac_ctrl4 = mvreg_read(pp, MVNETA_GMAC_CTRL_4);
R
Russell King 已提交
3912 3913 3914
	u32 new_clk, gmac_clk = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
	u32 new_an, gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);

3915
	new_ctrl0 = gmac_ctrl0 & ~MVNETA_GMAC0_PORT_1000BASE_X;
3916 3917
	new_ctrl2 = gmac_ctrl2 & ~(MVNETA_GMAC2_INBAND_AN_ENABLE |
				   MVNETA_GMAC2_PORT_RESET);
3918
	new_ctrl4 = gmac_ctrl4 & ~(MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE);
R
Russell King 已提交
3919 3920 3921 3922
	new_clk = gmac_clk & ~MVNETA_GMAC_1MS_CLOCK_ENABLE;
	new_an = gmac_an & ~(MVNETA_GMAC_INBAND_AN_ENABLE |
			     MVNETA_GMAC_INBAND_RESTART_AN |
			     MVNETA_GMAC_AN_SPEED_EN |
3923
			     MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL |
R
Russell King 已提交
3924 3925 3926
			     MVNETA_GMAC_AN_FLOW_CTRL_EN |
			     MVNETA_GMAC_AN_DUPLEX_EN);

3927 3928 3929 3930 3931 3932
	/* Even though it might look weird, when we're configured in
	 * SGMII or QSGMII mode, the RGMII bit needs to be set.
	 */
	new_ctrl2 |= MVNETA_GMAC2_PORT_RGMII;

	if (state->interface == PHY_INTERFACE_MODE_QSGMII ||
3933 3934
	    state->interface == PHY_INTERFACE_MODE_SGMII ||
	    phy_interface_mode_is_8023z(state->interface))
3935 3936
		new_ctrl2 |= MVNETA_GMAC2_PCS_ENABLE;

3937 3938 3939
	if (phylink_test(state->advertising, Pause))
		new_an |= MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL;

R
Russell King 已提交
3940
	if (!phylink_autoneg_inband(mode)) {
3941 3942 3943
		/* Phy or fixed speed - nothing to do, leave the
		 * configured speed, duplex and flow control as-is.
		 */
3944
	} else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
R
Russell King 已提交
3945 3946 3947 3948
		/* SGMII mode receives the state from the PHY */
		new_ctrl2 |= MVNETA_GMAC2_INBAND_AN_ENABLE;
		new_clk |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
		new_an = (new_an & ~(MVNETA_GMAC_FORCE_LINK_DOWN |
3949 3950 3951 3952
				     MVNETA_GMAC_FORCE_LINK_PASS |
				     MVNETA_GMAC_CONFIG_MII_SPEED |
				     MVNETA_GMAC_CONFIG_GMII_SPEED |
				     MVNETA_GMAC_CONFIG_FULL_DUPLEX)) |
R
Russell King 已提交
3953 3954 3955
			 MVNETA_GMAC_INBAND_AN_ENABLE |
			 MVNETA_GMAC_AN_SPEED_EN |
			 MVNETA_GMAC_AN_DUPLEX_EN;
3956 3957 3958 3959 3960
	} else {
		/* 802.3z negotiation - only 1000base-X */
		new_ctrl0 |= MVNETA_GMAC0_PORT_1000BASE_X;
		new_clk |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
		new_an = (new_an & ~(MVNETA_GMAC_FORCE_LINK_DOWN |
3961 3962
				     MVNETA_GMAC_FORCE_LINK_PASS |
				     MVNETA_GMAC_CONFIG_MII_SPEED)) |
3963 3964 3965 3966
			 MVNETA_GMAC_INBAND_AN_ENABLE |
			 MVNETA_GMAC_CONFIG_GMII_SPEED |
			 /* The MAC only supports FD mode */
			 MVNETA_GMAC_CONFIG_FULL_DUPLEX;
3967 3968 3969

		if (state->pause & MLO_PAUSE_AN && state->an_enabled)
			new_an |= MVNETA_GMAC_AN_FLOW_CTRL_EN;
R
Russell King 已提交
3970
	}
3971

R
Russell King 已提交
3972 3973 3974
	/* Armada 370 documentation says we can only change the port mode
	 * and in-band enable when the link is down, so force it down
	 * while making these changes. We also do this for GMAC_CTRL2 */
3975 3976
	if ((new_ctrl0 ^ gmac_ctrl0) & MVNETA_GMAC0_PORT_1000BASE_X ||
	    (new_ctrl2 ^ gmac_ctrl2) & MVNETA_GMAC2_INBAND_AN_ENABLE ||
R
Russell King 已提交
3977 3978 3979 3980
	    (new_an  ^ gmac_an) & MVNETA_GMAC_INBAND_AN_ENABLE) {
		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
			    (gmac_an & ~MVNETA_GMAC_FORCE_LINK_PASS) |
			    MVNETA_GMAC_FORCE_LINK_DOWN);
3981
	}
R
Russell King 已提交
3982

3983

3984 3985 3986
	/* When at 2.5G, the link partner can send frames with shortened
	 * preambles.
	 */
3987
	if (state->interface == PHY_INTERFACE_MODE_2500BASEX)
3988 3989
		new_ctrl4 |= MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE;

3990 3991 3992 3993
	if (pp->phy_interface != state->interface) {
		if (pp->comphy)
			WARN_ON(phy_power_off(pp->comphy));
		WARN_ON(mvneta_config_interface(pp, state->interface));
3994
	}
3995

3996 3997
	if (new_ctrl0 != gmac_ctrl0)
		mvreg_write(pp, MVNETA_GMAC_CTRL_0, new_ctrl0);
R
Russell King 已提交
3998 3999
	if (new_ctrl2 != gmac_ctrl2)
		mvreg_write(pp, MVNETA_GMAC_CTRL_2, new_ctrl2);
4000 4001
	if (new_ctrl4 != gmac_ctrl4)
		mvreg_write(pp, MVNETA_GMAC_CTRL_4, new_ctrl4);
R
Russell King 已提交
4002 4003 4004 4005
	if (new_clk != gmac_clk)
		mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, new_clk);
	if (new_an != gmac_an)
		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, new_an);
4006 4007 4008 4009 4010 4011

	if (gmac_ctrl2 & MVNETA_GMAC2_PORT_RESET) {
		while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
			MVNETA_GMAC2_PORT_RESET) != 0)
			continue;
	}
4012
}
4013

R
Russell King 已提交
4014 4015 4016 4017 4018 4019 4020 4021 4022 4023 4024 4025
/* Enable or disable the LPI (low-power idle) request bit for EEE. */
static void mvneta_set_eee(struct mvneta_port *pp, bool enable)
{
	u32 lpi_ctl1;

	lpi_ctl1 = mvreg_read(pp, MVNETA_LPI_CTRL_1);
	if (enable)
		lpi_ctl1 |= MVNETA_LPI_REQUEST_ENABLE;
	else
		lpi_ctl1 &= ~MVNETA_LPI_REQUEST_ENABLE;
	mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi_ctl1);
}

4026 4027
/* phylink mac_link_down: take the port down; in non-inband modes also
 * force the link state down in hardware, and turn off EEE.
 */
static void mvneta_mac_link_down(struct phylink_config *config,
				 unsigned int mode, phy_interface_t interface)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct mvneta_port *pp = netdev_priv(ndev);
	u32 val;

	mvneta_port_down(pp);

	if (!phylink_autoneg_inband(mode)) {
		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
		val &= ~MVNETA_GMAC_FORCE_LINK_PASS;
		val |= MVNETA_GMAC_FORCE_LINK_DOWN;
		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
	}

	pp->eee_active = false;
	mvneta_set_eee(pp, false);
}

4046 4047 4048 4049 4050
/* phylink mac_link_up: bring the link up. In non-inband modes force the
 * resolved speed/duplex/pause into the AN config register; in inband
 * modes only the flow-control override bit is managed here. Finally
 * start the port and (re)initialise EEE if a PHY is attached.
 */
static void mvneta_mac_link_up(struct phylink_config *config,
			       struct phy_device *phy,
			       unsigned int mode, phy_interface_t interface,
			       int speed, int duplex,
			       bool tx_pause, bool rx_pause)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct mvneta_port *pp = netdev_priv(ndev);
	u32 val;

	if (!phylink_autoneg_inband(mode)) {
		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
		val &= ~(MVNETA_GMAC_FORCE_LINK_DOWN |
			 MVNETA_GMAC_CONFIG_MII_SPEED |
			 MVNETA_GMAC_CONFIG_GMII_SPEED |
			 MVNETA_GMAC_CONFIG_FLOW_CTRL |
			 MVNETA_GMAC_CONFIG_FULL_DUPLEX);
		val |= MVNETA_GMAC_FORCE_LINK_PASS;

		if (speed == SPEED_1000 || speed == SPEED_2500)
			val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
		else if (speed == SPEED_100)
			val |= MVNETA_GMAC_CONFIG_MII_SPEED;

		if (duplex == DUPLEX_FULL)
			val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;

		if (tx_pause || rx_pause)
			val |= MVNETA_GMAC_CONFIG_FLOW_CTRL;

		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
	} else {
		/* When inband doesn't cover flow control or flow control is
		 * disabled, we need to manually configure it. This bit will
		 * only have effect if MVNETA_GMAC_AN_FLOW_CTRL_EN is unset.
		 */
		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
		val &= ~MVNETA_GMAC_CONFIG_FLOW_CTRL;

		if (tx_pause || rx_pause)
			val |= MVNETA_GMAC_CONFIG_FLOW_CTRL;

		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
	}

	mvneta_port_up(pp);

	if (phy && pp->eee_enabled) {
		pp->eee_active = phy_init_eee(phy, 0) >= 0;
		mvneta_set_eee(pp, pp->eee_active && pp->tx_lpi_enabled);
	}
}

R
Russell King 已提交
4099 4100
static const struct phylink_mac_ops mvneta_phylink_ops = {
	.validate = mvneta_validate,
4101
	.mac_pcs_get_state = mvneta_mac_pcs_get_state,
4102
	.mac_an_restart = mvneta_mac_an_restart,
R
Russell King 已提交
4103 4104 4105 4106
	.mac_config = mvneta_mac_config,
	.mac_link_down = mvneta_mac_link_down,
	.mac_link_up = mvneta_mac_link_up,
};
4107 4108 4109

static int mvneta_mdio_probe(struct mvneta_port *pp)
{
4110
	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
R
Russell King 已提交
4111
	int err = phylink_of_phy_connect(pp->phylink, pp->dn, 0);
4112

R
Russell King 已提交
4113 4114
	if (err)
		netdev_err(pp->dev, "could not attach PHY: %d\n", err);
4115

R
Russell King 已提交
4116
	phylink_ethtool_get_wol(pp->phylink, &wol);
4117 4118
	device_set_wakeup_capable(&pp->dev->dev, !!wol.supported);

4119 4120 4121 4122
	/* PHY WoL may be enabled but device wakeup disabled */
	if (wol.supported)
		device_set_wakeup_enable(&pp->dev->dev, !!wol.wolopts);

R
Russell King 已提交
4123
	return err;
4124 4125 4126 4127
}

static void mvneta_mdio_remove(struct mvneta_port *pp)
{
R
Russell King 已提交
4128
	phylink_disconnect_phy(pp->phylink);
4129 4130
}

4131 4132 4133 4134
/* Electing a CPU must be done in an atomic way: it should be done
 * after or before the removal/insertion of a CPU and this function is
 * not reentrant.
 */
4135 4136
static void mvneta_percpu_elect(struct mvneta_port *pp)
{
4137 4138 4139 4140 4141
	int elected_cpu = 0, max_cpu, cpu, i = 0;

	/* Use the cpu associated to the rxq when it is online, in all
	 * the other cases, use the cpu 0 which can't be offline.
	 */
4142
	if (pp->rxq_def < nr_cpu_ids && cpu_online(pp->rxq_def))
4143
		elected_cpu = pp->rxq_def;
4144

4145
	max_cpu = num_present_cpus();
4146 4147

	for_each_online_cpu(cpu) {
4148 4149 4150 4151 4152 4153 4154
		int rxq_map = 0, txq_map = 0;
		int rxq;

		for (rxq = 0; rxq < rxq_number; rxq++)
			if ((rxq % max_cpu) == cpu)
				rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);

4155
		if (cpu == elected_cpu)
4156 4157
			/* Map the default receive queue queue to the
			 * elected CPU
4158
			 */
4159
			rxq_map |= MVNETA_CPU_RXQ_ACCESS(pp->rxq_def);
4160 4161 4162 4163 4164 4165

		/* We update the TX queue map only if we have one
		 * queue. In this case we associate the TX queue to
		 * the CPU bound to the default RX queue
		 */
		if (txq_number == 1)
4166
			txq_map = (cpu == elected_cpu) ?
4167 4168 4169 4170 4171
				MVNETA_CPU_TXQ_ACCESS(1) : 0;
		else
			txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) &
				MVNETA_CPU_TXQ_ACCESS_ALL_MASK;

4172 4173 4174 4175 4176 4177 4178
		mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);

		/* Update the interrupt mask on each CPU according the
		 * new mapping
		 */
		smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt,
					 pp, true);
4179
		i++;
4180

4181 4182 4183
	}
};

4184
static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
4185
{
4186 4187 4188
	int other_cpu;
	struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
						  node_online);
4189 4190
	struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);

4191 4192 4193 4194 4195
	/* Armada 3700's per-cpu interrupt for mvneta is broken, all interrupts
	 * are routed to CPU 0, so we don't need all the cpu-hotplug support
	 */
	if (pp->neta_armada3700)
		return 0;
4196

4197 4198 4199 4200 4201 4202 4203 4204 4205 4206
	spin_lock(&pp->lock);
	/*
	 * Configuring the driver for a new CPU while the driver is
	 * stopping is racy, so just avoid it.
	 */
	if (pp->is_stopped) {
		spin_unlock(&pp->lock);
		return 0;
	}
	netif_tx_stop_all_queues(pp->dev);
4207

4208 4209 4210 4211 4212 4213 4214 4215 4216 4217
	/*
	 * We have to synchronise on tha napi of each CPU except the one
	 * just being woken up
	 */
	for_each_online_cpu(other_cpu) {
		if (other_cpu != cpu) {
			struct mvneta_pcpu_port *other_port =
				per_cpu_ptr(pp->ports, other_cpu);

			napi_synchronize(&other_port->napi);
4218
		}
4219
	}
4220

4221 4222 4223
	/* Mask all ethernet port interrupts */
	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
	napi_enable(&port->napi);
4224

4225 4226 4227 4228 4229
	/*
	 * Enable per-CPU interrupts on the CPU that is
	 * brought up.
	 */
	mvneta_percpu_enable(pp);
4230

4231 4232 4233 4234 4235
	/*
	 * Enable per-CPU interrupt on the one CPU we care
	 * about.
	 */
	mvneta_percpu_elect(pp);
4236

4237 4238 4239 4240
	/* Unmask all ethernet port interrupts */
	on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
	mvreg_write(pp, MVNETA_INTR_MISC_MASK,
		    MVNETA_CAUSE_PHY_STATUS_CHANGE |
4241
		    MVNETA_CAUSE_LINK_CHANGE);
4242 4243 4244 4245
	netif_tx_start_all_queues(pp->dev);
	spin_unlock(&pp->lock);
	return 0;
}
4246

4247 4248 4249 4250 4251
/* CPU hotplug "down prepare" callback: quiesce NAPI and disable the
 * per-CPU interrupt on the CPU about to go offline.
 */
static int mvneta_cpu_down_prepare(unsigned int cpu, struct hlist_node *node)
{
	struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
						  node_online);
	struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);

	/*
	 * Thanks to this lock we are sure that any pending cpu election is
	 * done.
	 */
	spin_lock(&pp->lock);
	/* Mask all ethernet port interrupts */
	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
	spin_unlock(&pp->lock);

	napi_synchronize(&port->napi);
	napi_disable(&port->napi);
	/* Disable per-CPU interrupts on the CPU that is brought down. */
	mvneta_percpu_disable(pp);
	return 0;
}

static int mvneta_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
	struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
						  node_dead);

	/* Check if a new CPU must be elected now this on is down */
	spin_lock(&pp->lock);
	mvneta_percpu_elect(pp);
	spin_unlock(&pp->lock);
	/* Unmask all ethernet port interrupts */
	on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
	mvreg_write(pp, MVNETA_INTR_MISC_MASK,
		    MVNETA_CAUSE_PHY_STATUS_CHANGE |
4282
		    MVNETA_CAUSE_LINK_CHANGE);
4283 4284
	netif_tx_start_all_queues(pp->dev);
	return 0;
4285 4286
}

4287 4288 4289
static int mvneta_open(struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);
4290
	int ret;
4291 4292 4293 4294 4295 4296 4297 4298 4299 4300 4301 4302

	pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);

	ret = mvneta_setup_rxqs(pp);
	if (ret)
		return ret;

	ret = mvneta_setup_txqs(pp);
	if (ret)
		goto err_cleanup_rxqs;

	/* Connect to port interrupt line */
4303 4304 4305 4306 4307 4308
	if (pp->neta_armada3700)
		ret = request_irq(pp->dev->irq, mvneta_isr, 0,
				  dev->name, pp);
	else
		ret = request_percpu_irq(pp->dev->irq, mvneta_percpu_isr,
					 dev->name, pp->ports);
4309 4310 4311 4312 4313
	if (ret) {
		netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
		goto err_cleanup_txqs;
	}

4314 4315 4316 4317 4318
	if (!pp->neta_armada3700) {
		/* Enable per-CPU interrupt on all the CPU to handle our RX
		 * queue interrupts
		 */
		on_each_cpu(mvneta_percpu_enable, pp, true);
4319

4320 4321 4322 4323 4324 4325 4326 4327
		pp->is_stopped = false;
		/* Register a CPU notifier to handle the case where our CPU
		 * might be taken offline.
		 */
		ret = cpuhp_state_add_instance_nocalls(online_hpstate,
						       &pp->node_online);
		if (ret)
			goto err_free_irq;
4328

4329 4330 4331 4332 4333
		ret = cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
						       &pp->node_dead);
		if (ret)
			goto err_free_online_hp;
	}
4334

4335 4336 4337
	ret = mvneta_mdio_probe(pp);
	if (ret < 0) {
		netdev_err(dev, "cannot probe MDIO bus\n");
4338
		goto err_free_dead_hp;
4339 4340 4341 4342 4343 4344
	}

	mvneta_start_dev(pp);

	return 0;

4345
err_free_dead_hp:
4346 4347 4348
	if (!pp->neta_armada3700)
		cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
						    &pp->node_dead);
4349
err_free_online_hp:
4350 4351 4352
	if (!pp->neta_armada3700)
		cpuhp_state_remove_instance_nocalls(online_hpstate,
						    &pp->node_online);
4353
err_free_irq:
4354 4355 4356 4357 4358 4359
	if (pp->neta_armada3700) {
		free_irq(pp->dev->irq, pp);
	} else {
		on_each_cpu(mvneta_percpu_disable, pp, true);
		free_percpu_irq(pp->dev->irq, pp->ports);
	}
4360 4361 4362 4363 4364 4365 4366 4367 4368 4369 4370 4371
err_cleanup_txqs:
	mvneta_cleanup_txqs(pp);
err_cleanup_rxqs:
	mvneta_cleanup_rxqs(pp);
	return ret;
}

/* Stop the port, free port interrupt line */
static int mvneta_stop(struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);

4372 4373 4374 4375 4376 4377 4378 4379 4380
	if (!pp->neta_armada3700) {
		/* Inform that we are stopping so we don't want to setup the
		 * driver for new CPUs in the notifiers. The code of the
		 * notifier for CPU online is protected by the same spinlock,
		 * so when we get the lock, the notifer work is done.
		 */
		spin_lock(&pp->lock);
		pp->is_stopped = true;
		spin_unlock(&pp->lock);
4381

4382 4383
		mvneta_stop_dev(pp);
		mvneta_mdio_remove(pp);
4384

4385 4386 4387 4388
		cpuhp_state_remove_instance_nocalls(online_hpstate,
						    &pp->node_online);
		cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
						    &pp->node_dead);
4389 4390 4391 4392 4393 4394 4395 4396
		on_each_cpu(mvneta_percpu_disable, pp, true);
		free_percpu_irq(dev->irq, pp->ports);
	} else {
		mvneta_stop_dev(pp);
		mvneta_mdio_remove(pp);
		free_irq(dev->irq, pp);
	}

4397 4398 4399 4400 4401 4402
	mvneta_cleanup_rxqs(pp);
	mvneta_cleanup_txqs(pp);

	return 0;
}

4403 4404
static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
R
Russell King 已提交
4405
	struct mvneta_port *pp = netdev_priv(dev);
4406

R
Russell King 已提交
4407
	return phylink_mii_ioctl(pp->phylink, ifr, cmd);
4408 4409
}

4410 4411 4412 4413 4414 4415 4416 4417
/* Install or remove an XDP program. Rejects MTUs too large for a single
 * RX buffer and ports using hardware buffer management. The device is
 * restarted only when transitioning between "program" and "no program".
 */
static int mvneta_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
			    struct netlink_ext_ack *extack)
{
	bool need_update, running = netif_running(dev);
	struct mvneta_port *pp = netdev_priv(dev);
	struct bpf_prog *old_prog;

	if (prog && dev->mtu > MVNETA_MAX_RX_BUF_SIZE) {
		NL_SET_ERR_MSG_MOD(extack, "MTU too large for XDP");
		return -EOPNOTSUPP;
	}

	if (pp->bm_priv) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Hardware Buffer Management not supported on XDP");
		return -EOPNOTSUPP;
	}

	need_update = !!pp->xdp_prog != !!prog;
	if (running && need_update)
		mvneta_stop(dev);

	old_prog = xchg(&pp->xdp_prog, prog);
	if (old_prog)
		bpf_prog_put(old_prog);

	if (running && need_update)
		return mvneta_open(dev);

	return 0;
}

/* ndo_bpf dispatcher: only XDP program setup is supported. */
static int mvneta_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return mvneta_xdp_setup(dev, xdp->prog, xdp->extack);
	default:
		return -EINVAL;
	}
}

4452 4453
/* Ethtool methods */

4454
/* Set link ksettings (phy address, speed) for ethtools */
4455 4456 4457
static int
mvneta_ethtool_set_link_ksettings(struct net_device *ndev,
				  const struct ethtool_link_ksettings *cmd)
4458
{
4459
	struct mvneta_port *pp = netdev_priv(ndev);
4460

R
Russell King 已提交
4461 4462
	return phylink_ethtool_ksettings_set(pp->phylink, cmd);
}
4463

R
Russell King 已提交
4464 4465 4466 4467 4468 4469
/* Get link ksettings for ethtools */
static int
mvneta_ethtool_get_link_ksettings(struct net_device *ndev,
				  struct ethtool_link_ksettings *cmd)
{
	struct mvneta_port *pp = netdev_priv(ndev);
4470

R
Russell King 已提交
4471 4472
	return phylink_ethtool_ksettings_get(pp->phylink, cmd);
}
4473

R
Russell King 已提交
4474 4475 4476
static int mvneta_ethtool_nway_reset(struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);
4477

R
Russell King 已提交
4478
	return phylink_ethtool_nway_reset(pp->phylink);
4479 4480 4481
}

/* Set interrupt coalescing for ethtools */
4482 4483 4484 4485 4486
static int
mvneta_ethtool_set_coalesce(struct net_device *dev,
			    struct ethtool_coalesce *c,
			    struct kernel_ethtool_coalesce *kernel_coal,
			    struct netlink_ext_ack *extack)
4487 4488 4489 4490 4491 4492 4493 4494 4495 4496 4497 4498 4499 4500 4501 4502 4503 4504 4505 4506 4507 4508
{
	struct mvneta_port *pp = netdev_priv(dev);
	int queue;

	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
		rxq->time_coal = c->rx_coalesce_usecs;
		rxq->pkts_coal = c->rx_max_coalesced_frames;
		mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
		mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
	}

	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];
		txq->done_pkts_coal = c->tx_max_coalesced_frames;
		mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
	}

	return 0;
}

/* get coalescing for ethtools */
4509 4510 4511 4512 4513
static int
mvneta_ethtool_get_coalesce(struct net_device *dev,
			    struct ethtool_coalesce *c,
			    struct kernel_ethtool_coalesce *kernel_coal,
			    struct netlink_ext_ack *extack)
4514 4515 4516 4517 4518 4519 4520 4521 4522 4523 4524 4525 4526 4527 4528 4529 4530 4531 4532 4533 4534 4535 4536
{
	struct mvneta_port *pp = netdev_priv(dev);

	c->rx_coalesce_usecs        = pp->rxqs[0].time_coal;
	c->rx_max_coalesced_frames  = pp->rxqs[0].pkts_coal;

	c->tx_max_coalesced_frames =  pp->txqs[0].done_pkts_coal;
	return 0;
}


/* ethtool get_drvinfo: report driver name, version and bus info. */
static void mvneta_ethtool_get_drvinfo(struct net_device *dev,
				    struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, MVNETA_DRIVER_NAME,
		sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, MVNETA_DRIVER_VERSION,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
		sizeof(drvinfo->bus_info));
}


4537 4538 4539 4540 4541
static void
mvneta_ethtool_get_ringparam(struct net_device *netdev,
			     struct ethtool_ringparam *ring,
			     struct kernel_ethtool_ringparam *kernel_ring,
			     struct netlink_ext_ack *extack)
4542 4543 4544 4545 4546 4547 4548 4549 4550
{
	struct mvneta_port *pp = netdev_priv(netdev);

	ring->rx_max_pending = MVNETA_MAX_RXD;
	ring->tx_max_pending = MVNETA_MAX_TXD;
	ring->rx_pending = pp->rx_ring_size;
	ring->tx_pending = pp->tx_ring_size;
}

4551 4552 4553 4554 4555
static int
mvneta_ethtool_set_ringparam(struct net_device *dev,
			     struct ethtool_ringparam *ring,
			     struct kernel_ethtool_ringparam *kernel_ring,
			     struct netlink_ext_ack *extack)
4556 4557 4558 4559 4560 4561 4562
{
	struct mvneta_port *pp = netdev_priv(dev);

	if ((ring->rx_pending == 0) || (ring->tx_pending == 0))
		return -EINVAL;
	pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ?
		ring->rx_pending : MVNETA_MAX_RXD;
4563 4564 4565 4566 4567 4568

	pp->tx_ring_size = clamp_t(u16, ring->tx_pending,
				   MVNETA_MAX_SKB_DESCS * 2, MVNETA_MAX_TXD);
	if (pp->tx_ring_size != ring->tx_pending)
		netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
			    pp->tx_ring_size, ring->tx_pending);
4569 4570 4571 4572 4573 4574 4575 4576 4577 4578 4579 4580 4581

	if (netif_running(dev)) {
		mvneta_stop(dev);
		if (mvneta_open(dev)) {
			netdev_err(dev,
				   "error on opening device after ring param change\n");
			return -ENOMEM;
		}
	}

	return 0;
}

4582 4583 4584 4585 4586 4587 4588 4589 4590 4591 4592 4593 4594 4595 4596 4597
/* ethtool get_pauseparam: delegate to phylink. */
static void mvneta_ethtool_get_pauseparam(struct net_device *dev,
					  struct ethtool_pauseparam *pause)
{
	struct mvneta_port *pp = netdev_priv(dev);

	phylink_ethtool_get_pauseparam(pp->phylink, pause);
}

/* ethtool set_pauseparam: delegate to phylink. */
static int mvneta_ethtool_set_pauseparam(struct net_device *dev,
					 struct ethtool_pauseparam *pause)
{
	struct mvneta_port *pp = netdev_priv(dev);

	return phylink_ethtool_set_pauseparam(pp->phylink, pause);
}

R
Russell King 已提交
4598 4599 4600 4601 4602 4603 4604 4605 4606 4607 4608 4609
/* ethtool get_strings: copy the statistic names for ETH_SS_STATS. */
static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
				       u8 *data)
{
	if (sset == ETH_SS_STATS) {
		int i;

		for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       mvneta_statistics[i].name, ETH_GSTRING_LEN);
	}
}

4610 4611 4612 4613 4614 4615 4616 4617 4618 4619 4620
/* Sum the per-CPU software statistics into *es. Each per-CPU snapshot is
 * read under the u64_stats seqcount so a consistent set of values is
 * accumulated.
 */
static void
mvneta_ethtool_update_pcpu_stats(struct mvneta_port *pp,
				 struct mvneta_ethtool_stats *es)
{
	unsigned int start;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct mvneta_pcpu_stats *stats;
		u64 skb_alloc_error;
		u64 refill_error;
		u64 xdp_redirect;
		u64 xdp_xmit_err;
		u64 xdp_tx_err;
		u64 xdp_pass;
		u64 xdp_drop;
		u64 xdp_xmit;
		u64 xdp_tx;

		stats = per_cpu_ptr(pp->stats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			skb_alloc_error = stats->es.skb_alloc_error;
			refill_error = stats->es.refill_error;
			xdp_redirect = stats->es.ps.xdp_redirect;
			xdp_pass = stats->es.ps.xdp_pass;
			xdp_drop = stats->es.ps.xdp_drop;
			xdp_xmit = stats->es.ps.xdp_xmit;
			xdp_xmit_err = stats->es.ps.xdp_xmit_err;
			xdp_tx = stats->es.ps.xdp_tx;
			xdp_tx_err = stats->es.ps.xdp_tx_err;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		es->skb_alloc_error += skb_alloc_error;
		es->refill_error += refill_error;
		es->ps.xdp_redirect += xdp_redirect;
		es->ps.xdp_pass += xdp_pass;
		es->ps.xdp_drop += xdp_drop;
		es->ps.xdp_xmit += xdp_xmit;
		es->ps.xdp_xmit_err += xdp_xmit_err;
		es->ps.xdp_tx += xdp_tx;
		es->ps.xdp_tx_err += xdp_tx_err;
	}
}

R
Russell King 已提交
4655 4656
static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
{
4657
	struct mvneta_ethtool_stats stats = {};
R
Russell King 已提交
4658 4659
	const struct mvneta_statistic *s;
	void __iomem *base = pp->base;
R
Russell King 已提交
4660 4661
	u32 high, low;
	u64 val;
R
Russell King 已提交
4662 4663
	int i;

4664
	mvneta_ethtool_update_pcpu_stats(pp, &stats);
R
Russell King 已提交
4665 4666 4667 4668 4669 4670
	for (i = 0, s = mvneta_statistics;
	     s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics);
	     s++, i++) {
		switch (s->type) {
		case T_REG_32:
			val = readl_relaxed(base + s->offset);
4671
			pp->ethtool_stats[i] += val;
R
Russell King 已提交
4672 4673 4674 4675 4676
			break;
		case T_REG_64:
			/* Docs say to read low 32-bit then high */
			low = readl_relaxed(base + s->offset);
			high = readl_relaxed(base + s->offset + 4);
R
Russell King 已提交
4677
			val = (u64)high << 32 | low;
4678
			pp->ethtool_stats[i] += val;
R
Russell King 已提交
4679 4680 4681 4682 4683
			break;
		case T_SW:
			switch (s->offset) {
			case ETHTOOL_STAT_EEE_WAKEUP:
				val = phylink_get_eee_err(pp->phylink);
4684
				pp->ethtool_stats[i] += val;
R
Russell King 已提交
4685
				break;
4686
			case ETHTOOL_STAT_SKB_ALLOC_ERR:
4687
				pp->ethtool_stats[i] = stats.skb_alloc_error;
4688 4689
				break;
			case ETHTOOL_STAT_REFILL_ERR:
4690
				pp->ethtool_stats[i] = stats.refill_error;
4691
				break;
4692 4693 4694 4695 4696 4697 4698 4699 4700 4701 4702 4703
			case ETHTOOL_XDP_REDIRECT:
				pp->ethtool_stats[i] = stats.ps.xdp_redirect;
				break;
			case ETHTOOL_XDP_PASS:
				pp->ethtool_stats[i] = stats.ps.xdp_pass;
				break;
			case ETHTOOL_XDP_DROP:
				pp->ethtool_stats[i] = stats.ps.xdp_drop;
				break;
			case ETHTOOL_XDP_TX:
				pp->ethtool_stats[i] = stats.ps.xdp_tx;
				break;
4704 4705 4706
			case ETHTOOL_XDP_TX_ERR:
				pp->ethtool_stats[i] = stats.ps.xdp_tx_err;
				break;
4707 4708 4709
			case ETHTOOL_XDP_XMIT:
				pp->ethtool_stats[i] = stats.ps.xdp_xmit;
				break;
4710 4711 4712
			case ETHTOOL_XDP_XMIT_ERR:
				pp->ethtool_stats[i] = stats.ps.xdp_xmit_err;
				break;
R
Russell King 已提交
4713
			}
R
Russell King 已提交
4714 4715 4716 4717 4718 4719 4720 4721 4722 4723 4724 4725 4726 4727 4728 4729 4730 4731 4732 4733 4734 4735 4736 4737
			break;
		}
	}
}

/* ethtool get_ethtool_stats: refresh and copy out the statistics. */
static void mvneta_ethtool_get_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int i;

	mvneta_ethtool_update_stats(pp);

	for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
		*data++ = pp->ethtool_stats[i];
}

/* ethtool get_sset_count: number of statistics for ETH_SS_STATS. */
static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset)
{
	if (sset == ETH_SS_STATS)
		return ARRAY_SIZE(mvneta_statistics);
	return -EOPNOTSUPP;
}

4738 4739 4740 4741 4742 4743 4744 4745 4746 4747 4748 4749 4750 4751 4752 4753 4754 4755 4756 4757 4758 4759 4760 4761 4762 4763 4764
/* ethtool: size of the RSS indirection table. */
static u32 mvneta_ethtool_get_rxfh_indir_size(struct net_device *dev)
{
	return MVNETA_RSS_LU_TABLE_SIZE;
}

/* ethtool get_rxnfc: only reporting the RX ring count is supported. */
static int mvneta_ethtool_get_rxnfc(struct net_device *dev,
				    struct ethtool_rxnfc *info,
				    u32 *rules __always_unused)
{
	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data =  rxq_number;
		return 0;
	case ETHTOOL_GRXFH:
		return -EOPNOTSUPP;
	default:
		return -EOPNOTSUPP;
	}
}

static int  mvneta_config_rss(struct mvneta_port *pp)
{
	int cpu;
	u32 val;

	netif_tx_stop_all_queues(pp->dev);

4765
	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
4766

4767 4768 4769 4770 4771
	if (!pp->neta_armada3700) {
		/* We have to synchronise on the napi of each CPU */
		for_each_online_cpu(cpu) {
			struct mvneta_pcpu_port *pcpu_port =
				per_cpu_ptr(pp->ports, cpu);
4772

4773 4774 4775 4776 4777 4778
			napi_synchronize(&pcpu_port->napi);
			napi_disable(&pcpu_port->napi);
		}
	} else {
		napi_synchronize(&pp->napi);
		napi_disable(&pp->napi);
4779 4780 4781 4782 4783 4784 4785 4786 4787 4788 4789 4790
	}

	pp->rxq_def = pp->indir[0];

	/* Update unicast mapping */
	mvneta_set_rx_mode(pp->dev);

	/* Update val of portCfg register accordingly with all RxQueue types */
	val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
	mvreg_write(pp, MVNETA_PORT_CONFIG, val);

	/* Update the elected CPU matching the new rxq_def */
4791
	spin_lock(&pp->lock);
4792
	mvneta_percpu_elect(pp);
4793
	spin_unlock(&pp->lock);
4794

4795 4796 4797 4798 4799
	if (!pp->neta_armada3700) {
		/* We have to synchronise on the napi of each CPU */
		for_each_online_cpu(cpu) {
			struct mvneta_pcpu_port *pcpu_port =
				per_cpu_ptr(pp->ports, cpu);
4800

4801 4802 4803 4804
			napi_enable(&pcpu_port->napi);
		}
	} else {
		napi_enable(&pp->napi);
4805 4806 4807 4808 4809 4810 4811 4812 4813 4814 4815
	}

	netif_tx_start_all_queues(pp->dev);

	return 0;
}

static int mvneta_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
				   const u8 *key, const u8 hfunc)
{
	struct mvneta_port *pp = netdev_priv(dev);
4816 4817 4818 4819 4820

	/* Current code for Armada 3700 doesn't support RSS features yet */
	if (pp->neta_armada3700)
		return -EOPNOTSUPP;

4821 4822 4823 4824 4825 4826 4827 4828 4829 4830 4831 4832 4833 4834 4835 4836 4837 4838 4839 4840
	/* We require at least one supported parameter to be changed
	 * and no change in any of the unsupported parameters
	 */
	if (key ||
	    (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
		return -EOPNOTSUPP;

	if (!indir)
		return 0;

	memcpy(pp->indir, indir, MVNETA_RSS_LU_TABLE_SIZE);

	return mvneta_config_rss(pp);
}

/* ethtool get_rxfh: report the indirection table and hash function.
 * Not supported on Armada 3700.
 */
static int mvneta_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
				   u8 *hfunc)
{
	struct mvneta_port *pp = netdev_priv(dev);

	/* Current code for Armada 3700 doesn't support RSS features yet */
	if (pp->neta_armada3700)
		return -EOPNOTSUPP;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	if (!indir)
		return 0;

	memcpy(indir, pp->indir, MVNETA_RSS_LU_TABLE_SIZE);

	return 0;
}

4856 4857 4858
static void mvneta_ethtool_get_wol(struct net_device *dev,
				   struct ethtool_wolinfo *wol)
{
R
Russell King 已提交
4859
	struct mvneta_port *pp = netdev_priv(dev);
4860

R
Russell King 已提交
4861
	phylink_ethtool_get_wol(pp->phylink, wol);
4862 4863 4864 4865 4866
}

/* ethtool .set_wol: configure Wake-on-LAN via the PHY (phylink).
 *
 * Only on success is the device marked as wakeup-capable/enabled in the
 * driver core, so the device model state stays in sync with the PHY.
 */
static int mvneta_ethtool_set_wol(struct net_device *dev,
				  struct ethtool_wolinfo *wol)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int ret;

	ret = phylink_ethtool_set_wol(pp->phylink, wol);
	if (!ret)
		device_set_wakeup_enable(&dev->dev, !!wol->wolopts);

	return ret;
}

/* ethtool .get_eee: report Energy Efficient Ethernet state.
 *
 * The enabled/active/LPI flags come from software state kept on the
 * port; the TX LPI timer is read back from the LPI control 0 register.
 */
static int mvneta_ethtool_get_eee(struct net_device *dev,
				  struct ethtool_eee *eee)
{
	struct mvneta_port *pp = netdev_priv(dev);
	u32 lpi_ctl0;

	lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);

	eee->eee_enabled = pp->eee_enabled;
	eee->eee_active = pp->eee_active;
	eee->tx_lpi_enabled = pp->tx_lpi_enabled;
	/* Timer occupies bits 15:8 of LPI_CTRL_0 (see set_eee).
	 * NOTE(review): value is reported raw, no unit scaling applied —
	 * confirm expected units against the Armada 37x datasheet.
	 */
	eee->tx_lpi_timer = (lpi_ctl0) >> 8; // * scale;

	return phylink_ethtool_get_eee(pp->phylink, eee);
}

static int mvneta_ethtool_set_eee(struct net_device *dev,
				  struct ethtool_eee *eee)
{
	struct mvneta_port *pp = netdev_priv(dev);
	u32 lpi_ctl0;

	/* The Armada 37x documents do not give limits for this other than
	 * it being an 8-bit register. */
4901
	if (eee->tx_lpi_enabled && eee->tx_lpi_timer > 255)
R
Russell King 已提交
4902 4903 4904 4905 4906 4907 4908 4909 4910 4911 4912 4913 4914 4915 4916
		return -EINVAL;

	lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
	lpi_ctl0 &= ~(0xff << 8);
	lpi_ctl0 |= eee->tx_lpi_timer << 8;
	mvreg_write(pp, MVNETA_LPI_CTRL_0, lpi_ctl0);

	pp->eee_enabled = eee->eee_enabled;
	pp->tx_lpi_enabled = eee->tx_lpi_enabled;

	mvneta_set_eee(pp, eee->tx_lpi_enabled && eee->eee_enabled);

	return phylink_ethtool_set_eee(pp->phylink, eee);
}

/* Standard network device operations implemented by this driver. */
static const struct net_device_ops mvneta_netdev_ops = {
	.ndo_open            = mvneta_open,
	.ndo_stop            = mvneta_stop,
	.ndo_start_xmit      = mvneta_tx,
	.ndo_set_rx_mode     = mvneta_set_rx_mode,
	.ndo_set_mac_address = mvneta_set_mac_addr,
	.ndo_change_mtu      = mvneta_change_mtu,
	.ndo_fix_features    = mvneta_fix_features,
	.ndo_get_stats64     = mvneta_get_stats64,
	.ndo_do_ioctl        = mvneta_ioctl,
	.ndo_bpf	     = mvneta_xdp,	/* XDP program attach/query */
	.ndo_xdp_xmit        = mvneta_xdp_xmit,	/* XDP redirect transmit */
};

/* ethtool operations table; link settings, WoL and EEE are largely
 * delegated to phylink by the respective handlers.
 */
static const struct ethtool_ops mvneta_eth_tool_ops = {
	/* Only RX usecs and max-frames coalescing knobs are supported */
	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES,
	.nway_reset	= mvneta_ethtool_nway_reset,
	.get_link       = ethtool_op_get_link,
	.set_coalesce   = mvneta_ethtool_set_coalesce,
	.get_coalesce   = mvneta_ethtool_get_coalesce,
	.get_drvinfo    = mvneta_ethtool_get_drvinfo,
	.get_ringparam  = mvneta_ethtool_get_ringparam,
	.set_ringparam	= mvneta_ethtool_set_ringparam,
	.get_pauseparam	= mvneta_ethtool_get_pauseparam,
	.set_pauseparam	= mvneta_ethtool_set_pauseparam,
	.get_strings	= mvneta_ethtool_get_strings,
	.get_ethtool_stats = mvneta_ethtool_get_stats,
	.get_sset_count	= mvneta_ethtool_get_sset_count,
	.get_rxfh_indir_size = mvneta_ethtool_get_rxfh_indir_size,
	.get_rxnfc	= mvneta_ethtool_get_rxnfc,
	.get_rxfh	= mvneta_ethtool_get_rxfh,
	.set_rxfh	= mvneta_ethtool_set_rxfh,
	.get_link_ksettings = mvneta_ethtool_get_link_ksettings,
	.set_link_ksettings = mvneta_ethtool_set_link_ksettings,
	.get_wol        = mvneta_ethtool_get_wol,
	.set_wol        = mvneta_ethtool_set_wol,
	.get_eee	= mvneta_ethtool_get_eee,
	.set_eee	= mvneta_ethtool_set_eee,
};

/* Initialize hw */
4959
static int mvneta_init(struct device *dev, struct mvneta_port *pp)
4960 4961 4962 4963 4964 4965 4966 4967 4968
{
	int queue;

	/* Disable port */
	mvneta_port_disable(pp);

	/* Set port default values */
	mvneta_defaults_set(pp);

4969
	pp->txqs = devm_kcalloc(dev, txq_number, sizeof(*pp->txqs), GFP_KERNEL);
4970 4971 4972 4973 4974 4975 4976 4977 4978 4979 4980
	if (!pp->txqs)
		return -ENOMEM;

	/* Initialize TX descriptor rings */
	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];
		txq->id = queue;
		txq->size = pp->tx_ring_size;
		txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS;
	}

4981
	pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*pp->rxqs), GFP_KERNEL);
4982
	if (!pp->rxqs)
4983 4984 4985 4986 4987 4988 4989 4990 4991
		return -ENOMEM;

	/* Create Rx descriptor rings */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
		rxq->id = queue;
		rxq->size = pp->rx_ring_size;
		rxq->pkts_coal = MVNETA_RX_COAL_PKTS;
		rxq->time_coal = MVNETA_RX_COAL_USEC;
4992 4993 4994 4995 4996
		rxq->buf_virt_addr
			= devm_kmalloc_array(pp->dev->dev.parent,
					     rxq->size,
					     sizeof(*rxq->buf_virt_addr),
					     GFP_KERNEL);
4997 4998
		if (!rxq->buf_virt_addr)
			return -ENOMEM;
4999 5000 5001 5002 5003 5004
	}

	return 0;
}

/* platform glue : initialize decoding windows
 *
 * Clears all six MBUS address-decoding windows, then either programs one
 * window per DRAM chip-select (normal Armada XP/370 case, @dram != NULL)
 * or opens a single default 4GB window (Armada 3700, @dram == NULL).
 * Finally commits the enable and protection masks.
 */
static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
				     const struct mbus_dram_target_info *dram)
{
	u32 win_enable;
	u32 win_protect;
	int i;

	/* Reset all windows; only the first four have remap registers */
	for (i = 0; i < 6; i++) {
		mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
		mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);

		if (i < 4)
			mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
	}

	/* Start with all windows disabled (bits set = disabled) and no
	 * access protection granted.
	 */
	win_enable = 0x3f;
	win_protect = 0;

	if (dram) {
		for (i = 0; i < dram->num_cs; i++) {
			const struct mbus_dram_window *cs = dram->cs + i;

			/* Base address plus target/attribute routing */
			mvreg_write(pp, MVNETA_WIN_BASE(i),
				    (cs->base & 0xffff0000) |
				    (cs->mbus_attr << 8) |
				    dram->mbus_dram_target_id);

			mvreg_write(pp, MVNETA_WIN_SIZE(i),
				    (cs->size - 1) & 0xffff0000);

			/* Enable this window and grant full (RW) access */
			win_enable &= ~(1 << i);
			win_protect |= 3 << (2 * i);
		}
	} else {
		/* For Armada3700 open default 4GB Mbus window, leaving
		 * arbitration of target/attribute to a different layer
		 * of configuration.
		 */
		mvreg_write(pp, MVNETA_WIN_SIZE(0), 0xffff0000);
		win_enable &= ~BIT(0);
		win_protect = 3;
	}

	mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
	mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);
}

/* Power up the port */
5053
static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
5054 5055 5056 5057
{
	/* MAC Cause register should be cleared */
	mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);

5058 5059 5060 5061
	if (phy_mode != PHY_INTERFACE_MODE_QSGMII &&
	    phy_mode != PHY_INTERFACE_MODE_SGMII &&
	    !phy_interface_mode_is_8023z(phy_mode) &&
	    !phy_interface_mode_is_rgmii(phy_mode))
5062 5063 5064
		return -EINVAL;

	return 0;
5065 5066 5067
}

/* Device initialization routine
 *
 * Probes one NETA port: allocates the net_device, maps the IRQ and
 * registers, creates the phylink instance, enables clocks, allocates
 * per-CPU structures, resolves the MAC address (DT, then hardware, then
 * random), optionally attaches to the buffer manager (BM), initializes
 * queues, sets up NAPI and features, and registers the netdev.
 * The error labels unwind in strict reverse order of acquisition.
 */
static int mvneta_probe(struct platform_device *pdev)
{
	struct device_node *dn = pdev->dev.of_node;
	struct device_node *bm_node;
	struct mvneta_port *pp;
	struct net_device *dev;
	struct phylink *phylink;
	struct phy *comphy;
	const char *dt_mac_addr;
	char hw_mac_addr[ETH_ALEN];
	phy_interface_t phy_mode;
	const char *mac_from;
	int tx_csum_limit;
	int err;
	int cpu;

	/* devm-managed netdev: freed automatically on probe failure */
	dev = devm_alloc_etherdev_mqs(&pdev->dev, sizeof(struct mvneta_port),
				      txq_number, rxq_number);
	if (!dev)
		return -ENOMEM;

	dev->irq = irq_of_parse_and_map(dn, 0);
	if (dev->irq == 0)
		return -EINVAL;

	err = of_get_phy_mode(dn, &phy_mode);
	if (err) {
		dev_err(&pdev->dev, "incorrect phy-mode\n");
		goto err_free_irq;
	}

	/* The comphy (SerDes) is optional; only defer on -EPROBE_DEFER */
	comphy = devm_of_phy_get(&pdev->dev, dn, NULL);
	if (comphy == ERR_PTR(-EPROBE_DEFER)) {
		err = -EPROBE_DEFER;
		goto err_free_irq;
	} else if (IS_ERR(comphy)) {
		comphy = NULL;
	}

	pp = netdev_priv(dev);
	spin_lock_init(&pp->lock);

	pp->phylink_config.dev = &dev->dev;
	pp->phylink_config.type = PHYLINK_NETDEV;

	phylink = phylink_create(&pp->phylink_config, pdev->dev.fwnode,
				 phy_mode, &mvneta_phylink_ops);
	if (IS_ERR(phylink)) {
		err = PTR_ERR(phylink);
		goto err_free_irq;
	}

	dev->tx_queue_len = MVNETA_MAX_TXD;
	dev->watchdog_timeo = 5 * HZ;
	dev->netdev_ops = &mvneta_netdev_ops;

	dev->ethtool_ops = &mvneta_eth_tool_ops;

	pp->phylink = phylink;
	pp->comphy = comphy;
	pp->phy_interface = phy_mode;
	pp->dn = dn;

	/* Default RX queue, also seeds the RSS indirection table */
	pp->rxq_def = rxq_def;
	pp->indir[0] = rxq_def;

	/* Get special SoC configurations */
	if (of_device_is_compatible(dn, "marvell,armada-3700-neta"))
		pp->neta_armada3700 = true;

	/* "core" clock preferred; fall back to the sole unnamed clock */
	pp->clk = devm_clk_get(&pdev->dev, "core");
	if (IS_ERR(pp->clk))
		pp->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(pp->clk)) {
		err = PTR_ERR(pp->clk);
		goto err_free_phylink;
	}

	clk_prepare_enable(pp->clk);

	/* Optional bus clock */
	pp->clk_bus = devm_clk_get(&pdev->dev, "bus");
	if (!IS_ERR(pp->clk_bus))
		clk_prepare_enable(pp->clk_bus);

	pp->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(pp->base)) {
		err = PTR_ERR(pp->base);
		goto err_clk;
	}

	/* Alloc per-cpu port structure */
	pp->ports = alloc_percpu(struct mvneta_pcpu_port);
	if (!pp->ports) {
		err = -ENOMEM;
		goto err_clk;
	}

	/* Alloc per-cpu stats */
	pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats);
	if (!pp->stats) {
		err = -ENOMEM;
		goto err_free_ports;
	}

	/* MAC address priority: device tree, then hardware, then random */
	dt_mac_addr = of_get_mac_address(dn);
	if (!IS_ERR(dt_mac_addr)) {
		mac_from = "device tree";
		ether_addr_copy(dev->dev_addr, dt_mac_addr);
	} else {
		mvneta_get_mac_addr(pp, hw_mac_addr);
		if (is_valid_ether_addr(hw_mac_addr)) {
			mac_from = "hardware";
			memcpy(dev->dev_addr, hw_mac_addr, ETH_ALEN);
		} else {
			mac_from = "random";
			eth_hw_addr_random(dev);
		}
	}

	/* TX checksum-offload size limit: DT override (range-checked),
	 * else a conservative default on Armada 370, else the maximum.
	 */
	if (!of_property_read_u32(dn, "tx-csum-limit", &tx_csum_limit)) {
		if (tx_csum_limit < 0 ||
		    tx_csum_limit > MVNETA_TX_CSUM_MAX_SIZE) {
			tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
			dev_info(&pdev->dev,
				 "Wrong TX csum limit in DT, set to %dB\n",
				 MVNETA_TX_CSUM_DEF_SIZE);
		}
	} else if (of_device_is_compatible(dn, "marvell,armada-370-neta")) {
		tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
	} else {
		tx_csum_limit = MVNETA_TX_CSUM_MAX_SIZE;
	}

	pp->tx_csum_limit = tx_csum_limit;

	pp->dram_target_info = mv_mbus_dram_info();
	/* Armada3700 requires setting default configuration of Mbus
	 * windows, however without using filled mbus_dram_target_info
	 * structure.
	 */
	if (pp->dram_target_info || pp->neta_armada3700)
		mvneta_conf_mbus_windows(pp, pp->dram_target_info);

	pp->tx_ring_size = MVNETA_MAX_TXD;
	pp->rx_ring_size = MVNETA_MAX_RXD;

	pp->dev = dev;
	SET_NETDEV_DEV(dev, &pdev->dev);

	pp->id = global_port_id++;

	/* Obtain access to BM resources if enabled and already initialized */
	bm_node = of_parse_phandle(dn, "buffer-manager", 0);
	if (bm_node) {
		pp->bm_priv = mvneta_bm_get(bm_node);
		if (pp->bm_priv) {
			err = mvneta_bm_port_init(pdev, pp);
			if (err < 0) {
				dev_info(&pdev->dev,
					 "use SW buffer management\n");
				mvneta_bm_put(pp->bm_priv);
				pp->bm_priv = NULL;
			}
		}
		/* Set RX packet offset correction for platforms, whose
		 * NET_SKB_PAD, exceeds 64B. It should be 64B for 64-bit
		 * platforms and 0B for 32-bit ones.
		 */
		pp->rx_offset_correction = max(0,
					       NET_SKB_PAD -
					       MVNETA_RX_PKT_OFFSET_CORRECTION);
	}
	of_node_put(bm_node);

	/* sw buffer management */
	if (!pp->bm_priv)
		pp->rx_offset_correction = MVNETA_SKB_HEADROOM;

	err = mvneta_init(&pdev->dev, pp);
	if (err < 0)
		goto err_netdev;

	err = mvneta_port_power_up(pp, pp->phy_interface);
	if (err < 0) {
		dev_err(&pdev->dev, "can't power up port\n");
		goto err_netdev;
	}

	/* Armada3700 network controller does not support per-cpu
	 * operation, so only single NAPI should be initialized.
	 */
	if (pp->neta_armada3700) {
		netif_napi_add(dev, &pp->napi, mvneta_poll, NAPI_POLL_WEIGHT);
	} else {
		for_each_present_cpu(cpu) {
			struct mvneta_pcpu_port *port =
				per_cpu_ptr(pp->ports, cpu);

			netif_napi_add(dev, &port->napi, mvneta_poll,
				       NAPI_POLL_WEIGHT);
			port->pp = pp;
		}
	}

	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_TSO | NETIF_F_RXCSUM;
	dev->hw_features |= dev->features;
	dev->vlan_features |= dev->features;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	dev->gso_max_segs = MVNETA_MAX_TSO_SEGS;

	/* MTU range: 68 - 9676 */
	dev->min_mtu = ETH_MIN_MTU;
	/* 9676 == 9700 - 20 and rounding to 8 */
	dev->max_mtu = 9676;

	err = register_netdev(dev);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register\n");
		goto err_netdev;
	}

	netdev_info(dev, "Using %s mac address %pM\n", mac_from,
		    dev->dev_addr);

	platform_set_drvdata(pdev, pp->dev);

	return 0;

/* Unwind in reverse order of acquisition */
err_netdev:
	if (pp->bm_priv) {
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
				       1 << pp->id);
		mvneta_bm_put(pp->bm_priv);
	}
	free_percpu(pp->stats);
err_free_ports:
	free_percpu(pp->ports);
err_clk:
	clk_disable_unprepare(pp->clk_bus);
	clk_disable_unprepare(pp->clk);
err_free_phylink:
	if (pp->phylink)
		phylink_destroy(pp->phylink);
err_free_irq:
	irq_dispose_mapping(dev->irq);
	return err;
}

/* Device removal routine
 *
 * Tears down everything mvneta_probe() set up, starting with netdev
 * unregistration (stops traffic) and ending with the BM pools.
 */
static int mvneta_remove(struct platform_device *pdev)
{
	struct net_device  *dev = platform_get_drvdata(pdev);
	struct mvneta_port *pp = netdev_priv(dev);

	unregister_netdev(dev);
	clk_disable_unprepare(pp->clk_bus);
	clk_disable_unprepare(pp->clk);
	free_percpu(pp->ports);
	free_percpu(pp->stats);
	irq_dispose_mapping(dev->irq);
	phylink_destroy(pp->phylink);

	/* Release buffer-manager pools if HW buffer management was used */
	if (pp->bm_priv) {
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
				       1 << pp->id);
		mvneta_bm_put(pp->bm_priv);
	}

	return 0;
}

#ifdef CONFIG_PM_SLEEP
/* System suspend: quiesce the port and gate its clocks.
 *
 * If the interface is running, first remove the CPU-hotplug instances
 * (non-Armada3700 only) so no hotplug callback races the shutdown, then
 * stop the device under rtnl and drain/deinit the queues. In all cases
 * the netdev is detached and both clocks are disabled.
 */
static int mvneta_suspend(struct device *device)
{
	int queue;
	struct net_device *dev = dev_get_drvdata(device);
	struct mvneta_port *pp = netdev_priv(dev);

	if (!netif_running(dev))
		goto clean_exit;

	if (!pp->neta_armada3700) {
		/* is_stopped gates mvneta_percpu_elect against hotplug */
		spin_lock(&pp->lock);
		pp->is_stopped = true;
		spin_unlock(&pp->lock);

		cpuhp_state_remove_instance_nocalls(online_hpstate,
						    &pp->node_online);
		cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
						    &pp->node_dead);
	}

	rtnl_lock();
	mvneta_stop_dev(pp);
	rtnl_unlock();

	/* Drop any buffers still sitting in the RX rings */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];

		mvneta_rxq_drop_pkts(pp, rxq);
	}

	/* Deinitialize the TX queue hardware state */
	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];

		mvneta_txq_hw_deinit(pp, txq);
	}

clean_exit:
	netif_device_detach(dev);
	clk_disable_unprepare(pp->clk_bus);
	clk_disable_unprepare(pp->clk);

	return 0;
}

/* System resume: mirror of mvneta_suspend().
 *
 * Re-enables clocks, reprograms MBUS windows, re-attaches the buffer
 * manager (falling back to SW buffer management on failure), reapplies
 * port defaults and power-up, then — if the interface was running —
 * re-initializes the queues, re-adds the hotplug instances and restarts
 * the device under rtnl.
 */
static int mvneta_resume(struct device *device)
{
	struct platform_device *pdev = to_platform_device(device);
	struct net_device *dev = dev_get_drvdata(device);
	struct mvneta_port *pp = netdev_priv(dev);
	int err, queue;

	clk_prepare_enable(pp->clk);
	if (!IS_ERR(pp->clk_bus))
		clk_prepare_enable(pp->clk_bus);
	if (pp->dram_target_info || pp->neta_armada3700)
		mvneta_conf_mbus_windows(pp, pp->dram_target_info);
	if (pp->bm_priv) {
		err = mvneta_bm_port_init(pdev, pp);
		if (err < 0) {
			/* Fall back to software buffer management */
			dev_info(&pdev->dev, "use SW buffer management\n");
			pp->rx_offset_correction = MVNETA_SKB_HEADROOM;
			pp->bm_priv = NULL;
		}
	}
	mvneta_defaults_set(pp);
	err = mvneta_port_power_up(pp, pp->phy_interface);
	if (err < 0) {
		dev_err(device, "can't power up port\n");
		return err;
	}

	netif_device_attach(dev);

	if (!netif_running(dev))
		return 0;

	/* Re-create RX/TX ring hardware state from scratch */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];

		rxq->next_desc_to_proc = 0;
		mvneta_rxq_hw_init(pp, rxq);
	}

	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];

		txq->next_desc_to_proc = 0;
		mvneta_txq_hw_init(pp, txq);
	}

	if (!pp->neta_armada3700) {
		spin_lock(&pp->lock);
		pp->is_stopped = false;
		spin_unlock(&pp->lock);
		cpuhp_state_add_instance_nocalls(online_hpstate,
						 &pp->node_online);
		cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
						 &pp->node_dead);
	}

	rtnl_lock();
	mvneta_start_dev(pp);
	rtnl_unlock();
	mvneta_set_rx_mode(dev);

	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(mvneta_pm_ops, mvneta_suspend, mvneta_resume);

/* Devicetree compatible strings handled by this driver. */
static const struct of_device_id mvneta_match[] = {
	{ .compatible = "marvell,armada-370-neta" },
	{ .compatible = "marvell,armada-xp-neta" },
	{ .compatible = "marvell,armada-3700-neta" },
	{ }
};
MODULE_DEVICE_TABLE(of, mvneta_match);

/* Platform driver glue: probe/remove plus PM hooks. */
static struct platform_driver mvneta_driver = {
	.probe = mvneta_probe,
	.remove = mvneta_remove,
	.driver = {
		.name = MVNETA_DRIVER_NAME,
		.of_match_table = mvneta_match,
		.pm = &mvneta_pm_ops,	/* suspend/resume (CONFIG_PM_SLEEP) */
	},
};

/* Module init: register the two CPU-hotplug multi-states used for
 * per-CPU queue management, then the platform driver. Error paths
 * unwind the hotplug states in reverse order of registration.
 */
static int __init mvneta_driver_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "net/mvneta:online",
				      mvneta_cpu_online,
				      mvneta_cpu_down_prepare);
	if (ret < 0)
		goto out;
	/* Dynamic state: remember the allocated slot for later removal */
	online_hpstate = ret;
	ret = cpuhp_setup_state_multi(CPUHP_NET_MVNETA_DEAD, "net/mvneta:dead",
				      NULL, mvneta_cpu_dead);
	if (ret)
		goto err_dead;

	ret = platform_driver_register(&mvneta_driver);
	if (ret)
		goto err;
	return 0;

err:
	cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD);
err_dead:
	cpuhp_remove_multi_state(online_hpstate);
out:
	return ret;
}
module_init(mvneta_driver_init);

/* Module exit: unregister the driver first, then remove the hotplug
 * states in reverse order of their registration in mvneta_driver_init().
 */
static void __exit mvneta_driver_exit(void)
{
	platform_driver_unregister(&mvneta_driver);
	cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD);
	cpuhp_remove_multi_state(online_hpstate);
}
module_exit(mvneta_driver_exit);

/* Module metadata */
MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
MODULE_LICENSE("GPL");

/* Module parameters: queue counts and the default RX queue are
 * read-only after load (0444); rx_copybreak is tunable at runtime (0644).
 */
module_param(rxq_number, int, 0444);
module_param(txq_number, int, 0444);

module_param(rxq_def, int, 0444);
module_param(rx_copybreak, int, 0644);