/*******************************************************************************
  This contains the functions to handle the enhanced descriptors.

  Copyright (C) 2007-2014  STMicroelectronics Ltd

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/

#include <linux/stmmac.h>
#include "common.h"
#include "descs_com.h"

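/* Parse TDES0 of a closed descriptor and update the tx error counters.
 * The frame status is only valid on the last segment, so earlier
 * segments simply report tx_not_ls.
 */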
static int enh_desc_get_tx_status(void *data, struct stmmac_extra_stats *x,
				  struct dma_desc *p, void __iomem *ioaddr)
{
	struct net_device_stats *stats = (struct net_device_stats *)data;
	unsigned int tdes0 = le32_to_cpu(p->des0);
	int ret = tx_done;

	/* Get tx owner first */
	if (unlikely(tdes0 & ETDES0_OWN))
		return tx_dma_own;

	/* Verify tx error by looking at the last segment. */
	if (likely(!(tdes0 & ETDES0_LAST_SEGMENT)))
		return tx_not_ls;

	if (unlikely(tdes0 & ETDES0_ERROR_SUMMARY)) {
		if (unlikely(tdes0 & ETDES0_JABBER_TIMEOUT))
			x->tx_jabber++;

		if (unlikely(tdes0 & ETDES0_FRAME_FLUSHED)) {
			x->tx_frame_flushed++;
			dwmac_dma_flush_tx_fifo(ioaddr);
		}

		if (unlikely(tdes0 & ETDES0_LOSS_CARRIER)) {
			x->tx_losscarrier++;
			stats->tx_carrier_errors++;
		}
		if (unlikely(tdes0 & ETDES0_NO_CARRIER)) {
			x->tx_carrier++;
			stats->tx_carrier_errors++;
		}
		if (unlikely((tdes0 & ETDES0_LATE_COLLISION) ||
			     (tdes0 & ETDES0_EXCESSIVE_COLLISIONS)))
			stats->collisions +=
				(tdes0 & ETDES0_COLLISION_COUNT_MASK) >> 3;

		if (unlikely(tdes0 & ETDES0_EXCESSIVE_DEFERRAL))
			x->tx_deferred++;

		if (unlikely(tdes0 & ETDES0_UNDERFLOW_ERROR)) {
			dwmac_dma_flush_tx_fifo(ioaddr);
			x->tx_underflow++;
		}

		if (unlikely(tdes0 & ETDES0_IP_HEADER_ERROR))
			x->tx_ip_header_error++;

		if (unlikely(tdes0 & ETDES0_PAYLOAD_ERROR)) {
			x->tx_payload_error++;
			dwmac_dma_flush_tx_fifo(ioaddr);
		}

		ret = tx_err;
	}

	if (unlikely(tdes0 & ETDES0_DEFERRED))
		x->tx_deferred++;

#ifdef STMMAC_VLAN_TAG_USED
	if (tdes0 & ETDES0_VLAN_FRAME)
		x->tx_vlan++;
#endif

	return ret;
}

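/* Return the buffer 1 size programmed into TDES1. */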
static int enh_desc_get_tx_len(struct dma_desc *p)
{
	return (le32_to_cpu(p->des1) & ETDES1_BUFFER1_SIZE_MASK);
}

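/* Combine the frame type, IP header error and payload error bits of
 * RDES0 and map them onto the checksum result reported to the core.
 */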
static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err)
{
	int ret = good_frame;
	u32 status = (type << 2 | ipc_err << 1 | payload_err) & 0x7;

	/* bits 5 7 0 | Frame status
	 * ----------------------------------------------------------
	 *      0 0 0 | IEEE 802.3 Type frame (length < 1536 octets)
	 *      1 0 0 | IPv4/6 No CSUM errors.
	 *      1 0 1 | IPv4/6 CSUM PAYLOAD error
	 *      1 1 0 | IPv4/6 CSUM IP HEADER error
	 *      1 1 1 | IPv4/6 IP PAYLOAD AND HEADER errors
	 *      0 0 1 | IPv4/6 unsupported IP PAYLOAD
	 *      0 1 1 | COE bypassed: not an IPv4/6 frame
	 *      0 1 0 | Reserved.
	 */
	if (status == 0x0)
		ret = llc_snap;
	else if (status == 0x4)
		ret = good_frame;
	else if (status == 0x5)
		ret = csum_none;
	else if (status == 0x6)
		ret = csum_none;
	else if (status == 0x7)
		ret = csum_none;
	else if (status == 0x1)
		ret = discard_frame;
	else if (status == 0x3)
		ret = discard_frame;
	return ret;
}

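/* Walk the extended status word (RDES4) and account the IP, PTP message
 * type and L3/L4 filter results when RDES0 flags it as valid.
 */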
static void enh_desc_get_ext_status(void *data, struct stmmac_extra_stats *x,
				    struct dma_extended_desc *p)
{
	unsigned int rdes0 = le32_to_cpu(p->basic.des0);
	unsigned int rdes4 = le32_to_cpu(p->des4);

	if (unlikely(rdes0 & ERDES0_RX_MAC_ADDR)) {
		int message_type = (rdes4 & ERDES4_MSG_TYPE_MASK) >> 8;

		if (rdes4 & ERDES4_IP_HDR_ERR)
			x->ip_hdr_err++;
		if (rdes4 & ERDES4_IP_PAYLOAD_ERR)
			x->ip_payload_err++;
		if (rdes4 & ERDES4_IP_CSUM_BYPASSED)
			x->ip_csum_bypassed++;
		if (rdes4 & ERDES4_IPV4_PKT_RCVD)
			x->ipv4_pkt_rcvd++;
		if (rdes4 & ERDES4_IPV6_PKT_RCVD)
			x->ipv6_pkt_rcvd++;

		if (message_type == RDES_EXT_NO_PTP)
			x->no_ptp_rx_msg_type_ext++;
		else if (message_type == RDES_EXT_SYNC)
			x->ptp_rx_msg_type_sync++;
		else if (message_type == RDES_EXT_FOLLOW_UP)
			x->ptp_rx_msg_type_follow_up++;
		else if (message_type == RDES_EXT_DELAY_REQ)
			x->ptp_rx_msg_type_delay_req++;
		else if (message_type == RDES_EXT_DELAY_RESP)
			x->ptp_rx_msg_type_delay_resp++;
		else if (message_type == RDES_EXT_PDELAY_REQ)
			x->ptp_rx_msg_type_pdelay_req++;
		else if (message_type == RDES_EXT_PDELAY_RESP)
			x->ptp_rx_msg_type_pdelay_resp++;
		else if (message_type == RDES_EXT_PDELAY_FOLLOW_UP)
			x->ptp_rx_msg_type_pdelay_follow_up++;
		else if (message_type == RDES_PTP_ANNOUNCE)
			x->ptp_rx_msg_type_announce++;
		else if (message_type == RDES_PTP_MANAGEMENT)
			x->ptp_rx_msg_type_management++;
		else if (message_type == RDES_PTP_PKT_RESERVED_TYPE)
			x->ptp_rx_msg_pkt_reserved_type++;

		if (rdes4 & ERDES4_PTP_FRAME_TYPE)
			x->ptp_frame_type++;
		if (rdes4 & ERDES4_PTP_VER)
			x->ptp_ver++;
		if (rdes4 & ERDES4_TIMESTAMP_DROPPED)
			x->timestamp_dropped++;
		if (rdes4 & ERDES4_AV_PKT_RCVD)
			x->av_pkt_rcvd++;
		if (rdes4 & ERDES4_AV_TAGGED_PKT_RCVD)
			x->av_tagged_pkt_rcvd++;
		if ((rdes4 & ERDES4_VLAN_TAG_PRI_VAL_MASK) >> 18)
			x->vlan_tag_priority_val++;
		if (rdes4 & ERDES4_L3_FILTER_MATCH)
			x->l3_filter_match++;
		if (rdes4 & ERDES4_L4_FILTER_MATCH)
			x->l4_filter_match++;
		if ((rdes4 & ERDES4_L3_L4_FILT_NO_MATCH_MASK) >> 26)
			x->l3_l4_filter_no_match++;
	}
}

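/* Decode RDES0 for a received frame: ownership, error summary,
 * source/destination filter failures and the checksum offload outcome.
 */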
static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
				  struct dma_desc *p)
{
	struct net_device_stats *stats = (struct net_device_stats *)data;
	unsigned int rdes0 = le32_to_cpu(p->des0);
	int ret = good_frame;

	if (unlikely(rdes0 & RDES0_OWN))
		return dma_own;

	if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) {
		stats->rx_length_errors++;
		return discard_frame;
	}

	if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) {
		if (unlikely(rdes0 & RDES0_DESCRIPTOR_ERROR)) {
			x->rx_desc++;
			stats->rx_length_errors++;
		}
		if (unlikely(rdes0 & RDES0_OVERFLOW_ERROR))
			x->rx_gmac_overflow++;

		if (unlikely(rdes0 & RDES0_IPC_CSUM_ERROR))
			pr_err("\tIPC Csum Error/Giant frame\n");

		if (unlikely(rdes0 & RDES0_COLLISION))
			stats->collisions++;
		if (unlikely(rdes0 & RDES0_RECEIVE_WATCHDOG))
			x->rx_watchdog++;

		if (unlikely(rdes0 & RDES0_MII_ERROR))	/* GMII */
			x->rx_mii++;

		if (unlikely(rdes0 & RDES0_CRC_ERROR)) {
			x->rx_crc_errors++;
			stats->rx_crc_errors++;
		}
		ret = discard_frame;
	}

	/* After a payload csum error, the ES bit is set.
	 * This does not match the behaviour described in the databook.
	 * In any case, we need to check whether the HW csum computation
	 * succeeded and report that to the upper layers.
	 */
	if (likely(ret == good_frame))
		ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR),
					 !!(rdes0 & RDES0_FRAME_TYPE),
					 !!(rdes0 & ERDES0_RX_MAC_ADDR));

	if (unlikely(rdes0 & RDES0_DRIBBLING))
		x->dribbling_bit++;

	if (unlikely(rdes0 & RDES0_SA_FILTER_FAIL)) {
		x->sa_rx_filter_fail++;
		ret = discard_frame;
	}
	if (unlikely(rdes0 & RDES0_DA_FILTER_FAIL)) {
		x->da_rx_filter_fail++;
		ret = discard_frame;
	}
	if (unlikely(rdes0 & RDES0_LENGTH_ERROR)) {
		x->rx_length++;
		ret = discard_frame;
	}
#ifdef STMMAC_VLAN_TAG_USED
	if (rdes0 & RDES0_VLAN_TAG)
		x->rx_vlan++;
#endif

	return ret;
}

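/* Prepare a receive descriptor for the DMA: set the OWN bit, program
 * the buffer 1 size (each buffer is capped at 8 KiB) and apply the
 * ring/chain layout.
 */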
static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
				  int mode, int end, int bfsize)
{
	int bfsize1;

	p->des0 |= cpu_to_le32(RDES0_OWN);

	bfsize1 = min(bfsize, BUF_SIZE_8KiB);
	p->des1 |= cpu_to_le32(bfsize1 & ERDES1_BUFFER1_SIZE_MASK);

	if (mode == STMMAC_CHAIN_MODE)
		ehn_desc_rx_set_on_chain(p);
	else
		ehn_desc_rx_set_on_ring(p, end, bfsize);

	if (disable_rx_ic)
		p->des1 |= cpu_to_le32(ERDES1_DISABLE_IC);
}

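/* Initialise a transmit descriptor: clear the OWN bit and apply the
 * ring/chain layout.
 */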
static void enh_desc_init_tx_desc(struct dma_desc *p, int mode, int end)
{
	p->des0 &= cpu_to_le32(~ETDES0_OWN);
	if (mode == STMMAC_CHAIN_MODE)
		enh_desc_end_tx_desc_on_chain(p);
	else
		enh_desc_end_tx_desc_on_ring(p, end);
}

static int enh_desc_get_tx_owner(struct dma_desc *p)
{
	return (le32_to_cpu(p->des0) & ETDES0_OWN) >> 31;
}

static void enh_desc_set_tx_owner(struct dma_desc *p)
{
	p->des0 |= cpu_to_le32(ETDES0_OWN);
}

static void enh_desc_set_rx_owner(struct dma_desc *p, int disable_rx_ic)
{
	p->des0 |= cpu_to_le32(RDES0_OWN);
}

static int enh_desc_get_tx_ls(struct dma_desc *p)
{
	return (le32_to_cpu(p->des0) & ETDES0_LAST_SEGMENT) >> 29;
}

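/* Recycle a transmitted descriptor: preserve the end-of-ring bit while
 * wiping the control/status words, then restore the ring/chain layout.
 */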
static void enh_desc_release_tx_desc(struct dma_desc *p, int mode)
{
	int ter = (le32_to_cpu(p->des0) & ETDES0_END_RING) >> 21;

	memset(p, 0, offsetof(struct dma_desc, des2));
	if (mode == STMMAC_CHAIN_MODE)
		enh_desc_end_tx_desc_on_chain(p);
	else
		enh_desc_end_tx_desc_on_ring(p, ter);
}

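/* Fill a transmit descriptor from the segment parameters. The OWN bit
 * is written last so the DMA never sees a half-initialised descriptor;
 * for the first segment a barrier orders the writes.
 */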
static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
				     bool csum_flag, int mode, bool tx_own,
				     bool ls, unsigned int tot_pkt_len)
{
	unsigned int tdes0 = le32_to_cpu(p->des0);

	if (mode == STMMAC_CHAIN_MODE)
		enh_set_tx_desc_len_on_chain(p, len);
	else
		enh_set_tx_desc_len_on_ring(p, len);

	if (is_fs)
		tdes0 |= ETDES0_FIRST_SEGMENT;
	else
		tdes0 &= ~ETDES0_FIRST_SEGMENT;

	if (likely(csum_flag))
		tdes0 |= (TX_CIC_FULL << ETDES0_CHECKSUM_INSERTION_SHIFT);
	else
		tdes0 &= ~(TX_CIC_FULL << ETDES0_CHECKSUM_INSERTION_SHIFT);

	if (ls)
		tdes0 |= ETDES0_LAST_SEGMENT;

	/* Finally set the OWN bit. Later the DMA will start! */
	if (tx_own)
		tdes0 |= ETDES0_OWN;

	if (is_fs && tx_own)
		/* When the own bit has to be set on the first descriptor of
		 * a frame, all the other descriptors of that frame must be
		 * written first, to avoid a race condition.
		 */
		dma_wmb();

	p->des0 = cpu_to_le32(tdes0);
}

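/* Request a transmit-complete interrupt for this descriptor. */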
static void enh_desc_set_tx_ic(struct dma_desc *p)
{
	p->des0 |= cpu_to_le32(ETDES0_INTERRUPT);
}

static int enh_desc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
{
	unsigned int csum = 0;
	/* The type-1 checksum offload engines append the checksum at
	 * the end of frame and the two bytes of checksum are added in
	 * the length.
	 * Adjust for that in the framelen for type-1 checksum offload
	 * engines.
	 */
	if (rx_coe_type == STMMAC_RX_COE_TYPE1)
		csum = 2;

	return (((le32_to_cpu(p->des0) & RDES0_FRAME_LEN_MASK)
				>> RDES0_FRAME_LEN_SHIFT) - csum);
}

static void enh_desc_enable_tx_timestamp(struct dma_desc *p)
{
	p->des0 |= cpu_to_le32(ETDES0_TIME_STAMP_ENABLE);
}

static int enh_desc_get_tx_timestamp_status(struct dma_desc *p)
{
	return (le32_to_cpu(p->des0) & ETDES0_TIME_STAMP_STATUS) >> 17;
}

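/* Read back the captured timestamp: nanoseconds and seconds live in
 * des6/des7 of the extended descriptor, or in des2/des3 otherwise.
 */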
static void enh_desc_get_timestamp(void *desc, u32 ats, u64 *ts)
{
	u64 ns;

	if (ats) {
		struct dma_extended_desc *p = (struct dma_extended_desc *)desc;
		ns = le32_to_cpu(p->des6);
		/* convert high/sec time stamp value to nanosecond */
		ns += le32_to_cpu(p->des7) * 1000000000ULL;
	} else {
		struct dma_desc *p = (struct dma_desc *)desc;
		ns = le32_to_cpu(p->des2);
		ns += le32_to_cpu(p->des3) * 1000000000ULL;
	}

	*ts = ns;
}

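/* Report whether a valid rx timestamp was captured. With extended
 * descriptors, RDES0 bit 7 (shared with the IPC csum error flag)
 * signals timestamp availability; otherwise an all-ones des2/des3
 * marks a corrupted timestamp.
 */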
static int enh_desc_get_rx_timestamp_status(void *desc, void *next_desc,
					    u32 ats)
{
	if (ats) {
		struct dma_extended_desc *p = (struct dma_extended_desc *)desc;
		return (le32_to_cpu(p->basic.des0) & RDES0_IPC_CSUM_ERROR) >> 7;
	} else {
		struct dma_desc *p = (struct dma_desc *)desc;
		if ((le32_to_cpu(p->des2) == 0xffffffff) &&
		    (le32_to_cpu(p->des3) == 0xffffffff))
			/* timestamp is corrupted, hence don't store it */
			return 0;
		else
			return 1;
	}
}

static void enh_desc_display_ring(void *head, unsigned int size, bool rx)
{
	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
	int i;

	pr_info("Extended %s descriptor ring:\n", rx ? "RX" : "TX");

	for (i = 0; i < size; i++) {
		u64 x;

		x = *(u64 *)ep;
		pr_info("%03d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
			i, (unsigned int)virt_to_phys(ep),
			(unsigned int)x, (unsigned int)(x >> 32),
			ep->basic.des2, ep->basic.des3);
		ep++;
	}
	pr_info("\n");
}

static void enh_desc_get_addr(struct dma_desc *p, unsigned int *addr)
{
	*addr = le32_to_cpu(p->des2);
}

static void enh_desc_set_addr(struct dma_desc *p, dma_addr_t addr)
{
	p->des2 = cpu_to_le32(addr);
}

static void enh_desc_clear(struct dma_desc *p)
{
	p->des2 = 0;
}

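/* Descriptor callbacks used by the core when enhanced descriptors are
 * enabled.
 */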
const struct stmmac_desc_ops enh_desc_ops = {
	.tx_status = enh_desc_get_tx_status,
	.rx_status = enh_desc_get_rx_status,
	.get_tx_len = enh_desc_get_tx_len,
	.init_rx_desc = enh_desc_init_rx_desc,
	.init_tx_desc = enh_desc_init_tx_desc,
	.get_tx_owner = enh_desc_get_tx_owner,
	.release_tx_desc = enh_desc_release_tx_desc,
	.prepare_tx_desc = enh_desc_prepare_tx_desc,
	.set_tx_ic = enh_desc_set_tx_ic,
	.get_tx_ls = enh_desc_get_tx_ls,
	.set_tx_owner = enh_desc_set_tx_owner,
	.set_rx_owner = enh_desc_set_rx_owner,
	.get_rx_frame_len = enh_desc_get_rx_frame_len,
	.rx_extended_status = enh_desc_get_ext_status,
	.enable_tx_timestamp = enh_desc_enable_tx_timestamp,
	.get_tx_timestamp_status = enh_desc_get_tx_timestamp_status,
	.get_timestamp = enh_desc_get_timestamp,
	.get_rx_timestamp_status = enh_desc_get_rx_timestamp_status,
	.display_ring = enh_desc_display_ring,
	.get_addr = enh_desc_get_addr,
	.set_addr = enh_desc_set_addr,
	.clear = enh_desc_clear,
};