/*
 * This contains the functions to handle the descriptors for DesignWare databook
 * 4.xx.
 *
 * Copyright (C) 2015  STMicroelectronics Ltd
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * Author: Alexandre Torgue <alexandre.torgue@st.com>
 */

#include <linux/stmmac.h>
#include "common.h"
#include "dwmac4_descs.h"

/* Parse the write-back TX descriptor status.
 * @data: opaque pointer, actually a struct net_device_stats
 * @x: driver extra statistics to update on errors
 * @p: write-back TX descriptor
 * @ioaddr: device register base (unused here)
 *
 * Returns tx_dma_own while the DMA still owns the descriptor, tx_not_ls
 * for non-last segments, tx_err when the error summary bit is set, and
 * tx_done otherwise.
 */
static int dwmac4_wrback_get_tx_status(void *data, struct stmmac_extra_stats *x,
				       struct dma_desc *p,
				       void __iomem *ioaddr)
{
	struct net_device_stats *stats = (struct net_device_stats *)data;
	unsigned int tdes3;
	int ret = tx_done;

	/* Descriptors are little-endian in memory; convert once. */
	tdes3 = le32_to_cpu(p->des3);

	/* Get tx owner first */
	if (unlikely(tdes3 & TDES3_OWN))
		return tx_dma_own;

	/* Verify tx error by looking at the last segment. */
	if (likely(!(tdes3 & TDES3_LAST_DESCRIPTOR)))
		return tx_not_ls;

	if (unlikely(tdes3 & TDES3_ERROR_SUMMARY)) {
		if (unlikely(tdes3 & TDES3_JABBER_TIMEOUT))
			x->tx_jabber++;
		if (unlikely(tdes3 & TDES3_PACKET_FLUSHED))
			x->tx_frame_flushed++;
		if (unlikely(tdes3 & TDES3_LOSS_CARRIER)) {
			x->tx_losscarrier++;
			stats->tx_carrier_errors++;
		}
		if (unlikely(tdes3 & TDES3_NO_CARRIER)) {
			x->tx_carrier++;
			stats->tx_carrier_errors++;
		}
		if (unlikely((tdes3 & TDES3_LATE_COLLISION) ||
			     (tdes3 & TDES3_EXCESSIVE_COLLISION)))
			stats->collisions +=
			    (tdes3 & TDES3_COLLISION_COUNT_MASK)
			    >> TDES3_COLLISION_COUNT_SHIFT;

		if (unlikely(tdes3 & TDES3_EXCESSIVE_DEFERRAL))
			x->tx_deferred++;

		if (unlikely(tdes3 & TDES3_UNDERFLOW_ERROR))
			x->tx_underflow++;

		if (unlikely(tdes3 & TDES3_IP_HDR_ERROR))
			x->tx_ip_header_error++;

		if (unlikely(tdes3 & TDES3_PAYLOAD_ERROR))
			x->tx_payload_error++;

		ret = tx_err;
	}

	/* Deferred transmission is counted even without an error summary. */
	if (unlikely(tdes3 & TDES3_DEFERRED))
		x->tx_deferred++;

	return ret;
}

static int dwmac4_wrback_get_rx_status(void *data, struct stmmac_extra_stats *x,
				       struct dma_desc *p)
{
	struct net_device_stats *stats = (struct net_device_stats *)data;
80 81 82
	unsigned int rdes1 = le32_to_cpu(p->des1);
	unsigned int rdes2 = le32_to_cpu(p->des2);
	unsigned int rdes3 = le32_to_cpu(p->des3);
83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105
	int message_type;
	int ret = good_frame;

	if (unlikely(rdes3 & RDES3_OWN))
		return dma_own;

	/* Verify rx error by looking at the last segment. */
	if (likely(!(rdes3 & RDES3_LAST_DESCRIPTOR)))
		return discard_frame;

	if (unlikely(rdes3 & RDES3_ERROR_SUMMARY)) {
		if (unlikely(rdes3 & RDES3_GIANT_PACKET))
			stats->rx_length_errors++;
		if (unlikely(rdes3 & RDES3_OVERFLOW_ERROR))
			x->rx_gmac_overflow++;

		if (unlikely(rdes3 & RDES3_RECEIVE_WATCHDOG))
			x->rx_watchdog++;

		if (unlikely(rdes3 & RDES3_RECEIVE_ERROR))
			x->rx_mii++;

		if (unlikely(rdes3 & RDES3_CRC_ERROR)) {
106
			x->rx_crc_errors++;
107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125
			stats->rx_crc_errors++;
		}

		if (unlikely(rdes3 & RDES3_DRIBBLE_ERROR))
			x->dribbling_bit++;

		ret = discard_frame;
	}

	message_type = (rdes1 & ERDES4_MSG_TYPE_MASK) >> 8;

	if (rdes1 & RDES1_IP_HDR_ERROR)
		x->ip_hdr_err++;
	if (rdes1 & RDES1_IP_CSUM_BYPASSED)
		x->ip_csum_bypassed++;
	if (rdes1 & RDES1_IPV4_HEADER)
		x->ipv4_pkt_rcvd++;
	if (rdes1 & RDES1_IPV6_HEADER)
		x->ipv6_pkt_rcvd++;
126 127 128 129 130

	if (message_type == RDES_EXT_NO_PTP)
		x->no_ptp_rx_msg_type_ext++;
	else if (message_type == RDES_EXT_SYNC)
		x->ptp_rx_msg_type_sync++;
131
	else if (message_type == RDES_EXT_FOLLOW_UP)
132
		x->ptp_rx_msg_type_follow_up++;
133
	else if (message_type == RDES_EXT_DELAY_REQ)
134
		x->ptp_rx_msg_type_delay_req++;
135
	else if (message_type == RDES_EXT_DELAY_RESP)
136
		x->ptp_rx_msg_type_delay_resp++;
137
	else if (message_type == RDES_EXT_PDELAY_REQ)
138
		x->ptp_rx_msg_type_pdelay_req++;
139
	else if (message_type == RDES_EXT_PDELAY_RESP)
140
		x->ptp_rx_msg_type_pdelay_resp++;
141
	else if (message_type == RDES_EXT_PDELAY_FOLLOW_UP)
142 143 144 145 146 147 148
		x->ptp_rx_msg_type_pdelay_follow_up++;
	else if (message_type == RDES_PTP_ANNOUNCE)
		x->ptp_rx_msg_type_announce++;
	else if (message_type == RDES_PTP_MANAGEMENT)
		x->ptp_rx_msg_type_management++;
	else if (message_type == RDES_PTP_PKT_RESERVED_TYPE)
		x->ptp_rx_msg_pkt_reserved_type++;
149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178

	if (rdes1 & RDES1_PTP_PACKET_TYPE)
		x->ptp_frame_type++;
	if (rdes1 & RDES1_PTP_VER)
		x->ptp_ver++;
	if (rdes1 & RDES1_TIMESTAMP_DROPPED)
		x->timestamp_dropped++;

	if (unlikely(rdes2 & RDES2_SA_FILTER_FAIL)) {
		x->sa_rx_filter_fail++;
		ret = discard_frame;
	}
	if (unlikely(rdes2 & RDES2_DA_FILTER_FAIL)) {
		x->da_rx_filter_fail++;
		ret = discard_frame;
	}

	if (rdes2 & RDES2_L3_FILTER_MATCH)
		x->l3_filter_match++;
	if (rdes2 & RDES2_L4_FILTER_MATCH)
		x->l4_filter_match++;
	if ((rdes2 & RDES2_L3_L4_FILT_NB_MATCH_MASK)
	    >> RDES2_L3_L4_FILT_NB_MATCH_SHIFT)
		x->l3_l4_filter_no_match++;

	return ret;
}

static int dwmac4_rd_get_tx_len(struct dma_desc *p)
{
179
	return (le32_to_cpu(p->des2) & TDES2_BUFFER1_SIZE_MASK);
180 181 182 183
}

static int dwmac4_get_tx_owner(struct dma_desc *p)
{
184
	return (le32_to_cpu(p->des3) & TDES3_OWN) >> TDES3_OWN_SHIFT;
185 186 187 188
}

static void dwmac4_set_tx_owner(struct dma_desc *p)
{
189
	p->des3 |= cpu_to_le32(TDES3_OWN);
190 191 192 193
}

static void dwmac4_set_rx_owner(struct dma_desc *p)
{
194
	p->des3 |= cpu_to_le32(RDES3_OWN);
195 196 197 198
}

static int dwmac4_get_tx_ls(struct dma_desc *p)
{
199 200
	return (le32_to_cpu(p->des3) & TDES3_LAST_DESCRIPTOR)
		>> TDES3_LAST_DESCRIPTOR_SHIFT;
201 202 203 204
}

static int dwmac4_wrback_get_rx_frame_len(struct dma_desc *p, int rx_coe)
{
205
	return (le32_to_cpu(p->des3) & RDES3_PACKET_SIZE_MASK);
206 207 208 209
}

static void dwmac4_rd_enable_tx_timestamp(struct dma_desc *p)
{
210
	p->des2 |= cpu_to_le32(TDES2_TIMESTAMP_ENABLE);
211 212 213 214
}

static int dwmac4_wrback_get_tx_timestamp_status(struct dma_desc *p)
{
215
	/* Context type from W/B descriptor must be zero */
216
	if (le32_to_cpu(p->des3) & TDES3_CONTEXT_TYPE)
217
		return 0;
218 219

	/* Tx Timestamp Status is 1 so des0 and des1'll have valid values */
220
	if (le32_to_cpu(p->des3) & TDES3_TIMESTAMP_STATUS)
221
		return 1;
222

223
	return 0;
224 225
}

226
static inline u64 dwmac4_get_timestamp(void *desc, u32 ats)
227 228 229 230
{
	struct dma_desc *p = (struct dma_desc *)desc;
	u64 ns;

231
	ns = le32_to_cpu(p->des0);
232
	/* convert high/sec time stamp value to nanosecond */
233
	ns += le32_to_cpu(p->des1) * 1000000000ULL;
234 235 236 237

	return ns;
}

238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260
static int dwmac4_rx_check_timestamp(void *desc)
{
	struct dma_desc *p = (struct dma_desc *)desc;
	u32 own, ctxt;
	int ret = 1;

	own = p->des3 & RDES3_OWN;
	ctxt = ((p->des3 & RDES3_CONTEXT_DESCRIPTOR)
		>> RDES3_CONTEXT_DESCRIPTOR_SHIFT);

	if (likely(!own && ctxt)) {
		if ((p->des0 == 0xffffffff) && (p->des1 == 0xffffffff))
			/* Corrupted value */
			ret = -EINVAL;
		else
			/* A valid Timestamp is ready to be read */
			ret = 0;
	}

	/* Timestamp not ready */
	return ret;
}

261 262
static int dwmac4_wrback_get_rx_timestamp_status(void *desc, void *next_desc,
						 u32 ats)
263 264
{
	struct dma_desc *p = (struct dma_desc *)desc;
265 266 267 268
	int ret = -EINVAL;

	/* Get the status from normal w/b descriptor */
	if (likely(p->des3 & TDES3_RS1V)) {
269
		if (likely(le32_to_cpu(p->des1) & RDES1_TIMESTAMP_AVAILABLE)) {
270 271 272 273
			int i = 0;

			/* Check if timestamp is OK from context descriptor */
			do {
274
				ret = dwmac4_rx_check_timestamp(next_desc);
275 276 277
				if (ret < 0)
					goto exit;
				i++;
278

279
			} while ((ret == 1) && (i < 10));
280 281 282 283 284 285

			if (i == 10)
				ret = -EBUSY;
		}
	}
exit:
286 287 288 289
	if (likely(ret == 0))
		return 1;

	return 0;
290 291 292 293 294
}

static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
				   int mode, int end)
{
295
	p->des3 = cpu_to_le32(RDES3_OWN | RDES3_BUFFER1_VALID_ADDR);
296 297

	if (!disable_rx_ic)
298
		p->des3 |= cpu_to_le32(RDES3_INT_ON_COMPLETION_EN);
299 300 301 302 303 304 305 306 307 308 309 310
}

/* Reset a TX descriptor so the DMA engine sees it as empty.
 * @mode, @end: ring/chain parameters, unused on GMAC4 — the whole
 * descriptor is simply cleared word by word.
 */
static void dwmac4_rd_init_tx_desc(struct dma_desc *p, int mode, int end)
{
	p->des3 = 0;
	p->des2 = 0;
	p->des1 = 0;
	p->des0 = 0;
}

/* Fill a read-format TX descriptor for transmission.
 * @is_fs: this descriptor carries the first segment of the frame
 * @len: buffer 1 length
 * @csum_flag: enable full checksum insertion
 * @mode: ring/chain mode, unused on GMAC4
 * @tx_own: give ownership to the DMA as part of this call
 * @ls: this descriptor carries the last segment
 * @tot_pkt_len: total frame length programmed into TDES3
 */
static void dwmac4_rd_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
				      bool csum_flag, int mode, bool tx_own,
				      bool ls, unsigned int tot_pkt_len)
{
	unsigned int tdes3 = le32_to_cpu(p->des3);

	p->des2 |= cpu_to_le32(len & TDES2_BUFFER1_SIZE_MASK);

	tdes3 |= tot_pkt_len & TDES3_PACKET_SIZE_MASK;
	if (is_fs)
		tdes3 |= TDES3_FIRST_DESCRIPTOR;
	else
		tdes3 &= ~TDES3_FIRST_DESCRIPTOR;

	if (likely(csum_flag))
		tdes3 |= (TX_CIC_FULL << TDES3_CHECKSUM_INSERTION_SHIFT);
	else
		tdes3 &= ~(TX_CIC_FULL << TDES3_CHECKSUM_INSERTION_SHIFT);

	if (ls)
		tdes3 |= TDES3_LAST_DESCRIPTOR;
	else
		tdes3 &= ~TDES3_LAST_DESCRIPTOR;

	/* Finally set the OWN bit. Later the DMA will start! */
	if (tx_own)
		tdes3 |= TDES3_OWN;

	if (is_fs && tx_own)
		/* When the own bit, for the first frame, has to be set, all
		 * descriptors for the same frame has to be set before, to
		 * avoid race condition.
		 */
		dma_wmb();

	p->des3 = cpu_to_le32(tdes3);
}

/* Fill a read-format TX descriptor for a TSO transmission.
 * @is_fs: first segment — also programs TCP header length and payload
 * length and enables TCP segmentation for the frame
 * @len1/@len2: buffer 1 and buffer 2 lengths (skipped when zero)
 * @tx_own: give ownership to the DMA as part of this call
 * @ls: this descriptor carries the last segment
 * @tcphdrlen: TCP header length in 32-bit words
 * @tcppayloadlen: TCP payload length of the whole frame
 */
static void dwmac4_rd_prepare_tso_tx_desc(struct dma_desc *p, int is_fs,
					  int len1, int len2, bool tx_own,
					  bool ls, unsigned int tcphdrlen,
					  unsigned int tcppayloadlen)
{
	unsigned int tdes3 = le32_to_cpu(p->des3);

	if (len1)
		p->des2 |= cpu_to_le32((len1 & TDES2_BUFFER1_SIZE_MASK));

	if (len2)
		p->des2 |= cpu_to_le32((len2 << TDES2_BUFFER2_SIZE_MASK_SHIFT)
			    & TDES2_BUFFER2_SIZE_MASK);

	if (is_fs) {
		tdes3 |= TDES3_FIRST_DESCRIPTOR |
			 TDES3_TCP_SEGMENTATION_ENABLE |
			 ((tcphdrlen << TDES3_HDR_LEN_SHIFT) &
			  TDES3_SLOT_NUMBER_MASK) |
			 ((tcppayloadlen & TDES3_TCP_PKT_PAYLOAD_MASK));
	} else {
		tdes3 &= ~TDES3_FIRST_DESCRIPTOR;
	}

	if (ls)
		tdes3 |= TDES3_LAST_DESCRIPTOR;
	else
		tdes3 &= ~TDES3_LAST_DESCRIPTOR;

	/* Finally set the OWN bit. Later the DMA will start! */
	if (tx_own)
		tdes3 |= TDES3_OWN;

	if (is_fs && tx_own)
		/* When the own bit, for the first frame, has to be set, all
		 * descriptors for the same frame has to be set before, to
		 * avoid race condition.
		 */
		dma_wmb();

	p->des3 = cpu_to_le32(tdes3);
}

/* Clear a TX descriptor after the DMA has finished with it.
 * @mode: ring/chain mode, unused on GMAC4.
 */
static void dwmac4_release_tx_desc(struct dma_desc *p, int mode)
{
	p->des0 = 0;
	p->des1 = 0;
	p->des2 = 0;
	p->des3 = 0;
}

static void dwmac4_rd_set_tx_ic(struct dma_desc *p)
{
400
	p->des2 |= cpu_to_le32(TDES2_INTERRUPT_ON_COMPLETION);
401 402 403 404 405 406 407 408 409 410
}

/* Dump all descriptors of a ring to the kernel log (debug aid).
 * @head: first descriptor of the ring
 * @size: number of descriptors
 * @rx: true for an RX ring, false for TX (label only)
 */
static void dwmac4_display_ring(void *head, unsigned int size, bool rx)
{
	struct dma_desc *p = (struct dma_desc *)head;
	unsigned int i;	/* unsigned to match @size and avoid sign-compare */

	pr_info("%s descriptor ring:\n", rx ? "RX" : "TX");

	for (i = 0; i < size; i++) {
		pr_info("%03d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
			i, (unsigned int)virt_to_phys(p),
			le32_to_cpu(p->des0), le32_to_cpu(p->des1),
			le32_to_cpu(p->des2), le32_to_cpu(p->des3));
		p++;
	}
}

/* Program a TSO context descriptor carrying the MSS value. */
static void dwmac4_set_mss_ctxt(struct dma_desc *p, unsigned int mss)
{
	p->des0 = 0;
	p->des1 = 0;
	p->des2 = cpu_to_le32(mss);
	/* Mark it as a context descriptor with a valid MSS field. */
	p->des3 = cpu_to_le32(TDES3_CONTEXT_TYPE | TDES3_CTXT_TCMSSV);
}

const struct stmmac_desc_ops dwmac4_desc_ops = {
	.tx_status = dwmac4_wrback_get_tx_status,
	.rx_status = dwmac4_wrback_get_rx_status,
	.get_tx_len = dwmac4_rd_get_tx_len,
	.get_tx_owner = dwmac4_get_tx_owner,
	.set_tx_owner = dwmac4_set_tx_owner,
	.set_rx_owner = dwmac4_set_rx_owner,
	.get_tx_ls = dwmac4_get_tx_ls,
	.get_rx_frame_len = dwmac4_wrback_get_rx_frame_len,
	.enable_tx_timestamp = dwmac4_rd_enable_tx_timestamp,
	.get_tx_timestamp_status = dwmac4_wrback_get_tx_timestamp_status,
438 439
	.get_rx_timestamp_status = dwmac4_wrback_get_rx_timestamp_status,
	.get_timestamp = dwmac4_get_timestamp,
440 441 442 443 444 445 446 447 448 449 450
	.set_tx_ic = dwmac4_rd_set_tx_ic,
	.prepare_tx_desc = dwmac4_rd_prepare_tx_desc,
	.prepare_tso_tx_desc = dwmac4_rd_prepare_tso_tx_desc,
	.release_tx_desc = dwmac4_release_tx_desc,
	.init_rx_desc = dwmac4_rd_init_rx_desc,
	.init_tx_desc = dwmac4_rd_init_tx_desc,
	.display_ring = dwmac4_display_ring,
	.set_mss = dwmac4_set_mss_ctxt,
};

/* GMAC4 needs no special ring/chain mode hooks: all callbacks are NULL. */
const struct stmmac_mode_ops dwmac4_ring_mode_ops = { };