norm_desc.c 8.3 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
/*******************************************************************************
  This contains the functions to handle the normal descriptors.

  Copyright (C) 2007-2009  STMicroelectronics Ltd

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/

#include <linux/stmmac.h>
#include "common.h"
#include "descs_com.h"


/* Inspect the status word of a closed TX descriptor and update the
 * generic and extra statistics. Returns tx_dma_own while the DMA still
 * owns the descriptor, tx_not_ls if this is not the frame's last
 * segment, tx_err on an error summary, tx_done otherwise.
 */
static int ndesc_get_tx_status(void *data, struct stmmac_extra_stats *x,
			       struct dma_desc *p, void __iomem *ioaddr)
{
	struct net_device_stats *stats = (struct net_device_stats *)data;
	unsigned int status = le32_to_cpu(p->des0);
	unsigned int ctrl = le32_to_cpu(p->des1);
	int ret = tx_done;

	/* Nothing to report while the DMA still owns the descriptor */
	if (unlikely(status & TDES0_OWN))
		return tx_dma_own;

	/* Errors are only summarized on the last segment of a frame */
	if (likely(!(ctrl & TDES1_LAST_SEGMENT)))
		return tx_not_ls;

	if (unlikely(status & TDES0_ERROR_SUMMARY)) {
		if (unlikely(status & TDES0_UNDERFLOW_ERROR)) {
			x->tx_underflow++;
			stats->tx_fifo_errors++;
		}
		if (unlikely(status & TDES0_NO_CARRIER)) {
			x->tx_carrier++;
			stats->tx_carrier_errors++;
		}
		if (unlikely(status & TDES0_LOSS_CARRIER)) {
			x->tx_losscarrier++;
			stats->tx_carrier_errors++;
		}
		if (unlikely(status & (TDES0_EXCESSIVE_DEFERRAL |
				       TDES0_EXCESSIVE_COLLISIONS |
				       TDES0_LATE_COLLISION)))
			stats->collisions +=
				(status & TDES0_COLLISION_COUNT_MASK) >> 3;

		ret = tx_err;
	}

	/* VLAN/deferred accounting happens even for errored frames */
	if (status & TDES0_VLAN_FRAME)
		x->tx_vlan++;

	if (unlikely(status & TDES0_DEFERRED))
		x->tx_deferred++;

	return ret;
}

static int ndesc_get_tx_len(struct dma_desc *p)
{
76
	return (le32_to_cpu(p->des1) & RDES1_BUFFER1_SIZE_MASK);
77 78 79 80
}

/* This function verifies if each incoming frame has some errors
 * and, if required, updates the multicast statistics.
 * In case of success, it returns good_frame because the GMAC device
 * is supposed to be able to compute the csum in HW. */
static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
			       struct dma_desc *p)
{
86
	int ret = good_frame;
87
	unsigned int rdes0 = le32_to_cpu(p->des0);
88 89
	struct net_device_stats *stats = (struct net_device_stats *)data;

90 91 92
	if (unlikely(rdes0 & RDES0_OWN))
		return dma_own;

93
	if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) {
G
Giuseppe CAVALLARO 已提交
94 95
		pr_warn("%s: Oversized frame spanned multiple buffers\n",
			__func__);
96 97 98 99
		stats->rx_length_errors++;
		return discard_frame;
	}

100 101
	if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) {
		if (unlikely(rdes0 & RDES0_DESCRIPTOR_ERROR))
102
			x->rx_desc++;
103
		if (unlikely(rdes0 & RDES0_SA_FILTER_FAIL))
104
			x->sa_filter_fail++;
105
		if (unlikely(rdes0 & RDES0_OVERFLOW_ERROR))
106
			x->overflow_error++;
107
		if (unlikely(rdes0 & RDES0_IPC_CSUM_ERROR))
108
			x->ipc_csum_error++;
109
		if (unlikely(rdes0 & RDES0_COLLISION)) {
110 111 112
			x->rx_collision++;
			stats->collisions++;
		}
113
		if (unlikely(rdes0 & RDES0_CRC_ERROR)) {
114
			x->rx_crc_errors++;
115 116 117 118
			stats->rx_crc_errors++;
		}
		ret = discard_frame;
	}
119
	if (unlikely(rdes0 & RDES0_DRIBBLING))
120
		x->dribbling_bit++;
121

122
	if (unlikely(rdes0 & RDES0_LENGTH_ERROR)) {
123 124 125
		x->rx_length++;
		ret = discard_frame;
	}
126
	if (unlikely(rdes0 & RDES0_MII_ERROR)) {
127 128 129
		x->rx_mii++;
		ret = discard_frame;
	}
130
#ifdef STMMAC_VLAN_TAG_USED
131
	if (rdes0 & RDES0_VLAN_TAG)
132 133
		x->vlan_tag++;
#endif
134 135 136
	return ret;
}

/* Initialize a RX descriptor: hand ownership to the DMA, program the
 * maximum buffer-1 size and the ring/chain layout, and optionally
 * suppress the receive-completion interrupt for this descriptor.
 * @p: descriptor to initialize
 * @disable_rx_ic: non-zero to set RDES1_DISABLE_IC (no IRQ on completion)
 * @mode: STMMAC_CHAIN_MODE or ring mode
 * @end: non-zero when this is the last descriptor of the ring
 */
static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode,
			       int end)
{
	/* The DMA owns the descriptor from now on */
	p->des0 |= cpu_to_le32(RDES0_OWN);
	/* Largest buffer-1 size the field can describe */
	p->des1 |= cpu_to_le32((BUF_SIZE_2KiB - 1) & RDES1_BUFFER1_SIZE_MASK);

	/* NOTE(review): the chain/ring helpers appear to modify des1 as
	 * well (see descs_com.h) — keep this statement ordering. */
	if (mode == STMMAC_CHAIN_MODE)
		ndesc_rx_set_on_chain(p, end);
	else
		ndesc_rx_set_on_ring(p, end);

	if (disable_rx_ic)
		p->des1 |= cpu_to_le32(RDES1_DISABLE_IC);
}

/* Initialize a TX descriptor: clear the OWN bit (the CPU keeps the
 * descriptor until a frame is queued on it) and set up the ring/chain
 * layout.
 * @p: descriptor to initialize
 * @mode: STMMAC_CHAIN_MODE or ring mode
 * @end: non-zero when this is the last descriptor of the ring
 */
static void ndesc_init_tx_desc(struct dma_desc *p, int mode, int end)
{
	/* CPU owns the descriptor until it is handed off on transmit */
	p->des0 &= cpu_to_le32(~TDES0_OWN);
	if (mode == STMMAC_CHAIN_MODE)
		ndesc_tx_set_on_chain(p);
	else
		ndesc_end_tx_desc_on_ring(p, end);
}

static int ndesc_get_tx_owner(struct dma_desc *p)
{
163
	return (le32_to_cpu(p->des0) & TDES0_OWN) >> 31;
164 165 166 167
}

static void ndesc_set_tx_owner(struct dma_desc *p)
{
168
	p->des0 |= cpu_to_le32(TDES0_OWN);
169 170 171 172
}

static void ndesc_set_rx_owner(struct dma_desc *p)
{
173
	p->des0 |= cpu_to_le32(RDES0_OWN);
174 175 176 177
}

static int ndesc_get_tx_ls(struct dma_desc *p)
{
178
	return (le32_to_cpu(p->des1) & TDES1_LAST_SEGMENT) >> 30;
179 180
}

181
static void ndesc_release_tx_desc(struct dma_desc *p, int mode)
182
{
183
	int ter = (le32_to_cpu(p->des1) & TDES1_END_RING) >> 25;
184

185
	memset(p, 0, offsetof(struct dma_desc, des2));
186
	if (mode == STMMAC_CHAIN_MODE)
187
		ndesc_tx_set_on_chain(p);
188 189
	else
		ndesc_end_tx_desc_on_ring(p, ter);
190 191 192
}

/* Fill in a TX descriptor for transmission: segment flags, checksum
 * insertion control and length, then optionally pass ownership to the
 * DMA. @tot_pkt_len is unused for normal descriptors.
 */
static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
				  bool csum_flag, int mode, bool tx_own,
				  bool ls, unsigned int tot_pkt_len)
{
	unsigned int ctrl = le32_to_cpu(p->des1);

	if (is_fs)
		ctrl |= TDES1_FIRST_SEGMENT;
	else
		ctrl &= ~TDES1_FIRST_SEGMENT;

	/* Enable or disable full checksum insertion by the HW */
	if (likely(csum_flag))
		ctrl |= TX_CIC_FULL << TDES1_CHECKSUM_INSERTION_SHIFT;
	else
		ctrl &= ~(TX_CIC_FULL << TDES1_CHECKSUM_INSERTION_SHIFT);

	if (ls)
		ctrl |= TDES1_LAST_SEGMENT;

	p->des1 = cpu_to_le32(ctrl);

	if (mode == STMMAC_CHAIN_MODE)
		norm_set_tx_desc_len_on_chain(p, len);
	else
		norm_set_tx_desc_len_on_ring(p, len);

	/* Ownership is granted only after des1 is fully programmed */
	if (tx_own)
		p->des0 |= cpu_to_le32(TDES0_OWN);
}

222
static void ndesc_set_tx_ic(struct dma_desc *p)
223
{
224
	p->des1 |= cpu_to_le32(TDES1_INTERRUPT);
225 226
}

227
static int ndesc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
228
{
229 230
	unsigned int csum = 0;

231 232 233 234
	/* The type-1 checksum offload engines append the checksum at
	 * the end of frame and the two bytes of checksum are added in
	 * the length.
	 * Adjust for that in the framelen for type-1 checksum offload
235 236
	 * engines
	 */
237
	if (rx_coe_type == STMMAC_RX_COE_TYPE1)
238 239
		csum = 2;

240 241
	return (((le32_to_cpu(p->des0) & RDES0_FRAME_LEN_MASK)
				>> RDES0_FRAME_LEN_SHIFT) -
242 243
		csum);

244 245
}

246 247
static void ndesc_enable_tx_timestamp(struct dma_desc *p)
{
248
	p->des1 |= cpu_to_le32(TDES1_TIME_STAMP_ENABLE);
249 250 251 252
}

static int ndesc_get_tx_timestamp_status(struct dma_desc *p)
{
253
	return (le32_to_cpu(p->des0) & TDES0_TIME_STAMP_STATUS) >> 17;
254 255 256 257 258 259 260
}

/* Combine the snapshot words of a descriptor into one nanosecond value:
 * des2 holds nanoseconds, des3 holds seconds. @ats is unused here. */
static u64 ndesc_get_timestamp(void *desc, u32 ats)
{
	struct dma_desc *p = (struct dma_desc *)desc;
	u64 ns = le32_to_cpu(p->des2);

	/* Scale the seconds word to nanoseconds and accumulate */
	return ns + le32_to_cpu(p->des3) * 1000000000ULL;
}

268
static int ndesc_get_rx_timestamp_status(void *desc, void *next_desc, u32 ats)
269 270 271
{
	struct dma_desc *p = (struct dma_desc *)desc;

272 273
	if ((le32_to_cpu(p->des2) == 0xffffffff) &&
	    (le32_to_cpu(p->des3) == 0xffffffff))
274 275 276 277 278 279
		/* timestamp is corrupted, hence don't store it */
		return 0;
	else
		return 1;
}

280 281 282 283 284 285 286 287 288 289 290
static void ndesc_display_ring(void *head, unsigned int size, bool rx)
{
	struct dma_desc *p = (struct dma_desc *)head;
	int i;

	pr_info("%s descriptor ring:\n", rx ? "RX" : "TX");

	for (i = 0; i < size; i++) {
		u64 x;

		x = *(u64 *)p;
291
		pr_info("%03d [0x%x]: 0x%x 0x%x 0x%x 0x%x",
292 293 294 295 296 297 298 299
			i, (unsigned int)virt_to_phys(p),
			(unsigned int)x, (unsigned int)(x >> 32),
			p->des2, p->des3);
		p++;
	}
	pr_info("\n");
}

/* Hook table binding the generic stmmac descriptor API to the normal
 * (non-enhanced) descriptor implementation in this file. */
const struct stmmac_desc_ops ndesc_ops = {
	.tx_status = ndesc_get_tx_status,
	.rx_status = ndesc_get_rx_status,
	.get_tx_len = ndesc_get_tx_len,
	.init_rx_desc = ndesc_init_rx_desc,
	.init_tx_desc = ndesc_init_tx_desc,
	.get_tx_owner = ndesc_get_tx_owner,
	.release_tx_desc = ndesc_release_tx_desc,
	.prepare_tx_desc = ndesc_prepare_tx_desc,
	.set_tx_ic = ndesc_set_tx_ic,
	.get_tx_ls = ndesc_get_tx_ls,
	.set_tx_owner = ndesc_set_tx_owner,
	.set_rx_owner = ndesc_set_rx_owner,
	.get_rx_frame_len = ndesc_get_rx_frame_len,
	.enable_tx_timestamp = ndesc_enable_tx_timestamp,
	.get_tx_timestamp_status = ndesc_get_tx_timestamp_status,
	.get_timestamp = ndesc_get_timestamp,
	.get_rx_timestamp_status = ndesc_get_rx_timestamp_status,
	.display_ring = ndesc_display_ring,
};