/*

  Broadcom B43 wireless driver

  PIO data transfer

  Copyright (c) 2005-2008 Michael Buesch <mb@bu3sch.de>

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; see the file COPYING.  If not, write to
  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  Boston, MA 02110-1301, USA.

*/

#include "b43.h"
#include "pio.h"
#include "dma.h"
#include "main.h"
#include "xmit.h"

#include <linux/delay.h>


static void b43_pio_rx_work(struct work_struct *work);


static u16 generate_cookie(struct b43_pio_txqueue *q,
			   struct b43_pio_txpacket *pack)
{
	u16 cookie;

	/* Use the upper 4 bits of the cookie as
	 * PIO controller ID and store the packet index number
	 * in the lower 12 bits.
	 * Note that the cookie must never be 0, as that
	 * is a special value used in the RX path.
	 * It also must not be 0xFFFF, because that value
	 * is special for multicast frames.
	 */
	cookie = (((u16)q->index + 1) << 12);
	cookie |= pack->index;

	return cookie;
}

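/* Map a TX status cookie back to the queue and packet slot it was
 * generated for. This is the inverse of generate_cookie(). */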
static
struct b43_pio_txqueue *parse_cookie(struct b43_wldev *dev,
				     u16 cookie,
				      struct b43_pio_txpacket **pack)
{
	struct b43_pio *pio = &dev->pio;
	struct b43_pio_txqueue *q = NULL;
	unsigned int pack_index;

	switch (cookie & 0xF000) {
	case 0x1000:
		q = pio->tx_queue_AC_BK;
		break;
	case 0x2000:
		q = pio->tx_queue_AC_BE;
		break;
	case 0x3000:
		q = pio->tx_queue_AC_VI;
		break;
	case 0x4000:
		q = pio->tx_queue_AC_VO;
		break;
	case 0x5000:
		q = pio->tx_queue_mcast;
		break;
	}
	if (B43_WARN_ON(!q))
		return NULL;
	pack_index = (cookie & 0x0FFF);
	if (B43_WARN_ON(pack_index >= ARRAY_SIZE(q->packets)))
		return NULL;
	*pack = &q->packets[pack_index];

	return q;
}

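/* Look up the MMIO base address of the PIO queue with the given index.
 * Core revisions >= 11 use a different set of base registers. */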
static u16 index_to_pioqueue_base(struct b43_wldev *dev,
				  unsigned int index)
{
	static const u16 bases[] = {
		B43_MMIO_PIO_BASE0,
		B43_MMIO_PIO_BASE1,
		B43_MMIO_PIO_BASE2,
		B43_MMIO_PIO_BASE3,
		B43_MMIO_PIO_BASE4,
		B43_MMIO_PIO_BASE5,
		B43_MMIO_PIO_BASE6,
		B43_MMIO_PIO_BASE7,
	};
	static const u16 bases_rev11[] = {
		B43_MMIO_PIO11_BASE0,
		B43_MMIO_PIO11_BASE1,
		B43_MMIO_PIO11_BASE2,
		B43_MMIO_PIO11_BASE3,
		B43_MMIO_PIO11_BASE4,
		B43_MMIO_PIO11_BASE5,
	};

	if (dev->dev->id.revision >= 11) {
		B43_WARN_ON(index >= ARRAY_SIZE(bases_rev11));
		return bases_rev11[index];
	}
	B43_WARN_ON(index >= ARRAY_SIZE(bases));
	return bases[index];
}

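/* Offset of the TX register block within a PIO queue's MMIO region. */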
static u16 pio_txqueue_offset(struct b43_wldev *dev)
{
	if (dev->dev->id.revision >= 11)
		return 0x18;
	return 0;
}

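/* Offset of the RX register block within a PIO queue's MMIO region. */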
static u16 pio_rxqueue_offset(struct b43_wldev *dev)
{
	if (dev->dev->id.revision >= 11)
		return 0x38;
	return 8;
}

static struct b43_pio_txqueue *b43_setup_pioqueue_tx(struct b43_wldev *dev,
						     unsigned int index)
{
	struct b43_pio_txqueue *q;
	struct b43_pio_txpacket *p;
	unsigned int i;

	q = kzalloc(sizeof(*q), GFP_KERNEL);
	if (!q)
		return NULL;
	q->dev = dev;
	q->rev = dev->dev->id.revision;
	q->mmio_base = index_to_pioqueue_base(dev, index) +
		       pio_txqueue_offset(dev);
	q->index = index;

	q->free_packet_slots = B43_PIO_MAX_NR_TXPACKETS;
	if (q->rev >= 8) {
		q->buffer_size = 1920; /* FIXME: this constant is wrong. */
	} else {
		q->buffer_size = b43_piotx_read16(q, B43_PIO_TXQBUFSIZE);
		q->buffer_size -= 80;
	}

	INIT_LIST_HEAD(&q->packets_list);
	for (i = 0; i < ARRAY_SIZE(q->packets); i++) {
		p = &(q->packets[i]);
		INIT_LIST_HEAD(&p->list);
		p->index = i;
		p->queue = q;
		list_add(&p->list, &q->packets_list);
	}

	return q;
}

static struct b43_pio_rxqueue *b43_setup_pioqueue_rx(struct b43_wldev *dev,
						     unsigned int index)
{
	struct b43_pio_rxqueue *q;

	q = kzalloc(sizeof(*q), GFP_KERNEL);
	if (!q)
		return NULL;
	q->dev = dev;
	q->rev = dev->dev->id.revision;
	q->mmio_base = index_to_pioqueue_base(dev, index) +
		       pio_rxqueue_offset(dev);
	INIT_WORK(&q->rx_work, b43_pio_rx_work);

	/* Enable Direct FIFO RX (PIO) on the engine. */
	b43_dma_direct_fifo_rx(dev, index, 1);

	return q;
}

static void b43_pio_cancel_tx_packets(struct b43_pio_txqueue *q)
{
	struct b43_pio_txpacket *pack;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(q->packets); i++) {
		pack = &(q->packets[i]);
		if (pack->skb) {
			dev_kfree_skb_any(pack->skb);
			pack->skb = NULL;
		}
	}
}

static void b43_destroy_pioqueue_tx(struct b43_pio_txqueue *q,
				    const char *name)
{
	if (!q)
		return;
	b43_pio_cancel_tx_packets(q);
	kfree(q);
}

static void b43_destroy_pioqueue_rx(struct b43_pio_rxqueue *q,
				    const char *name)
{
	if (!q)
		return;
	kfree(q);
}

#define destroy_queue_tx(pio, queue) do {				\
	b43_destroy_pioqueue_tx((pio)->queue, __stringify(queue));	\
	(pio)->queue = NULL;						\
  } while (0)

#define destroy_queue_rx(pio, queue) do {				\
	b43_destroy_pioqueue_rx((pio)->queue, __stringify(queue));	\
	(pio)->queue = NULL;						\
  } while (0)

void b43_pio_free(struct b43_wldev *dev)
{
	struct b43_pio *pio;

	if (!b43_using_pio_transfers(dev))
		return;
	pio = &dev->pio;

	destroy_queue_rx(pio, rx_queue);
	destroy_queue_tx(pio, tx_queue_mcast);
	destroy_queue_tx(pio, tx_queue_AC_VO);
	destroy_queue_tx(pio, tx_queue_AC_VI);
	destroy_queue_tx(pio, tx_queue_AC_BE);
	destroy_queue_tx(pio, tx_queue_AC_BK);
}

void b43_pio_stop(struct b43_wldev *dev)
{
	if (!b43_using_pio_transfers(dev))
		return;
	cancel_work_sync(&dev->pio.rx_queue->rx_work);
}

int b43_pio_init(struct b43_wldev *dev)
{
	struct b43_pio *pio = &dev->pio;
	int err = -ENOMEM;

	b43_write32(dev, B43_MMIO_MACCTL, b43_read32(dev, B43_MMIO_MACCTL)
		    & ~B43_MACCTL_BE);
	b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_RXPADOFF, 0);

	pio->tx_queue_AC_BK = b43_setup_pioqueue_tx(dev, 0);
	if (!pio->tx_queue_AC_BK)
		goto out;

	pio->tx_queue_AC_BE = b43_setup_pioqueue_tx(dev, 1);
	if (!pio->tx_queue_AC_BE)
		goto err_destroy_bk;

	pio->tx_queue_AC_VI = b43_setup_pioqueue_tx(dev, 2);
	if (!pio->tx_queue_AC_VI)
		goto err_destroy_be;

	pio->tx_queue_AC_VO = b43_setup_pioqueue_tx(dev, 3);
	if (!pio->tx_queue_AC_VO)
		goto err_destroy_vi;

	pio->tx_queue_mcast = b43_setup_pioqueue_tx(dev, 4);
	if (!pio->tx_queue_mcast)
		goto err_destroy_vo;

	pio->rx_queue = b43_setup_pioqueue_rx(dev, 0);
	if (!pio->rx_queue)
		goto err_destroy_mcast;

	b43dbg(dev->wl, "PIO initialized\n");
	err = 0;
out:
	return err;

err_destroy_mcast:
	destroy_queue_tx(pio, tx_queue_mcast);
err_destroy_vo:
	destroy_queue_tx(pio, tx_queue_AC_VO);
err_destroy_vi:
	destroy_queue_tx(pio, tx_queue_AC_VI);
err_destroy_be:
	destroy_queue_tx(pio, tx_queue_AC_BE);
err_destroy_bk:
	destroy_queue_tx(pio, tx_queue_AC_BK);
	return err;
}

/* Static mapping of mac80211's queues (priorities) to b43 PIO queues. */
static struct b43_pio_txqueue *select_queue_by_priority(struct b43_wldev *dev,
							u8 queue_prio)
{
	struct b43_pio_txqueue *q;

	if (dev->qos_enabled) {
		/* 0 = highest priority */
		switch (queue_prio) {
		default:
			B43_WARN_ON(1);
			/* fallthrough */
		case 0:
			q = dev->pio.tx_queue_AC_VO;
			break;
		case 1:
			q = dev->pio.tx_queue_AC_VI;
			break;
		case 2:
			q = dev->pio.tx_queue_AC_BE;
			break;
		case 3:
			q = dev->pio.tx_queue_AC_BK;
			break;
		}
	} else
		q = dev->pio.tx_queue_AC_BE;

	return q;
}

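/* Push a data buffer to the 2-byte wide (core rev < 8) TX FIFO.
 * A trailing odd byte is written separately with the high-byte
 * enable bit cleared. Returns the updated control word. */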
static u16 tx_write_2byte_queue(struct b43_pio_txqueue *q,
				u16 ctl,
				const void *_data,
				unsigned int data_len)
{
	struct b43_wldev *dev = q->dev;
	const u8 *data = _data;

	ctl |= B43_PIO_TXCTL_WRITELO | B43_PIO_TXCTL_WRITEHI;
	b43_piotx_write16(q, B43_PIO_TXCTL, ctl);

	ssb_block_write(dev->dev, data, (data_len & ~1),
			q->mmio_base + B43_PIO_TXDATA,
			sizeof(u16));
	if (data_len & 1) {
		/* Write the last byte. */
		ctl &= ~B43_PIO_TXCTL_WRITEHI;
		b43_piotx_write16(q, B43_PIO_TXCTL, ctl);
		b43_piotx_write16(q, B43_PIO_TXDATA, data[data_len - 1]);
	}

	return ctl;
}

static void pio_tx_frame_2byte_queue(struct b43_pio_txpacket *pack,
				     const u8 *hdr, unsigned int hdrlen)
{
	struct b43_pio_txqueue *q = pack->queue;
	const char *frame = pack->skb->data;
	unsigned int frame_len = pack->skb->len;
	u16 ctl;

	ctl = b43_piotx_read16(q, B43_PIO_TXCTL);
	ctl |= B43_PIO_TXCTL_FREADY;
	ctl &= ~B43_PIO_TXCTL_EOF;

	/* Transfer the header data. */
	ctl = tx_write_2byte_queue(q, ctl, hdr, hdrlen);
	/* Transfer the frame data. */
	ctl = tx_write_2byte_queue(q, ctl, frame, frame_len);

	ctl |= B43_PIO_TXCTL_EOF;
	b43_piotx_write16(q, B43_PIO_TXCTL, ctl);
}

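/* Push a data buffer to the 4-byte wide (core rev >= 8) TX FIFO.
 * Up to three trailing bytes are packed into a final word with only
 * the valid byte-enable bits set. Returns the updated control word. */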
static u32 tx_write_4byte_queue(struct b43_pio_txqueue *q,
				u32 ctl,
				const void *_data,
				unsigned int data_len)
{
	struct b43_wldev *dev = q->dev;
	const u8 *data = _data;

	ctl |= B43_PIO8_TXCTL_0_7 | B43_PIO8_TXCTL_8_15 |
	       B43_PIO8_TXCTL_16_23 | B43_PIO8_TXCTL_24_31;
	b43_piotx_write32(q, B43_PIO8_TXCTL, ctl);

	ssb_block_write(dev->dev, data, (data_len & ~3),
			q->mmio_base + B43_PIO8_TXDATA,
			sizeof(u32));
	if (data_len & 3) {
		u32 value = 0;

		/* Write the last few bytes. */
		ctl &= ~(B43_PIO8_TXCTL_8_15 | B43_PIO8_TXCTL_16_23 |
			 B43_PIO8_TXCTL_24_31);
		data = &(data[data_len - 1]);
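		/* The cases below intentionally fall through, assembling
		 * the tail bytes from last to first. */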
		switch (data_len & 3) {
		case 3:
			ctl |= B43_PIO8_TXCTL_16_23;
			value |= (u32)(*data) << 16;
			data--;
		case 2:
			ctl |= B43_PIO8_TXCTL_8_15;
			value |= (u32)(*data) << 8;
			data--;
		case 1:
			value |= (u32)(*data);
		}
		b43_piotx_write32(q, B43_PIO8_TXCTL, ctl);
		b43_piotx_write32(q, B43_PIO8_TXDATA, value);
	}

	return ctl;
}

static void pio_tx_frame_4byte_queue(struct b43_pio_txpacket *pack,
				     const u8 *hdr, unsigned int hdrlen)
{
	struct b43_pio_txqueue *q = pack->queue;
	const char *frame = pack->skb->data;
	unsigned int frame_len = pack->skb->len;
	u32 ctl;

	ctl = b43_piotx_read32(q, B43_PIO8_TXCTL);
	ctl |= B43_PIO8_TXCTL_FREADY;
	ctl &= ~B43_PIO8_TXCTL_EOF;

	/* Transfer the header data. */
	ctl = tx_write_4byte_queue(q, ctl, hdr, hdrlen);
	/* Transfer the frame data. */
	ctl = tx_write_4byte_queue(q, ctl, frame, frame_len);

	ctl |= B43_PIO8_TXCTL_EOF;
	b43_piotx_write32(q, B43_PIO_TXCTL, ctl);
}

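/* Write one frame (b43 TX header plus 802.11 frame data) into the
 * queue's TX FIFO and consume one packet slot until the TX status
 * report for it arrives. */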
static int pio_tx_frame(struct b43_pio_txqueue *q,
			struct sk_buff *skb)
{
	struct b43_pio_txpacket *pack;
	struct b43_txhdr txhdr;
	u16 cookie;
	int err;
	unsigned int hdrlen;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	B43_WARN_ON(list_empty(&q->packets_list));
	pack = list_entry(q->packets_list.next,
			  struct b43_pio_txpacket, list);

	cookie = generate_cookie(q, pack);
	hdrlen = b43_txhdr_size(q->dev);
	err = b43_generate_txhdr(q->dev, (u8 *)&txhdr, skb,
				 info, cookie);
	if (err)
		return err;

	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/* Tell the firmware about the cookie of the last
		 * mcast frame, so it can clear the more-data bit in it. */
		b43_shm_write16(q->dev, B43_SHM_SHARED,
				B43_SHM_SH_MCASTCOOKIE, cookie);
	}

	pack->skb = skb;
	if (q->rev >= 8)
		pio_tx_frame_4byte_queue(pack, (const u8 *)&txhdr, hdrlen);
	else
		pio_tx_frame_2byte_queue(pack, (const u8 *)&txhdr, hdrlen);

	/* Remove it from the list of available packet slots.
	 * It will be put back when we receive the status report. */
	list_del(&pack->list);

	/* Update the queue statistics. */
	q->buffer_used += roundup(skb->len + hdrlen, 4);
	q->free_packet_slots -= 1;

	return 0;
}

int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
{
	struct b43_pio_txqueue *q;
	struct ieee80211_hdr *hdr;
	unsigned int hdrlen, total_len;
	int err = 0;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	hdr = (struct ieee80211_hdr *)skb->data;

	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/* The multicast queue will be sent after the DTIM. */
		q = dev->pio.tx_queue_mcast;
		/* Set the frame More-Data bit. Ucode will clear it
		 * for us on the last frame. */
		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	} else {
		/* Decide by priority where to put this frame. */
		q = select_queue_by_priority(dev, skb_get_queue_mapping(skb));
	}

	hdrlen = b43_txhdr_size(dev);
	total_len = roundup(skb->len + hdrlen, 4);

	if (unlikely(total_len > q->buffer_size)) {
		err = -ENOBUFS;
		b43dbg(dev->wl, "PIO: TX packet longer than queue.\n");
		goto out;
	}
	if (unlikely(q->free_packet_slots == 0)) {
		err = -ENOBUFS;
		b43warn(dev->wl, "PIO: TX packet overflow.\n");
		goto out;
	}
	B43_WARN_ON(q->buffer_used > q->buffer_size);

	if (total_len > (q->buffer_size - q->buffer_used)) {
		/* Not enough memory on the queue. */
		err = -EBUSY;
		ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
		q->stopped = 1;
		goto out;
	}

	/* Assign the mac80211 queue number to this PIO queue (if not already
	 * done) so TX status handling can use it. The mac80211-queue to
	 * b43-queue mapping is static, so we don't need to store it per frame. */
	q->queue_prio = skb_get_queue_mapping(skb);

	err = pio_tx_frame(q, skb);
	if (unlikely(err == -ENOKEY)) {
		/* Drop this packet, as we don't have the encryption key
		 * anymore and must not transmit it unencrypted. */
		dev_kfree_skb_any(skb);
		err = 0;
		goto out;
	}
	if (unlikely(err)) {
		b43err(dev->wl, "PIO transmission failure\n");
		goto out;
	}
	q->nr_tx_packets++;

	B43_WARN_ON(q->buffer_used > q->buffer_size);
	if (((q->buffer_size - q->buffer_used) < roundup(2 + 2 + 6, 4)) ||
	    (q->free_packet_slots == 0)) {
		/* The queue is full. */
		ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
		q->stopped = 1;
	}

out:
	return err;
}

void b43_pio_handle_txstatus(struct b43_wldev *dev,
			     const struct b43_txstatus *status)
{
	struct b43_pio_txqueue *q;
	struct b43_pio_txpacket *pack = NULL;
	unsigned int total_len;
	struct ieee80211_tx_info *info;

	q = parse_cookie(dev, status->cookie, &pack);
	if (unlikely(!q))
		return;
	B43_WARN_ON(!pack);

	info = IEEE80211_SKB_CB(pack->skb);

	b43_fill_txstatus_report(dev, info, status);

	total_len = pack->skb->len + b43_txhdr_size(dev);
	total_len = roundup(total_len, 4);
	q->buffer_used -= total_len;
	q->free_packet_slots += 1;

	ieee80211_tx_status_irqsafe(dev->wl->hw, pack->skb);
	pack->skb = NULL;
	list_add(&pack->list, &q->packets_list);

	if (q->stopped) {
		ieee80211_wake_queue(dev->wl->hw, q->queue_prio);
		q->stopped = 0;
	}
}

void b43_pio_get_tx_stats(struct b43_wldev *dev,
			  struct ieee80211_tx_queue_stats *stats)
{
	const int nr_queues = dev->wl->hw->queues;
	struct b43_pio_txqueue *q;
	int i;

	for (i = 0; i < nr_queues; i++) {
		q = select_queue_by_priority(dev, i);

		stats[i].len = B43_PIO_MAX_NR_TXPACKETS - q->free_packet_slots;
		stats[i].limit = B43_PIO_MAX_NR_TXPACKETS;
		stats[i].count = q->nr_tx_packets;
	}
}

/* Returns whether we should fetch another frame. */
static bool pio_rx_frame(struct b43_pio_rxqueue *q)
{
	struct b43_wldev *dev = q->dev;
	struct b43_rxhdr_fw4 rxhdr;
	u16 len;
	u32 macstat;
	unsigned int i, padding;
	struct sk_buff *skb;
	const char *err_msg = NULL;

	memset(&rxhdr, 0, sizeof(rxhdr));

	/* Check if we have data and wait for it to get ready. */
	if (q->rev >= 8) {
		u32 ctl;

		ctl = b43_piorx_read32(q, B43_PIO8_RXCTL);
		if (!(ctl & B43_PIO8_RXCTL_FRAMERDY))
			return 0;
		b43_piorx_write32(q, B43_PIO8_RXCTL,
				  B43_PIO8_RXCTL_FRAMERDY);
		for (i = 0; i < 10; i++) {
			ctl = b43_piorx_read32(q, B43_PIO8_RXCTL);
			if (ctl & B43_PIO8_RXCTL_DATARDY)
				goto data_ready;
			udelay(10);
		}
	} else {
		u16 ctl;

		ctl = b43_piorx_read16(q, B43_PIO_RXCTL);
		if (!(ctl & B43_PIO_RXCTL_FRAMERDY))
			return 0;
		b43_piorx_write16(q, B43_PIO_RXCTL,
				  B43_PIO_RXCTL_FRAMERDY);
		for (i = 0; i < 10; i++) {
			ctl = b43_piorx_read16(q, B43_PIO_RXCTL);
			if (ctl & B43_PIO_RXCTL_DATARDY)
				goto data_ready;
			udelay(10);
		}
	}
	b43dbg(q->dev->wl, "PIO RX timed out\n");
	return 1;
data_ready:

	/* Get the preamble (RX header) */
	if (q->rev >= 8) {
		ssb_block_read(dev->dev, &rxhdr, sizeof(rxhdr),
			       q->mmio_base + B43_PIO8_RXDATA,
			       sizeof(u32));
	} else {
		ssb_block_read(dev->dev, &rxhdr, sizeof(rxhdr),
			       q->mmio_base + B43_PIO_RXDATA,
			       sizeof(u16));
	}
	/* Sanity checks. */
	len = le16_to_cpu(rxhdr.frame_len);
	if (unlikely(len > 0x700)) {
		err_msg = "len > 0x700";
		goto rx_error;
	}
	if (unlikely(len == 0)) {
		err_msg = "len == 0";
		goto rx_error;
	}

	macstat = le32_to_cpu(rxhdr.mac_status);
	if (macstat & B43_RX_MAC_FCSERR) {
		if (!(q->dev->wl->filter_flags & FIF_FCSFAIL)) {
			/* Drop frames with failed FCS. */
			err_msg = "Frame FCS error";
			goto rx_error;
		}
	}

	/* We always pad 2 bytes, as that's what upstream code expects
	 * due to the RX-header being 30 bytes. In case the frame is
	 * unaligned, we pad another 2 bytes. */
	padding = (macstat & B43_RX_MAC_PADDING) ? 2 : 0;
	skb = dev_alloc_skb(len + padding + 2);
	if (unlikely(!skb)) {
		err_msg = "Out of memory";
		goto rx_error;
	}
	skb_reserve(skb, 2);
	skb_put(skb, len + padding);
	if (q->rev >= 8) {
		ssb_block_read(dev->dev, skb->data + padding, (len & ~3),
			       q->mmio_base + B43_PIO8_RXDATA,
			       sizeof(u32));
		if (len & 3) {
			u32 value;
			char *data;

			/* Read the last few bytes. */
			value = b43_piorx_read32(q, B43_PIO8_RXDATA);
			data = &(skb->data[len + padding - 1]);
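			/* The cases below intentionally fall through,
			 * storing the tail bytes from last to first. */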
			switch (len & 3) {
			case 3:
				*data = (value >> 16);
				data--;
			case 2:
				*data = (value >> 8);
				data--;
			case 1:
				*data = value;
			}
		}
	} else {
		ssb_block_read(dev->dev, skb->data + padding, (len & ~1),
			       q->mmio_base + B43_PIO_RXDATA,
			       sizeof(u16));
		if (len & 1) {
			u16 value;

			/* Read the last byte. */
			value = b43_piorx_read16(q, B43_PIO_RXDATA);
			skb->data[len + padding - 1] = value;
		}
	}

	b43_rx(q->dev, skb, &rxhdr);

	return 1;

rx_error:
	if (err_msg)
		b43dbg(q->dev->wl, "PIO RX error: %s\n", err_msg);
	b43_piorx_write16(q, B43_PIO_RXCTL, B43_PIO_RXCTL_DATARDY);
	return 1;
}

/* RX workqueue. We can sleep, yay! */
static void b43_pio_rx_work(struct work_struct *work)
{
	struct b43_pio_rxqueue *q = container_of(work, struct b43_pio_rxqueue,
						 rx_work);
	unsigned int budget = 50;
	bool stop;

	do {
		mutex_lock(&q->dev->wl->mutex);
		stop = (pio_rx_frame(q) == 0);
		mutex_unlock(&q->dev->wl->mutex);
		cond_resched();
		if (stop)
			break;
	} while (--budget);
}

/* Called with IRQs disabled. */
void b43_pio_rx(struct b43_pio_rxqueue *q)
{
	/* Due to latency issues we must run the RX path in
	 * a workqueue to be able to schedule between packets. */
	ieee80211_queue_work(q->dev->wl->hw, &q->rx_work);
}

static void b43_pio_tx_suspend_queue(struct b43_pio_txqueue *q)
{
	if (q->rev >= 8) {
		b43_piotx_write32(q, B43_PIO8_TXCTL,
				  b43_piotx_read32(q, B43_PIO8_TXCTL)
				  | B43_PIO8_TXCTL_SUSPREQ);
	} else {
		b43_piotx_write16(q, B43_PIO_TXCTL,
				  b43_piotx_read16(q, B43_PIO_TXCTL)
				  | B43_PIO_TXCTL_SUSPREQ);
	}
}

static void b43_pio_tx_resume_queue(struct b43_pio_txqueue *q)
{
	if (q->rev >= 8) {
		b43_piotx_write32(q, B43_PIO8_TXCTL,
				  b43_piotx_read32(q, B43_PIO8_TXCTL)
				  & ~B43_PIO8_TXCTL_SUSPREQ);
	} else {
		b43_piotx_write16(q, B43_PIO_TXCTL,
				  b43_piotx_read16(q, B43_PIO_TXCTL)
				  & ~B43_PIO_TXCTL_SUSPREQ);
	}
}

void b43_pio_tx_suspend(struct b43_wldev *dev)
{
	b43_power_saving_ctl_bits(dev, B43_PS_AWAKE);
	b43_pio_tx_suspend_queue(dev->pio.tx_queue_AC_BK);
	b43_pio_tx_suspend_queue(dev->pio.tx_queue_AC_BE);
	b43_pio_tx_suspend_queue(dev->pio.tx_queue_AC_VI);
	b43_pio_tx_suspend_queue(dev->pio.tx_queue_AC_VO);
	b43_pio_tx_suspend_queue(dev->pio.tx_queue_mcast);
}

void b43_pio_tx_resume(struct b43_wldev *dev)
{
	b43_pio_tx_resume_queue(dev->pio.tx_queue_mcast);
	b43_pio_tx_resume_queue(dev->pio.tx_queue_AC_VO);
	b43_pio_tx_resume_queue(dev->pio.tx_queue_AC_VI);
	b43_pio_tx_resume_queue(dev->pio.tx_queue_AC_BE);
	b43_pio_tx_resume_queue(dev->pio.tx_queue_AC_BK);
	b43_power_saving_ctl_bits(dev, 0);
}