/*
	Copyright (C) 2004 - 2008 rt2x00 SourceForge Project
	<http://rt2x00.serialmonkey.com>

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, write to the
	Free Software Foundation, Inc.,
	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
	Module: rt2x00usb
	Abstract: rt2x00 generic usb device routines.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/bug.h>

#include "rt2x00.h"
#include "rt2x00usb.h"

/*
 * Interfacing with the HW.
 */
int rt2x00usb_vendor_request(struct rt2x00_dev *rt2x00dev,
			     const u8 request, const u8 requesttype,
			     const u16 offset, const u16 value,
			     void *buffer, const u16 buffer_length,
			     const int timeout)
{
	struct usb_device *usb_dev = rt2x00dev_usb_dev(rt2x00dev);
	int status;
	unsigned int i;
	unsigned int pipe =
	    (requesttype == USB_VENDOR_REQUEST_IN) ?
	    usb_rcvctrlpipe(usb_dev, 0) : usb_sndctrlpipe(usb_dev, 0);

	for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
		status = usb_control_msg(usb_dev, pipe, request, requesttype,
					 value, offset, buffer, buffer_length,
					 timeout);
		if (status >= 0)
			return 0;

		/*
		 * Check for errors
		 * -ENODEV: Device has disappeared, no point continuing.
		 * All other errors: Try again.
		 */
		else if (status == -ENODEV)
			break;
	}

	ERROR(rt2x00dev,
	      "Vendor Request 0x%02x failed for offset 0x%04x with error %d.\n",
	      request, offset, status);

	return status;
}
EXPORT_SYMBOL_GPL(rt2x00usb_vendor_request);

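/*
 * Locked variant of rt2x00usb_vendor_request_buff(): the caller must
 * already hold usb_cache_mutex, which protects the CSR cache buffer.
 */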
int rt2x00usb_vendor_req_buff_lock(struct rt2x00_dev *rt2x00dev,
				   const u8 request, const u8 requesttype,
				   const u16 offset, void *buffer,
				   const u16 buffer_length, const int timeout)
{
	int status;

	BUG_ON(!mutex_is_locked(&rt2x00dev->usb_cache_mutex));

	/*
	 * Check for Cache availability.
	 */
	if (unlikely(!rt2x00dev->csr.cache || buffer_length > CSR_CACHE_SIZE)) {
		ERROR(rt2x00dev, "CSR cache not available.\n");
		return -ENOMEM;
	}

	if (requesttype == USB_VENDOR_REQUEST_OUT)
		memcpy(rt2x00dev->csr.cache, buffer, buffer_length);

	status = rt2x00usb_vendor_request(rt2x00dev, request, requesttype,
					  offset, 0, rt2x00dev->csr.cache,
					  buffer_length, timeout);

	if (!status && requesttype == USB_VENDOR_REQUEST_IN)
		memcpy(buffer, rt2x00dev->csr.cache, buffer_length);

	return status;
}
EXPORT_SYMBOL_GPL(rt2x00usb_vendor_req_buff_lock);

int rt2x00usb_vendor_request_buff(struct rt2x00_dev *rt2x00dev,
				  const u8 request, const u8 requesttype,
				  const u16 offset, void *buffer,
				  const u16 buffer_length, const int timeout)
{
	int status;

	mutex_lock(&rt2x00dev->usb_cache_mutex);

	status = rt2x00usb_vendor_req_buff_lock(rt2x00dev, request,
						requesttype, offset, buffer,
						buffer_length, timeout);

	mutex_unlock(&rt2x00dev->usb_cache_mutex);

	return status;
}
EXPORT_SYMBOL_GPL(rt2x00usb_vendor_request_buff);

/*
 * TX data handlers.
 */
static void rt2x00usb_interrupt_txdone(struct urb *urb)
{
	struct queue_entry *entry = (struct queue_entry *)urb->context;
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct txdone_entry_desc txdesc;
	enum data_queue_qid qid = skb_get_queue_mapping(entry->skb);

	if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags) ||
	    !__test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
		return;

	/*
	 * Remove the descriptor data from the buffer.
	 */
	skb_pull(entry->skb, entry->queue->desc_size);

	/*
	 * Obtain the status about this packet.
	 * Note that when the status is 0 it does not mean the
	 * frame was sent out correctly. It only means the frame
	 * was successfully pushed to the hardware; we have no
	 * way to determine the transmission status right now.
	 * (Only indirectly by looking at the failed TX counters
	 * in the register).
	 */
	if (!urb->status)
		__set_bit(TXDONE_UNKNOWN, &txdesc.flags);
	else
		__set_bit(TXDONE_FAILURE, &txdesc.flags);
	txdesc.retry = 0;

	rt2x00lib_txdone(entry, &txdesc);

	/*
	 * Make this entry available for reuse.
	 */
	entry->flags = 0;
	rt2x00queue_index_inc(entry->queue, Q_INDEX_DONE);

	/*
	 * If the data queue was full before the txdone handler,
	 * we must make sure the packet queue in the mac80211 stack
	 * is re-enabled when the txdone handler has finished.
	 */
	if (!rt2x00queue_full(entry->queue))
		ieee80211_wake_queue(rt2x00dev->hw, qid);
}

int rt2x00usb_write_tx_data(struct queue_entry *entry)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct usb_device *usb_dev = rt2x00dev_usb_dev(rt2x00dev);
	struct queue_entry_priv_usb *entry_priv = entry->priv_data;
	struct skb_frame_desc *skbdesc;
	u32 length;

	/*
	 * Add the descriptor in front of the skb.
	 */
	skb_push(entry->skb, entry->queue->desc_size);
	memset(entry->skb->data, 0, entry->queue->desc_size);

	/*
	 * Fill in skb descriptor
	 */
	skbdesc = get_skb_frame_desc(entry->skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->data = entry->skb->data + entry->queue->desc_size;
	skbdesc->data_len = entry->skb->len - entry->queue->desc_size;
	skbdesc->desc = entry->skb->data;
	skbdesc->desc_len = entry->queue->desc_size;
	skbdesc->entry = entry;

	/*
	 * USB devices cannot blindly pass the skb->len as the
	 * length of the data to usb_fill_bulk_urb. Pass the skb
	 * to the driver to determine what the length should be.
	 */
	length = rt2x00dev->ops->lib->get_tx_data_len(rt2x00dev, entry->skb);

	usb_fill_bulk_urb(entry_priv->urb, usb_dev,
			  usb_sndbulkpipe(usb_dev, 1),
			  entry->skb->data, length,
			  rt2x00usb_interrupt_txdone, entry);

	return 0;
}
EXPORT_SYMBOL_GPL(rt2x00usb_write_tx_data);

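/*
 * Kick a single TX entry: submit its URB, but only when the entry is
 * still marked as pending, so that no entry is submitted twice.
 */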
static inline void rt2x00usb_kick_tx_entry(struct queue_entry *entry)
{
	struct queue_entry_priv_usb *entry_priv = entry->priv_data;

	if (__test_and_clear_bit(ENTRY_DATA_PENDING, &entry->flags))
		usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
}

void rt2x00usb_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
			     const enum data_queue_qid qid)
{
	struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, qid);
	unsigned long irqflags;
	unsigned int index;
	unsigned int index_done;
	unsigned int i;

	/*
	 * Only protect the range we are going to loop over;
	 * if during our loop an extra entry is set to pending,
	 * it should not be kicked during this run, since it
	 * is part of another TX operation.
	 */
	spin_lock_irqsave(&queue->lock, irqflags);
	index = queue->index[Q_INDEX];
	index_done = queue->index[Q_INDEX_DONE];
	spin_unlock_irqrestore(&queue->lock, irqflags);

	/*
	 * Start from the TX done pointer; this guarantees that we will
	 * send out all frames in the correct order.
	 */
	if (index_done < index) {
		for (i = index_done; i < index; i++)
			rt2x00usb_kick_tx_entry(&queue->entries[i]);
	} else {
		for (i = index_done; i < queue->limit; i++)
			rt2x00usb_kick_tx_entry(&queue->entries[i]);

		for (i = 0; i < index; i++)
			rt2x00usb_kick_tx_entry(&queue->entries[i]);
	}
}
EXPORT_SYMBOL_GPL(rt2x00usb_kick_tx_queue);

/*
 * RX data handlers.
 */
static struct sk_buff* rt2x00usb_alloc_rxskb(struct data_queue *queue)
{
	struct sk_buff *skb;
	unsigned int frame_size;
	unsigned int reserved_size;

	/*
	 * The frame size includes the descriptor size, because the
	 * hardware receives the frame directly into the skb.
	 */
	frame_size = queue->data_size + queue->desc_size;

	/*
	 * For the allocation we should keep a few things in mind:
	 * 1) 4byte alignment of 802.11 payload
	 *
	 * For (1) we need at most 4 bytes to guarantee the correct
	 * alignment. We optimize for the fact that the chance that
	 * the 802.11 header_size % 4 == 2 is much bigger than
	 * anything else. However, since we may need to move the
	 * frame up to 3 bytes to the front, we need to preallocate
	 * 6 bytes.
	 */
	reserved_size = 6;

	/*
	 * Allocate skbuffer.
	 */
	skb = dev_alloc_skb(frame_size + reserved_size);
	if (!skb)
		return NULL;

	skb_reserve(skb, reserved_size);
	skb_put(skb, frame_size);

	return skb;
}

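/*
 * RX URB completion handler: validate the URB, let the driver fill the
 * RX descriptor, align the 802.11 payload, pass the frame to rt2x00lib
 * and resubmit the URB with a freshly allocated skb.
 */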
static void rt2x00usb_interrupt_rxdone(struct urb *urb)
{
	struct queue_entry *entry = (struct queue_entry *)urb->context;
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct sk_buff *skb;
	struct skb_frame_desc *skbdesc;
	struct rxdone_entry_desc rxdesc;
	unsigned int header_size;
	unsigned int align;

	if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags) ||
	    !test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
		return;

	/*
	 * Check if the received data is simply too small
	 * to be actually valid, or if the urb is signaling
	 * a problem.
	 */
	if (urb->actual_length < entry->queue->desc_size || urb->status)
		goto skip_entry;

	/*
	 * Fill in skb descriptor
	 */
	skbdesc = get_skb_frame_desc(entry->skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = entry;

	memset(&rxdesc, 0, sizeof(rxdesc));
	rt2x00dev->ops->lib->fill_rxdone(entry, &rxdesc);

	header_size = ieee80211_get_hdrlen_from_skb(entry->skb);

	/*
	 * The data behind the ieee80211 header must be
	 * aligned on a 4 byte boundary. We already reserved
	 * 2 bytes for the header_size % 4 == 2 optimization.
	 * To determine the number of bytes by which the data
	 * must be moved to the left, we must add these
	 * 2 bytes to the header_size.
	 */
	align = (header_size + 2) % 4;

	if (align) {
		skb_push(entry->skb, align);
		/* Move entire frame in 1 command */
		memmove(entry->skb->data, entry->skb->data + align,
			rxdesc.size);
	}

	/* Update data pointers, trim buffer to correct size */
	skbdesc->data = entry->skb->data;
	skb_trim(entry->skb, rxdesc.size);

	/*
	 * Allocate a new sk buffer to replace the current one.
	 * If allocation fails, we should drop the current frame
	 * so we can recycle the existing sk buffer for the new frame.
	 */
	skb = rt2x00usb_alloc_rxskb(entry->queue);
	if (!skb)
		goto skip_entry;

	/*
	 * Send the frame to rt2x00lib for further processing.
	 */
	rt2x00lib_rxdone(entry, &rxdesc);

	/*
	 * Replace current entry's skb with the newly allocated one,
	 * and reinitialize the urb.
	 */
	entry->skb = skb;
	urb->transfer_buffer = entry->skb->data;
	urb->transfer_buffer_length = entry->skb->len;

skip_entry:
	if (test_bit(DEVICE_ENABLED_RADIO, &entry->queue->rt2x00dev->flags)) {
		__set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
		usb_submit_urb(urb, GFP_ATOMIC);
	}

	rt2x00queue_index_inc(entry->queue, Q_INDEX);
}

/*
 * Radio handlers
 */
void rt2x00usb_disable_radio(struct rt2x00_dev *rt2x00dev)
{
	struct queue_entry_priv_usb *entry_priv;
	struct queue_entry_priv_usb_bcn *bcn_priv;
	unsigned int i;

	rt2x00usb_vendor_request_sw(rt2x00dev, USB_RX_CONTROL, 0, 0,
				    REGISTER_TIMEOUT);

	/*
	 * Cancel all queues.
	 */
	for (i = 0; i < rt2x00dev->rx->limit; i++) {
		entry_priv = rt2x00dev->rx->entries[i].priv_data;
		usb_kill_urb(entry_priv->urb);
	}

	/*
	 * Kill guardian urb.
	 */
	for (i = 0; i < rt2x00dev->bcn->limit; i++) {
		bcn_priv = rt2x00dev->bcn->entries[i].priv_data;
		if (bcn_priv->guardian_urb)
			usb_kill_urb(bcn_priv->guardian_urb);
	}
}
EXPORT_SYMBOL_GPL(rt2x00usb_disable_radio);

/*
 * Device initialization handlers.
 */
void rt2x00usb_init_rxentry(struct rt2x00_dev *rt2x00dev,
			    struct queue_entry *entry)
{
	struct usb_device *usb_dev = rt2x00dev_usb_dev(rt2x00dev);
	struct queue_entry_priv_usb *entry_priv = entry->priv_data;

	usb_fill_bulk_urb(entry_priv->urb, usb_dev,
			  usb_rcvbulkpipe(usb_dev, 1),
			  entry->skb->data, entry->skb->len,
			  rt2x00usb_interrupt_rxdone, entry);

	__set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
	usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
}
EXPORT_SYMBOL_GPL(rt2x00usb_init_rxentry);

void rt2x00usb_init_txentry(struct rt2x00_dev *rt2x00dev,
			    struct queue_entry *entry)
{
	entry->flags = 0;
}
EXPORT_SYMBOL_GPL(rt2x00usb_init_txentry);

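/*
 * Allocate an URB for every entry in the queue; entries of the beacon
 * queue also get a guardian URB when the driver requires one.
 */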
static int rt2x00usb_alloc_urb(struct rt2x00_dev *rt2x00dev,
			       struct data_queue *queue)
{
	struct queue_entry_priv_usb *entry_priv;
	struct queue_entry_priv_usb_bcn *bcn_priv;
	unsigned int i;

	for (i = 0; i < queue->limit; i++) {
		entry_priv = queue->entries[i].priv_data;
		entry_priv->urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!entry_priv->urb)
			return -ENOMEM;
	}

	/*
	 * If this is not the beacon queue or
	 * no guardian byte was required for the beacon,
	 * then we are done.
	 */
	if (rt2x00dev->bcn != queue ||
	    !test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags))
		return 0;

	for (i = 0; i < queue->limit; i++) {
		bcn_priv = queue->entries[i].priv_data;
		bcn_priv->guardian_urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!bcn_priv->guardian_urb)
			return -ENOMEM;
	}

	return 0;
}

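/*
 * Kill and free all URBs (including guardian URBs) of a queue and
 * release any skbs still attached to its entries.
 */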
static void rt2x00usb_free_urb(struct rt2x00_dev *rt2x00dev,
			       struct data_queue *queue)
{
	struct queue_entry_priv_usb *entry_priv;
	struct queue_entry_priv_usb_bcn *bcn_priv;
	unsigned int i;

	if (!queue->entries)
		return;

	for (i = 0; i < queue->limit; i++) {
		entry_priv = queue->entries[i].priv_data;
		usb_kill_urb(entry_priv->urb);
		usb_free_urb(entry_priv->urb);
		if (queue->entries[i].skb)
			kfree_skb(queue->entries[i].skb);
	}

	/*
	 * If this is not the beacon queue or
	 * no guardian byte was required for the beacon,
	 * then we are done.
	 */
	if (rt2x00dev->bcn != queue ||
	    !test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags))
		return;

	for (i = 0; i < queue->limit; i++) {
		bcn_priv = queue->entries[i].priv_data;
		usb_kill_urb(bcn_priv->guardian_urb);
		usb_free_urb(bcn_priv->guardian_urb);
	}
}

int rt2x00usb_initialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	struct sk_buff *skb;
	unsigned int entry_size;
	unsigned int i;
	int uninitialized_var(status);

	/*
	 * Allocate DMA
	 */
	queue_for_each(rt2x00dev, queue) {
		status = rt2x00usb_alloc_urb(rt2x00dev, queue);
		if (status)
			goto exit;
	}

	/*
	 * For the RX queue, skbs must be allocated.
	 */
	entry_size = rt2x00dev->rx->data_size + rt2x00dev->rx->desc_size;
	for (i = 0; i < rt2x00dev->rx->limit; i++) {
		skb = rt2x00usb_alloc_rxskb(rt2x00dev->rx);
		if (!skb)
			goto exit;

		rt2x00dev->rx->entries[i].skb = skb;
	}

	return 0;

exit:
	rt2x00usb_uninitialize(rt2x00dev);

	return status;
}
EXPORT_SYMBOL_GPL(rt2x00usb_initialize);

void rt2x00usb_uninitialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	queue_for_each(rt2x00dev, queue)
		rt2x00usb_free_urb(rt2x00dev, queue);
}
EXPORT_SYMBOL_GPL(rt2x00usb_uninitialize);

/*
 * USB driver handlers.
 */
static void rt2x00usb_free_reg(struct rt2x00_dev *rt2x00dev)
{
	kfree(rt2x00dev->rf);
	rt2x00dev->rf = NULL;

	kfree(rt2x00dev->eeprom);
	rt2x00dev->eeprom = NULL;

	kfree(rt2x00dev->csr.cache);
	rt2x00dev->csr.cache = NULL;
}

static int rt2x00usb_alloc_reg(struct rt2x00_dev *rt2x00dev)
{
	rt2x00dev->csr.cache = kzalloc(CSR_CACHE_SIZE, GFP_KERNEL);
	if (!rt2x00dev->csr.cache)
		goto exit;

	rt2x00dev->eeprom = kzalloc(rt2x00dev->ops->eeprom_size, GFP_KERNEL);
	if (!rt2x00dev->eeprom)
		goto exit;

	rt2x00dev->rf = kzalloc(rt2x00dev->ops->rf_size, GFP_KERNEL);
	if (!rt2x00dev->rf)
		goto exit;

	return 0;

exit:
	ERROR_PROBE("Failed to allocate registers.\n");

	rt2x00usb_free_reg(rt2x00dev);

	return -ENOMEM;
}

int rt2x00usb_probe(struct usb_interface *usb_intf,
		    const struct usb_device_id *id)
{
	struct usb_device *usb_dev = interface_to_usbdev(usb_intf);
	struct rt2x00_ops *ops = (struct rt2x00_ops *)id->driver_info;
	struct ieee80211_hw *hw;
	struct rt2x00_dev *rt2x00dev;
	int retval;

	usb_dev = usb_get_dev(usb_dev);

	hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw);
	if (!hw) {
		ERROR_PROBE("Failed to allocate hardware.\n");
		retval = -ENOMEM;
		goto exit_put_device;
	}

	usb_set_intfdata(usb_intf, hw);

	rt2x00dev = hw->priv;
	rt2x00dev->dev = usb_intf;
	rt2x00dev->ops = ops;
	rt2x00dev->hw = hw;
	mutex_init(&rt2x00dev->usb_cache_mutex);

	rt2x00dev->usb_maxpacket =
	    usb_maxpacket(usb_dev, usb_sndbulkpipe(usb_dev, 1), 1);
	if (!rt2x00dev->usb_maxpacket)
		rt2x00dev->usb_maxpacket = 1;

	retval = rt2x00usb_alloc_reg(rt2x00dev);
	if (retval)
		goto exit_free_device;

	retval = rt2x00lib_probe_dev(rt2x00dev);
	if (retval)
		goto exit_free_reg;

	return 0;

exit_free_reg:
	rt2x00usb_free_reg(rt2x00dev);

exit_free_device:
	ieee80211_free_hw(hw);

exit_put_device:
	usb_put_dev(usb_dev);

	usb_set_intfdata(usb_intf, NULL);

	return retval;
}
EXPORT_SYMBOL_GPL(rt2x00usb_probe);

void rt2x00usb_disconnect(struct usb_interface *usb_intf)
{
	struct ieee80211_hw *hw = usb_get_intfdata(usb_intf);
	struct rt2x00_dev *rt2x00dev = hw->priv;

	/*
	 * Free all allocated data.
	 */
	rt2x00lib_remove_dev(rt2x00dev);
	rt2x00usb_free_reg(rt2x00dev);
	ieee80211_free_hw(hw);

	/*
	 * Free the USB device data.
	 */
	usb_set_intfdata(usb_intf, NULL);
	usb_put_dev(interface_to_usbdev(usb_intf));
}
EXPORT_SYMBOL_GPL(rt2x00usb_disconnect);

#ifdef CONFIG_PM
int rt2x00usb_suspend(struct usb_interface *usb_intf, pm_message_t state)
{
	struct ieee80211_hw *hw = usb_get_intfdata(usb_intf);
	struct rt2x00_dev *rt2x00dev = hw->priv;
	int retval;

	retval = rt2x00lib_suspend(rt2x00dev, state);
	if (retval)
		return retval;

	rt2x00usb_free_reg(rt2x00dev);

	/*
	 * Decrease usbdev refcount.
	 */
	usb_put_dev(interface_to_usbdev(usb_intf));

	return 0;
}
EXPORT_SYMBOL_GPL(rt2x00usb_suspend);

int rt2x00usb_resume(struct usb_interface *usb_intf)
{
	struct ieee80211_hw *hw = usb_get_intfdata(usb_intf);
	struct rt2x00_dev *rt2x00dev = hw->priv;
	int retval;

	usb_get_dev(interface_to_usbdev(usb_intf));

	retval = rt2x00usb_alloc_reg(rt2x00dev);
	if (retval)
		return retval;

	retval = rt2x00lib_resume(rt2x00dev);
	if (retval)
		goto exit_free_reg;

	return 0;

exit_free_reg:
	rt2x00usb_free_reg(rt2x00dev);

	return retval;
}
EXPORT_SYMBOL_GPL(rt2x00usb_resume);
#endif /* CONFIG_PM */

/*
 * rt2x00usb module information.
 */
MODULE_AUTHOR(DRV_PROJECT);
MODULE_VERSION(DRV_VERSION);
MODULE_DESCRIPTION("rt2x00 usb library");
MODULE_LICENSE("GPL");