/*
 * Greybus "AP" USB driver for "ES2" controller chips
 *
 * Copyright 2014-2015 Google Inc.
 * Copyright 2014-2015 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */
#include <linux/kthread.h>
#include <linux/sizes.h>
#include <linux/usb.h>
#include <linux/kfifo.h>
#include <linux/debugfs.h>
#include <asm/unaligned.h>

#include "greybus.h"
#include "kernel_ver.h"
#include "connection.h"

/* Memory sizes for the buffers sent to/from the ES1 controller */
#define ES1_GBUF_MSG_SIZE_MAX	2048

static const struct usb_device_id id_table[] = {
	/* Made up numbers for the SVC USB Bridge in ES2 */
	{ USB_DEVICE(0xffff, 0x0002) },
	{ },
};
MODULE_DEVICE_TABLE(usb, id_table);

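/*
 * APB1 debug log support: log data is fetched from the bridge over the
 * control endpoint and exposed to user space through a debugfs file.
 */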
#define APB1_LOG_SIZE		SZ_16K
static struct dentry *apb1_log_dentry;
static struct dentry *apb1_log_enable_dentry;
static struct task_struct *apb1_log_task;
static DEFINE_KFIFO(apb1_log_fifo, char, APB1_LOG_SIZE);

/* Number of CPorts present on the USB bridge */
#define CPORT_COUNT		44

/* Number of bulk in/bulk out endpoint pairs */
#define NUM_BULKS		7

/*
 * Number of CPort IN urbs in flight at any point in time.
 * Adjust if we are having stalls in the USB buffer due to not enough urbs in
 * flight.
 */
#define NUM_CPORT_IN_URB	4

/*
 * Number of CPort OUT urbs in flight at any point in time.
 * Adjust if we get messages saying we are out of urbs in the system log.
 */
#define NUM_CPORT_OUT_URB	(8 * NUM_BULKS)

/* vendor request AP message */
#define REQUEST_SVC		0x01

/* vendor request APB1 log */
#define REQUEST_LOG		0x02

/* vendor request to map a cport to bulk in and bulk out endpoints */
#define REQUEST_EP_MAPPING	0x03

/*
 * @endpoint: bulk in endpoint for CPort data
 * @urb: array of urbs for the CPort in messages
 * @buffer: array of buffers for the @cport_in_urb urbs
 */
struct es1_cport_in {
	__u8 endpoint;
	struct urb *urb[NUM_CPORT_IN_URB];
	u8 *buffer[NUM_CPORT_IN_URB];
};

/*
 * @endpoint: bulk out endpoint for CPort data
 */
struct es1_cport_out {
	__u8 endpoint;
};

/**
 * es1_ap_dev - ES1 USB Bridge to AP structure
 * @usb_dev: pointer to our USB device
 * @usb_intf: pointer to the USB interface we are bound to
 * @hd: pointer to our greybus_host_device structure
 * @control_endpoint: endpoint to send data to SVC
 * @cport_in: endpoint, urbs and buffer for cport in messages
 * @cport_out: endpoint for cport out messages
 * @cport_out_urb: array of urbs for the CPort out messages
 * @cport_out_urb_busy: array of flags to see if the @cport_out_urb is busy or
 *			not.
 * @cport_out_urb_cancelled: array of flags indicating whether the
 *			corresponding @cport_out_urb is being cancelled
 * @cport_out_urb_lock: locks the @cport_out_urb_busy "list"
 * @cport_to_ep: maps a CPort id to the bulk endpoint set it is routed
 *			through (0 selects the default set)
 */
struct es1_ap_dev {
	struct usb_device *usb_dev;
	struct usb_interface *usb_intf;
	struct greybus_host_device *hd;

	__u8 control_endpoint;

	struct es1_cport_in cport_in[NUM_BULKS];
	struct es1_cport_out cport_out[NUM_BULKS];
	struct urb *cport_out_urb[NUM_CPORT_OUT_URB];
	bool cport_out_urb_busy[NUM_CPORT_OUT_URB];
	bool cport_out_urb_cancelled[NUM_CPORT_OUT_URB];
	spinlock_t cport_out_urb_lock;

	int cport_to_ep[CPORT_COUNT];
};

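/* Payload of the REQUEST_EP_MAPPING vendor control message */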
struct cport_to_ep {
	__le16 cport_id;
	__u8 endpoint_in;
	__u8 endpoint_out;
};

static inline struct es1_ap_dev *hd_to_es1(struct greybus_host_device *hd)
{
	return (struct es1_ap_dev *)&hd->hd_priv;
}

static void cport_out_callback(struct urb *urb);
static void usb_log_enable(struct es1_ap_dev *es1);
static void usb_log_disable(struct es1_ap_dev *es1);

static int cport_to_ep(struct es1_ap_dev *es1, u16 cport_id)
{
	if (cport_id >= es1->hd->num_cports)
		return 0;
	return es1->cport_to_ep[cport_id];
}

#define ES1_TIMEOUT	500	/* 500 ms for the SVC to do something */

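/* Returns nonzero if any CPort is currently mapped to this bulk endpoint set */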
static int ep_in_use(struct es1_ap_dev *es1, int bulk_ep_set)
{
	int i;

	for (i = 0; i < es1->hd->num_cports; i++) {
		if (es1->cport_to_ep[i] == bulk_ep_set)
			return 1;
	}
	return 0;
}

int map_cport_to_ep(struct es1_ap_dev *es1,
				u16 cport_id, int bulk_ep_set)
{
	int retval;
	struct cport_to_ep *cport_to_ep;

	if (bulk_ep_set == 0 || bulk_ep_set >= NUM_BULKS)
		return -EINVAL;
	if (cport_id >= es1->hd->num_cports)
		return -EINVAL;
	if (bulk_ep_set && ep_in_use(es1, bulk_ep_set))
		return -EINVAL;

	cport_to_ep = kmalloc(sizeof(*cport_to_ep), GFP_KERNEL);
	if (!cport_to_ep)
		return -ENOMEM;

	es1->cport_to_ep[cport_id] = bulk_ep_set;
	cport_to_ep->cport_id = cpu_to_le16(cport_id);
	cport_to_ep->endpoint_in = es1->cport_in[bulk_ep_set].endpoint;
	cport_to_ep->endpoint_out = es1->cport_out[bulk_ep_set].endpoint;

	retval = usb_control_msg(es1->usb_dev,
				 usb_sndctrlpipe(es1->usb_dev,
						 es1->control_endpoint),
				 REQUEST_EP_MAPPING,
				 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
				 0x00, 0x00,
				 (char *)cport_to_ep,
				 sizeof(*cport_to_ep),
				 ES1_TIMEOUT);
	if (retval == sizeof(*cport_to_ep))
		retval = 0;
	kfree(cport_to_ep);

	return retval;
}

int unmap_cport(struct es1_ap_dev *es1, u16 cport_id)
{
	return map_cport_to_ep(es1, cport_id, 0);
}

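/*
 * Grab an unused urb from the pre-allocated pool; if none is available,
 * fall back to allocating one dynamically.
 */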
static struct urb *next_free_urb(struct es1_ap_dev *es1, gfp_t gfp_mask)
{
	struct urb *urb = NULL;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&es1->cport_out_urb_lock, flags);

	/* Look in our pool of allocated urbs first, as that's the "fastest" */
	for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
		if (es1->cport_out_urb_busy[i] == false &&
				es1->cport_out_urb_cancelled[i] == false) {
			es1->cport_out_urb_busy[i] = true;
			urb = es1->cport_out_urb[i];
			break;
		}
	}
	spin_unlock_irqrestore(&es1->cport_out_urb_lock, flags);
	if (urb)
		return urb;

	/*
	 * Crap, pool is empty, complain to the syslog and go allocate one
	 * dynamically as we have to succeed.
	 */
	dev_err(&es1->usb_dev->dev,
		"No free CPort OUT urbs, having to dynamically allocate one!\n");
	return usb_alloc_urb(0, gfp_mask);
}

static void free_urb(struct es1_ap_dev *es1, struct urb *urb)
{
	unsigned long flags;
	int i;
	/*
	 * See if this was an urb in our pool, if so mark it "free", otherwise
	 * we need to free it ourselves.
	 */
	spin_lock_irqsave(&es1->cport_out_urb_lock, flags);
	for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
		if (urb == es1->cport_out_urb[i]) {
			es1->cport_out_urb_busy[i] = false;
			urb = NULL;
			break;
		}
	}
	spin_unlock_irqrestore(&es1->cport_out_urb_lock, flags);

	/* If urb is not NULL, then we need to free this urb */
	usb_free_urb(urb);
}

/*
 * We (ab)use the operation-message header pad bytes to transfer the
 * cport id in order to minimise overhead.
 */
static void
gb_message_cport_pack(struct gb_operation_msg_hdr *header, u16 cport_id)
{
	header->pad[0] = cport_id;
}

/* Clear the pad bytes used for the CPort id */
static void gb_message_cport_clear(struct gb_operation_msg_hdr *header)
{
	header->pad[0] = 0;
}

/* Extract the CPort id packed into the header, and clear it */
static u16 gb_message_cport_unpack(struct gb_operation_msg_hdr *header)
{
	u16 cport_id = header->pad[0];

	gb_message_cport_clear(header);

	return cport_id;
}

/*
 * Returns zero if the message was successfully queued, or a negative errno
 * otherwise.
 */
static int message_send(struct greybus_host_device *hd, u16 cport_id,
			struct gb_message *message, gfp_t gfp_mask)
{
	struct es1_ap_dev *es1 = hd_to_es1(hd);
	struct usb_device *udev = es1->usb_dev;
	size_t buffer_size;
	int retval;
	struct urb *urb;
	int bulk_ep_set;
	unsigned long flags;

	/*
	 * The data actually transferred will include an indication
	 * of where the data should be sent.  Do one last check of
	 * the target CPort id before filling it in.
	 */
	if (!cport_id_valid(hd, cport_id)) {
		pr_err("invalid destination cport 0x%02x\n", cport_id);
		return -EINVAL;
	}

	/* Find a free urb */
	urb = next_free_urb(es1, gfp_mask);
	if (!urb)
		return -ENOMEM;

	spin_lock_irqsave(&es1->cport_out_urb_lock, flags);
	message->hcpriv = urb;
	spin_unlock_irqrestore(&es1->cport_out_urb_lock, flags);

	/* Pack the cport id into the message header */
	gb_message_cport_pack(message->header, cport_id);

	buffer_size = sizeof(*message->header) + message->payload_size;

	bulk_ep_set = cport_to_ep(es1, cport_id);
	usb_fill_bulk_urb(urb, udev,
			  usb_sndbulkpipe(udev,
					  es1->cport_out[bulk_ep_set].endpoint),
			  message->buffer, buffer_size,
			  cport_out_callback, message);
	urb->transfer_flags |= URB_ZERO_PACKET;
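	/* Record a timestamp on the connection just before submitting the urb */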
	gb_connection_push_timestamp(message->operation->connection);
	retval = usb_submit_urb(urb, gfp_mask);
	if (retval) {
		pr_err("error %d submitting URB\n", retval);

		spin_lock_irqsave(&es1->cport_out_urb_lock, flags);
		message->hcpriv = NULL;
		spin_unlock_irqrestore(&es1->cport_out_urb_lock, flags);

		free_urb(es1, urb);
		gb_message_cport_clear(message->header);

		return retval;
	}

	return 0;
}

/*
 * Cannot be called in atomic context.
 */
static void message_cancel(struct gb_message *message)
{
	struct greybus_host_device *hd = message->operation->connection->hd;
	struct es1_ap_dev *es1 = hd_to_es1(hd);
	struct urb *urb;
	int i;

	might_sleep();

	spin_lock_irq(&es1->cport_out_urb_lock);
	urb = message->hcpriv;

	/* Prevent dynamically allocated urb from being deallocated. */
	usb_get_urb(urb);

	/* Prevent pre-allocated urb from being reused. */
	for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
		if (urb == es1->cport_out_urb[i]) {
			es1->cport_out_urb_cancelled[i] = true;
			break;
		}
	}
	spin_unlock_irq(&es1->cport_out_urb_lock);

	usb_kill_urb(urb);

	if (i < NUM_CPORT_OUT_URB) {
		spin_lock_irq(&es1->cport_out_urb_lock);
		es1->cport_out_urb_cancelled[i] = false;
		spin_unlock_irq(&es1->cport_out_urb_lock);
	}

	usb_free_urb(urb);
}

static struct greybus_host_driver es1_driver = {
	.hd_priv_size		= sizeof(struct es1_ap_dev),
	.message_send		= message_send,
	.message_cancel		= message_cancel,
};

/* Common function to report consistent warnings based on URB status */
static int check_urb_status(struct urb *urb)
{
	struct device *dev = &urb->dev->dev;
	int status = urb->status;

	switch (status) {
	case 0:
		return 0;

	case -EOVERFLOW:
		dev_err(dev, "%s: overflow actual length is %d\n",
			__func__, urb->actual_length);
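		/* fall through */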
	case -ECONNRESET:
	case -ENOENT:
	case -ESHUTDOWN:
	case -EILSEQ:
	case -EPROTO:
		/* device is gone, stop sending */
		return status;
	}
	dev_err(dev, "%s: unknown status %d\n", __func__, status);

	return -EAGAIN;
}

static void ap_disconnect(struct usb_interface *interface)
{
	struct es1_ap_dev *es1;
	struct usb_device *udev;
	int bulk_in;
	int i;

	es1 = usb_get_intfdata(interface);
	if (!es1)
		return;

	usb_log_disable(es1);

	/* Tear down everything! */
	for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
		struct urb *urb = es1->cport_out_urb[i];

		if (!urb)
			break;
		usb_kill_urb(urb);
		usb_free_urb(urb);
		es1->cport_out_urb[i] = NULL;
		es1->cport_out_urb_busy[i] = false;	/* just to be anal */
	}

	for (bulk_in = 0; bulk_in < NUM_BULKS; bulk_in++) {
		struct es1_cport_in *cport_in = &es1->cport_in[bulk_in];
		for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
			struct urb *urb = cport_in->urb[i];

			if (!urb)
				break;
			usb_kill_urb(urb);
			usb_free_urb(urb);
			kfree(cport_in->buffer[i]);
			cport_in->buffer[i] = NULL;
		}
	}

	usb_set_intfdata(interface, NULL);
	udev = es1->usb_dev;
	greybus_remove_hd(es1->hd);

	usb_put_dev(udev);
}

static void cport_in_callback(struct urb *urb)
{
	struct greybus_host_device *hd = urb->context;
	struct device *dev = &urb->dev->dev;
	struct gb_operation_msg_hdr *header;
	int status = check_urb_status(urb);
	int retval;
	u16 cport_id;

	if (status) {
		if ((status == -EAGAIN) || (status == -EPROTO))
			goto exit;
		dev_err(dev, "urb cport in error %d (dropped)\n", status);
		return;
	}

	if (urb->actual_length < sizeof(*header)) {
		dev_err(dev, "%s: short message received\n", __func__);
		goto exit;
	}

	/* Extract the CPort id, which is packed in the message header */
	header = urb->transfer_buffer;
	cport_id = gb_message_cport_unpack(header);

	if (cport_id_valid(hd, cport_id))
		greybus_data_rcvd(hd, cport_id, urb->transfer_buffer,
							urb->actual_length);
	else
		dev_err(dev, "%s: invalid cport id 0x%02x received\n",
				__func__, cport_id);
exit:
	/* put our urb back in the request pool */
	retval = usb_submit_urb(urb, GFP_ATOMIC);
	if (retval)
		dev_err(dev, "%s: error %d in submitting urb.\n",
			__func__, retval);
}

static void cport_out_callback(struct urb *urb)
{
	struct gb_message *message = urb->context;
	struct greybus_host_device *hd = message->operation->connection->hd;
	struct es1_ap_dev *es1 = hd_to_es1(hd);
	int status = check_urb_status(urb);
	unsigned long flags;

	gb_message_cport_clear(message->header);

	/*
	 * Tell the submitter that the message send (attempt) is
	 * complete, and report the status.
	 */
	greybus_message_sent(hd, message, status);

	spin_lock_irqsave(&es1->cport_out_urb_lock, flags);
	message->hcpriv = NULL;
	spin_unlock_irqrestore(&es1->cport_out_urb_lock, flags);

	free_urb(es1, urb);
}

#define APB1_LOG_MSG_SIZE	64
static void apb1_log_get(struct es1_ap_dev *es1, char *buf)
{
	int retval;

	/* APB1 log data is fetched over our control pipe */
	do {
		retval = usb_control_msg(es1->usb_dev,
					usb_rcvctrlpipe(es1->usb_dev,
							es1->control_endpoint),
					REQUEST_LOG,
					USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
					0x00, 0x00,
					buf,
					APB1_LOG_MSG_SIZE,
					ES1_TIMEOUT);
		if (retval > 0)
			kfifo_in(&apb1_log_fifo, buf, retval);
	} while (retval > 0);
}

static int apb1_log_poll(void *data)
{
	struct es1_ap_dev *es1 = data;
	char *buf;

	buf = kmalloc(APB1_LOG_MSG_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

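	/* Poll the bridge for new log data roughly once a second */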
	while (!kthread_should_stop()) {
		msleep(1000);
		apb1_log_get(es1, buf);
	}

	kfree(buf);

	return 0;
}

static ssize_t apb1_log_read(struct file *f, char __user *buf,
				size_t count, loff_t *ppos)
{
	ssize_t ret;
	size_t copied;
	char *tmp_buf;

	if (count > APB1_LOG_SIZE)
		count = APB1_LOG_SIZE;

	tmp_buf = kmalloc(count, GFP_KERNEL);
	if (!tmp_buf)
		return -ENOMEM;

	copied = kfifo_out(&apb1_log_fifo, tmp_buf, count);
	ret = simple_read_from_buffer(buf, count, ppos, tmp_buf, copied);

	kfree(tmp_buf);

	return ret;
}

static const struct file_operations apb1_log_fops = {
	.read	= apb1_log_read,
};

static void usb_log_enable(struct es1_ap_dev *es1)
{
	if (!IS_ERR_OR_NULL(apb1_log_task))
		return;

	/* get log from APB1 */
	apb1_log_task = kthread_run(apb1_log_poll, es1, "apb1_log");
	if (IS_ERR(apb1_log_task))
		return;
	apb1_log_dentry = debugfs_create_file("apb1_log", S_IRUGO,
						gb_debugfs_get(), NULL,
						&apb1_log_fops);
}

static void usb_log_disable(struct es1_ap_dev *es1)
{
	if (IS_ERR_OR_NULL(apb1_log_task))
		return;

	debugfs_remove(apb1_log_dentry);
	apb1_log_dentry = NULL;

	kthread_stop(apb1_log_task);
	apb1_log_task = NULL;
}

static ssize_t apb1_log_enable_read(struct file *f, char __user *buf,
				size_t count, loff_t *ppos)
{
	char tmp_buf[3];
	int enable = !IS_ERR_OR_NULL(apb1_log_task);

	sprintf(tmp_buf, "%d\n", enable);
	return simple_read_from_buffer(buf, count, ppos, tmp_buf, 3);
}

static ssize_t apb1_log_enable_write(struct file *f, const char __user *buf,
				size_t count, loff_t *ppos)
{
	int enable;
	ssize_t retval;
	struct es1_ap_dev *es1 = (struct es1_ap_dev *)f->f_inode->i_private;

	retval = kstrtoint_from_user(buf, count, 10, &enable);
	if (retval)
		return retval;

	if (enable)
		usb_log_enable(es1);
	else
		usb_log_disable(es1);

	return count;
}

static const struct file_operations apb1_log_enable_fops = {
	.read	= apb1_log_enable_read,
	.write	= apb1_log_enable_write,
};

/*
 * The ES2 USB Bridge device contains 4 types of endpoints
 * 1 Control - usual USB stuff + AP -> SVC messages
 * 1 Interrupt IN - SVC -> AP messages
 * Bulk IN (one per bulk endpoint set) - CPort data in
 * Bulk OUT (one per bulk endpoint set) - CPort data out
 */
static int ap_probe(struct usb_interface *interface,
		    const struct usb_device_id *id)
{
	struct es1_ap_dev *es1;
	struct greybus_host_device *hd;
	struct usb_device *udev;
	struct usb_host_interface *iface_desc;
	struct usb_endpoint_descriptor *endpoint;
	int bulk_in = 0;
	int bulk_out = 0;
	int retval = -ENOMEM;
	int i;

	/* We need to fit a CPort ID in one byte of a message header */
	BUILD_BUG_ON(CPORT_ID_MAX > U8_MAX);

	udev = usb_get_dev(interface_to_usbdev(interface));

	hd = greybus_create_hd(&es1_driver, &udev->dev, ES1_GBUF_MSG_SIZE_MAX,
			       CPORT_COUNT);
	if (IS_ERR(hd)) {
		usb_put_dev(udev);
		return PTR_ERR(hd);
	}

	es1 = hd_to_es1(hd);
	es1->hd = hd;
	es1->usb_intf = interface;
	es1->usb_dev = udev;
	spin_lock_init(&es1->cport_out_urb_lock);
	usb_set_intfdata(interface, es1);

	/* Control endpoint is the pipe to talk to this AP, so save it off */
	endpoint = &udev->ep0.desc;
	es1->control_endpoint = endpoint->bEndpointAddress;

	/* find all of our bulk in and bulk out endpoints */
	iface_desc = interface->cur_altsetting;
	for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
		endpoint = &iface_desc->endpoint[i].desc;

		if (usb_endpoint_is_bulk_in(endpoint)) {
			es1->cport_in[bulk_in++].endpoint =
				endpoint->bEndpointAddress;
		} else if (usb_endpoint_is_bulk_out(endpoint)) {
			es1->cport_out[bulk_out++].endpoint =
				endpoint->bEndpointAddress;
		} else {
			dev_err(&udev->dev,
				"Unknown endpoint type found, address %x\n",
				endpoint->bEndpointAddress);
		}
	}
	if ((bulk_in == 0) ||
	    (bulk_out == 0)) {
		dev_err(&udev->dev, "Not enough endpoints found in device, aborting!\n");
		goto error;
	}

	/* Allocate buffers for our cport in messages and start them up */
	for (bulk_in = 0; bulk_in < NUM_BULKS; bulk_in++) {
		struct es1_cport_in *cport_in = &es1->cport_in[bulk_in];
		for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
			struct urb *urb;
			u8 *buffer;

			urb = usb_alloc_urb(0, GFP_KERNEL);
			if (!urb)
				goto error;
			buffer = kmalloc(ES1_GBUF_MSG_SIZE_MAX, GFP_KERNEL);
			if (!buffer)
				goto error;

			usb_fill_bulk_urb(urb, udev,
					  usb_rcvbulkpipe(udev,
							  cport_in->endpoint),
					  buffer, ES1_GBUF_MSG_SIZE_MAX,
					  cport_in_callback, hd);
			cport_in->urb[i] = urb;
			cport_in->buffer[i] = buffer;
			retval = usb_submit_urb(urb, GFP_KERNEL);
			if (retval)
				goto error;
		}
	}

	/* Allocate urbs for our CPort OUT messages */
	for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
		struct urb *urb;

		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb)
			goto error;

		es1->cport_out_urb[i] = urb;
		es1->cport_out_urb_busy[i] = false;	/* just to be anal */
	}

	apb1_log_enable_dentry = debugfs_create_file("apb1_log_enable",
							(S_IWUSR | S_IRUGO),
							gb_debugfs_get(), es1,
							&apb1_log_enable_fops);
	return 0;
error:
	ap_disconnect(interface);

	return retval;
}

static struct usb_driver es1_ap_driver = {
	.name =		"es2_ap_driver",
	.probe =	ap_probe,
	.disconnect =	ap_disconnect,
	.id_table =	id_table,
};

module_usb_driver(es1_ap_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Greg Kroah-Hartman <gregkh@linuxfoundation.org>");