es2.c 34.7 KB
Newer Older
1 2 3
/*
 * Greybus "AP" USB driver for "ES2" controller chips
 *
4 5
 * Copyright 2014-2015 Google Inc.
 * Copyright 2014-2015 Linaro Ltd.
6 7 8
 *
 * Released under the GPLv2 only.
 */
9
#include <linux/kthread.h>
10 11
#include <linux/sizes.h>
#include <linux/usb.h>
12 13
#include <linux/kfifo.h>
#include <linux/debugfs.h>
14
#include <asm/unaligned.h>
15 16

#include "greybus.h"
A
Alex Elder 已提交
17
#include "greybus_trace.h"
18
#include "kernel_ver.h"
19
#include "connection.h"
20 21 22 23 24

/* Fixed CPort numbers */
#define ES2_CPORT_CDSI0		16
#define ES2_CPORT_CDSI1		17

25 26
/* Memory sizes for the buffers sent to/from the ES2 controller */
#define ES2_GBUF_MSG_SIZE_MAX	2048
27

28 29 30
/* Memory sizes for the ARPC buffers */
#define ARPC_IN_SIZE_MAX	128

31
/* USB VID/PID of the ES2 (APBridgeA) controller we bind to */
static const struct usb_device_id id_table[] = {
	{ USB_DEVICE(0x18d1, 0x1eaf) },
	{ },
};
MODULE_DEVICE_TABLE(usb, id_table);

37 38
#define APB1_LOG_SIZE		SZ_16K

39 40 41
/* Number of bulk in and bulk out couple */
#define NUM_BULKS		7

42 43 44 45 46 47
/* Expected number of bulk out endpoints */
#define NUM_BULKS_OUT		NUM_BULKS

/* Expected number of bulk in endpoints (including ARPC endpoint) */
#define NUM_BULKS_IN		(NUM_BULKS + 1)

48 49 50 51 52 53 54 55 56 57
/*
 * Number of CPort IN urbs in flight at any point in time.
 * Adjust if we are having stalls in the USB buffer due to not enough urbs in
 * flight.
 */
#define NUM_CPORT_IN_URB	4

/* Number of CPort OUT urbs in flight at any point in time.
 * Adjust if we get messages saying we are out of urbs in the system log.
 */
58
#define NUM_CPORT_OUT_URB	(8 * NUM_BULKS)
59

60 61 62 63 64
/*
 * Number of ARPC in urbs in flight at any point in time.
 */
#define NUM_ARPC_IN_URB		2

65 66 67 68 69
/*
 * es2_cport_in - state for one bulk-in endpoint carrying CPort data
 * @endpoint: bulk in endpoint for CPort data
 * @urb: array of urbs for the CPort in messages
 * @buffer: array of buffers for the @cport_in_urb urbs
 */
struct es2_cport_in {
	__u8 endpoint;
	struct urb *urb[NUM_CPORT_IN_URB];
	u8 *buffer[NUM_CPORT_IN_URB];
};

/*
 * es2_cport_out - state for one bulk-out endpoint carrying CPort data
 * @endpoint: bulk out endpoint for CPort data
 */
struct es2_cport_out {
	__u8 endpoint;
};

83
/**
 * es2_ap_dev - ES2 USB Bridge to AP structure
 * @usb_dev: pointer to the USB device we are.
 * @usb_intf: pointer to the USB interface we are bound to.
 * @hd: pointer to our gb_host_device structure
 *
 * @cport_in: endpoint, urbs and buffer for cport in messages
 * @cport_out: endpoint for cport out messages
 * @cport_out_urb: array of urbs for the CPort out messages
 * @cport_out_urb_busy: array of flags to see if the @cport_out_urb is busy or
 *			not.
 * @cport_out_urb_cancelled: array of flags indicating whether the
 *			corresponding @cport_out_urb is being cancelled
 * @cport_out_urb_lock: locks the @cport_out_urb_busy "list"
 * @cdsi1_in_use: true while the offloaded CDSI1 CPort is claimed
 * @cport_to_ep: per-cport index of the endpoint pair it is mapped to
 *
 * @apb_log_task: task pointer for logging thread
 * @apb_log_dentry: file system entry for the log file interface
 * @apb_log_enable_dentry: file system entry for enabling logging
 * @apb_log_fifo: kernel FIFO to carry logged data
 * @arpc_urb: array of urbs for the ARPC in messages
 * @arpc_buffer: array of buffers for the @arpc_urb urbs
 * @arpc_endpoint_in: bulk in endpoint for APBridgeA RPC
 */
struct es2_ap_dev {
	struct usb_device *usb_dev;
	struct usb_interface *usb_intf;
	struct gb_host_device *hd;

	struct es2_cport_in cport_in[NUM_BULKS];
	struct es2_cport_out cport_out[NUM_BULKS];
	struct urb *cport_out_urb[NUM_CPORT_OUT_URB];
	bool cport_out_urb_busy[NUM_CPORT_OUT_URB];
	bool cport_out_urb_cancelled[NUM_CPORT_OUT_URB];
	spinlock_t cport_out_urb_lock;

	bool cdsi1_in_use;

	int *cport_to_ep;

	struct task_struct *apb_log_task;
	struct dentry *apb_log_dentry;
	struct dentry *apb_log_enable_dentry;
	DECLARE_KFIFO(apb_log_fifo, char, APB1_LOG_SIZE);

	__u8 arpc_endpoint_in;
	struct urb *arpc_urb[NUM_ARPC_IN_URB];
	u8 *arpc_buffer[NUM_ARPC_IN_URB];
};

132 133 134 135 136 137
/**
 * cport_to_ep - information about cport to endpoints mapping
 * @cport_id: the id of cport to map to endpoints
 * @endpoint_in: the endpoint number to use for in transfer
 * @endpoint_out: the endpoint number to use for out transfer
 */
struct cport_to_ep {
	__le16 cport_id;
	__u8 endpoint_in;
	__u8 endpoint_out;
};

144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166
/**
 * timesync_enable_request - Enable timesync in an APBridge
 * @count: number of TimeSync Pulses to expect
 * @frame_time: the initial FrameTime at the first TimeSync Pulse
 * @strobe_delay: the expected delay in microseconds between each TimeSync Pulse
 * @refclk: The AP mandated reference clock to run FrameTime at
 */
struct timesync_enable_request {
	__u8	count;
	__le64	frame_time;
	__le32	strobe_delay;
	__le32	refclk;
} __packed;

/**
 * timesync_authoritative_request - Transmit authoritative FrameTime to APBridge
 * @frame_time: An array of authoritative FrameTimes provided by the SVC
 *              and relayed to the APBridge by the AP
 */
struct timesync_authoritative_request {
	__le64	frame_time[GB_TIMESYNC_MAX_STROBES];
} __packed;

167
/* The ES2 private state lives in the host device's hd_priv area */
static inline struct es2_ap_dev *hd_to_es2(struct gb_host_device *hd)
{
	return (struct es2_ap_dev *)&hd->hd_priv;
}

static void cport_out_callback(struct urb *urb);
173 174
static void usb_log_enable(struct es2_ap_dev *es2);
static void usb_log_disable(struct es2_ap_dev *es2);
175

176
/* Get the endpoints pair mapped to the cport */
177
static int cport_to_ep_pair(struct es2_ap_dev *es2, u16 cport_id)
178
{
179
	if (cport_id >= es2->hd->num_cports)
180
		return 0;
181
	return es2->cport_to_ep[cport_id];
182 183
}

184
#define ES2_TIMEOUT	500	/* 500 ms for the SVC to do something */
185

186 187
/* Disable for now until we work all of this out to keep a warning-free build */
#if 0
188
/* Test if the endpoints pair is already mapped to a cport */
189
static int ep_pair_in_use(struct es2_ap_dev *es2, int ep_pair)
190 191 192
{
	int i;

193 194
	for (i = 0; i < es2->hd->num_cports; i++) {
		if (es2->cport_to_ep[i] == ep_pair)
195 196 197 198 199
			return 1;
	}
	return 0;
}

200
/* Configure the endpoint mapping and send the request to APBridge */
201
static int map_cport_to_ep(struct es2_ap_dev *es2,
202
				u16 cport_id, int ep_pair)
203 204 205 206
{
	int retval;
	struct cport_to_ep *cport_to_ep;

207
	if (ep_pair < 0 || ep_pair >= NUM_BULKS)
208
		return -EINVAL;
209
	if (cport_id >= es2->hd->num_cports)
210
		return -EINVAL;
211
	if (ep_pair && ep_pair_in_use(es2, ep_pair))
212 213 214 215 216 217
		return -EINVAL;

	cport_to_ep = kmalloc(sizeof(*cport_to_ep), GFP_KERNEL);
	if (!cport_to_ep)
		return -ENOMEM;

218
	es2->cport_to_ep[cport_id] = ep_pair;
219
	cport_to_ep->cport_id = cpu_to_le16(cport_id);
220 221
	cport_to_ep->endpoint_in = es2->cport_in[ep_pair].endpoint;
	cport_to_ep->endpoint_out = es2->cport_out[ep_pair].endpoint;
222

223 224
	retval = usb_control_msg(es2->usb_dev,
				 usb_sndctrlpipe(es2->usb_dev, 0),
225
				 GB_APB_REQUEST_EP_MAPPING,
226 227 228 229
				 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
				 0x00, 0x00,
				 (char *)cport_to_ep,
				 sizeof(*cport_to_ep),
230
				 ES2_TIMEOUT);
231 232 233 234 235 236 237
	if (retval == sizeof(*cport_to_ep))
		retval = 0;
	kfree(cport_to_ep);

	return retval;
}

238
/* Unmap a cport: use the muxed endpoints pair */
239
static int unmap_cport(struct es2_ap_dev *es2, u16 cport_id)
240
{
241
	return map_cport_to_ep(es2, cport_id, 0);
242
}
243
#endif
244

245
/*
 * Synchronously send a vendor control request carrying @size bytes of
 * @req to the bridge.  A bounce buffer is required because @req may not
 * be DMA-able (e.g. stack or vmalloc memory).
 *
 * Returns 0 on success or a negative errno.
 */
static int output_sync(struct es2_ap_dev *es2, void *req, u16 size, u8 cmd)
{
	struct usb_device *udev = es2->usb_dev;
	u8 *data;
	int retval;

	/* kmemdup() replaces the open-coded kmalloc() + memcpy() pair */
	data = kmemdup(req, size, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
				 cmd,
				 USB_DIR_OUT | USB_TYPE_VENDOR |
				 USB_RECIP_INTERFACE,
				 0, 0, data, size, ES2_TIMEOUT);
	if (retval < 0)
		dev_err(&udev->dev, "%s: return error %d\n", __func__, retval);
	else
		retval = 0;

	kfree(data);
	return retval;
}
269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325

/*
 * Completion handler for output_async(): release the setup packet
 * (stored in urb->context) and drop the urb reference.
 */
static void ap_urb_complete(struct urb *urb)
{
	kfree(urb->context);
	usb_free_urb(urb);
}

/*
 * Asynchronously send a vendor control request carrying @size bytes of
 * @req to the bridge.  The setup packet and payload share one atomic
 * allocation; both are freed by ap_urb_complete() on completion, or
 * here if submission fails.
 *
 * Returns 0 if the urb was queued, or a negative errno.
 */
static int output_async(struct es2_ap_dev *es2, void *req, u16 size, u8 cmd)
{
	struct usb_device *udev = es2->usb_dev;
	struct urb *urb;
	struct usb_ctrlrequest *dr;
	u8 *buf;
	int retval;

	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb)
		return -ENOMEM;

	/* single allocation: setup packet immediately followed by payload */
	dr = kmalloc(sizeof(*dr) + size, GFP_ATOMIC);
	if (!dr) {
		usb_free_urb(urb);
		return -ENOMEM;
	}

	buf = (u8 *)dr + sizeof(*dr);
	memcpy(buf, req, size);

	dr->bRequest = cmd;
	dr->bRequestType = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE;
	dr->wValue = 0;
	dr->wIndex = 0;
	dr->wLength = cpu_to_le16(size);

	/* dr doubles as the completion context so it can be freed there */
	usb_fill_control_urb(urb, udev, usb_sndctrlpipe(udev, 0),
			     (unsigned char *)dr, buf, size,
			     ap_urb_complete, dr);
	retval = usb_submit_urb(urb, GFP_ATOMIC);
	if (retval) {
		usb_free_urb(urb);
		kfree(dr);
	}
	return retval;
}

/* Host-device "output" op: dispatch to the sync or async sender */
static int output(struct gb_host_device *hd, void *req, u16 size, u8 cmd,
		     bool async)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);

	return async ? output_async(es2, req, size, cmd) :
		       output_sync(es2, req, size, cmd);
}
326

327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355
/*
 * Submit all pre-allocated CPort in-urbs for one bulk-in endpoint.
 * On failure, every urb submitted so far is killed again so the
 * endpoint is left fully disabled.
 */
static int es2_cport_in_enable(struct es2_ap_dev *es2,
				struct es2_cport_in *cport_in)
{
	struct urb *urb;
	int ret;
	int i;

	for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
		urb = cport_in->urb[i];

		ret = usb_submit_urb(urb, GFP_KERNEL);
		if (ret) {
			dev_err(&es2->usb_dev->dev,
					"failed to submit in-urb: %d\n", ret);
			goto err_kill_urbs;
		}
	}

	return 0;

err_kill_urbs:
	/* unwind only the urbs that were successfully submitted */
	for (--i; i >= 0; --i) {
		urb = cport_in->urb[i];
		usb_kill_urb(urb);
	}

	return ret;
}

356 357 358 359 360 361 362 363 364 365 366 367
/* Kill all in-flight CPort in-urbs for one bulk-in endpoint */
static void es2_cport_in_disable(struct es2_ap_dev *es2,
				struct es2_cport_in *cport_in)
{
	int n;

	for (n = 0; n < NUM_CPORT_IN_URB; n++)
		usb_kill_urb(cport_in->urb[n]);
}

368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406
/*
 * Submit all pre-allocated ARPC in-urbs; on failure, kill the ones
 * already submitted so the ARPC endpoint stays disabled.
 */
static int es2_arpc_in_enable(struct es2_ap_dev *es2)
{
	struct urb *urb;
	int ret;
	int i;

	for (i = 0; i < NUM_ARPC_IN_URB; ++i) {
		urb = es2->arpc_urb[i];

		ret = usb_submit_urb(urb, GFP_KERNEL);
		if (ret) {
			dev_err(&es2->usb_dev->dev,
				"failed to submit arpc in-urb: %d\n", ret);
			goto err_kill_urbs;
		}
	}

	return 0;

err_kill_urbs:
	/* unwind only the urbs that were successfully submitted */
	for (--i; i >= 0; --i) {
		urb = es2->arpc_urb[i];
		usb_kill_urb(urb);
	}

	return ret;
}

/* Kill all in-flight ARPC in-urbs */
static void es2_arpc_in_disable(struct es2_ap_dev *es2)
{
	int n;

	for (n = 0; n < NUM_ARPC_IN_URB; n++)
		usb_kill_urb(es2->arpc_urb[n]);
}

407
/*
 * Grab a free CPort OUT urb from the pre-allocated pool (marking it
 * busy under the lock), falling back to a dynamic allocation when the
 * pool is exhausted.  Returns NULL only if that allocation fails.
 */
static struct urb *next_free_urb(struct es2_ap_dev *es2, gfp_t gfp_mask)
{
	struct urb *urb = NULL;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&es2->cport_out_urb_lock, flags);

	/* Look in our pool of allocated urbs first, as that's the "fastest" */
	for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
		if (es2->cport_out_urb_busy[i] == false &&
				es2->cport_out_urb_cancelled[i] == false) {
			es2->cport_out_urb_busy[i] = true;
			urb = es2->cport_out_urb[i];
			break;
		}
	}
	spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);
	if (urb)
		return urb;

	/*
	 * Crap, pool is empty, complain to the syslog and go allocate one
	 * dynamically as we have to succeed.
	 */
	dev_dbg(&es2->usb_dev->dev,
		"No free CPort OUT urbs, having to dynamically allocate one!\n");
	return usb_alloc_urb(0, gfp_mask);
}

437
/*
 * Return an OUT urb: pool urbs are simply marked free again, while
 * dynamically allocated ones (not found in the pool) are freed.
 */
static void free_urb(struct es2_ap_dev *es2, struct urb *urb)
{
	unsigned long flags;
	int i;
	/*
	 * See if this was an urb in our pool, if so mark it "free", otherwise
	 * we need to free it ourselves.
	 */
	spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
	for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
		if (urb == es2->cport_out_urb[i]) {
			es2->cport_out_urb_busy[i] = false;
			urb = NULL;	/* pool urb: nothing left to free */
			break;
		}
	}
	spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);

	/* If urb is not NULL, then we need to free this urb */
	usb_free_urb(urb);
}

459 460 461 462 463 464 465
/*
 * We (ab)use the operation-message header pad bytes to transfer the
 * cport id in order to minimise overhead.
 */
static void
gb_message_cport_pack(struct gb_operation_msg_hdr *header, u16 cport_id)
{
	/*
	 * NOTE(review): only pad[0] carries the id, so a cport id above
	 * 255 would be truncated -- confirm num_cports stays below 256
	 * or widen this to both pad bytes (bridge firmware must match).
	 */
	header->pad[0] = cport_id;
}

/* Clear the pad bytes used for the CPort id */
static void gb_message_cport_clear(struct gb_operation_msg_hdr *header)
{
	header->pad[0] = 0;
}

/* Extract the CPort id packed into the header, and clear it */
static u16 gb_message_cport_unpack(struct gb_operation_msg_hdr *header)
{
	u16 cport_id = header->pad[0];

	gb_message_cport_clear(header);

	return cport_id;
}

485
/*
 * Host-device message_send op: queue one Greybus message on the bulk
 * OUT endpoint mapped to @cport_id.
 *
 * Returns zero if the message was successfully queued, or a negative errno
 * otherwise.
 */
static int message_send(struct gb_host_device *hd, u16 cport_id,
			struct gb_message *message, gfp_t gfp_mask)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct usb_device *udev = es2->usb_dev;
	size_t buffer_size;
	int retval;
	struct urb *urb;
	int ep_pair;
	unsigned long flags;

	/*
	 * The data actually transferred will include an indication
	 * of where the data should be sent.  Do one last check of
	 * the target CPort id before filling it in.
	 */
	if (!cport_id_valid(hd, cport_id)) {
		dev_err(&udev->dev, "invalid cport %u\n", cport_id);
		return -EINVAL;
	}

	/* Find a free urb */
	urb = next_free_urb(es2, gfp_mask);
	if (!urb)
		return -ENOMEM;

	/* publish the urb so message_cancel() can find it */
	spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
	message->hcpriv = urb;
	spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);

	/* Pack the cport id into the message header */
	gb_message_cport_pack(message->header, cport_id);

	buffer_size = sizeof(*message->header) + message->payload_size;

	ep_pair = cport_to_ep_pair(es2, cport_id);
	usb_fill_bulk_urb(urb, udev,
			  usb_sndbulkpipe(udev,
					  es2->cport_out[ep_pair].endpoint),
			  message->buffer, buffer_size,
			  cport_out_callback, message);
	/* terminate max-packet-sized transfers with a zero-length packet */
	urb->transfer_flags |= URB_ZERO_PACKET;

	trace_gb_message_submit(message);

	retval = usb_submit_urb(urb, gfp_mask);
	if (retval) {
		dev_err(&udev->dev, "failed to submit out-urb: %d\n", retval);

		/* undo everything done above, in reverse order */
		spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
		message->hcpriv = NULL;
		spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);

		free_urb(es2, urb);
		gb_message_cport_clear(message->header);

		return retval;
	}

	return 0;
}

/*
 * Cancel an in-flight message by killing its urb.
 *
 * Can not be called in atomic context.
 */
static void message_cancel(struct gb_message *message)
{
	struct gb_host_device *hd = message->operation->connection->hd;
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct urb *urb;
	int i;

	might_sleep();

	spin_lock_irq(&es2->cport_out_urb_lock);
	urb = message->hcpriv;

	/* Prevent dynamically allocated urb from being deallocated. */
	usb_get_urb(urb);

	/* Prevent pre-allocated urb from being reused. */
	for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
		if (urb == es2->cport_out_urb[i]) {
			es2->cport_out_urb_cancelled[i] = true;
			break;
		}
	}
	spin_unlock_irq(&es2->cport_out_urb_lock);

	/* sleeps until the urb's completion has run */
	usb_kill_urb(urb);

	/* i < NUM_CPORT_OUT_URB means the urb came from the pool */
	if (i < NUM_CPORT_OUT_URB) {
		spin_lock_irq(&es2->cport_out_urb_lock);
		es2->cport_out_urb_cancelled[i] = false;
		spin_unlock_irq(&es2->cport_out_urb_lock);
	}

	/* drop the reference taken above */
	usb_free_urb(urb);
}

589
static int cport_reset(struct gb_host_device *hd, u16 cport_id)
590
{
591 592
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct usb_device *udev = es2->usb_dev;
593 594
	int retval;

595 596
	switch (cport_id) {
	case GB_SVC_CPORT_ID:
597 598
	case ES2_CPORT_CDSI0:
	case ES2_CPORT_CDSI1:
599 600 601
		return 0;
	}

602
	retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
603
				 GB_APB_REQUEST_RESET_CPORT,
604
				 USB_DIR_OUT | USB_TYPE_VENDOR |
605
				 USB_RECIP_INTERFACE, cport_id, 0,
606
				 NULL, 0, ES2_TIMEOUT);
607
	if (retval < 0) {
608
		dev_err(&udev->dev, "failed to reset cport %u: %d\n", cport_id,
609 610 611 612 613 614 615
			retval);
		return retval;
	}

	return 0;
}

616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668
/*
 * Host-device cport_allocate op.  A negative @cport_id requests any
 * free id; otherwise the specific id is claimed.  Offloaded CDSI1
 * connections get the dedicated CDSI1 cport, tracked outside the ida.
 *
 * Returns the allocated cport id or a negative errno.
 */
static int es2_cport_allocate(struct gb_host_device *hd, int cport_id,
				unsigned long flags)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct ida *id_map = &hd->cport_id_map;
	int ida_start, ida_end;

	switch (cport_id) {
	case ES2_CPORT_CDSI0:
	case ES2_CPORT_CDSI1:
		/* the fixed CDSI cports can never be allocated directly */
		dev_err(&hd->dev, "cport %d not available\n", cport_id);
		return -EBUSY;
	}

	if (flags & GB_CONNECTION_FLAG_OFFLOADED &&
			flags & GB_CONNECTION_FLAG_CDSI1) {
		if (es2->cdsi1_in_use) {
			dev_err(&hd->dev, "CDSI1 already in use\n");
			return -EBUSY;
		}

		es2->cdsi1_in_use = true;

		return ES2_CPORT_CDSI1;
	}

	if (cport_id < 0) {
		ida_start = 0;
		ida_end = hd->num_cports;
	} else if (cport_id < hd->num_cports) {
		ida_start = cport_id;
		ida_end = cport_id + 1;
	} else {
		dev_err(&hd->dev, "cport %d not available\n", cport_id);
		return -EINVAL;
	}

	return ida_simple_get(id_map, ida_start, ida_end, GFP_KERNEL);
}

/* Host-device cport_release op: undo es2_cport_allocate() */
static void es2_cport_release(struct gb_host_device *hd, u16 cport_id)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);

	/* CDSI1 is tracked with a flag, not in the ida map */
	if (cport_id == ES2_CPORT_CDSI1) {
		es2->cdsi1_in_use = false;
		return;
	}

	ida_simple_remove(&hd->cport_id_map, cport_id);
}

669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709
/*
 * Host-device cport_enable op: push the connection flags for a cport
 * down to the bridge via a vendor control request.
 *
 * Returns 0 on success or a negative errno.
 */
static int cport_enable(struct gb_host_device *hd, u16 cport_id,
			unsigned long flags)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct usb_device *udev = es2->usb_dev;
	struct gb_apb_request_cport_flags *req;
	int ret;

	/* heap-allocated so the buffer is DMA-able for usb_control_msg() */
	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	if (flags & GB_CONNECTION_FLAG_CONTROL)
		req->flags |= GB_APB_CPORT_FLAG_CONTROL;
	if (flags & GB_CONNECTION_FLAG_HIGH_PRIO)
		req->flags |= GB_APB_CPORT_FLAG_HIGH_PRIO;

	dev_dbg(&hd->dev, "%s - cport = %u, flags = %02x\n", __func__,
			cport_id, req->flags);

	ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
				GB_APB_REQUEST_CPORT_FLAGS,
				USB_DIR_OUT | USB_TYPE_VENDOR |
				USB_RECIP_INTERFACE, cport_id, 0,
				req, sizeof(*req), ES2_TIMEOUT);
	/* a short write is also a failure */
	if (ret != sizeof(*req)) {
		dev_err(&udev->dev, "failed to set cport flags for port %d\n",
				cport_id);
		if (ret >= 0)
			ret = -EIO;

		goto out;
	}

	ret = 0;
out:
	kfree(req);

	return ret;
}

710
static int cport_disable(struct gb_host_device *hd, u16 cport_id)
711 712 713
{
	int retval;

714 715 716
	retval = cport_reset(hd, cport_id);
	if (retval)
		return retval;
717 718 719 720

	return 0;
}

721
/*
 * Ask the bridge to start latency-tagging messages on @cport_id.
 * Returns the usb_control_msg() result (>= 0 on success).
 */
static int latency_tag_enable(struct gb_host_device *hd, u16 cport_id)
{
	int retval;
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct usb_device *udev = es2->usb_dev;

	retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
				 GB_APB_REQUEST_LATENCY_TAG_EN,
				 USB_DIR_OUT | USB_TYPE_VENDOR |
				 USB_RECIP_INTERFACE, cport_id, 0, NULL,
				 0, ES2_TIMEOUT);

	if (retval < 0)
		dev_err(&udev->dev, "Cannot enable latency tag for cport %d\n",
			cport_id);
	return retval;
}

739
static int latency_tag_disable(struct gb_host_device *hd, u16 cport_id)
740 741
{
	int retval;
742 743
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct usb_device *udev = es2->usb_dev;
744 745

	retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
746
				 GB_APB_REQUEST_LATENCY_TAG_DIS,
747 748
				 USB_DIR_OUT | USB_TYPE_VENDOR |
				 USB_RECIP_INTERFACE, cport_id, 0, NULL,
749
				 0, ES2_TIMEOUT);
750 751 752 753 754 755 756

	if (retval < 0)
		dev_err(&udev->dev, "Cannot disable latency tag for cport %d\n",
			cport_id);
	return retval;
}

757
static int cport_features_enable(struct gb_host_device *hd, u16 cport_id)
758 759 760 761 762 763
{
	int retval;
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct usb_device *udev = es2->usb_dev;

	retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
764
				 GB_APB_REQUEST_CPORT_FEAT_EN,
765 766 767 768
				 USB_DIR_OUT | USB_TYPE_VENDOR |
				 USB_RECIP_INTERFACE, cport_id, 0, NULL,
				 0, ES2_TIMEOUT);
	if (retval < 0)
769
		dev_err(&udev->dev, "Cannot enable CPort features for cport %u: %d\n",
770 771 772 773
			cport_id, retval);
	return retval;
}

774
static int cport_features_disable(struct gb_host_device *hd, u16 cport_id)
775 776 777 778 779 780
{
	int retval;
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct usb_device *udev = es2->usb_dev;

	retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
781
				 GB_APB_REQUEST_CPORT_FEAT_DIS,
782 783 784 785 786
				 USB_DIR_OUT | USB_TYPE_VENDOR |
				 USB_RECIP_INTERFACE, cport_id, 0, NULL,
				 0, ES2_TIMEOUT);
	if (retval < 0)
		dev_err(&udev->dev,
787
			"Cannot disable CPort features for cport %u: %d\n",
788 789 790 791
			cport_id, retval);
	return retval;
}

792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867
/*
 * Enable TimeSync on the bridge.  Uses the locally defined (and
 * previously unused) timesync_enable_request rather than the Control
 * protocol's gb_control_timesync_enable_request: this request goes
 * over the USB vendor channel, not a Greybus Control connection, and
 * the local struct is the one documented above for that purpose.
 *
 * Returns the usb_control_msg() result (>= 0 on success).
 */
static int timesync_enable(struct gb_host_device *hd, u8 count,
			   u64 frame_time, u32 strobe_delay, u32 refclk)
{
	int retval;
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct usb_device *udev = es2->usb_dev;
	struct timesync_enable_request *request;

	request = kzalloc(sizeof(*request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;

	request->count = count;
	request->frame_time = cpu_to_le64(frame_time);
	request->strobe_delay = cpu_to_le32(strobe_delay);
	request->refclk = cpu_to_le32(refclk);
	retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
				 REQUEST_TIMESYNC_ENABLE,
				 USB_DIR_OUT | USB_TYPE_VENDOR |
				 USB_RECIP_INTERFACE, 0, 0, request,
				 sizeof(*request), ES2_TIMEOUT);
	if (retval < 0)
		dev_err(&udev->dev, "Cannot enable timesync %d\n", retval);

	kfree(request);
	return retval;
}

/*
 * Disable TimeSync on the bridge.
 * Returns the usb_control_msg() result (>= 0 on success).
 */
static int timesync_disable(struct gb_host_device *hd)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct usb_device *udev = es2->usb_dev;
	int retval;

	retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
				 REQUEST_TIMESYNC_DISABLE,
				 USB_DIR_OUT | USB_TYPE_VENDOR |
				 USB_RECIP_INTERFACE, 0, 0, NULL,
				 0, ES2_TIMEOUT);
	if (retval < 0)
		dev_err(&udev->dev, "Cannot disable timesync %d\n", retval);

	return retval;
}

/*
 * Relay the SVC's authoritative FrameTimes to the bridge.
 * @frame_time must hold GB_TIMESYNC_MAX_STROBES entries.
 * Returns the usb_control_msg() result (>= 0 on success).
 */
static int timesync_authoritative(struct gb_host_device *hd, u64 *frame_time)
{
	int retval, i;
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct usb_device *udev = es2->usb_dev;
	struct timesync_authoritative_request *request;

	request = kzalloc(sizeof(*request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;

	for (i = 0; i < GB_TIMESYNC_MAX_STROBES; i++)
		request->frame_time[i] = cpu_to_le64(frame_time[i]);

	retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
				 REQUEST_TIMESYNC_AUTHORITATIVE,
				 USB_DIR_OUT | USB_TYPE_VENDOR |
				 USB_RECIP_INTERFACE, 0, 0, request,
				 sizeof(*request), ES2_TIMEOUT);
	if (retval < 0)
		dev_err(&udev->dev, "Cannot timesync authoritative out %d\n", retval);

	kfree(request);
	return retval;
}

/*
 * Read back the FrameTime of the last TimeSync event from the bridge
 * into *@frame_time.  Returns 0 on success or a negative errno.
 */
static int timesync_get_last_event(struct gb_host_device *hd, u64 *frame_time)
{
	int retval;
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct usb_device *udev = es2->usb_dev;
	__le64 *response_frame_time;

	/* heap-allocated so the buffer is DMA-able for usb_control_msg() */
	response_frame_time = kzalloc(sizeof(*response_frame_time), GFP_KERNEL);
	if (!response_frame_time)
		return -ENOMEM;

	retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
				 REQUEST_TIMESYNC_GET_LAST_EVENT,
				 USB_DIR_IN | USB_TYPE_VENDOR |
				 USB_RECIP_INTERFACE, 0, 0, response_frame_time,
				 sizeof(*response_frame_time), ES2_TIMEOUT);

	/* a short read is also a failure */
	if (retval != sizeof(*response_frame_time)) {
		dev_err(&udev->dev, "Cannot get last TimeSync event: %d\n",
			retval);

		if (retval >= 0)
			retval = -EIO;

		goto out;
	}
	*frame_time = le64_to_cpu(*response_frame_time);
	retval = 0;
out:
	kfree(response_frame_time);
	return retval;
}

896
/* Greybus host-device operations implemented by this driver */
static struct gb_hd_driver es2_driver = {
	.hd_priv_size			= sizeof(struct es2_ap_dev),
	.message_send			= message_send,
	.message_cancel			= message_cancel,
	.cport_allocate			= es2_cport_allocate,
	.cport_release			= es2_cport_release,
	.cport_enable			= cport_enable,
	.cport_disable			= cport_disable,
	.latency_tag_enable		= latency_tag_enable,
	.latency_tag_disable		= latency_tag_disable,
	.output				= output,
	.cport_features_enable		= cport_features_enable,
	.cport_features_disable		= cport_features_disable,
	.timesync_enable		= timesync_enable,
	.timesync_disable		= timesync_disable,
	.timesync_authoritative		= timesync_authoritative,
	.timesync_get_last_event	= timesync_get_last_event,
};

/*
 * Common function to report consistent warnings based on URB status.
 *
 * Returns 0 for success, the (negative) status for fatal conditions
 * the caller should stop on, and -EAGAIN for anything unknown.
 */
static int check_urb_status(struct urb *urb)
{
	struct device *dev = &urb->dev->dev;
	int status = urb->status;

	switch (status) {
	case 0:
		return 0;

	case -EOVERFLOW:
		dev_err(dev, "%s: overflow actual length is %d\n",
			__func__, urb->actual_length);
		/* fall through */
	case -ECONNRESET:
	case -ENOENT:
	case -ESHUTDOWN:
	case -EILSEQ:
	case -EPROTO:
		/* device is gone, stop sending */
		return status;
	}
	dev_err(dev, "%s: unknown status %d\n", __func__, status);

	return -EAGAIN;
}

941
/*
 * Tear down all driver state: logging, urb pools and buffers, the
 * cport map, the reserved CDSI cports, and finally the host-device
 * and USB-device references.
 */
static void es2_destroy(struct es2_ap_dev *es2)
{
	struct usb_device *udev;
	int bulk_in;
	int i;

	debugfs_remove(es2->apb_log_enable_dentry);
	usb_log_disable(es2);

	/* Tear down everything! */
	for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
		struct urb *urb = es2->cport_out_urb[i];

		/* the pool is filled front-to-back, so NULL ends it */
		if (!urb)
			break;
		usb_kill_urb(urb);
		usb_free_urb(urb);
		es2->cport_out_urb[i] = NULL;
		es2->cport_out_urb_busy[i] = false;	/* just to be anal */
	}

	for (i = 0; i < NUM_ARPC_IN_URB; ++i) {
		struct urb *urb = es2->arpc_urb[i];

		if (!urb)
			break;
		usb_free_urb(urb);
		kfree(es2->arpc_buffer[i]);
		es2->arpc_buffer[i] = NULL;
	}

	for (bulk_in = 0; bulk_in < NUM_BULKS; bulk_in++) {
		struct es2_cport_in *cport_in = &es2->cport_in[bulk_in];

		for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
			struct urb *urb = cport_in->urb[i];

			if (!urb)
				break;
			usb_free_urb(urb);
			kfree(cport_in->buffer[i]);
			cport_in->buffer[i] = NULL;
		}
	}

	kfree(es2->cport_to_ep);

	/* release reserved CDSI0 and CDSI1 cports */
	gb_hd_cport_release_reserved(es2->hd, ES2_CPORT_CDSI1);
	gb_hd_cport_release_reserved(es2->hd, ES2_CPORT_CDSI0);

	/* save udev: gb_hd_put() may free es2 (it lives in hd_priv) */
	udev = es2->usb_dev;
	gb_hd_put(es2->hd);

	usb_put_dev(udev);
}

/*
 * Completion handler for CPort in-urbs: hand the received message to
 * greybus core and resubmit the urb.
 */
static void cport_in_callback(struct urb *urb)
{
	struct gb_host_device *hd = urb->context;
	struct device *dev = &urb->dev->dev;
	struct gb_operation_msg_hdr *header;
	int status = check_urb_status(urb);
	int retval;
	u16 cport_id;

	if (status) {
		/* transient errors: drop the data but keep listening */
		if ((status == -EAGAIN) || (status == -EPROTO))
			goto exit;

		/* The urb is being unlinked */
		if (status == -ENOENT || status == -ESHUTDOWN)
			return;

		dev_err(dev, "urb cport in error %d (dropped)\n", status);
		return;
	}

	if (urb->actual_length < sizeof(*header)) {
		dev_err(dev, "short message received\n");
		goto exit;
	}

	/* Extract the CPort id, which is packed in the message header */
	header = urb->transfer_buffer;
	cport_id = gb_message_cport_unpack(header);

	if (cport_id_valid(hd, cport_id)) {
		greybus_data_rcvd(hd, cport_id, urb->transfer_buffer,
							urb->actual_length);
	} else {
		dev_err(dev, "invalid cport id %u received\n", cport_id);
	}
exit:
	/* put our urb back in the request pool */
	retval = usb_submit_urb(urb, GFP_ATOMIC);
	if (retval)
		dev_err(dev, "failed to resubmit in-urb: %d\n", retval);
}

/*
 * Completion handler for CPort out-urbs: report the send result to
 * greybus core and return the urb to the pool.
 */
static void cport_out_callback(struct urb *urb)
{
	struct gb_message *message = urb->context;
	struct gb_host_device *hd = message->operation->connection->hd;
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	int status = check_urb_status(urb);
	unsigned long flags;

	/* restore the header: the pad bytes were borrowed for the cport id */
	gb_message_cport_clear(message->header);

	spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
	message->hcpriv = NULL;
	spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);

	/*
	 * Tell the submitter that the message send (attempt) is
	 * complete, and report the status.
	 */
	greybus_message_sent(hd, message, status);

	free_urb(es2, urb);
}

1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088
/*
 * Completion handler for ARPC in-urbs.  Received data is currently
 * only drained (no processing visible here); the urb is resubmitted
 * unless it is being unlinked or hit a hard error.
 */
static void arpc_in_callback(struct urb *urb)
{
	struct device *dev = &urb->dev->dev;
	int status = check_urb_status(urb);
	int retval;

	if (status) {
		/* transient errors: keep listening */
		if ((status == -EAGAIN) || (status == -EPROTO))
			goto exit;

		/* The urb is being unlinked */
		if (status == -ENOENT || status == -ESHUTDOWN)
			return;

		dev_err(dev, "arpc in-urb error %d (dropped)\n", status);
		return;
	}

exit:
	/* put our urb back in the request pool */
	retval = usb_submit_urb(urb, GFP_ATOMIC);
	if (retval)
		dev_err(dev, "failed to resubmit arpc in-urb: %d\n", retval);
}

1089
#define APB1_LOG_MSG_SIZE	64
1090
/*
 * Drain the bridge's log over the control pipe into the kfifo,
 * APB1_LOG_MSG_SIZE bytes at a time, until it returns no more data.
 * @buf is a caller-provided scratch buffer of APB1_LOG_MSG_SIZE bytes.
 */
static void apb_log_get(struct es2_ap_dev *es2, char *buf)
{
	int retval;

	/* SVC messages go down our control pipe */
	do {
		retval = usb_control_msg(es2->usb_dev,
					usb_rcvctrlpipe(es2->usb_dev, 0),
					GB_APB_REQUEST_LOG,
					USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
					0x00, 0x00,
					buf,
					APB1_LOG_MSG_SIZE,
					ES2_TIMEOUT);
		if (retval > 0)
			kfifo_in(&es2->apb_log_fifo, buf, retval);
	} while (retval > 0);
}

1109
static int apb_log_poll(void *data)
1110
{
1111
	struct es2_ap_dev *es2 = data;
1112 1113 1114 1115 1116 1117
	char *buf;

	buf = kmalloc(APB1_LOG_MSG_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

1118 1119
	while (!kthread_should_stop()) {
		msleep(1000);
1120
		apb_log_get(es2, buf);
1121
	}
1122 1123 1124

	kfree(buf);

1125 1126 1127
	return 0;
}

1128
/*
 * debugfs read handler for "apb_log": copy up to @count bytes of
 * buffered log data out of the kfifo to userspace.
 */
static ssize_t apb_log_read(struct file *f, char __user *buf,
				size_t count, loff_t *ppos)
{
	struct es2_ap_dev *es2 = f->f_inode->i_private;
	char *kbuf;
	size_t avail;
	ssize_t ret;

	/* Never stage more than the whole log buffer at once. */
	if (count > APB1_LOG_SIZE)
		count = APB1_LOG_SIZE;

	kbuf = kmalloc(count, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	avail = kfifo_out(&es2->apb_log_fifo, kbuf, count);
	ret = simple_read_from_buffer(buf, count, ppos, kbuf, avail);

	kfree(kbuf);

	return ret;
}

1151 1152
/* debugfs "apb_log" file: read-only stream of buffered APBridge log data */
static const struct file_operations apb_log_fops = {
	.read	= apb_log_read,
};

1155
static void usb_log_enable(struct es2_ap_dev *es2)
1156
{
1157
	if (!IS_ERR_OR_NULL(es2->apb_log_task))
1158 1159 1160
		return;

	/* get log from APB1 */
1161 1162
	es2->apb_log_task = kthread_run(apb_log_poll, es2, "apb_log");
	if (IS_ERR(es2->apb_log_task))
1163
		return;
1164 1165
	/* XXX We will need to rename this per APB */
	es2->apb_log_dentry = debugfs_create_file("apb_log", S_IRUGO,
1166
						gb_debugfs_get(), es2,
1167
						&apb_log_fops);
1168 1169
}

1170
static void usb_log_disable(struct es2_ap_dev *es2)
1171
{
1172
	if (IS_ERR_OR_NULL(es2->apb_log_task))
1173 1174
		return;

1175 1176
	debugfs_remove(es2->apb_log_dentry);
	es2->apb_log_dentry = NULL;
1177

1178 1179
	kthread_stop(es2->apb_log_task);
	es2->apb_log_task = NULL;
1180 1181
}

1182
/*
 * debugfs read handler for "apb_log_enable": report "1\n" when the log
 * poller thread is running, "0\n" otherwise.
 */
static ssize_t apb_log_enable_read(struct file *f, char __user *buf,
				size_t count, loff_t *ppos)
{
	struct es2_ap_dev *es2 = f->f_inode->i_private;
	char tmp_buf[3];
	int enable = !IS_ERR_OR_NULL(es2->apb_log_task);

	sprintf(tmp_buf, "%d\n", enable);
	return simple_read_from_buffer(buf, count, ppos, tmp_buf, 3);
}

1193
/*
 * debugfs write handler for "apb_log_enable": a non-zero value starts
 * APBridge log polling, zero stops it.
 */
static ssize_t apb_log_enable_write(struct file *f, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct es2_ap_dev *es2 = f->f_inode->i_private;
	int enable;
	ssize_t ret;

	ret = kstrtoint_from_user(buf, count, 10, &enable);
	if (ret)
		return ret;

	if (enable)
		usb_log_enable(es2);
	else
		usb_log_disable(es2);

	return count;
}

1212 1213 1214
/* debugfs "apb_log_enable" file: read/write toggle for the log poller */
static const struct file_operations apb_log_enable_fops = {
	.read	= apb_log_enable_read,
	.write	= apb_log_enable_write,
};

1217
static int apb_get_cport_count(struct usb_device *udev)
1218 1219 1220 1221
{
	int retval;
	__le16 *cport_count;

1222
	cport_count = kzalloc(sizeof(*cport_count), GFP_KERNEL);
1223 1224 1225 1226
	if (!cport_count)
		return -ENOMEM;

	retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
1227
				 GB_APB_REQUEST_CPORT_COUNT,
1228 1229
				 USB_DIR_IN | USB_TYPE_VENDOR |
				 USB_RECIP_INTERFACE, 0, 0, cport_count,
1230
				 sizeof(*cport_count), ES2_TIMEOUT);
1231
	if (retval != sizeof(*cport_count)) {
1232 1233
		dev_err(&udev->dev, "Cannot retrieve CPort count: %d\n",
			retval);
1234 1235 1236 1237

		if (retval >= 0)
			retval = -EIO;

1238 1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252 1253
		goto out;
	}

	retval = le16_to_cpu(*cport_count);

	/* We need to fit a CPort ID in one byte of a message header */
	if (retval > U8_MAX) {
		retval = U8_MAX;
		dev_warn(&udev->dev, "Limiting number of CPorts to U8_MAX\n");
	}

out:
	kfree(cport_count);
	return retval;
}

1254
/*
1255 1256 1257 1258
 * The ES2 USB Bridge device has 15 endpoints
 * 1 Control - usual USB stuff + AP -> APBridgeA messages
 * 7 Bulk IN - CPort data in
 * 7 Bulk OUT - CPort data out
1259 1260 1261 1262
 */
static int ap_probe(struct usb_interface *interface,
		    const struct usb_device_id *id)
{
1263
	struct es2_ap_dev *es2;
1264
	struct gb_host_device *hd;
1265 1266 1267
	struct usb_device *udev;
	struct usb_host_interface *iface_desc;
	struct usb_endpoint_descriptor *endpoint;
1268 1269
	int bulk_in = 0;
	int bulk_out = 0;
1270
	int retval;
1271
	int i;
1272
	int num_cports;
1273

1274 1275
	udev = usb_get_dev(interface_to_usbdev(interface));

1276
	num_cports = apb_get_cport_count(udev);
1277 1278 1279 1280 1281 1282 1283
	if (num_cports < 0) {
		usb_put_dev(udev);
		dev_err(&udev->dev, "Cannot retrieve CPort count: %d\n",
			num_cports);
		return num_cports;
	}

1284 1285
	hd = gb_hd_create(&es2_driver, &udev->dev, ES2_GBUF_MSG_SIZE_MAX,
				num_cports);
1286
	if (IS_ERR(hd)) {
1287
		usb_put_dev(udev);
1288
		return PTR_ERR(hd);
1289 1290
	}

1291 1292 1293 1294 1295
	es2 = hd_to_es2(hd);
	es2->hd = hd;
	es2->usb_intf = interface;
	es2->usb_dev = udev;
	spin_lock_init(&es2->cport_out_urb_lock);
1296
	INIT_KFIFO(es2->apb_log_fifo);
1297
	usb_set_intfdata(interface, es2);
1298

1299 1300 1301 1302 1303 1304 1305 1306 1307 1308 1309
	/*
	 * Reserve the CDSI0 and CDSI1 CPorts so they won't be allocated
	 * dynamically.
	 */
	retval = gb_hd_cport_reserve(hd, ES2_CPORT_CDSI0);
	if (retval)
		goto error;
	retval = gb_hd_cport_reserve(hd, ES2_CPORT_CDSI1);
	if (retval)
		goto error;

1310
	es2->cport_to_ep = kcalloc(hd->num_cports, sizeof(*es2->cport_to_ep),
1311
				   GFP_KERNEL);
1312
	if (!es2->cport_to_ep) {
1313 1314 1315 1316
		retval = -ENOMEM;
		goto error;
	}

1317
	/* find all bulk endpoints */
1318 1319 1320 1321
	iface_desc = interface->cur_altsetting;
	for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
		endpoint = &iface_desc->endpoint[i].desc;

1322
		if (usb_endpoint_is_bulk_in(endpoint)) {
1323 1324 1325 1326 1327 1328 1329
			if (bulk_in < NUM_BULKS)
				es2->cport_in[bulk_in].endpoint =
					endpoint->bEndpointAddress;
			else
				es2->arpc_endpoint_in =
					endpoint->bEndpointAddress;
			bulk_in++;
1330
		} else if (usb_endpoint_is_bulk_out(endpoint)) {
1331
			es2->cport_out[bulk_out++].endpoint =
1332
				endpoint->bEndpointAddress;
1333 1334
		} else {
			dev_err(&udev->dev,
1335
				"Unknown endpoint type found, address 0x%02x\n",
1336 1337 1338
				endpoint->bEndpointAddress);
		}
	}
1339
	if (bulk_in != NUM_BULKS_IN || bulk_out != NUM_BULKS_OUT) {
1340
		dev_err(&udev->dev, "Not enough endpoints found in device, aborting!\n");
1341
		retval = -ENODEV;
1342 1343 1344
		goto error;
	}

1345
	/* Allocate buffers for our cport in messages */
1346
	for (bulk_in = 0; bulk_in < NUM_BULKS; bulk_in++) {
1347 1348
		struct es2_cport_in *cport_in = &es2->cport_in[bulk_in];

1349 1350 1351 1352 1353
		for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
			struct urb *urb;
			u8 *buffer;

			urb = usb_alloc_urb(0, GFP_KERNEL);
1354 1355
			if (!urb) {
				retval = -ENOMEM;
1356
				goto error;
1357
			}
1358
			buffer = kmalloc(ES2_GBUF_MSG_SIZE_MAX, GFP_KERNEL);
1359 1360
			if (!buffer) {
				retval = -ENOMEM;
1361
				goto error;
1362
			}
1363 1364 1365 1366

			usb_fill_bulk_urb(urb, udev,
					  usb_rcvbulkpipe(udev,
							  cport_in->endpoint),
1367
					  buffer, ES2_GBUF_MSG_SIZE_MAX,
1368 1369 1370 1371
					  cport_in_callback, hd);
			cport_in->urb[i] = urb;
			cport_in->buffer[i] = buffer;
		}
1372 1373
	}

1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399
	/* Allocate buffers for ARPC in messages */
	for (i = 0; i < NUM_ARPC_IN_URB; ++i) {
		struct urb *urb;
		u8 *buffer;

		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			retval = -ENOMEM;
			goto error;
		}
		buffer = kmalloc(ARPC_IN_SIZE_MAX, GFP_KERNEL);
		if (!buffer) {
			retval = -ENOMEM;
			goto error;
		}

		usb_fill_bulk_urb(urb, udev,
				  usb_rcvbulkpipe(udev,
						  es2->arpc_endpoint_in),
				  buffer, ARPC_IN_SIZE_MAX,
				  arpc_in_callback, es2);

		es2->arpc_urb[i] = urb;
		es2->arpc_buffer[i] = buffer;
	}

1400 1401 1402 1403 1404
	/* Allocate urbs for our CPort OUT messages */
	for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
		struct urb *urb;

		urb = usb_alloc_urb(0, GFP_KERNEL);
1405 1406
		if (!urb) {
			retval = -ENOMEM;
1407
			goto error;
1408
		}
1409

1410 1411
		es2->cport_out_urb[i] = urb;
		es2->cport_out_urb_busy[i] = false;	/* just to be anal */
1412 1413
	}

1414 1415
	/* XXX We will need to rename this per APB */
	es2->apb_log_enable_dentry = debugfs_create_file("apb_log_enable",
1416
							(S_IWUSR | S_IRUGO),
1417
							gb_debugfs_get(), es2,
1418
							&apb_log_enable_fops);
1419

1420 1421 1422
	if (es2_arpc_in_enable(es2))
		goto error;

1423 1424
	retval = gb_hd_add(hd);
	if (retval)
1425
		goto err_disable_arpc_in;
1426

1427 1428 1429
	for (i = 0; i < NUM_BULKS; ++i) {
		retval = es2_cport_in_enable(es2, &es2->cport_in[i]);
		if (retval)
1430
			goto err_disable_cport_in;
1431 1432
	}

1433
	return 0;
1434 1435 1436 1437

err_disable_cport_in:
	for (--i; i >= 0; --i)
		es2_cport_in_disable(es2, &es2->cport_in[i]);
1438
	gb_hd_del(hd);
1439 1440
err_disable_arpc_in:
	es2_arpc_in_disable(es2);
1441
error:
1442
	es2_destroy(es2);
1443 1444 1445 1446

	return retval;
}

1447 1448 1449 1450 1451 1452 1453 1454 1455
/*
 * Undo ap_probe(): unregister the host device, quiesce all in-flight
 * CPort and ARPC IN traffic, then release every resource via
 * es2_destroy().  Teardown order mirrors probe in reverse.
 */
static void ap_disconnect(struct usb_interface *interface)
{
	struct es2_ap_dev *es2 = usb_get_intfdata(interface);
	int n;

	gb_hd_del(es2->hd);

	for (n = 0; n < NUM_BULKS; ++n)
		es2_cport_in_disable(es2, &es2->cport_in[n]);

	es2_arpc_in_disable(es2);

	es2_destroy(es2);
}

1461
/*
 * USB interface driver for the ES2 APBridge.  soft_unbind lets
 * disconnect run with the device still addressable so teardown
 * traffic can complete.
 */
static struct usb_driver es2_ap_driver = {
	.name =		"es2_ap_driver",
	.probe =	ap_probe,
	.disconnect =	ap_disconnect,
	.id_table =	id_table,
	.soft_unbind =	1,
};

1469
/* Standard module init/exit boilerplate for a single USB driver. */
module_usb_driver(es2_ap_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Greg Kroah-Hartman <gregkh@linuxfoundation.org>");