/*
 * Greybus "AP" USB driver for "ES2" controller chips
 *
 * Copyright 2014-2015 Google Inc.
 * Copyright 2014-2015 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */
9
#include <linux/kthread.h>
10 11
#include <linux/sizes.h>
#include <linux/usb.h>
12 13
#include <linux/kfifo.h>
#include <linux/debugfs.h>
14
#include <asm/unaligned.h>
15 16 17

#include "greybus.h"
#include "kernel_ver.h"
18
#include "connection.h"
19
#include "greybus_trace.h"
20

21 22
/* Memory sizes for the buffers sent to/from the ES2 controller */
#define ES2_GBUF_MSG_SIZE_MAX	2048
23 24

static const struct usb_device_id id_table[] = {
25
	{ USB_DEVICE(0x18d1, 0x1eaf) },
26 27 28 29
	{ },
};
MODULE_DEVICE_TABLE(usb, id_table);

30 31
#define APB1_LOG_SIZE		SZ_16K

32 33 34
/* Number of bulk in and bulk out couple */
#define NUM_BULKS		7

35 36 37 38 39 40 41 42 43 44
/*
 * Number of CPort IN urbs in flight at any point in time.
 * Adjust if we are having stalls in the USB buffer due to not enough urbs in
 * flight.
 */
#define NUM_CPORT_IN_URB	4

/* Number of CPort OUT urbs in flight at any point in time.
 * Adjust if we get messages saying we are out of urbs in the system log.
 */
45
#define NUM_CPORT_OUT_URB	(8 * NUM_BULKS)
46

47 48 49
/* vendor request APB1 log */
#define REQUEST_LOG		0x02

50 51 52
/* vendor request to map a cport to bulk in and bulk out endpoints */
#define REQUEST_EP_MAPPING	0x03

53 54 55
/* vendor request to get the number of cports available */
#define REQUEST_CPORT_COUNT	0x04

56 57 58
/* vendor request to reset a cport state */
#define REQUEST_RESET_CPORT	0x05

59 60 61 62
/* vendor request to time the latency of messages on a given cport */
#define REQUEST_LATENCY_TAG_EN	0x06
#define REQUEST_LATENCY_TAG_DIS	0x07

63 64 65 66 67
/*
 * es2_cport_in - per-endpoint state for inbound CPort traffic
 * @endpoint: bulk in endpoint for CPort data
 * @urb: array of urbs for the CPort in messages
 * @buffer: array of buffers for the @urb urbs; each urb reads into the
 *	    buffer at the same index
 */
struct es2_cport_in {
	__u8 endpoint;
	struct urb *urb[NUM_CPORT_IN_URB];
	u8 *buffer[NUM_CPORT_IN_URB];
};

/*
 * es2_cport_out - per-endpoint state for outbound CPort traffic
 * @endpoint: bulk out endpoint for CPort data
 */
struct es2_cport_out {
	__u8 endpoint;
};

81
/**
 * es2_ap_dev - ES2 USB Bridge to AP structure
 * @usb_dev: pointer to the USB device we are.
 * @usb_intf: pointer to the USB interface we are bound to.
 * @hd: pointer to our gb_host_device structure
 *
 * @cport_in: endpoint, urbs and buffer for cport in messages
 * @cport_out: endpoint for cport out messages
 * @cport_out_urb: array of urbs for the CPort out messages
 * @cport_out_urb_busy: array of flags to see if the @cport_out_urb is busy or
 *			not.
 * @cport_out_urb_cancelled: array of flags indicating whether the
 *			corresponding @cport_out_urb is being cancelled
 * @cport_out_urb_lock: locks the @cport_out_urb_busy "list"
 *
 * @cport_to_ep: per-CPort index of the bulk endpoint pair to use
 *		 (0 means the default muxed pair)
 *
 * @apb_log_task: task pointer for logging thread
 * @apb_log_dentry: file system entry for the log file interface
 * @apb_log_enable_dentry: file system entry for enabling logging
 * @apb_log_fifo: kernel FIFO to carry logged data
 */
struct es2_ap_dev {
	struct usb_device *usb_dev;
	struct usb_interface *usb_intf;
	struct gb_host_device *hd;

	struct es2_cport_in cport_in[NUM_BULKS];
	struct es2_cport_out cport_out[NUM_BULKS];
	struct urb *cport_out_urb[NUM_CPORT_OUT_URB];
	bool cport_out_urb_busy[NUM_CPORT_OUT_URB];
	bool cport_out_urb_cancelled[NUM_CPORT_OUT_URB];
	spinlock_t cport_out_urb_lock;

	int *cport_to_ep;

	struct task_struct *apb_log_task;
	struct dentry *apb_log_dentry;
	struct dentry *apb_log_enable_dentry;
	DECLARE_KFIFO(apb_log_fifo, char, APB1_LOG_SIZE);
};

121 122 123 124 125 126
/**
 * cport_to_ep - information about cport to endpoints mapping
 * @cport_id: the id of cport to map to endpoints
 * @endpoint_in: the endpoint number to use for in transfer
 * @endpoint_out: the endpoint number to use for out transfer
 *
 * This is the wire format of the REQUEST_EP_MAPPING vendor request.
 */
struct cport_to_ep {
	__le16 cport_id;
	__u8 endpoint_in;
	__u8 endpoint_out;
};

133
/* Our private data lives in the host device's hd_priv area. */
static inline struct es2_ap_dev *hd_to_es2(struct gb_host_device *hd)
{
	return (struct es2_ap_dev *)&hd->hd_priv;
}

static void cport_out_callback(struct urb *urb);
139 140
static void usb_log_enable(struct es2_ap_dev *es2);
static void usb_log_disable(struct es2_ap_dev *es2);
141

142
/* Get the endpoints pair mapped to the cport */
143
static int cport_to_ep_pair(struct es2_ap_dev *es2, u16 cport_id)
144
{
145
	if (cport_id >= es2->hd->num_cports)
146
		return 0;
147
	return es2->cport_to_ep[cport_id];
148 149
}

150
#define ES2_TIMEOUT	500	/* 500 ms for the SVC to do something */
151

152 153
/* Disable for now until we work all of this out to keep a warning-free build */
#if 0
154
/* Test if the endpoints pair is already mapped to a cport */
155
static int ep_pair_in_use(struct es2_ap_dev *es2, int ep_pair)
156 157 158
{
	int i;

159 160
	for (i = 0; i < es2->hd->num_cports; i++) {
		if (es2->cport_to_ep[i] == ep_pair)
161 162 163 164 165
			return 1;
	}
	return 0;
}

166
/* Configure the endpoint mapping and send the request to APBridge */
167
static int map_cport_to_ep(struct es2_ap_dev *es2,
168
				u16 cport_id, int ep_pair)
169 170 171 172
{
	int retval;
	struct cport_to_ep *cport_to_ep;

173
	if (ep_pair < 0 || ep_pair >= NUM_BULKS)
174
		return -EINVAL;
175
	if (cport_id >= es2->hd->num_cports)
176
		return -EINVAL;
177
	if (ep_pair && ep_pair_in_use(es2, ep_pair))
178 179 180 181 182 183
		return -EINVAL;

	cport_to_ep = kmalloc(sizeof(*cport_to_ep), GFP_KERNEL);
	if (!cport_to_ep)
		return -ENOMEM;

184
	es2->cport_to_ep[cport_id] = ep_pair;
185
	cport_to_ep->cport_id = cpu_to_le16(cport_id);
186 187
	cport_to_ep->endpoint_in = es2->cport_in[ep_pair].endpoint;
	cport_to_ep->endpoint_out = es2->cport_out[ep_pair].endpoint;
188

189 190
	retval = usb_control_msg(es2->usb_dev,
				 usb_sndctrlpipe(es2->usb_dev, 0),
191 192 193 194 195
				 REQUEST_EP_MAPPING,
				 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
				 0x00, 0x00,
				 (char *)cport_to_ep,
				 sizeof(*cport_to_ep),
196
				 ES2_TIMEOUT);
197 198 199 200 201 202 203
	if (retval == sizeof(*cport_to_ep))
		retval = 0;
	kfree(cport_to_ep);

	return retval;
}

204
/* Unmap a cport: use the muxed endpoints pair */
205
static int unmap_cport(struct es2_ap_dev *es2, u16 cport_id)
206
{
207
	return map_cport_to_ep(es2, cport_id, 0);
208
}
209
#endif
210

211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239
/*
 * Submit all pre-allocated in-urbs for one bulk-in endpoint.
 * On failure, kill the urbs already submitted and return the error.
 */
static int es2_cport_in_enable(struct es2_ap_dev *es2,
				struct es2_cport_in *cport_in)
{
	struct urb *urb;
	int ret;
	int i;

	for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
		urb = cport_in->urb[i];

		ret = usb_submit_urb(urb, GFP_KERNEL);
		if (ret) {
			dev_err(&es2->usb_dev->dev,
					"failed to submit in-urb: %d\n", ret);
			goto err_kill_urbs;
		}
	}

	return 0;

err_kill_urbs:
	/* Unwind only the urbs submitted before the failure. */
	for (--i; i >= 0; --i) {
		urb = cport_in->urb[i];
		usb_kill_urb(urb);
	}

	return ret;
}

240 241 242 243 244 245 246 247 248 249 250 251
/* Cancel every in-flight in-urb for one bulk-in endpoint. */
static void es2_cport_in_disable(struct es2_ap_dev *es2,
				struct es2_cport_in *cport_in)
{
	int i;

	for (i = 0; i < NUM_CPORT_IN_URB; ++i)
		usb_kill_urb(cport_in->urb[i]);
}

252
/*
 * Take an out-urb from the pre-allocated pool, or dynamically allocate one
 * if the pool is exhausted.  The pool is protected by cport_out_urb_lock;
 * urbs flagged as cancelled are skipped so message_cancel() can't race with
 * reuse.
 */
static struct urb *next_free_urb(struct es2_ap_dev *es2, gfp_t gfp_mask)
{
	struct urb *urb = NULL;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&es2->cport_out_urb_lock, flags);

	/* Look in our pool of allocated urbs first, as that's the "fastest" */
	for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
		if (es2->cport_out_urb_busy[i] == false &&
				es2->cport_out_urb_cancelled[i] == false) {
			es2->cport_out_urb_busy[i] = true;
			urb = es2->cport_out_urb[i];
			break;
		}
	}
	spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);
	if (urb)
		return urb;

	/*
	 * Crap, pool is empty, complain to the syslog and go allocate one
	 * dynamically as we have to succeed.
	 */
	dev_err(&es2->usb_dev->dev,
		"No free CPort OUT urbs, having to dynamically allocate one!\n");
	return usb_alloc_urb(0, gfp_mask);
}

282
/*
 * Return an out-urb obtained from next_free_urb().  Pool urbs are simply
 * marked free again; dynamically allocated ones are released.
 */
static void free_urb(struct es2_ap_dev *es2, struct urb *urb)
{
	unsigned long flags;
	int i;
	/*
	 * See if this was an urb in our pool, if so mark it "free", otherwise
	 * we need to free it ourselves.
	 */
	spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
	for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
		if (urb == es2->cport_out_urb[i]) {
			es2->cport_out_urb_busy[i] = false;
			urb = NULL;
			break;
		}
	}
	spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);

	/* If urb is not NULL, then we need to free this urb */
	usb_free_urb(urb);
}

304 305 306 307 308 309 310
/*
 * We (ab)use the operation-message header pad bytes to transfer the
 * cport id in order to minimise overhead.
 */
static void
gb_message_cport_pack(struct gb_operation_msg_hdr *header, u16 cport_id)
{
	/*
	 * Truncation to one byte is safe here: apb_get_cport_count() clamps
	 * the CPort count to U8_MAX for exactly this reason.
	 */
	header->pad[0] = cport_id;
}

/* Clear the pad bytes used for the CPort id */
static void gb_message_cport_clear(struct gb_operation_msg_hdr *header)
{
	header->pad[0] = 0;
}

/* Extract the CPort id packed into the header, and clear it */
static u16 gb_message_cport_unpack(struct gb_operation_msg_hdr *header)
{
	u16 cport_id = header->pad[0];

	gb_message_cport_clear(header);

	return cport_id;
}

330
/*
 * Queue an outbound greybus message on the bulk-out endpoint mapped to
 * @cport_id.
 *
 * Returns zero if the message was successfully queued, or a negative errno
 * otherwise.  On success the urb is recorded in message->hcpriv so that
 * message_cancel() can find and kill it.
 */
static int message_send(struct gb_host_device *hd, u16 cport_id,
			struct gb_message *message, gfp_t gfp_mask)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct usb_device *udev = es2->usb_dev;
	size_t buffer_size;
	int retval;
	struct urb *urb;
	int ep_pair;
	unsigned long flags;

	/*
	 * The data actually transferred will include an indication
	 * of where the data should be sent.  Do one last check of
	 * the target CPort id before filling it in.
	 */
	if (!cport_id_valid(hd, cport_id)) {
		dev_err(&udev->dev, "invalid destination cport 0x%02x\n",
				cport_id);
		return -EINVAL;
	}

	/* Find a free urb */
	urb = next_free_urb(es2, gfp_mask);
	if (!urb)
		return -ENOMEM;

	/* Publish the urb under the lock so message_cancel() sees it. */
	spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
	message->hcpriv = urb;
	spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);

	/* Pack the cport id into the message header */
	gb_message_cport_pack(message->header, cport_id);

	buffer_size = sizeof(*message->header) + message->payload_size;

	ep_pair = cport_to_ep_pair(es2, cport_id);
	usb_fill_bulk_urb(urb, udev,
			  usb_sndbulkpipe(udev,
					  es2->cport_out[ep_pair].endpoint),
			  message->buffer, buffer_size,
			  cport_out_callback, message);
	/* Short-terminate so the bridge sees message boundaries. */
	urb->transfer_flags |= URB_ZERO_PACKET;
	trace_gb_host_device_send(hd, cport_id, buffer_size);
	retval = usb_submit_urb(urb, gfp_mask);
	if (retval) {
		dev_err(&udev->dev, "failed to submit out-urb: %d\n", retval);

		/* Undo everything done above, in reverse order. */
		spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
		message->hcpriv = NULL;
		spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);

		free_urb(es2, urb);
		gb_message_cport_clear(message->header);

		return retval;
	}

	return 0;
}

/*
 * Cancel an in-flight message previously queued by message_send().
 *
 * Can not be called in atomic context (usb_kill_urb() sleeps).  A reference
 * is taken on the urb so a dynamically allocated one cannot be freed from
 * under us by cport_out_callback(); a pool urb is additionally flagged
 * cancelled so next_free_urb() will not hand it out while we kill it.
 */
static void message_cancel(struct gb_message *message)
{
	struct gb_host_device *hd = message->operation->connection->hd;
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct urb *urb;
	int i;

	might_sleep();

	spin_lock_irq(&es2->cport_out_urb_lock);
	urb = message->hcpriv;

	/* Prevent dynamically allocated urb from being deallocated. */
	usb_get_urb(urb);

	/* Prevent pre-allocated urb from being reused. */
	for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
		if (urb == es2->cport_out_urb[i]) {
			es2->cport_out_urb_cancelled[i] = true;
			break;
		}
	}
	spin_unlock_irq(&es2->cport_out_urb_lock);

	usb_kill_urb(urb);

	/* i < NUM_CPORT_OUT_URB means the urb came from the pool. */
	if (i < NUM_CPORT_OUT_URB) {
		spin_lock_irq(&es2->cport_out_urb_lock);
		es2->cport_out_urb_cancelled[i] = false;
		spin_unlock_irq(&es2->cport_out_urb_lock);
	}

	/* Drop the reference taken above. */
	usb_free_urb(urb);
}

433
/*
 * Ask the bridge to reset the state of one CPort via a vendor control
 * request.  Returns 0 on success or a negative errno.
 */
static int cport_reset(struct gb_host_device *hd, u16 cport_id)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct usb_device *udev = es2->usb_dev;
	int retval;

	retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
				 REQUEST_RESET_CPORT,
				 USB_DIR_OUT | USB_TYPE_VENDOR |
				 USB_RECIP_INTERFACE, cport_id, 0,
				 NULL, 0, ES2_TIMEOUT);
	if (retval < 0) {
		dev_err(&udev->dev, "failed to reset cport %u: %d\n", cport_id,
			retval);
		return retval;
	}

	return 0;
}

453
/*
 * Host-device cport_enable hook: reset the CPort before use.  The SVC
 * CPort is exempt — resetting it would sever the connection used to talk
 * to the SVC itself.
 */
static int cport_enable(struct gb_host_device *hd, u16 cport_id)
{
	if (cport_id == GB_SVC_CPORT_ID)
		return 0;

	return cport_reset(hd, cport_id);
}

466
/*
 * Ask the bridge to start latency-tagging messages on @cport_id
 * (REQUEST_LATENCY_TAG_EN vendor request).
 */
static int latency_tag_enable(struct gb_host_device *hd, u16 cport_id)
{
	int retval;
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct usb_device *udev = es2->usb_dev;

	if (!cport_id_valid(hd, cport_id)) {
		dev_err(&udev->dev, "invalid destination cport 0x%02x\n",
			cport_id);
		return -EINVAL;
	}

	retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
				 REQUEST_LATENCY_TAG_EN,
				 USB_DIR_OUT | USB_TYPE_VENDOR |
				 USB_RECIP_INTERFACE, cport_id, 0, NULL,
				 0, ES2_TIMEOUT);

	if (retval < 0)
		dev_err(&udev->dev, "Cannot enable latency tag for cport %d\n",
			cport_id);
	return retval;
}

490
/*
 * Ask the bridge to stop latency-tagging messages on @cport_id
 * (REQUEST_LATENCY_TAG_DIS vendor request).
 */
static int latency_tag_disable(struct gb_host_device *hd, u16 cport_id)
{
	int retval;
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct usb_device *udev = es2->usb_dev;

	if (!cport_id_valid(hd, cport_id)) {
		dev_err(&udev->dev, "invalid destination cport 0x%02x\n",
			cport_id);
		return -EINVAL;
	}

	retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
				 REQUEST_LATENCY_TAG_DIS,
				 USB_DIR_OUT | USB_TYPE_VENDOR |
				 USB_RECIP_INTERFACE, cport_id, 0, NULL,
				 0, ES2_TIMEOUT);

	if (retval < 0)
		dev_err(&udev->dev, "Cannot disable latency tag for cport %d\n",
			cport_id);
	return retval;
}

514
/* Host-device operations exported to greybus core. */
static struct gb_hd_driver es2_driver = {
	.hd_priv_size		= sizeof(struct es2_ap_dev),
	.message_send		= message_send,
	.message_cancel		= message_cancel,
	.cport_enable		= cport_enable,
	.latency_tag_enable	= latency_tag_enable,
	.latency_tag_disable	= latency_tag_disable,
};

/*
 * Common function to report consistent warnings based on URB status.
 * Returns 0 for success, the (negative) status for fatal conditions,
 * and -EAGAIN for anything unrecognized.
 */
static int check_urb_status(struct urb *urb)
{
	struct device *dev = &urb->dev->dev;
	int status = urb->status;

	switch (status) {
	case 0:
		return 0;

	case -EOVERFLOW:
		dev_err(dev, "%s: overflow actual length is %d\n",
			__func__, urb->actual_length);
		/* fall through */
	case -ECONNRESET:
	case -ENOENT:
	case -ESHUTDOWN:
	case -EILSEQ:
	case -EPROTO:
		/* device is gone, stop sending */
		return status;
	}
	dev_err(dev, "%s: unknown status %d\n", __func__, status);

	return -EAGAIN;
}

549
/*
 * Release everything allocated for the bridge: debugfs entries, the log
 * thread, all urbs and buffers, the endpoint map, and finally our
 * references on the host device and USB device.
 */
static void es2_destroy(struct es2_ap_dev *es2)
{
	struct usb_device *udev;
	int bulk_in;
	int i;

	debugfs_remove(es2->apb_log_enable_dentry);
	usb_log_disable(es2);

	/* Tear down everything! */
	for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
		struct urb *urb = es2->cport_out_urb[i];

		/* Urbs are allocated in order, so a hole marks the end. */
		if (!urb)
			break;
		usb_kill_urb(urb);
		usb_free_urb(urb);
		es2->cport_out_urb[i] = NULL;
		es2->cport_out_urb_busy[i] = false;	/* just to be anal */
	}

	for (bulk_in = 0; bulk_in < NUM_BULKS; bulk_in++) {
		struct es2_cport_in *cport_in = &es2->cport_in[bulk_in];

		for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
			struct urb *urb = cport_in->urb[i];

			if (!urb)
				break;
			usb_free_urb(urb);
			kfree(cport_in->buffer[i]);
			cport_in->buffer[i] = NULL;
		}
	}

	kfree(es2->cport_to_ep);

	/*
	 * gb_hd_put() may free the hd (and es2 with it), so grab the
	 * usb_dev pointer first.
	 */
	udev = es2->usb_dev;
	gb_hd_put(es2->hd);

	usb_put_dev(udev);
}

592 593 594 595 596 597 598 599
/* USB disconnect hook: quiesce traffic, unregister the hd, free everything. */
static void ap_disconnect(struct usb_interface *interface)
{
	struct es2_ap_dev *es2 = usb_get_intfdata(interface);
	int i;

	for (i = 0; i < NUM_BULKS; ++i)
		es2_cport_in_disable(es2, &es2->cport_in[i]);

	gb_hd_del(es2->hd);

	es2_destroy(es2);
}

605 606
/*
 * Completion handler for bulk-in urbs: hand the received message to
 * greybus core and resubmit the urb.  On a fatal status the urb is
 * dropped (not resubmitted).
 */
static void cport_in_callback(struct urb *urb)
{
	struct gb_host_device *hd = urb->context;
	struct device *dev = &urb->dev->dev;
	struct gb_operation_msg_hdr *header;
	int status = check_urb_status(urb);
	int retval;
	u16 cport_id;

	if (status) {
		/* -EAGAIN/-EPROTO are treated as transient: resubmit. */
		if ((status == -EAGAIN) || (status == -EPROTO))
			goto exit;
		dev_err(dev, "urb cport in error %d (dropped)\n", status);
		return;
	}

	if (urb->actual_length < sizeof(*header)) {
		dev_err(dev, "short message received\n");
		goto exit;
	}

	/* Extract the CPort id, which is packed in the message header */
	header = urb->transfer_buffer;
	cport_id = gb_message_cport_unpack(header);

	if (cport_id_valid(hd, cport_id)) {
		trace_gb_host_device_recv(hd, cport_id, urb->actual_length);
		greybus_data_rcvd(hd, cport_id, urb->transfer_buffer,
							urb->actual_length);
	} else {
		dev_err(dev, "invalid cport id 0x%02x received\n", cport_id);
	}
exit:
	/* put our urb back in the request pool */
	retval = usb_submit_urb(urb, GFP_ATOMIC);
	if (retval)
		dev_err(dev, "failed to resubmit in-urb: %d\n", retval);
}

/*
 * Completion handler for bulk-out urbs: report the send result to the
 * submitter and return the urb to the pool.
 */
static void cport_out_callback(struct urb *urb)
{
	struct gb_message *message = urb->context;
	struct gb_host_device *hd = message->operation->connection->hd;
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	int status = check_urb_status(urb);
	unsigned long flags;

	/* The pad bytes carried the CPort id on the wire; scrub them. */
	gb_message_cport_clear(message->header);

	spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
	message->hcpriv = NULL;
	spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);

	/*
	 * Tell the submitter that the message send (attempt) is
	 * complete, and report the status.
	 */
	greybus_message_sent(hd, message, status);

	free_urb(es2, urb);
}

667
#define APB1_LOG_MSG_SIZE	64
668
/*
 * Drain the bridge's log via REQUEST_LOG control reads into the kfifo,
 * APB1_LOG_MSG_SIZE bytes at a time, until the bridge has nothing left.
 * @buf is a caller-provided scratch buffer of APB1_LOG_MSG_SIZE bytes.
 */
static void apb_log_get(struct es2_ap_dev *es2, char *buf)
{
	int retval;

	/* SVC messages go down our control pipe */
	do {
		retval = usb_control_msg(es2->usb_dev,
					usb_rcvctrlpipe(es2->usb_dev, 0),
					REQUEST_LOG,
					USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
					0x00, 0x00,
					buf,
					APB1_LOG_MSG_SIZE,
					ES2_TIMEOUT);
		if (retval > 0)
			kfifo_in(&es2->apb_log_fifo, buf, retval);
	} while (retval > 0);
}

687
/*
 * Kthread body: poll the bridge log once a second until the thread is
 * stopped by usb_log_disable().
 */
static int apb_log_poll(void *data)
{
	struct es2_ap_dev *es2 = data;
	char *buf;

	buf = kmalloc(APB1_LOG_MSG_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	while (!kthread_should_stop()) {
		msleep(1000);
		apb_log_get(es2, buf);
	}

	kfree(buf);

	return 0;
}

706
/*
 * debugfs read for "apb_log": copy up to @count buffered log bytes out of
 * the kfifo to userspace.
 *
 * NOTE(review): this expects f_inode->i_private to be the es2 pointer
 * passed as the data argument of debugfs_create_file() in
 * usb_log_enable() — verify that a non-NULL pointer is passed there,
 * otherwise this dereferences NULL.
 */
static ssize_t apb_log_read(struct file *f, char __user *buf,
				size_t count, loff_t *ppos)
{
	struct es2_ap_dev *es2 = f->f_inode->i_private;
	ssize_t ret;
	size_t copied;
	char *tmp_buf;

	if (count > APB1_LOG_SIZE)
		count = APB1_LOG_SIZE;

	tmp_buf = kmalloc(count, GFP_KERNEL);
	if (!tmp_buf)
		return -ENOMEM;

	copied = kfifo_out(&es2->apb_log_fifo, tmp_buf, count);
	ret = simple_read_from_buffer(buf, count, ppos, tmp_buf, copied);

	kfree(tmp_buf);

	return ret;
}

729 730
/* File operations for the read-only "apb_log" debugfs entry. */
static const struct file_operations apb_log_fops = {
	.read	= apb_log_read,
};

733
static void usb_log_enable(struct es2_ap_dev *es2)
734
{
735
	if (!IS_ERR_OR_NULL(es2->apb_log_task))
736 737 738
		return;

	/* get log from APB1 */
739 740
	es2->apb_log_task = kthread_run(apb_log_poll, es2, "apb_log");
	if (IS_ERR(es2->apb_log_task))
741
		return;
742 743
	/* XXX We will need to rename this per APB */
	es2->apb_log_dentry = debugfs_create_file("apb_log", S_IRUGO,
744
						gb_debugfs_get(), NULL,
745
						&apb_log_fops);
746 747
}

748
/*
 * Tear down bridge logging: remove the debugfs file first so no new
 * readers appear, then stop the poller thread.  A no-op if logging is
 * not running.
 */
static void usb_log_disable(struct es2_ap_dev *es2)
{
	if (IS_ERR_OR_NULL(es2->apb_log_task))
		return;

	debugfs_remove(es2->apb_log_dentry);
	es2->apb_log_dentry = NULL;

	kthread_stop(es2->apb_log_task);
	es2->apb_log_task = NULL;
}

760
/* debugfs read for "apb_log_enable": reports "1\n" or "0\n". */
static ssize_t apb_log_enable_read(struct file *f, char __user *buf,
				size_t count, loff_t *ppos)
{
	struct es2_ap_dev *es2 = f->f_inode->i_private;
	int enable = !IS_ERR_OR_NULL(es2->apb_log_task);
	char tmp_buf[3];	/* digit + newline + NUL */

	sprintf(tmp_buf, "%d\n", enable);
	return simple_read_from_buffer(buf, count, ppos, tmp_buf, 3);
}

771
/*
 * debugfs write for "apb_log_enable": any non-zero integer starts
 * logging, zero stops it.
 */
static ssize_t apb_log_enable_write(struct file *f, const char __user *buf,
				size_t count, loff_t *ppos)
{
	int enable;
	ssize_t retval;
	struct es2_ap_dev *es2 = f->f_inode->i_private;

	retval = kstrtoint_from_user(buf, count, 10, &enable);
	if (retval)
		return retval;

	if (enable)
		usb_log_enable(es2);
	else
		usb_log_disable(es2);

	return count;
}

790 791 792
/* File operations for the read/write "apb_log_enable" debugfs entry. */
static const struct file_operations apb_log_enable_fops = {
	.read	= apb_log_enable_read,
	.write	= apb_log_enable_write,
};

795
static int apb_get_cport_count(struct usb_device *udev)
796 797 798 799 800 801 802 803 804 805 806 807
{
	int retval;
	__le16 *cport_count;

	cport_count = kmalloc(sizeof(*cport_count), GFP_KERNEL);
	if (!cport_count)
		return -ENOMEM;

	retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
				 REQUEST_CPORT_COUNT,
				 USB_DIR_IN | USB_TYPE_VENDOR |
				 USB_RECIP_INTERFACE, 0, 0, cport_count,
808
				 sizeof(*cport_count), ES2_TIMEOUT);
809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827
	if (retval < 0) {
		dev_err(&udev->dev, "Cannot retrieve CPort count: %d\n",
			retval);
		goto out;
	}

	retval = le16_to_cpu(*cport_count);

	/* We need to fit a CPort ID in one byte of a message header */
	if (retval > U8_MAX) {
		retval = U8_MAX;
		dev_warn(&udev->dev, "Limiting number of CPorts to U8_MAX\n");
	}

out:
	kfree(cport_count);
	return retval;
}

828
/*
829 830 831 832
 * The ES2 USB Bridge device has 15 endpoints
 * 1 Control - usual USB stuff + AP -> APBridgeA messages
 * 7 Bulk IN - CPort data in
 * 7 Bulk OUT - CPort data out
833 834 835 836
 */
static int ap_probe(struct usb_interface *interface,
		    const struct usb_device_id *id)
{
837
	struct es2_ap_dev *es2;
838
	struct gb_host_device *hd;
839 840 841
	struct usb_device *udev;
	struct usb_host_interface *iface_desc;
	struct usb_endpoint_descriptor *endpoint;
842 843
	int bulk_in = 0;
	int bulk_out = 0;
844 845
	int retval = -ENOMEM;
	int i;
846
	int num_cports;
847

848 849
	udev = usb_get_dev(interface_to_usbdev(interface));

850
	num_cports = apb_get_cport_count(udev);
851 852 853 854 855 856 857
	if (num_cports < 0) {
		usb_put_dev(udev);
		dev_err(&udev->dev, "Cannot retrieve CPort count: %d\n",
			num_cports);
		return num_cports;
	}

858 859
	hd = gb_hd_create(&es2_driver, &udev->dev, ES2_GBUF_MSG_SIZE_MAX,
				num_cports);
860
	if (IS_ERR(hd)) {
861
		usb_put_dev(udev);
862
		return PTR_ERR(hd);
863 864
	}

865 866 867 868 869
	es2 = hd_to_es2(hd);
	es2->hd = hd;
	es2->usb_intf = interface;
	es2->usb_dev = udev;
	spin_lock_init(&es2->cport_out_urb_lock);
870
	INIT_KFIFO(es2->apb_log_fifo);
871
	usb_set_intfdata(interface, es2);
872

873
	es2->cport_to_ep = kcalloc(hd->num_cports, sizeof(*es2->cport_to_ep),
874
				   GFP_KERNEL);
875
	if (!es2->cport_to_ep) {
876 877 878 879
		retval = -ENOMEM;
		goto error;
	}

880
	/* find all bulk endpoints */
881 882 883 884
	iface_desc = interface->cur_altsetting;
	for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
		endpoint = &iface_desc->endpoint[i].desc;

885
		if (usb_endpoint_is_bulk_in(endpoint)) {
886
			es2->cport_in[bulk_in++].endpoint =
887
				endpoint->bEndpointAddress;
888
		} else if (usb_endpoint_is_bulk_out(endpoint)) {
889
			es2->cport_out[bulk_out++].endpoint =
890
				endpoint->bEndpointAddress;
891 892
		} else {
			dev_err(&udev->dev,
893
				"Unknown endpoint type found, address %02x\n",
894 895 896
				endpoint->bEndpointAddress);
		}
	}
897
	if (bulk_in != NUM_BULKS || bulk_out != NUM_BULKS) {
898 899 900 901
		dev_err(&udev->dev, "Not enough endpoints found in device, aborting!\n");
		goto error;
	}

902
	/* Allocate buffers for our cport in messages */
903
	for (bulk_in = 0; bulk_in < NUM_BULKS; bulk_in++) {
904 905
		struct es2_cport_in *cport_in = &es2->cport_in[bulk_in];

906 907 908 909 910 911 912
		for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
			struct urb *urb;
			u8 *buffer;

			urb = usb_alloc_urb(0, GFP_KERNEL);
			if (!urb)
				goto error;
913
			buffer = kmalloc(ES2_GBUF_MSG_SIZE_MAX, GFP_KERNEL);
914 915 916 917 918 919
			if (!buffer)
				goto error;

			usb_fill_bulk_urb(urb, udev,
					  usb_rcvbulkpipe(udev,
							  cport_in->endpoint),
920
					  buffer, ES2_GBUF_MSG_SIZE_MAX,
921 922 923 924
					  cport_in_callback, hd);
			cport_in->urb[i] = urb;
			cport_in->buffer[i] = buffer;
		}
925 926 927 928 929 930 931 932 933 934
	}

	/* Allocate urbs for our CPort OUT messages */
	for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
		struct urb *urb;

		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb)
			goto error;

935 936
		es2->cport_out_urb[i] = urb;
		es2->cport_out_urb_busy[i] = false;	/* just to be anal */
937 938
	}

939 940
	/* XXX We will need to rename this per APB */
	es2->apb_log_enable_dentry = debugfs_create_file("apb_log_enable",
941
							(S_IWUSR | S_IRUGO),
942
							gb_debugfs_get(), es2,
943
							&apb_log_enable_fops);
944

945 946 947 948
	retval = gb_hd_add(hd);
	if (retval)
		goto error;

949 950 951
	for (i = 0; i < NUM_BULKS; ++i) {
		retval = es2_cport_in_enable(es2, &es2->cport_in[i]);
		if (retval)
952
			goto err_disable_cport_in;
953 954
	}

955
	return 0;
956 957 958 959

err_disable_cport_in:
	for (--i; i >= 0; --i)
		es2_cport_in_disable(es2, &es2->cport_in[i]);
960
	gb_hd_del(hd);
961
error:
962
	es2_destroy(es2);
963 964 965 966

	return retval;
}

967
/* USB driver glue; matched against id_table above. */
static struct usb_driver es2_ap_driver = {
	.name =		"es2_ap_driver",
	.probe =	ap_probe,
	.disconnect =	ap_disconnect,
	.id_table =	id_table,
};

974
module_usb_driver(es2_ap_driver);
975

976
MODULE_LICENSE("GPL v2");
977
MODULE_AUTHOR("Greg Kroah-Hartman <gregkh@linuxfoundation.org>");