/*
 * Greybus "AP" USB driver for "ES2" controller chips
 *
 * Copyright 2014-2015 Google Inc.
 * Copyright 2014-2015 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */
#include <linux/kthread.h>
#include <linux/sizes.h>
#include <linux/usb.h>
#include <linux/kfifo.h>
#include <linux/debugfs.h>
#include <asm/unaligned.h>

#include "greybus.h"
#include "kernel_ver.h"
#include "connection.h"
#include "greybus_trace.h"

/* Memory sizes for the buffers sent to/from the ES2 controller */
#define ES2_GBUF_MSG_SIZE_MAX	2048

/* USB IDs this driver binds to: a development placeholder and Google's real ID. */
static const struct usb_device_id id_table[] = {
	{ USB_DEVICE(0xffff, 0x0002) },	/* Made up number, delete once firmware is fixed to use real number */
	{ USB_DEVICE(0x18d1, 0x1eaf) },
	{ },
};
MODULE_DEVICE_TABLE(usb, id_table);

#define APB1_LOG_SIZE		SZ_16K

/* Number of bulk in and bulk out couple */
#define NUM_BULKS		7

/*
 * Number of CPort IN urbs in flight at any point in time.
 * Adjust if we are having stalls in the USB buffer due to not enough urbs in
 * flight.
 */
#define NUM_CPORT_IN_URB	4

/* Number of CPort OUT urbs in flight at any point in time.
 * Adjust if we get messages saying we are out of urbs in the system log.
 */
#define NUM_CPORT_OUT_URB	(8 * NUM_BULKS)

/* vendor request APB1 log */
#define REQUEST_LOG		0x02

/* vendor request to map a cport to bulk in and bulk out endpoints */
#define REQUEST_EP_MAPPING	0x03

/* vendor request to get the number of cports available */
#define REQUEST_CPORT_COUNT	0x04

/* vendor request to reset a cport state */
#define REQUEST_RESET_CPORT	0x05

/* vendor request to time the latency of messages on a given cport */
#define REQUEST_LATENCY_TAG_EN	0x06
#define REQUEST_LATENCY_TAG_DIS	0x07
/*
 * @endpoint: bulk in endpoint for CPort data
 * @urb: array of urbs for the CPort in messages
 * @buffer: array of buffers for the @cport_in_urb urbs
 */
69
struct es2_cport_in {
70 71 72 73 74 75 76 77
	__u8 endpoint;
	struct urb *urb[NUM_CPORT_IN_URB];
	u8 *buffer[NUM_CPORT_IN_URB];
};

/*
 * @endpoint: bulk out endpoint for CPort data
 */
struct es2_cport_out {
	__u8 endpoint;
};

82
/**
 * es2_ap_dev - ES2 USB Bridge to AP structure
 * @usb_dev: pointer to the USB device we are.
 * @usb_intf: pointer to the USB interface we are bound to.
 * @hd: pointer to our gb_host_device structure
 *
 * @cport_in: endpoint, urbs and buffer for cport in messages
 * @cport_out: endpoint for cport out messages
 * @cport_out_urb: array of urbs for the CPort out messages
 * @cport_out_urb_busy: array of flags to see if the @cport_out_urb is busy or
 *			not.
 * @cport_out_urb_cancelled: array of flags indicating whether the
 *			corresponding @cport_out_urb is being cancelled
 * @cport_out_urb_lock: locks the @cport_out_urb_busy "list"
 *
 * @cport_to_ep: per-cport index of the bulk endpoint pair to use (0 = muxed)
 *
 * @apb_log_task: task pointer for logging thread
 * @apb_log_dentry: file system entry for the log file interface
 * @apb_log_enable_dentry: file system entry for enabling logging
 * @apb_log_fifo: kernel FIFO to carry logged data
 */
struct es2_ap_dev {
	struct usb_device *usb_dev;
	struct usb_interface *usb_intf;
	struct gb_host_device *hd;

	struct es2_cport_in cport_in[NUM_BULKS];
	struct es2_cport_out cport_out[NUM_BULKS];
	struct urb *cport_out_urb[NUM_CPORT_OUT_URB];
	bool cport_out_urb_busy[NUM_CPORT_OUT_URB];
	bool cport_out_urb_cancelled[NUM_CPORT_OUT_URB];
	spinlock_t cport_out_urb_lock;

	int *cport_to_ep;

	struct task_struct *apb_log_task;
	struct dentry *apb_log_dentry;
	struct dentry *apb_log_enable_dentry;
	DECLARE_KFIFO(apb_log_fifo, char, APB1_LOG_SIZE);
};

122 123 124 125 126 127
/**
 * cport_to_ep - information about cport to endpoints mapping
 * @cport_id: the id of cport to map to endpoints
 * @endpoint_in: the endpoint number to use for in transfer
 * @endpoint_out: the endpoint number to use for out transfer
 */
struct cport_to_ep {
	__le16 cport_id;
	__u8 endpoint_in;
	__u8 endpoint_out;
};

134
/*
 * Convert a host-device pointer to its ES2-private data, which the greybus
 * core allocates inline in hd->hd_priv (sized by es2_driver.hd_priv_size).
 */
static inline struct es2_ap_dev *hd_to_es2(struct gb_host_device *hd)
{
	return (struct es2_ap_dev *)&hd->hd_priv;
}

static void cport_out_callback(struct urb *urb);
static void usb_log_enable(struct es2_ap_dev *es2);
static void usb_log_disable(struct es2_ap_dev *es2);
/* Get the endpoints pair mapped to the cport */
144
static int cport_to_ep_pair(struct es2_ap_dev *es2, u16 cport_id)
145
{
146
	if (cport_id >= es2->hd->num_cports)
147
		return 0;
148
	return es2->cport_to_ep[cport_id];
149 150
}

#define ES2_TIMEOUT	500	/* 500 ms for the SVC to do something */

153 154
/* Disable for now until we work all of this out to keep a warning-free build */
#if 0
155
/* Test if the endpoints pair is already mapped to a cport */
static int ep_pair_in_use(struct es2_ap_dev *es2, int ep_pair)
{
	int i;

	/* Linear scan of the cport-to-endpoint map; 1 on first match. */
	for (i = 0; i < es2->hd->num_cports; i++) {
		if (es2->cport_to_ep[i] == ep_pair)
			return 1;
	}
	return 0;
}

167
/* Configure the endpoint mapping and send the request to APBridge */
static int map_cport_to_ep(struct es2_ap_dev *es2,
				u16 cport_id, int ep_pair)
{
	int retval;
	struct cport_to_ep *cport_to_ep;

	if (ep_pair < 0 || ep_pair >= NUM_BULKS)
		return -EINVAL;
	if (cport_id >= es2->hd->num_cports)
		return -EINVAL;
	/* Pair 0 is the shared (muxed) pair; any other pair is exclusive. */
	if (ep_pair && ep_pair_in_use(es2, ep_pair))
		return -EINVAL;

	/* usb_control_msg() requires a kmalloc'd (DMA-able) buffer. */
	cport_to_ep = kmalloc(sizeof(*cport_to_ep), GFP_KERNEL);
	if (!cport_to_ep)
		return -ENOMEM;

	es2->cport_to_ep[cport_id] = ep_pair;
	cport_to_ep->cport_id = cpu_to_le16(cport_id);
	cport_to_ep->endpoint_in = es2->cport_in[ep_pair].endpoint;
	cport_to_ep->endpoint_out = es2->cport_out[ep_pair].endpoint;

	retval = usb_control_msg(es2->usb_dev,
				 usb_sndctrlpipe(es2->usb_dev, 0),
				 REQUEST_EP_MAPPING,
				 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
				 0x00, 0x00,
				 (char *)cport_to_ep,
				 sizeof(*cport_to_ep),
				 ES2_TIMEOUT);
	/* A full-length transfer is success; anything else passes through. */
	if (retval == sizeof(*cport_to_ep))
		retval = 0;
	kfree(cport_to_ep);

	return retval;
}

205
/* Unmap a cport: use the muxed endpoints pair */
static int unmap_cport(struct es2_ap_dev *es2, u16 cport_id)
{
	/* Pair 0 is the shared default, so mapping to it "unmaps". */
	return map_cport_to_ep(es2, cport_id, 0);
}
210
#endif
211

212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240
/*
 * Submit all CPort IN urbs for one bulk-in endpoint.
 * On failure, any urbs already submitted are killed before returning
 * the submit error.
 */
static int es2_cport_in_enable(struct es2_ap_dev *es2,
				struct es2_cport_in *cport_in)
{
	struct urb *urb;
	int ret;
	int i;

	for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
		urb = cport_in->urb[i];

		ret = usb_submit_urb(urb, GFP_KERNEL);
		if (ret) {
			dev_err(&es2->usb_dev->dev,
					"failed to submit in-urb: %d\n", ret);
			goto err_kill_urbs;
		}
	}

	return 0;

err_kill_urbs:
	/* Unwind: kill only the urbs submitted before the failure. */
	for (--i; i >= 0; --i) {
		urb = cport_in->urb[i];
		usb_kill_urb(urb);
	}

	return ret;
}

241 242 243 244 245 246 247 248 249 250 251 252
/* Kill every in-flight CPort IN urb for one bulk-in endpoint. */
static void es2_cport_in_disable(struct es2_ap_dev *es2,
				struct es2_cport_in *cport_in)
{
	int idx;

	for (idx = 0; idx < NUM_CPORT_IN_URB; ++idx)
		usb_kill_urb(cport_in->urb[idx]);
}

253
/*
 * Hand out a CPort OUT urb, marking it busy.  Prefers the pre-allocated
 * pool; falls back to a dynamic allocation (may return NULL) if the pool
 * is exhausted.  free_urb() undoes either case.
 */
static struct urb *next_free_urb(struct es2_ap_dev *es2, gfp_t gfp_mask)
{
	struct urb *urb = NULL;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&es2->cport_out_urb_lock, flags);

	/* Look in our pool of allocated urbs first, as that's the "fastest" */
	for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
		if (es2->cport_out_urb_busy[i] == false &&
				es2->cport_out_urb_cancelled[i] == false) {
			es2->cport_out_urb_busy[i] = true;
			urb = es2->cport_out_urb[i];
			break;
		}
	}
	spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);
	if (urb)
		return urb;

	/*
	 * Crap, pool is empty, complain to the syslog and go allocate one
	 * dynamically as we have to succeed.
	 */
	dev_err(&es2->usb_dev->dev,
		"No free CPort OUT urbs, having to dynamically allocate one!\n");
	return usb_alloc_urb(0, gfp_mask);
}

283
/* Release an urb obtained from next_free_urb(). */
static void free_urb(struct es2_ap_dev *es2, struct urb *urb)
{
	unsigned long flags;
	int i;
	/*
	 * See if this was an urb in our pool, if so mark it "free", otherwise
	 * we need to free it ourselves.
	 */
	spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
	for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
		if (urb == es2->cport_out_urb[i]) {
			es2->cport_out_urb_busy[i] = false;
			/* NULL out so the usb_free_urb() below is a no-op. */
			urb = NULL;
			break;
		}
	}
	spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);

	/* If urb is not NULL, then we need to free this urb */
	usb_free_urb(urb);
}

305 306 307 308 309 310 311
/*
 * We (ab)use the operation-message header pad bytes to transfer the
 * cport id in order to minimise overhead.
 */
static void
gb_message_cport_pack(struct gb_operation_msg_hdr *header, u16 cport_id)
{
	/* One pad byte suffices: the cport count is capped at U8_MAX. */
	header->pad[0] = cport_id;
}

/* Clear the pad bytes used for the CPort id */
static void gb_message_cport_clear(struct gb_operation_msg_hdr *header)
{
	header->pad[0] = 0;
}

/* Extract the CPort id packed into the header, and clear it */
static u16 gb_message_cport_unpack(struct gb_operation_msg_hdr *header)
{
	u16 cport_id = header->pad[0];

	/* Restore the header before it is handed up the stack. */
	gb_message_cport_clear(header);

	return cport_id;
}

331
/*
 * Returns zero if the message was successfully queued, or a negative errno
 * otherwise.
 */
static int message_send(struct gb_host_device *hd, u16 cport_id,
			struct gb_message *message, gfp_t gfp_mask)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct usb_device *udev = es2->usb_dev;
	size_t buffer_size;
	int retval;
	struct urb *urb;
	int ep_pair;
	unsigned long flags;

	/*
	 * The data actually transferred will include an indication
	 * of where the data should be sent.  Do one last check of
	 * the target CPort id before filling it in.
	 */
	if (!cport_id_valid(hd, cport_id)) {
		dev_err(&udev->dev, "invalid destination cport 0x%02x\n",
				cport_id);
		return -EINVAL;
	}

	/* Find a free urb */
	urb = next_free_urb(es2, gfp_mask);
	if (!urb)
		return -ENOMEM;

	/* Publish the urb under the lock so message_cancel() can find it. */
	spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
	message->hcpriv = urb;
	spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);

	/* Pack the cport id into the message header */
	gb_message_cport_pack(message->header, cport_id);

	buffer_size = sizeof(*message->header) + message->payload_size;

	ep_pair = cport_to_ep_pair(es2, cport_id);
	usb_fill_bulk_urb(urb, udev,
			  usb_sndbulkpipe(udev,
					  es2->cport_out[ep_pair].endpoint),
			  message->buffer, buffer_size,
			  cport_out_callback, message);
	/* Terminate with a zero-length packet so the bridge sees message end. */
	urb->transfer_flags |= URB_ZERO_PACKET;
	trace_gb_host_device_send(hd, cport_id, buffer_size);
	retval = usb_submit_urb(urb, gfp_mask);
	if (retval) {
		dev_err(&udev->dev, "failed to submit out-urb: %d\n", retval);

		/* Undo the publication and header packing done above. */
		spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
		message->hcpriv = NULL;
		spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);

		free_urb(es2, urb);
		gb_message_cport_clear(message->header);

		return retval;
	}

	return 0;
}

/*
 * Can not be called in atomic context.
 */
static void message_cancel(struct gb_message *message)
{
	struct gb_host_device *hd = message->operation->connection->hd;
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct urb *urb;
	int i;

	might_sleep();

	spin_lock_irq(&es2->cport_out_urb_lock);
	urb = message->hcpriv;

	/* Prevent dynamically allocated urb from being deallocated. */
	usb_get_urb(urb);

	/* Prevent pre-allocated urb from being reused. */
	for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
		if (urb == es2->cport_out_urb[i]) {
			es2->cport_out_urb_cancelled[i] = true;
			break;
		}
	}
	spin_unlock_irq(&es2->cport_out_urb_lock);

	/* Sleeps until the completion handler has finished. */
	usb_kill_urb(urb);

	/* Pool urb: allow it to be handed out again. */
	if (i < NUM_CPORT_OUT_URB) {
		spin_lock_irq(&es2->cport_out_urb_lock);
		es2->cport_out_urb_cancelled[i] = false;
		spin_unlock_irq(&es2->cport_out_urb_lock);
	}

	/* Drop the reference taken above. */
	usb_free_urb(urb);
}

434
/* Ask the APBridge to reset the state of the given cport. */
static int cport_reset(struct gb_host_device *hd, u16 cport_id)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct usb_device *udev = es2->usb_dev;
	int retval;

	retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
				 REQUEST_RESET_CPORT,
				 USB_DIR_OUT | USB_TYPE_VENDOR |
				 USB_RECIP_INTERFACE, 0, cport_id,
				 NULL, 0, ES2_TIMEOUT);
	if (retval < 0) {
		dev_err(&udev->dev, "failed to reset cport %hu: %d\n", cport_id,
			retval);
		return retval;
	}

	return 0;
}

454
/*
 * Enable a cport by resetting its state on the bridge.  The SVC cport is
 * managed by the bridge itself and is never reset here.
 */
static int cport_enable(struct gb_host_device *hd, u16 cport_id)
{
	if (cport_id == GB_SVC_CPORT_ID)
		return 0;

	return cport_reset(hd, cport_id);
}

467
/* Ask the APBridge to start latency-tagging messages on @cport_id. */
static int latency_tag_enable(struct gb_host_device *hd, u16 cport_id)
{
	int retval;
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct usb_device *udev = es2->usb_dev;

	if (!cport_id_valid(hd, cport_id)) {
		dev_err(&udev->dev, "invalid destination cport 0x%02x\n",
			cport_id);
		return -EINVAL;
	}

	retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
				 REQUEST_LATENCY_TAG_EN,
				 USB_DIR_OUT | USB_TYPE_VENDOR |
				 USB_RECIP_INTERFACE, cport_id, 0, NULL,
				 0, ES2_TIMEOUT);

	if (retval < 0)
		dev_err(&udev->dev, "Cannot enable latency tag for cport %d\n",
			cport_id);
	return retval;
}

491
/* Ask the APBridge to stop latency-tagging messages on @cport_id. */
static int latency_tag_disable(struct gb_host_device *hd, u16 cport_id)
{
	int retval;
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct usb_device *udev = es2->usb_dev;

	if (!cport_id_valid(hd, cport_id)) {
		dev_err(&udev->dev, "invalid destination cport 0x%02x\n",
			cport_id);
		return -EINVAL;
	}

	retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
				 REQUEST_LATENCY_TAG_DIS,
				 USB_DIR_OUT | USB_TYPE_VENDOR |
				 USB_RECIP_INTERFACE, cport_id, 0, NULL,
				 0, ES2_TIMEOUT);

	if (retval < 0)
		dev_err(&udev->dev, "Cannot disable latency tag for cport %d\n",
			cport_id);
	return retval;
}

515
/* Host-device callbacks handed to the greybus core at gb_hd_create() time. */
static struct gb_hd_driver es2_driver = {
	.hd_priv_size		= sizeof(struct es2_ap_dev),
	.message_send		= message_send,
	.message_cancel		= message_cancel,
	.cport_enable		= cport_enable,
	.latency_tag_enable	= latency_tag_enable,
	.latency_tag_disable	= latency_tag_disable,
};

/* Common function to report consistent warnings based on URB status */
static int check_urb_status(struct urb *urb)
{
	struct device *dev = &urb->dev->dev;
	int status = urb->status;

	switch (status) {
	case 0:
		return 0;

	case -EOVERFLOW:
		dev_err(dev, "%s: overflow actual length is %d\n",
			__func__, urb->actual_length);
		/* fall through */
	case -ECONNRESET:
	case -ENOENT:
	case -ESHUTDOWN:
	case -EILSEQ:
	case -EPROTO:
		/* device is gone, stop sending */
		return status;
	}
	dev_err(dev, "%s: unknown status %d\n", __func__, status);

	/* Unknown statuses are treated as transient; callers may retry. */
	return -EAGAIN;
}

550
/* Tear down all driver state; @es2 itself lives inside hd and dies with it. */
static void es2_destroy(struct es2_ap_dev *es2)
{
	struct usb_device *udev;
	int *cport_to_ep;
	int bulk_in;
	int i;

	debugfs_remove(es2->apb_log_enable_dentry);
	usb_log_disable(es2);

	/* Tear down everything! */
	for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
		struct urb *urb = es2->cport_out_urb[i];

		if (!urb)
			break;
		usb_kill_urb(urb);
		usb_free_urb(urb);
		es2->cport_out_urb[i] = NULL;
		es2->cport_out_urb_busy[i] = false;	/* just to be anal */
	}

	for (bulk_in = 0; bulk_in < NUM_BULKS; bulk_in++) {
		struct es2_cport_in *cport_in = &es2->cport_in[bulk_in];

		for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
			struct urb *urb = cport_in->urb[i];

			if (!urb)
				break;
			usb_free_urb(urb);
			kfree(cport_in->buffer[i]);
			cport_in->buffer[i] = NULL;
		}
	}

	/*
	 * Cache these before gb_hd_put(): dropping the last hd reference
	 * frees the memory es2 is embedded in.
	 */
	udev = es2->usb_dev;
	cport_to_ep = es2->cport_to_ep;
	gb_hd_put(es2->hd);

	kfree(cport_to_ep);
	usb_put_dev(udev);
}

594 595 596 597 598 599 600 601
/* USB disconnect: stop incoming traffic, unregister the hd, free everything. */
static void ap_disconnect(struct usb_interface *interface)
{
	struct es2_ap_dev *es2 = usb_get_intfdata(interface);
	int i;

	/* Stop the flow of incoming data before tearing anything down. */
	for (i = 0; i < NUM_BULKS; ++i)
		es2_cport_in_disable(es2, &es2->cport_in[i]);

	gb_hd_del(es2->hd);

	es2_destroy(es2);
}

607 608
/* Completion handler for CPort IN urbs: deliver the data and resubmit. */
static void cport_in_callback(struct urb *urb)
{
	struct gb_host_device *hd = urb->context;
	struct device *dev = &urb->dev->dev;
	struct gb_operation_msg_hdr *header;
	int status = check_urb_status(urb);
	int retval;
	u16 cport_id;

	if (status) {
		/* Transient errors: drop the data but keep the urb cycling. */
		if ((status == -EAGAIN) || (status == -EPROTO))
			goto exit;
		dev_err(dev, "urb cport in error %d (dropped)\n", status);
		return;
	}

	if (urb->actual_length < sizeof(*header)) {
		dev_err(dev, "short message received\n");
		goto exit;
	}

	/* Extract the CPort id, which is packed in the message header */
	header = urb->transfer_buffer;
	cport_id = gb_message_cport_unpack(header);

	if (cport_id_valid(hd, cport_id)) {
		trace_gb_host_device_recv(hd, cport_id, urb->actual_length);
		greybus_data_rcvd(hd, cport_id, urb->transfer_buffer,
							urb->actual_length);
	} else {
		dev_err(dev, "invalid cport id 0x%02x received\n", cport_id);
	}
exit:
	/* put our urb back in the request pool */
	retval = usb_submit_urb(urb, GFP_ATOMIC);
	if (retval)
		dev_err(dev, "failed to resubmit in-urb: %d\n", retval);
}

/* Completion handler for CPort OUT urbs: report status, recycle the urb. */
static void cport_out_callback(struct urb *urb)
{
	struct gb_message *message = urb->context;
	struct gb_host_device *hd = message->operation->connection->hd;
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	int status = check_urb_status(urb);
	unsigned long flags;

	/* Restore the header's pad bytes before handing the message back. */
	gb_message_cport_clear(message->header);

	spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
	message->hcpriv = NULL;
	spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);

	/*
	 * Tell the submitter that the message send (attempt) is
	 * complete, and report the status.
	 */
	greybus_message_sent(hd, message, status);

	free_urb(es2, urb);
}

669
#define APB1_LOG_MSG_SIZE	64
/* Drain all pending APB1 log data into the kfifo, one chunk at a time. */
static void apb_log_get(struct es2_ap_dev *es2, char *buf)
{
	int retval;

	/* SVC messages go down our control pipe */
	do {
		retval = usb_control_msg(es2->usb_dev,
					usb_rcvctrlpipe(es2->usb_dev, 0),
					REQUEST_LOG,
					USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
					0x00, 0x00,
					buf,
					APB1_LOG_MSG_SIZE,
					ES2_TIMEOUT);
		if (retval > 0)
			kfifo_in(&es2->apb_log_fifo, buf, retval);
	} while (retval > 0);
}

689
/* Kthread body: poll the bridge for log data once a second until stopped. */
static int apb_log_poll(void *data)
{
	struct es2_ap_dev *es2 = data;
	char *buf;

	buf = kmalloc(APB1_LOG_MSG_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	while (!kthread_should_stop()) {
		msleep(1000);
		apb_log_get(es2, buf);
	}

	kfree(buf);

	return 0;
}

708
/* debugfs read for "apb_log": copy up to @count bytes out of the kfifo. */
static ssize_t apb_log_read(struct file *f, char __user *buf,
				size_t count, loff_t *ppos)
{
	struct es2_ap_dev *es2 = f->f_inode->i_private;
	ssize_t ret;
	size_t copied;
	char *tmp_buf;

	if (count > APB1_LOG_SIZE)
		count = APB1_LOG_SIZE;

	/* Bounce buffer: kfifo_out() cannot write to userspace directly. */
	tmp_buf = kmalloc(count, GFP_KERNEL);
	if (!tmp_buf)
		return -ENOMEM;

	copied = kfifo_out(&es2->apb_log_fifo, tmp_buf, count);
	ret = simple_read_from_buffer(buf, count, ppos, tmp_buf, copied);

	kfree(tmp_buf);

	return ret;
}

731 732
/* Read-only debugfs interface for the APB1 log. */
static const struct file_operations apb_log_fops = {
	.read	= apb_log_read,
};

735
static void usb_log_enable(struct es2_ap_dev *es2)
736
{
737
	if (!IS_ERR_OR_NULL(es2->apb_log_task))
738 739 740
		return;

	/* get log from APB1 */
741 742
	es2->apb_log_task = kthread_run(apb_log_poll, es2, "apb_log");
	if (IS_ERR(es2->apb_log_task))
743
		return;
744 745
	/* XXX We will need to rename this per APB */
	es2->apb_log_dentry = debugfs_create_file("apb_log", S_IRUGO,
746
						gb_debugfs_get(), NULL,
747
						&apb_log_fops);
748 749
}

750
/* Stop APB1 log collection: remove the debugfs file, then stop the kthread. */
static void usb_log_disable(struct es2_ap_dev *es2)
{
	if (IS_ERR_OR_NULL(es2->apb_log_task))
		return;

	/* Remove the reader's entry point before stopping its data source. */
	debugfs_remove(es2->apb_log_dentry);
	es2->apb_log_dentry = NULL;

	kthread_stop(es2->apb_log_task);
	es2->apb_log_task = NULL;
}

762
/* debugfs read for "apb_log_enable": report "1\n" or "0\n". */
static ssize_t apb_log_enable_read(struct file *f, char __user *buf,
				size_t count, loff_t *ppos)
{
	struct es2_ap_dev *es2 = f->f_inode->i_private;
	/* Logging is "enabled" iff the polling kthread is running. */
	int enable = !IS_ERR_OR_NULL(es2->apb_log_task);
	char tmp_buf[3];

	sprintf(tmp_buf, "%d\n", enable);
	return simple_read_from_buffer(buf, count, ppos, tmp_buf, 3);
}

773
/* debugfs write for "apb_log_enable": non-zero starts logging, zero stops. */
static ssize_t apb_log_enable_write(struct file *f, const char __user *buf,
				size_t count, loff_t *ppos)
{
	int enable;
	ssize_t retval;
	struct es2_ap_dev *es2 = f->f_inode->i_private;

	retval = kstrtoint_from_user(buf, count, 10, &enable);
	if (retval)
		return retval;

	if (enable)
		usb_log_enable(es2);
	else
		usb_log_disable(es2);

	return count;
}

792 793 794
/* Read/write debugfs interface to toggle APB1 log collection. */
static const struct file_operations apb_log_enable_fops = {
	.read	= apb_log_enable_read,
	.write	= apb_log_enable_write,
};

797
/*
 * Query the bridge for its CPort count.
 * Returns the count (clamped to U8_MAX, since a CPort id must fit in one
 * header pad byte) or a negative errno.
 */
static int apb_get_cport_count(struct usb_device *udev)
{
	int retval;
	__le16 *cport_count;

	/* usb_control_msg() requires a kmalloc'd (DMA-able) buffer. */
	cport_count = kmalloc(sizeof(*cport_count), GFP_KERNEL);
	if (!cport_count)
		return -ENOMEM;

	retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
				 REQUEST_CPORT_COUNT,
				 USB_DIR_IN | USB_TYPE_VENDOR |
				 USB_RECIP_INTERFACE, 0, 0, cport_count,
				 sizeof(*cport_count), ES2_TIMEOUT);
	if (retval < 0) {
		dev_err(&udev->dev, "Cannot retrieve CPort count: %d\n",
			retval);
		goto out;
	}

	retval = le16_to_cpu(*cport_count);

	/* We need to fit a CPort ID in one byte of a message header */
	if (retval > U8_MAX) {
		retval = U8_MAX;
		dev_warn(&udev->dev, "Limiting number of CPorts to U8_MAX\n");
	}

out:
	kfree(cport_count);
	return retval;
}

830
/*
831 832 833 834
 * The ES2 USB Bridge device has 15 endpoints
 * 1 Control - usual USB stuff + AP -> APBridgeA messages
 * 7 Bulk IN - CPort data in
 * 7 Bulk OUT - CPort data out
835 836 837 838
 */
static int ap_probe(struct usb_interface *interface,
		    const struct usb_device_id *id)
{
839
	struct es2_ap_dev *es2;
840
	struct gb_host_device *hd;
841 842 843
	struct usb_device *udev;
	struct usb_host_interface *iface_desc;
	struct usb_endpoint_descriptor *endpoint;
844 845
	int bulk_in = 0;
	int bulk_out = 0;
846 847
	int retval = -ENOMEM;
	int i;
848
	int num_cports;
849

850 851
	udev = usb_get_dev(interface_to_usbdev(interface));

852
	num_cports = apb_get_cport_count(udev);
853 854 855 856 857 858 859
	if (num_cports < 0) {
		usb_put_dev(udev);
		dev_err(&udev->dev, "Cannot retrieve CPort count: %d\n",
			num_cports);
		return num_cports;
	}

860 861
	hd = gb_hd_create(&es2_driver, &udev->dev, ES2_GBUF_MSG_SIZE_MAX,
				num_cports);
862
	if (IS_ERR(hd)) {
863
		usb_put_dev(udev);
864
		return PTR_ERR(hd);
865 866
	}

867 868 869 870 871
	es2 = hd_to_es2(hd);
	es2->hd = hd;
	es2->usb_intf = interface;
	es2->usb_dev = udev;
	spin_lock_init(&es2->cport_out_urb_lock);
872
	INIT_KFIFO(es2->apb_log_fifo);
873
	usb_set_intfdata(interface, es2);
874

875
	es2->cport_to_ep = kcalloc(hd->num_cports, sizeof(*es2->cport_to_ep),
876
				   GFP_KERNEL);
877
	if (!es2->cport_to_ep) {
878 879 880 881
		retval = -ENOMEM;
		goto error;
	}

882
	/* find all bulk endpoints */
883 884 885 886
	iface_desc = interface->cur_altsetting;
	for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
		endpoint = &iface_desc->endpoint[i].desc;

887
		if (usb_endpoint_is_bulk_in(endpoint)) {
888
			es2->cport_in[bulk_in++].endpoint =
889
				endpoint->bEndpointAddress;
890
		} else if (usb_endpoint_is_bulk_out(endpoint)) {
891
			es2->cport_out[bulk_out++].endpoint =
892
				endpoint->bEndpointAddress;
893 894 895 896 897 898
		} else {
			dev_err(&udev->dev,
				"Unknown endpoint type found, address %x\n",
				endpoint->bEndpointAddress);
		}
	}
899
	if (bulk_in != NUM_BULKS || bulk_out != NUM_BULKS) {
900 901 902 903
		dev_err(&udev->dev, "Not enough endpoints found in device, aborting!\n");
		goto error;
	}

904
	/* Allocate buffers for our cport in messages */
905
	for (bulk_in = 0; bulk_in < NUM_BULKS; bulk_in++) {
906 907
		struct es2_cport_in *cport_in = &es2->cport_in[bulk_in];

908 909 910 911 912 913 914
		for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
			struct urb *urb;
			u8 *buffer;

			urb = usb_alloc_urb(0, GFP_KERNEL);
			if (!urb)
				goto error;
915
			buffer = kmalloc(ES2_GBUF_MSG_SIZE_MAX, GFP_KERNEL);
916 917 918 919 920 921
			if (!buffer)
				goto error;

			usb_fill_bulk_urb(urb, udev,
					  usb_rcvbulkpipe(udev,
							  cport_in->endpoint),
922
					  buffer, ES2_GBUF_MSG_SIZE_MAX,
923 924 925 926
					  cport_in_callback, hd);
			cport_in->urb[i] = urb;
			cport_in->buffer[i] = buffer;
		}
927 928 929 930 931 932 933 934 935 936
	}

	/* Allocate urbs for our CPort OUT messages */
	for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
		struct urb *urb;

		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb)
			goto error;

937 938
		es2->cport_out_urb[i] = urb;
		es2->cport_out_urb_busy[i] = false;	/* just to be anal */
939 940
	}

941 942
	/* XXX We will need to rename this per APB */
	es2->apb_log_enable_dentry = debugfs_create_file("apb_log_enable",
943
							(S_IWUSR | S_IRUGO),
944
							gb_debugfs_get(), es2,
945
							&apb_log_enable_fops);
946

947 948 949 950
	retval = gb_hd_add(hd);
	if (retval)
		goto error;

951 952 953
	for (i = 0; i < NUM_BULKS; ++i) {
		retval = es2_cport_in_enable(es2, &es2->cport_in[i]);
		if (retval)
954
			goto err_disable_cport_in;
955 956
	}

957
	return 0;
958 959 960 961

err_disable_cport_in:
	for (--i; i >= 0; --i)
		es2_cport_in_disable(es2, &es2->cport_in[i]);
962
	gb_hd_del(hd);
963
error:
964
	es2_destroy(es2);
965 966 967 968

	return retval;
}

969
/* USB driver glue binding the probe/disconnect pair to the IDs above. */
static struct usb_driver es2_ap_driver = {
	.name =		"es2_ap_driver",
	.probe =	ap_probe,
	.disconnect =	ap_disconnect,
	.id_table =	id_table,
};

976
module_usb_driver(es2_ap_driver);
977

978
MODULE_LICENSE("GPL v2");
979
MODULE_AUTHOR("Greg Kroah-Hartman <gregkh@linuxfoundation.org>");