/*
 * Core maple bus functionality
 *
 *  Copyright (C) 2007, 2008 Adrian McMenamin
 *  Copyright (C) 2001 - 2008 Paul Mundt
 *
 * Based on 2.4 code by:
 *
 *  Copyright (C) 2000-2001 YAEGASHI Takeshi
 *  Copyright (C) 2001 M. R. Brown
 *  Copyright (C) 2001 Paul Mundt
 *
 * and others.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/maple.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <asm/cacheflush.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <mach/dma.h>
#include <mach/sysasic.h>

MODULE_AUTHOR("Yaegashi Takeshi, Paul Mundt, M. R. Brown, Adrian McMenamin");
MODULE_DESCRIPTION("Maple bus driver for Dreamcast");
MODULE_LICENSE("GPL v2");
MODULE_SUPPORTED_DEVICE("{{SEGA, Dreamcast/Maple}}");

static void maple_dma_handler(struct work_struct *work);
static void maple_vblank_handler(struct work_struct *work);

static DECLARE_WORK(maple_dma_process, maple_dma_handler);
static DECLARE_WORK(maple_vblank_process, maple_vblank_handler);

static LIST_HEAD(maple_waitq);
static LIST_HEAD(maple_sentq);

/* mutex to protect queue of waiting packets */
static DEFINE_MUTEX(maple_wlist_lock);

static struct maple_driver maple_dummy_driver;
static struct device maple_bus;
static int subdevice_map[MAPLE_PORTS];
static unsigned long *maple_sendbuf, *maple_sendptr, *maple_lastptr;
static unsigned long maple_pnp_time;
static int started, scanning, fullscan;
static struct kmem_cache *maple_queue_cache;

struct maple_device_specify {
	int port;
	int unit;
};

static bool checked[4];
static struct maple_device *baseunits[4];

/**
 * maple_driver_register - register a maple driver
 * @drv: maple driver to be registered.
 *
 * Registers the passed in @drv, while updating the bus type.
 * Devices with matching function IDs will be automatically probed.
 */
int maple_driver_register(struct maple_driver *drv)
{
	if (!drv)
		return -EINVAL;

	drv->drv.bus = &maple_bus_type;

	return driver_register(&drv->drv);
}
EXPORT_SYMBOL_GPL(maple_driver_register);
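/*
 * Example (illustrative sketch, not from the original file): a child
 * device driver normally declares a struct maple_driver carrying the
 * function code it services and registers it from module init, roughly:
 *
 *	static struct maple_driver my_maple_driver = {
 *		.function = MAPLE_FUNC_CONTROLLER,
 *		.drv = {
 *			.name = "my_maple_driver",
 *			.probe = my_probe,
 *			.remove = my_remove,
 *		},
 *	};
 *
 *	return maple_driver_register(&my_maple_driver);
 *
 * Here my_probe and my_remove are hypothetical driver callbacks; only
 * .function and the embedded device_driver are used by the code in
 * this file.
 */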

/**
 * maple_driver_unregister - unregister a maple driver.
 * @drv: maple driver to unregister.
 *
 * Cleans up after maple_driver_register(). To be invoked in the exit
 * path of any module drivers.
 */
void maple_driver_unregister(struct maple_driver *drv)
{
	driver_unregister(&drv->drv);
}

/* set hardware registers to enable next round of dma */
static void maplebus_dma_reset(void)
{
	ctrl_outl(MAPLE_MAGIC, MAPLE_RESET);
	/* set trig type to 0 for software trigger, 1 for hardware (VBLANK) */
	ctrl_outl(1, MAPLE_TRIGTYPE);
	ctrl_outl(MAPLE_2MBPS | MAPLE_TIMEOUT(50000), MAPLE_SPEED);
	ctrl_outl(PHYSADDR(maple_sendbuf), MAPLE_DMAADDR);
	ctrl_outl(1, MAPLE_ENABLE);
}

/**
 * maple_getcond_callback - setup handling MAPLE_COMMAND_GETCOND
 * @dev: device responding
 * @callback: handler callback
 * @interval: interval in jiffies between callbacks
 * @function: the function code for the device
 */
void maple_getcond_callback(struct maple_device *dev,
			void (*callback) (struct mapleq *mq),
			unsigned long interval, unsigned long function)
{
	dev->callback = callback;
	dev->interval = interval;
	dev->function = cpu_to_be32(function);
	dev->when = jiffies;
}
EXPORT_SYMBOL_GPL(maple_getcond_callback);
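/*
 * Example (sketch): a polled device driver - a controller, say - might
 * ask for a MAPLE_COMMAND_GETCOND roughly every few frames by calling,
 * from its probe routine:
 *
 *	maple_getcond_callback(mdev, my_callback, HZ / 50,
 *			       MAPLE_FUNC_CONTROLLER);
 *
 * my_callback(struct mapleq *mq) is then invoked from the DMA bottom
 * half with the GETCOND response in mq->recvbuf; my_callback is a
 * placeholder name here.
 */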

static int maple_dma_done(void)
{
	return (ctrl_inl(MAPLE_STATE) & 1) == 0;
}

static void maple_release_device(struct device *dev)
{
	struct maple_device *mdev;
	struct mapleq *mq;
	if (!dev)
		return;
	mdev = to_maple_dev(dev);
	mq = mdev->mq;
	if (mq) {
		if (mq->recvbufdcsp)
			kmem_cache_free(maple_queue_cache, mq->recvbufdcsp);
		kfree(mq);
		mq = NULL;
	}
	kfree(mdev);
}

/*
 * maple_add_packet - add a single instruction to the queue
 * @mdev - maple device
 * @function - function on device being queried
 * @command - maple command to add
 * @length - length of command string (in 32 bit words)
 * @data - remainder of command string
 */
int maple_add_packet(struct maple_device *mdev, u32 function, u32 command,
	size_t length, void *data)
{
	int locking, ret = 0;
	void *sendbuf = NULL;

	mutex_lock(&maple_wlist_lock);
	/* bounce if device already locked */
	locking = mutex_is_locked(&mdev->mq->mutex);
	if (locking) {
		ret = -EBUSY;
		goto out;
	}

	mutex_lock(&mdev->mq->mutex);

	if (length) {
		sendbuf = kmalloc(length * 4, GFP_KERNEL);
		if (!sendbuf) {
			mutex_unlock(&mdev->mq->mutex);
			ret = -ENOMEM;
			goto out;
		}
		((__be32 *)sendbuf)[0] = cpu_to_be32(function);
	}

	mdev->mq->command = command;
	mdev->mq->length = length;
	if (length > 1)
		memcpy(sendbuf + 4, data, (length - 1) * 4);
	mdev->mq->sendbuf = sendbuf;

	list_add(&mdev->mq->list, &maple_waitq);
out:
	mutex_unlock(&maple_wlist_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(maple_add_packet);
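/*
 * Example (sketch): queueing a single condition request from a child
 * driver. maple_add_packet() returns -EBUSY instead of sleeping when the
 * device queue is still locked, so callers simply retry on a later pass:
 *
 *	error = maple_add_packet(mdev, be32_to_cpu(mdev->devinfo.function),
 *				 MAPLE_COMMAND_GETCOND, 1, NULL);
 *
 * A return of -EBUSY means the queue was locked; try again later.
 */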

/*
 * maple_add_packet_sleeps - add a single instruction to the queue
 *  - waits for lock to be free
 * @mdev - maple device
 * @function - function on device being queried
 * @command - maple command to add
 * @length - length of command string (in 32 bit words)
 * @data - remainder of command string
 */
int maple_add_packet_sleeps(struct maple_device *mdev, u32 function,
	u32 command, size_t length, void *data)
{
	int locking, ret = 0;
	void *sendbuf = NULL;

	locking = mutex_lock_interruptible(&mdev->mq->mutex);
	if (locking) {
		ret = -EIO;
		goto out;
	}

	if (length) {
		sendbuf = kmalloc(length * 4, GFP_KERNEL);
		if (!sendbuf) {
			mutex_unlock(&mdev->mq->mutex);
			ret = -ENOMEM;
			goto out;
		}
		((__be32 *)sendbuf)[0] = cpu_to_be32(function);
	}

	mdev->mq->command = command;
	mdev->mq->length = length;
	if (length > 1)
		memcpy(sendbuf + 4, data, (length - 1) * 4);
	mdev->mq->sendbuf = sendbuf;

	mutex_lock(&maple_wlist_lock);
	list_add(&mdev->mq->list, &maple_waitq);
	mutex_unlock(&maple_wlist_lock);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(maple_add_packet_sleeps);
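/*
 * Example (sketch): drivers that can sleep - block transfer users such
 * as a memory card driver - use the _sleeps variant instead. With a
 * length of 2, the first long sent is the function code and the single
 * long pointed to by data follows it, e.g.:
 *
 *	u32 block = ...;	a hypothetical block/phase word
 *	error = maple_add_packet_sleeps(mdev, MAPLE_FUNC_MEMCARD,
 *					MAPLE_COMMAND_BREAD, 2, &block);
 *
 * MAPLE_FUNC_MEMCARD and MAPLE_COMMAND_BREAD are indicative only; the
 * exact payload layout is defined by the device protocol, not here.
 */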

static struct mapleq *maple_allocq(struct maple_device *mdev)
{
	struct mapleq *mq;

	mq = kmalloc(sizeof(*mq), GFP_KERNEL);
	if (!mq)
		goto failed_nomem;

	mq->dev = mdev;
	mq->recvbufdcsp = kmem_cache_zalloc(maple_queue_cache, GFP_KERNEL);
	mq->recvbuf = (void *) P2SEGADDR(mq->recvbufdcsp);
	if (!mq->recvbuf)
		goto failed_p2;
	/*
	 * most devices do not need the mutex - but
	 * anything that injects block reads or writes
	 * will rely on it
	 */
	mutex_init(&mq->mutex);

	return mq;

failed_p2:
	kfree(mq);
failed_nomem:
	return NULL;
}

static struct maple_device *maple_alloc_dev(int port, int unit)
{
	struct maple_device *mdev;

	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
	if (!mdev)
		return NULL;

	mdev->port = port;
	mdev->unit = unit;
	mdev->mq = maple_allocq(mdev);

	if (!mdev->mq) {
		kfree(mdev);
		return NULL;
	}
	mdev->dev.bus = &maple_bus_type;
	mdev->dev.parent = &maple_bus;
	return mdev;
}

static void maple_free_dev(struct maple_device *mdev)
{
	if (!mdev)
		return;
	if (mdev->mq) {
		if (mdev->mq->recvbufdcsp)
			kmem_cache_free(maple_queue_cache,
				mdev->mq->recvbufdcsp);
		kfree(mdev->mq);
	}
	kfree(mdev);
}

/* process the command queue into a maple command block
 * the terminating command has bit 31 of its first long set
 * (earlier commands in the block have that bit cleared)
 */
static void maple_build_block(struct mapleq *mq)
{
	int port, unit, from, to, len;
	unsigned long *lsendbuf = mq->sendbuf;

	port = mq->dev->port & 3;
	unit = mq->dev->unit;
	len = mq->length;
	from = port << 6;
	to = (port << 6) | (unit > 0 ? (1 << (unit - 1)) & 0x1f : 0x20);

	*maple_lastptr &= 0x7fffffff;
	maple_lastptr = maple_sendptr;

	*maple_sendptr++ = (port << 16) | len | 0x80000000;
	*maple_sendptr++ = PHYSADDR(mq->recvbuf);
	*maple_sendptr++ =
	    mq->command | (to << 8) | (from << 16) | (len << 24);
	while (len-- > 0)
		*maple_sendptr++ = *lsendbuf++;
}
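/*
 * For reference (layout as implied by the code above, not a definitive
 * hardware description): each descriptor written into the send buffer is
 *
 *	word 0: bit 31 = last-descriptor flag, bits 16+ = port,
 *		low bits = payload length in longs
 *	word 1: physical address of the receive buffer
 *	word 2: command | (to << 8) | (from << 16) | (length << 24)
 *	word 3 onwards: the function code and remaining payload longs
 */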

/* build up command queue */
static void maple_send(void)
{
	int i, maple_packets = 0;
	struct mapleq *mq, *nmq;

	if (!list_empty(&maple_sentq))
		return;
	mutex_lock(&maple_wlist_lock);
	if (list_empty(&maple_waitq) || !maple_dma_done()) {
		mutex_unlock(&maple_wlist_lock);
		return;
	}
	mutex_unlock(&maple_wlist_lock);
	maple_lastptr = maple_sendbuf;
	maple_sendptr = maple_sendbuf;
	mutex_lock(&maple_wlist_lock);
	list_for_each_entry_safe(mq, nmq, &maple_waitq, list) {
		maple_build_block(mq);
		list_move(&mq->list, &maple_sentq);
		if (maple_packets++ > MAPLE_MAXPACKETS)
			break;
	}
	mutex_unlock(&maple_wlist_lock);
	if (maple_packets > 0) {
		for (i = 0; i < (1 << MAPLE_DMA_PAGES); i++)
			dma_cache_sync(0, maple_sendbuf + i * PAGE_SIZE,
				       PAGE_SIZE, DMA_BIDIRECTIONAL);
	}
}

/* check if there is a driver registered likely to match this device */
static int check_matching_maple_driver(struct device_driver *driver,
					void *devptr)
{
	struct maple_driver *maple_drv;
	struct maple_device *mdev;

	mdev = devptr;
	maple_drv = to_maple_driver(driver);
	if (mdev->devinfo.function & cpu_to_be32(maple_drv->function))
		return 1;
	return 0;
}

static void maple_detach_driver(struct maple_device *mdev)
{
	if (!mdev)
		return;
	device_unregister(&mdev->dev);
	mdev = NULL;
}

/* process initial MAPLE_COMMAND_DEVINFO for each device or port */
static void maple_attach_driver(struct maple_device *mdev)
{
	char *p, *recvbuf;
	unsigned long function;
	int matched, retval;

	recvbuf = mdev->mq->recvbuf;
	/* copy the data as individual elements in
	* case of memory optimisation */
	memcpy(&mdev->devinfo.function, recvbuf + 4, 4);
	memcpy(&mdev->devinfo.function_data[0], recvbuf + 8, 12);
	memcpy(&mdev->devinfo.area_code, recvbuf + 20, 1);
	memcpy(&mdev->devinfo.connector_direction, recvbuf + 21, 1);
	memcpy(&mdev->devinfo.product_name[0], recvbuf + 22, 30);
	memcpy(&mdev->devinfo.product_licence[0], recvbuf + 52, 60);
	memcpy(&mdev->devinfo.standby_power, recvbuf + 112, 2);
	memcpy(&mdev->devinfo.max_power, recvbuf + 114, 2);
	memcpy(mdev->product_name, mdev->devinfo.product_name, 30);
	mdev->product_name[30] = '\0';
	memcpy(mdev->product_licence, mdev->devinfo.product_licence, 60);
	mdev->product_licence[60] = '\0';

	for (p = mdev->product_name + 29; mdev->product_name <= p; p--)
		if (*p == ' ')
			*p = '\0';
		else
			break;
	for (p = mdev->product_licence + 59; mdev->product_licence <= p; p--)
		if (*p == ' ')
			*p = '\0';
		else
			break;

	printk(KERN_INFO "Maple device detected: %s\n",
		mdev->product_name);
	printk(KERN_INFO "Maple device: %s\n", mdev->product_licence);

	function = be32_to_cpu(mdev->devinfo.function);

	if (function > 0x200) {
		/* Do this silently - as not a real device */
		function = 0;
		mdev->driver = &maple_dummy_driver;
		sprintf(mdev->dev.bus_id, "%d:0.port", mdev->port);
	} else {
		printk(KERN_INFO
			"Maple bus at (%d, %d): Function 0x%lX\n",
			mdev->port, mdev->unit, function);

		matched =
			bus_for_each_drv(&maple_bus_type, NULL, mdev,
				check_matching_maple_driver);

		if (matched == 0) {
			/* Driver does not exist yet */
			printk(KERN_INFO
				"No maple driver found.\n");
			mdev->driver = &maple_dummy_driver;
		}
		sprintf(mdev->dev.bus_id, "%d:0%d.%lX", mdev->port,
			mdev->unit, function);
	}
	mdev->function = function;
	mdev->dev.release = &maple_release_device;
	retval = device_register(&mdev->dev);
	if (retval) {
		printk(KERN_INFO
		"Maple bus: Attempt to register device"
		" (%x, %x) failed.\n",
		mdev->port, mdev->unit);
		maple_free_dev(mdev);
		mdev = NULL;
		return;
	}
}

/*
 * if device has been registered for the given
 * port and unit then return 1 - allows identification
 * of which devices need to be attached or detached
 */
static int detach_maple_device(struct device *device, void *portptr)
{
	struct maple_device_specify *ds;
	struct maple_device *mdev;

	ds = portptr;
	mdev = to_maple_dev(device);
	if (mdev->port == ds->port && mdev->unit == ds->unit)
		return 1;
	return 0;
}

static int setup_maple_commands(struct device *device, void *ignored)
{
	int add;
	struct maple_device *maple_dev = to_maple_dev(device);

	if ((maple_dev->interval > 0)
	    && time_after(jiffies, maple_dev->when)) {
		/* bounce if we cannot lock */
		add = maple_add_packet(maple_dev,
			be32_to_cpu(maple_dev->devinfo.function),
			MAPLE_COMMAND_GETCOND, 1, NULL);
		if (!add)
			maple_dev->when = jiffies + maple_dev->interval;
	} else {
		if (time_after(jiffies, maple_pnp_time))
			/* This will also bounce */
			maple_add_packet(maple_dev, 0,
				MAPLE_COMMAND_DEVINFO, 0, NULL);
	}
	return 0;
}

/* VBLANK bottom half - implemented via workqueue */
static void maple_vblank_handler(struct work_struct *work)
{
	if (!list_empty(&maple_sentq) || !maple_dma_done())
		return;

	ctrl_outl(0, MAPLE_ENABLE);

	bus_for_each_dev(&maple_bus_type, NULL, NULL,
			 setup_maple_commands);

	if (time_after(jiffies, maple_pnp_time))
		maple_pnp_time = jiffies + MAPLE_PNP_INTERVAL;

	mutex_lock(&maple_wlist_lock);
	if (!list_empty(&maple_waitq) && list_empty(&maple_sentq)) {
		mutex_unlock(&maple_wlist_lock);
		maple_send();
	} else {
		mutex_unlock(&maple_wlist_lock);
	}

	maplebus_dma_reset();
}

/* handle devices added via hotplugs - placing them on queue for DEVINFO */
static void maple_map_subunits(struct maple_device *mdev, int submask)
{
	int retval, k, devcheck;
	struct maple_device *mdev_add;
	struct maple_device_specify ds;

	ds.port = mdev->port;
	for (k = 0; k < 5; k++) {
		ds.unit = k + 1;
		retval =
		    bus_for_each_dev(&maple_bus_type, NULL, &ds,
				     detach_maple_device);
		if (retval) {
			submask = submask >> 1;
			continue;
		}
		devcheck = submask & 0x01;
		if (devcheck) {
			mdev_add = maple_alloc_dev(mdev->port, k + 1);
			if (!mdev_add)
				return;
			maple_add_packet(mdev_add, 0, MAPLE_COMMAND_DEVINFO,
				0, NULL);
			/* mark that we are checking sub devices */
			scanning = 1;
		}
		submask = submask >> 1;
	}
}

/* mark a device as removed */
static void maple_clean_submap(struct maple_device *mdev)
{
	int killbit;

	killbit = (mdev->unit > 0 ? (1 << (mdev->unit - 1)) & 0x1f : 0x20);
	killbit = ~killbit;
	killbit &= 0xFF;
	subdevice_map[mdev->port] = subdevice_map[mdev->port] & killbit;
}

/* handle empty port or hotplug removal */
static void maple_response_none(struct maple_device *mdev,
				struct mapleq *mq)
{
	if (mdev->unit != 0) {
		list_del(&mq->list);
		maple_clean_submap(mdev);
		printk(KERN_INFO
		       "Maple bus device detaching at (%d, %d)\n",
		       mdev->port, mdev->unit);
		maple_detach_driver(mdev);
		return;
	}
	if (!started || !fullscan) {
		if (checked[mdev->port] == false) {
			checked[mdev->port] = true;
			printk(KERN_INFO "No maple devices attached"
				" to port %d\n", mdev->port);
		}
		return;
	}
	maple_clean_submap(mdev);
}

/* preprocess hotplugs or scans */
static void maple_response_devinfo(struct maple_device *mdev,
				   char *recvbuf)
{
	char submask;
	if (!started || (scanning == 2) || !fullscan) {
		if ((mdev->unit == 0) && (checked[mdev->port] == false)) {
			checked[mdev->port] = true;
			maple_attach_driver(mdev);
		} else {
			if (mdev->unit != 0)
				maple_attach_driver(mdev);
		}
		return;
	}
	if (mdev->unit == 0) {
		submask = recvbuf[2] & 0x1F;
		if (submask ^ subdevice_map[mdev->port]) {
			maple_map_subunits(mdev, submask);
			subdevice_map[mdev->port] = submask;
		}
	}
}

static void maple_port_rescan(void)
{
	int i;
	struct maple_device *mdev;

	fullscan = 1;
	for (i = 0; i < MAPLE_PORTS; i++) {
		if (checked[i] == false) {
			fullscan = 0;
			mdev = baseunits[i];
			/*
			 *  test lock in case scan has failed
			 *  but device is still locked
			 */
			if (mutex_is_locked(&mdev->mq->mutex))
				mutex_unlock(&mdev->mq->mutex);
			maple_add_packet(mdev, 0, MAPLE_COMMAND_DEVINFO,
				0, NULL);
		}
	}
}

/* maple dma end bottom half - implemented via workqueue */
static void maple_dma_handler(struct work_struct *work)
{
	struct mapleq *mq, *nmq;
	struct maple_device *dev;
	char *recvbuf;
	enum maple_code code;

	if (!maple_dma_done())
		return;
	ctrl_outl(0, MAPLE_ENABLE);
	if (!list_empty(&maple_sentq)) {
		list_for_each_entry_safe(mq, nmq, &maple_sentq, list) {
			recvbuf = mq->recvbuf;
			code = recvbuf[0];
			dev = mq->dev;
			kfree(mq->sendbuf);
			mutex_unlock(&mq->mutex);
			list_del_init(&mq->list);

			switch (code) {
			case MAPLE_RESPONSE_NONE:
				maple_response_none(dev, mq);
				break;

			case MAPLE_RESPONSE_DEVINFO:
				maple_response_devinfo(dev, recvbuf);
				break;

			case MAPLE_RESPONSE_DATATRF:
				if (dev->callback)
					dev->callback(mq);
				break;

			case MAPLE_RESPONSE_FILEERR:
			case MAPLE_RESPONSE_AGAIN:
			case MAPLE_RESPONSE_BADCMD:
			case MAPLE_RESPONSE_BADFUNC:
				printk(KERN_DEBUG
				       "Maple non-fatal error 0x%X\n",
				       code);
				break;

			case MAPLE_RESPONSE_ALLINFO:
				printk(KERN_DEBUG
				       "Maple - extended device information"
					" not supported\n");
				break;

			case MAPLE_RESPONSE_OK:
				break;

			default:
				break;
			}
		}
		/* if scanning is 1 then we have subdevices to check */
		if (scanning == 1) {
			maple_send();
			scanning = 2;
		} else
			scanning = 0;
		/* check if we have actually tested all ports yet */
		if (!fullscan)
			maple_port_rescan();
		/* mark that we have been through the first scan */
		if (started == 0)
			started = 1;
	}
	maplebus_dma_reset();
}

static irqreturn_t maplebus_dma_interrupt(int irq, void *dev_id)
{
	/* Load everything into the bottom half */
	schedule_work(&maple_dma_process);
	return IRQ_HANDLED;
}

static irqreturn_t maplebus_vblank_interrupt(int irq, void *dev_id)
{
	schedule_work(&maple_vblank_process);
	return IRQ_HANDLED;
}

static int maple_set_dma_interrupt_handler(void)
{
	return request_irq(HW_EVENT_MAPLE_DMA, maplebus_dma_interrupt,
		IRQF_SHARED, "maple bus DMA", &maple_dummy_driver);
}

static int maple_set_vblank_interrupt_handler(void)
{
	return request_irq(HW_EVENT_VSYNC, maplebus_vblank_interrupt,
		IRQF_SHARED, "maple bus VBLANK", &maple_dummy_driver);
}

static int maple_get_dma_buffer(void)
{
	maple_sendbuf =
	    (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
				      MAPLE_DMA_PAGES);
	if (!maple_sendbuf)
		return -ENOMEM;
	return 0;
}

static int match_maple_bus_driver(struct device *devptr,
				  struct device_driver *drvptr)
{
	struct maple_driver *maple_drv = to_maple_driver(drvptr);
	struct maple_device *maple_dev = to_maple_dev(devptr);

	/* Trap empty port case */
	if (maple_dev->devinfo.function == 0xFFFFFFFF)
		return 0;
	else if (maple_dev->devinfo.function &
		 cpu_to_be32(maple_drv->function))
		return 1;
	return 0;
}

static int maple_bus_uevent(struct device *dev,
			    struct kobj_uevent_env *env)
{
	return 0;
}

static void maple_bus_release(struct device *dev)
{
}

static struct maple_driver maple_dummy_driver = {
	.drv = {
		.name = "maple_dummy_driver",
		.bus = &maple_bus_type,
	},
};

struct bus_type maple_bus_type = {
	.name = "maple",
	.match = match_maple_bus_driver,
	.uevent = maple_bus_uevent,
};
EXPORT_SYMBOL_GPL(maple_bus_type);

static struct device maple_bus = {
	.bus_id = "maple",
	.release = maple_bus_release,
};

static int __init maple_bus_init(void)
{
	int retval, i;
	struct maple_device *mdev[MAPLE_PORTS];
	ctrl_outl(0, MAPLE_STATE);

	retval = device_register(&maple_bus);
	if (retval)
		goto cleanup;

	retval = bus_register(&maple_bus_type);
	if (retval)
		goto cleanup_device;

	retval = driver_register(&maple_dummy_driver.drv);
	if (retval)
		goto cleanup_bus;

	/* allocate memory for maple bus dma */
	retval = maple_get_dma_buffer();
	if (retval) {
		printk(KERN_INFO
		       "Maple bus: Failed to allocate Maple DMA buffers\n");
		goto cleanup_basic;
	}

	/* set up DMA interrupt handler */
	retval = maple_set_dma_interrupt_handler();
	if (retval) {
		printk(KERN_INFO
		       "Maple bus: Failed to grab maple DMA IRQ\n");
		goto cleanup_dma;
	}

	/* set up VBLANK interrupt handler */
	retval = maple_set_vblank_interrupt_handler();
	if (retval) {
		printk(KERN_INFO "Maple bus: Failed to grab VBLANK IRQ\n");
		goto cleanup_irq;
	}

	maple_queue_cache =
	    kmem_cache_create("maple_queue_cache", 0x400, 0,
			      SLAB_POISON|SLAB_HWCACHE_ALIGN, NULL);

	if (!maple_queue_cache) {
		retval = -ENOMEM;
		goto cleanup_bothirqs;
	}

	INIT_LIST_HEAD(&maple_waitq);
	INIT_LIST_HEAD(&maple_sentq);

	/* setup maple ports */
	for (i = 0; i < MAPLE_PORTS; i++) {
		checked[i] = false;
		mdev[i] = maple_alloc_dev(i, 0);
		baseunits[i] = mdev[i];
		if (!mdev[i]) {
			while (i-- > 0)
				maple_free_dev(mdev[i]);
			goto cleanup_cache;
		}
		maple_add_packet(mdev[i], 0, MAPLE_COMMAND_DEVINFO, 0, NULL);
		subdevice_map[i] = 0;
	}

	/* setup maplebus hardware */
	maplebus_dma_reset();
	/* initial detection */
	maple_send();
	maple_pnp_time = jiffies;
	printk(KERN_INFO "Maple bus core now registered.\n");

	return 0;

cleanup_cache:
	kmem_cache_destroy(maple_queue_cache);

cleanup_bothirqs:
	free_irq(HW_EVENT_VSYNC, 0);

cleanup_irq:
	free_irq(HW_EVENT_MAPLE_DMA, 0);

cleanup_dma:
	free_pages((unsigned long) maple_sendbuf, MAPLE_DMA_PAGES);

cleanup_basic:
	driver_unregister(&maple_dummy_driver.drv);

cleanup_bus:
	bus_unregister(&maple_bus_type);

cleanup_device:
	device_unregister(&maple_bus);

cleanup:
	printk(KERN_INFO "Maple bus registration failed\n");
	return retval;
}
/* Push init to later to ensure hardware gets detected */
fs_initcall(maple_bus_init);