/*
 * Core maple bus functionality
 *
 *  Copyright (C) 2007, 2008 Adrian McMenamin
 *
 * Based on 2.4 code by:
 *
 *  Copyright (C) 2000-2001 YAEGASHI Takeshi
 *  Copyright (C) 2001 M. R. Brown
 *  Copyright (C) 2001 Paul Mundt
 *
 * and others.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/maple.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <asm/cacheflush.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <mach/dma.h>
#include <mach/sysasic.h>

MODULE_AUTHOR("Yaegashi Takeshi, Paul Mundt, M. R. Brown, Adrian McMenamin");
MODULE_DESCRIPTION("Maple bus driver for Dreamcast");
MODULE_LICENSE("GPL v2");
MODULE_SUPPORTED_DEVICE("{{SEGA, Dreamcast/Maple}}");

static void maple_dma_handler(struct work_struct *work);
static void maple_vblank_handler(struct work_struct *work);

static DECLARE_WORK(maple_dma_process, maple_dma_handler);
static DECLARE_WORK(maple_vblank_process, maple_vblank_handler);

static LIST_HEAD(maple_waitq);
static LIST_HEAD(maple_sentq);

/* mutex to protect queue of waiting packets */
static DEFINE_MUTEX(maple_wlist_lock);

static struct maple_driver maple_dummy_driver;
static struct device maple_bus;
static int subdevice_map[MAPLE_PORTS];
static unsigned long *maple_sendbuf, *maple_sendptr, *maple_lastptr;
static unsigned long maple_pnp_time;
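/* scan state: 'started' is set once the initial bus scan has completed,
 * 'scanning' tracks whether subdevices are still being queried and
 * 'fullscan' is set when every port has been checked at least once */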
static int started, scanning, fullscan;
static struct kmem_cache *maple_queue_cache;

struct maple_device_specify {
	int port;
	int unit;
};

static bool checked[4];
static struct maple_device *baseunits[4];

/**
 *  maple_driver_register - register a device driver
 *  @drv: the driver to be registered
 *
 *  Automatically sets the driver's bus to the maple bus before
 *  registering it.
 */
int maple_driver_register(struct device_driver *drv)
{
	if (!drv)
		return -EINVAL;
	drv->bus = &maple_bus_type;
	return driver_register(drv);
}
EXPORT_SYMBOL_GPL(maple_driver_register);

/* set hardware registers to enable next round of dma */
static void maplebus_dma_reset(void)
{
	ctrl_outl(MAPLE_MAGIC, MAPLE_RESET);
	/* set trig type to 0 for software trigger, 1 for hardware (VBLANK) */
	ctrl_outl(1, MAPLE_TRIGTYPE);
	ctrl_outl(MAPLE_2MBPS | MAPLE_TIMEOUT(50000), MAPLE_SPEED);
	ctrl_outl(PHYSADDR(maple_sendbuf), MAPLE_DMAADDR);
	ctrl_outl(1, MAPLE_ENABLE);
}

/**
 * maple_getcond_callback - set up handling of MAPLE_COMMAND_GETCOND
 * @dev: device responding
 * @callback: handler callback
 * @interval: interval in jiffies between callbacks
 * @function: the function code for the device
 */
void maple_getcond_callback(struct maple_device *dev,
			void (*callback) (struct mapleq *mq),
			unsigned long interval, unsigned long function)
{
	dev->callback = callback;
	dev->interval = interval;
	dev->function = cpu_to_be32(function);
	dev->when = jiffies;
}
EXPORT_SYMBOL_GPL(maple_getcond_callback);

static int maple_dma_done(void)
{
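	/* bit 0 of MAPLE_STATE is set while a DMA transfer is in progress */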
	return (ctrl_inl(MAPLE_STATE) & 1) == 0;
}

static void maple_release_device(struct device *dev)
{
	struct maple_device *mdev;
	struct mapleq *mq;
	if (!dev)
		return;
	mdev = to_maple_dev(dev);
	mq = mdev->mq;
	if (mq) {
		if (mq->recvbufdcsp)
			kmem_cache_free(maple_queue_cache, mq->recvbufdcsp);
		kfree(mq);
		mq = NULL;
	}
	kfree(mdev);
}

/*
 * maple_add_packet - add a single instruction to the queue
 * @mdev - maple device
 * @function - function on device being queried
 * @command - maple command to add
 * @length - length of command string (in 32 bit words)
 * @data - remainder of command string
 */
int maple_add_packet(struct maple_device *mdev, u32 function, u32 command,
	size_t length, void *data)
{
	int locking, ret = 0;
	void *sendbuf = NULL;

	mutex_lock(&maple_wlist_lock);
	/* bounce if device already locked */
	locking = mutex_is_locked(&mdev->mq->mutex);
	if (locking) {
		ret = -EBUSY;
		goto out;
	}

	mutex_lock(&mdev->mq->mutex);

	if (length) {
		sendbuf = kmalloc(length * 4, GFP_KERNEL);
		if (!sendbuf) {
			mutex_unlock(&mdev->mq->mutex);
			ret = -ENOMEM;
			goto out;
		}
		((__be32 *)sendbuf)[0] = cpu_to_be32(function);
	}

	mdev->mq->command = command;
	mdev->mq->length = length;
	if (length > 1)
		memcpy(sendbuf + 4, data, (length - 1) * 4);
	mdev->mq->sendbuf = sendbuf;

	list_add(&mdev->mq->list, &maple_waitq);
out:
	mutex_unlock(&maple_wlist_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(maple_add_packet);

/*
 * maple_add_packet_sleeps - add a single instruction to the queue
 *  - waits for lock to be free
 * @mdev - maple device
 * @function - function on device being queried
 * @command - maple command to add
 * @length - length of command string (in 32 bit words)
 * @data - remainder of command string
 */
int maple_add_packet_sleeps(struct maple_device *mdev, u32 function,
	u32 command, size_t length, void *data)
{
	int locking, ret = 0;
	void *sendbuf = NULL;

	locking = mutex_lock_interruptible(&mdev->mq->mutex);
	if (locking) {
		ret = -EIO;
		goto out;
	}

	if (length) {
		sendbuf = kmalloc(length * 4, GFP_KERNEL);
		if (!sendbuf) {
			mutex_unlock(&mdev->mq->mutex);
			ret = -ENOMEM;
			goto out;
		}
		((__be32 *)sendbuf)[0] = cpu_to_be32(function);
	}

	mdev->mq->command = command;
	mdev->mq->length = length;
	if (length > 1)
		memcpy(sendbuf + 4, data, (length - 1) * 4);
	mdev->mq->sendbuf = sendbuf;

	mutex_lock(&maple_wlist_lock);
	list_add(&mdev->mq->list, &maple_waitq);
	mutex_unlock(&maple_wlist_lock);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(maple_add_packet_sleeps);

static struct mapleq *maple_allocq(struct maple_device *mdev)
{
	struct mapleq *mq;

	mq = kmalloc(sizeof(*mq), GFP_KERNEL);
	if (!mq)
		goto failed_nomem;

	mq->dev = mdev;
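	/* the receive buffer is accessed through its uncached (P2 segment)
	 * alias so reads always see what the maple DMA engine wrote */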
	mq->recvbufdcsp = kmem_cache_zalloc(maple_queue_cache, GFP_KERNEL);
	mq->recvbuf = (void *) P2SEGADDR(mq->recvbufdcsp);
	if (!mq->recvbuf)
		goto failed_p2;
	/*
	 * most devices do not need the mutex - but
	 * anything that injects block reads or writes
	 * will rely on it
	 */
	mutex_init(&mq->mutex);

	return mq;

failed_p2:
	kfree(mq);
failed_nomem:
	return NULL;
}

static struct maple_device *maple_alloc_dev(int port, int unit)
{
	struct maple_device *mdev;

	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
	if (!mdev)
		return NULL;

	mdev->port = port;
	mdev->unit = unit;
	mdev->mq = maple_allocq(mdev);

	if (!mdev->mq) {
		kfree(mdev);
		return NULL;
	}
	mdev->dev.bus = &maple_bus_type;
	mdev->dev.parent = &maple_bus;
	return mdev;
}

static void maple_free_dev(struct maple_device *mdev)
{
	if (!mdev)
		return;
	if (mdev->mq) {
		if (mdev->mq->recvbufdcsp)
			kmem_cache_free(maple_queue_cache,
				mdev->mq->recvbufdcsp);
		kfree(mdev->mq);
	}
	kfree(mdev);
}

/* process the command queue into a maple command block
 * terminating command has bit 32 of first long set to 0
 */
static void maple_build_block(struct mapleq *mq)
{
	int port, unit, from, to, len;
	unsigned long *lsendbuf = mq->sendbuf;

	port = mq->dev->port & 3;
	unit = mq->dev->unit;
	len = mq->length;
	from = port << 6;
	to = (port << 6) | (unit > 0 ? (1 << (unit - 1)) & 0x1f : 0x20);

	*maple_lastptr &= 0x7fffffff;
	maple_lastptr = maple_sendptr;

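	/*
	 * each frame in the send buffer is three control words followed by
	 * the command data: a transfer descriptor (last-frame flag, port
	 * and length), the physical address of the receive buffer, and the
	 * maple frame header (command, recipient, sender, length)
	 */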
	*maple_sendptr++ = (port << 16) | len | 0x80000000;
	*maple_sendptr++ = PHYSADDR(mq->recvbuf);
	*maple_sendptr++ =
	    mq->command | (to << 8) | (from << 16) | (len << 24);
	while (len-- > 0)
		*maple_sendptr++ = *lsendbuf++;
}

/* build up command queue */
static void maple_send(void)
{
	int i, maple_packets = 0;
	struct mapleq *mq, *nmq;

	if (!list_empty(&maple_sentq))
		return;
	mutex_lock(&maple_wlist_lock);
	if (list_empty(&maple_waitq) || !maple_dma_done()) {
		mutex_unlock(&maple_wlist_lock);
		return;
	}
	mutex_unlock(&maple_wlist_lock);
	maple_lastptr = maple_sendbuf;
	maple_sendptr = maple_sendbuf;
	mutex_lock(&maple_wlist_lock);
	list_for_each_entry_safe(mq, nmq, &maple_waitq, list) {
		maple_build_block(mq);
		list_move(&mq->list, &maple_sentq);
		if (maple_packets++ > MAPLE_MAXPACKETS)
			break;
	}
	mutex_unlock(&maple_wlist_lock);
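	/* flush the newly written frames out of the CPU cache so the
	 * maple DMA engine sees them */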
	if (maple_packets > 0) {
		for (i = 0; i < (1 << MAPLE_DMA_PAGES); i++)
			dma_cache_sync(0, maple_sendbuf + i * PAGE_SIZE,
				       PAGE_SIZE, DMA_BIDIRECTIONAL);
	}
}

/* check if there is a driver registered likely to match this device */
static int check_matching_maple_driver(struct device_driver *driver,
					void *devptr)
{
	struct maple_driver *maple_drv;
	struct maple_device *mdev;

	mdev = devptr;
	maple_drv = to_maple_driver(driver);
	if (mdev->devinfo.function & cpu_to_be32(maple_drv->function))
		return 1;
	return 0;
}

static void maple_detach_driver(struct maple_device *mdev)
{
	if (!mdev)
		return;
	device_unregister(&mdev->dev);
	mdev = NULL;
}

/* process initial MAPLE_COMMAND_DEVINFO for each device or port */
static void maple_attach_driver(struct maple_device *mdev)
{
	char *p, *recvbuf;
	unsigned long function;
	int matched, retval;

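	/* the first four bytes of the reply hold the maple frame header,
	 * so the devinfo payload starts at offset 4 of the receive buffer */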
	recvbuf = mdev->mq->recvbuf;
	/* copy the data as individual elements in
	 * case of memory optimisation */
	memcpy(&mdev->devinfo.function, recvbuf + 4, 4);
	memcpy(&mdev->devinfo.function_data[0], recvbuf + 8, 12);
	memcpy(&mdev->devinfo.area_code, recvbuf + 20, 1);
	memcpy(&mdev->devinfo.connector_direction, recvbuf + 21, 1);
	memcpy(&mdev->devinfo.product_name[0], recvbuf + 22, 30);
	memcpy(&mdev->devinfo.product_licence[0], recvbuf + 52, 60);
	memcpy(&mdev->devinfo.standby_power, recvbuf + 112, 2);
	memcpy(&mdev->devinfo.max_power, recvbuf + 114, 2);
	memcpy(mdev->product_name, mdev->devinfo.product_name, 30);
	mdev->product_name[30] = '\0';
	memcpy(mdev->product_licence, mdev->devinfo.product_licence, 60);
	mdev->product_licence[60] = '\0';

	for (p = mdev->product_name + 29; mdev->product_name <= p; p--)
		if (*p == ' ')
			*p = '\0';
		else
			break;
	for (p = mdev->product_licence + 59; mdev->product_licence <= p; p--)
		if (*p == ' ')
			*p = '\0';
		else
			break;

	printk(KERN_INFO "Maple device detected: %s\n",
		mdev->product_name);
	printk(KERN_INFO "Maple device: %s\n", mdev->product_licence);

	function = be32_to_cpu(mdev->devinfo.function);

	if (function > 0x200) {
		/* Do this silently - as not a real device */
		function = 0;
		mdev->driver = &maple_dummy_driver;
		sprintf(mdev->dev.bus_id, "%d:0.port", mdev->port);
	} else {
		printk(KERN_INFO
			"Maple bus at (%d, %d): Function 0x%lX\n",
			mdev->port, mdev->unit, function);

		matched =
			bus_for_each_drv(&maple_bus_type, NULL, mdev,
				check_matching_maple_driver);

		if (matched == 0) {
			/* Driver does not exist yet */
			printk(KERN_INFO
				"No maple driver found.\n");
			mdev->driver = &maple_dummy_driver;
		}
		sprintf(mdev->dev.bus_id, "%d:0%d.%lX", mdev->port,
			mdev->unit, function);
	}
	mdev->function = function;
	mdev->dev.release = &maple_release_device;
	retval = device_register(&mdev->dev);
	if (retval) {
		printk(KERN_INFO
		"Maple bus: Attempt to register device"
		" (%x, %x) failed.\n",
		mdev->port, mdev->unit);
		maple_free_dev(mdev);
		mdev = NULL;
		return;
	}
}

/*
 * if device has been registered for the given
 * port and unit then return 1 - allows identification
 * of which devices need to be attached or detached
 */
static int detach_maple_device(struct device *device, void *portptr)
{
	struct maple_device_specify *ds;
	struct maple_device *mdev;

	ds = portptr;
	mdev = to_maple_dev(device);
	if (mdev->port == ds->port && mdev->unit == ds->unit)
		return 1;
	return 0;
}

static int setup_maple_commands(struct device *device, void *ignored)
{
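	/* poll devices that have registered a GETCOND callback at their
	 * chosen interval; otherwise issue a periodic DEVINFO so hotplug
	 * changes are noticed */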
	int add;
	struct maple_device *maple_dev = to_maple_dev(device);

	if ((maple_dev->interval > 0)
	    && time_after(jiffies, maple_dev->when)) {
		/* bounce if we cannot lock */
		add = maple_add_packet(maple_dev,
			be32_to_cpu(maple_dev->devinfo.function),
			MAPLE_COMMAND_GETCOND, 1, NULL);
		if (!add)
			maple_dev->when = jiffies + maple_dev->interval;
	} else {
		if (time_after(jiffies, maple_pnp_time))
			/* This will also bounce */
			maple_add_packet(maple_dev, 0,
				MAPLE_COMMAND_DEVINFO, 0, NULL);
	}
	return 0;
}

/* VBLANK bottom half - implemented via workqueue */
static void maple_vblank_handler(struct work_struct *work)
{
	if (!list_empty(&maple_sentq) || !maple_dma_done())
		return;

	ctrl_outl(0, MAPLE_ENABLE);

	bus_for_each_dev(&maple_bus_type, NULL, NULL,
			 setup_maple_commands);

	if (time_after(jiffies, maple_pnp_time))
		maple_pnp_time = jiffies + MAPLE_PNP_INTERVAL;

	mutex_lock(&maple_wlist_lock);
	if (!list_empty(&maple_waitq) && list_empty(&maple_sentq)) {
		mutex_unlock(&maple_wlist_lock);
		maple_send();
	} else {
		mutex_unlock(&maple_wlist_lock);
	}

	maplebus_dma_reset();
}

/* handle devices added via hotplug - place them on the queue for DEVINFO */
static void maple_map_subunits(struct maple_device *mdev, int submask)
{
	int retval, k, devcheck;
	struct maple_device *mdev_add;
	struct maple_device_specify ds;

	ds.port = mdev->port;
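	/* up to five subunits can hang off a port: the submask has one bit
	 * per subunit, with unit numbers 1 to 5 mapping to bits 0 to 4 */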
	for (k = 0; k < 5; k++) {
		ds.unit = k + 1;
		retval =
		    bus_for_each_dev(&maple_bus_type, NULL, &ds,
				     detach_maple_device);
		if (retval) {
			submask = submask >> 1;
			continue;
		}
		devcheck = submask & 0x01;
		if (devcheck) {
			mdev_add = maple_alloc_dev(mdev->port, k + 1);
			if (!mdev_add)
				return;
			maple_add_packet(mdev_add, 0, MAPLE_COMMAND_DEVINFO,
				0, NULL);
			/* mark that we are checking sub devices */
			scanning = 1;
		}
		submask = submask >> 1;
	}
}

/* mark a device as removed */
static void maple_clean_submap(struct maple_device *mdev)
{
	int killbit;

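	/* unit 0 (the port itself) is tracked by bit 5, subunits 1 to 5
	 * by bits 0 to 4 of the per-port subdevice map */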
	killbit = (mdev->unit > 0 ? (1 << (mdev->unit - 1)) & 0x1f : 0x20);
	killbit = ~killbit;
	killbit &= 0xFF;
	subdevice_map[mdev->port] = subdevice_map[mdev->port] & killbit;
}

/* handle empty port or hotplug removal */
static void maple_response_none(struct maple_device *mdev,
				struct mapleq *mq)
{
	if (mdev->unit != 0) {
		list_del(&mq->list);
		maple_clean_submap(mdev);
		printk(KERN_INFO
		       "Maple bus device detaching at (%d, %d)\n",
		       mdev->port, mdev->unit);
		maple_detach_driver(mdev);
		return;
	}
	if (!started || !fullscan) {
		if (checked[mdev->port] == false) {
			checked[mdev->port] = true;
			printk(KERN_INFO "No maple devices attached"
				" to port %d\n", mdev->port);
		}
		return;
	}
	maple_clean_submap(mdev);
}

/* preprocess hotplugs or scans */
static void maple_response_devinfo(struct maple_device *mdev,
				   char *recvbuf)
{
	char submask;
	if (!started || (scanning == 2) || !fullscan) {
		if ((mdev->unit == 0) && (checked[mdev->port] == false)) {
			checked[mdev->port] = true;
			maple_attach_driver(mdev);
		} else {
			if (mdev->unit != 0)
				maple_attach_driver(mdev);
		}
		return;
	}
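	/* byte 2 of a port's DEVINFO reply carries a bitmap of attached
	 * subunits; compare it against what is already mapped */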
	if (mdev->unit == 0) {
		submask = recvbuf[2] & 0x1F;
		if (submask ^ subdevice_map[mdev->port]) {
			maple_map_subunits(mdev, submask);
			subdevice_map[mdev->port] = submask;
		}
	}
}

static void maple_port_rescan(void)
{
	int i;
	struct maple_device *mdev;

	fullscan = 1;
	for (i = 0; i < MAPLE_PORTS; i++) {
		if (checked[i] == false) {
			fullscan = 0;
			mdev = baseunits[i];
			/*
			 *  test lock in case scan has failed
			 *  but device is still locked
			 */
			if (mutex_is_locked(&mdev->mq->mutex))
				mutex_unlock(&mdev->mq->mutex);
			maple_add_packet(mdev, 0, MAPLE_COMMAND_DEVINFO,
				0, NULL);
		}
	}
}

/* maple dma end bottom half - implemented via workqueue */
static void maple_dma_handler(struct work_struct *work)
{
	struct mapleq *mq, *nmq;
	struct maple_device *dev;
	char *recvbuf;
	enum maple_code code;

	if (!maple_dma_done())
		return;
	ctrl_outl(0, MAPLE_ENABLE);
	if (!list_empty(&maple_sentq)) {
		list_for_each_entry_safe(mq, nmq, &maple_sentq, list) {
			recvbuf = mq->recvbuf;
			code = recvbuf[0];
			dev = mq->dev;
			kfree(mq->sendbuf);
			mutex_unlock(&mq->mutex);
			list_del_init(&mq->list);

			switch (code) {
			case MAPLE_RESPONSE_NONE:
				maple_response_none(dev, mq);
				break;

			case MAPLE_RESPONSE_DEVINFO:
				maple_response_devinfo(dev, recvbuf);
				break;

			case MAPLE_RESPONSE_DATATRF:
				if (dev->callback)
					dev->callback(mq);
				break;

			case MAPLE_RESPONSE_FILEERR:
			case MAPLE_RESPONSE_AGAIN:
			case MAPLE_RESPONSE_BADCMD:
			case MAPLE_RESPONSE_BADFUNC:
				printk(KERN_DEBUG
				       "Maple non-fatal error 0x%X\n",
				       code);
				break;

			case MAPLE_RESPONSE_ALLINFO:
				printk(KERN_DEBUG
				       "Maple - extended device information"
					" not supported\n");
				break;

			case MAPLE_RESPONSE_OK:
				break;

			default:
				break;
			}
		}
		/* if scanning is 1 then we have subdevices to check */
		if (scanning == 1) {
			maple_send();
			scanning = 2;
		} else
			scanning = 0;
		/*check if we have actually tested all ports yet */
		if (!fullscan)
			maple_port_rescan();
		/* mark that we have been through the first scan */
		if (started == 0)
			started = 1;
	}
	maplebus_dma_reset();
}

static irqreturn_t maplebus_dma_interrupt(int irq, void *dev_id)
{
	/* Load everything into the bottom half */
	schedule_work(&maple_dma_process);
	return IRQ_HANDLED;
}

static irqreturn_t maplebus_vblank_interrupt(int irq, void *dev_id)
{
	schedule_work(&maple_vblank_process);
	return IRQ_HANDLED;
}

static int maple_set_dma_interrupt_handler(void)
{
	return request_irq(HW_EVENT_MAPLE_DMA, maplebus_dma_interrupt,
		IRQF_SHARED, "maple bus DMA", &maple_dummy_driver);
}

static int maple_set_vblank_interrupt_handler(void)
{
	return request_irq(HW_EVENT_VSYNC, maplebus_vblank_interrupt,
		IRQF_SHARED, "maple bus VBLANK", &maple_dummy_driver);
}

static int maple_get_dma_buffer(void)
{
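	/* a physically contiguous, zeroed block of 2^MAPLE_DMA_PAGES pages
	 * holds the DMA command list for the whole bus */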
	maple_sendbuf =
	    (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
				      MAPLE_DMA_PAGES);
	if (!maple_sendbuf)
		return -ENOMEM;
	return 0;
}

static int match_maple_bus_driver(struct device *devptr,
				  struct device_driver *drvptr)
{
	struct maple_driver *maple_drv;
	struct maple_device *maple_dev;

	maple_drv = container_of(drvptr, struct maple_driver, drv);
	maple_dev = container_of(devptr, struct maple_device, dev);
	/* Trap empty port case */
	if (maple_dev->devinfo.function == 0xFFFFFFFF)
		return 0;
	else if (maple_dev->devinfo.function &
		 cpu_to_be32(maple_drv->function))
		return 1;
	return 0;
}

static int maple_bus_uevent(struct device *dev,
			    struct kobj_uevent_env *env)
{
	return 0;
}

static void maple_bus_release(struct device *dev)
{
}

static struct maple_driver maple_dummy_driver = {
	.drv = {
		.name = "maple_dummy_driver",
		.bus = &maple_bus_type,
	},
};

struct bus_type maple_bus_type = {
	.name = "maple",
	.match = match_maple_bus_driver,
	.uevent = maple_bus_uevent,
};
EXPORT_SYMBOL_GPL(maple_bus_type);

static struct device maple_bus = {
	.bus_id = "maple",
	.release = maple_bus_release,
};

static int __init maple_bus_init(void)
{
	int retval, i;
	struct maple_device *mdev[MAPLE_PORTS];
	ctrl_outl(0, MAPLE_STATE);

	retval = device_register(&maple_bus);
	if (retval)
		goto cleanup;

	retval = bus_register(&maple_bus_type);
	if (retval)
		goto cleanup_device;

	retval = driver_register(&maple_dummy_driver.drv);
	if (retval)
		goto cleanup_bus;

	/* allocate memory for maple bus dma */
	retval = maple_get_dma_buffer();
	if (retval) {
		printk(KERN_INFO
		       "Maple bus: Failed to allocate Maple DMA buffers\n");
		goto cleanup_basic;
	}

	/* set up DMA interrupt handler */
	retval = maple_set_dma_interrupt_handler();
	if (retval) {
		printk(KERN_INFO
		       "Maple bus: Failed to grab maple DMA IRQ\n");
		goto cleanup_dma;
	}

	/* set up VBLANK interrupt handler */
	retval = maple_set_vblank_interrupt_handler();
	if (retval) {
		printk(KERN_INFO "Maple bus: Failed to grab VBLANK IRQ\n");
		goto cleanup_irq;
	}

	maple_queue_cache =
	    kmem_cache_create("maple_queue_cache", 0x400, 0,
			      SLAB_POISON|SLAB_HWCACHE_ALIGN, NULL);

	if (!maple_queue_cache)
		goto cleanup_bothirqs;

	INIT_LIST_HEAD(&maple_waitq);
	INIT_LIST_HEAD(&maple_sentq);

	/* setup maple ports */
	for (i = 0; i < MAPLE_PORTS; i++) {
		checked[i] = false;
		mdev[i] = maple_alloc_dev(i, 0);
		baseunits[i] = mdev[i];
		if (!mdev[i]) {
			while (i-- > 0)
				maple_free_dev(mdev[i]);
			goto cleanup_cache;
		}
		maple_add_packet(mdev[i], 0, MAPLE_COMMAND_DEVINFO, 0, NULL);
		subdevice_map[i] = 0;
	}

	/* setup maplebus hardware */
	maplebus_dma_reset();
	/* initial detection */
	maple_send();
	maple_pnp_time = jiffies;
	printk(KERN_INFO "Maple bus core now registered.\n");

	return 0;

cleanup_cache:
	kmem_cache_destroy(maple_queue_cache);

cleanup_bothirqs:
	free_irq(HW_EVENT_VSYNC, 0);

cleanup_irq:
	free_irq(HW_EVENT_MAPLE_DMA, 0);

cleanup_dma:
	free_pages((unsigned long) maple_sendbuf, MAPLE_DMA_PAGES);

cleanup_basic:
	driver_unregister(&maple_dummy_driver.drv);

cleanup_bus:
	bus_unregister(&maple_bus_type);

cleanup_device:
	device_unregister(&maple_bus);

cleanup:
	printk(KERN_INFO "Maple bus registration failed\n");
	return retval;
}
/* Push init to later to ensure hardware gets detected */
fs_initcall(maple_bus_init);