/*
 * Low-level SPU handling
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include <asm/io.h>
#include <asm/prom.h>
#include <linux/mutex.h>
#include <asm/spu.h>
#include <asm/mmu_context.h>

#include "interrupt.h"

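/*
 * SPU error (class 0) conditions are reported to the task that currently
 * owns the SPU by sending it a signal: SIGBUS for DMA errors, SIGILL for
 * an error detected on the SPU itself.
 */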
static int __spu_trap_invalid_dma(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	force_sig(SIGBUS, /* info, */ current);
	return 0;
}

static int __spu_trap_dma_align(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	force_sig(SIGBUS, /* info, */ current);
	return 0;
}

static int __spu_trap_error(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	force_sig(SIGILL, /* info, */ current);
	return 0;
}

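/* Restart a suspended MFC DMA queue, unless a context switch is pending. */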
static void spu_restart_dma(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags))
		out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
}

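/*
 * SLB miss from the SPE's MFC: construct a user-region SLB entry for the
 * faulting effective address and install it, replacing entries round-robin
 * via spu->slb_replace, then let the suspended DMA continue.
 */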
static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	struct mm_struct *mm = spu->mm;
	u64 esid, vsid;

	pr_debug("%s\n", __FUNCTION__);

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
		/* SLBs are pre-loaded for context switch, so
		 * we should never get here!
		 */
		printk("%s: invalid access during switch!\n", __func__);
		return 1;
	}
	if (!mm || (REGION_ID(ea) != USER_REGION_ID)) {
		/* Future: support kernel segments so that drivers
		 * can use SPUs.
		 */
		pr_debug("invalid region access at %016lx\n", ea);
		return 1;
	}

	esid = (ea & ESID_MASK) | SLB_ESID_V;
	vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) | SLB_VSID_USER;
	if (in_hugepage_area(mm->context, ea))
		vsid |= SLB_VSID_L;
	out_be64(&priv2->slb_index_W, spu->slb_replace);
	out_be64(&priv2->slb_vsid_RW, vsid);
	out_be64(&priv2->slb_esid_RW, esid);

	spu->slb_replace++;
	if (spu->slb_replace >= 8)
		spu->slb_replace = 0;

	spu_restart_dma(spu);

	return 0;
}

extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
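/*
 * MFC page fault. Kernel-region hash faults are resolved right here via
 * hash_page(); user faults are recorded in spu->dar/dsisr and handed to the
 * stop callback so they can be resolved later in process context.
 */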
static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
{
	pr_debug("%s, %lx, %lx\n", __FUNCTION__, dsisr, ea);

	/* Handle kernel space hash faults immediately.
	   User hash faults need to be deferred to process context. */
	if ((dsisr & MFC_DSISR_PTE_NOT_FOUND)
	    && REGION_ID(ea) != USER_REGION_ID
	    && hash_page(ea, _PAGE_PRESENT, 0x300) == 0) {
		spu_restart_dma(spu);
		return 0;
	}

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
		printk("%s: invalid access during switch!\n", __func__);
		return 1;
	}

	spu->dar = ea;
	spu->dsisr = dsisr;
	mb();
	if (spu->stop_callback)
		spu->stop_callback(spu);
	return 0;
}

static int __spu_trap_mailbox(struct spu *spu)
{
	if (spu->ibox_callback)
		spu->ibox_callback(spu);

	/* atomically disable SPU mailbox interrupts */
	spin_lock(&spu->register_lock);
	spu_int_mask_and(spu, 2, ~0x1);
	spin_unlock(&spu->register_lock);
	return 0;
}

static int __spu_trap_stop(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	spu->stop_code = in_be32(&spu->problem->spu_status_R);
	if (spu->stop_callback)
		spu->stop_callback(spu);
	return 0;
}

static int __spu_trap_halt(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	spu->stop_code = in_be32(&spu->problem->spu_status_R);
	if (spu->stop_callback)
		spu->stop_callback(spu);
	return 0;
}

static int __spu_trap_tag_group(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	spu->mfc_callback(spu);
	return 0;
}

static int __spu_trap_spubox(struct spu *spu)
{
	if (spu->wbox_callback)
		spu->wbox_callback(spu);

	/* atomically disable SPU mailbox interrupts */
	spin_lock(&spu->register_lock);
	spu_int_mask_and(spu, 2, ~0x10);
	spin_unlock(&spu->register_lock);
	return 0;
}

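/*
 * Class 0 interrupts signal errors (invalid DMA, DMA alignment, SPU error).
 * The hard interrupt handler only records the event; spu_irq_class_0_bottom()
 * reads and clears the status and delivers the signals.
 */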
static irqreturn_t
spu_irq_class_0(int irq, void *data, struct pt_regs *regs)
{
	struct spu *spu;

	spu = data;
	spu->class_0_pending = 1;
	if (spu->stop_callback)
		spu->stop_callback(spu);

	return IRQ_HANDLED;
}

int
spu_irq_class_0_bottom(struct spu *spu)
{
	unsigned long stat, mask;

	spu->class_0_pending = 0;

	mask = spu_int_mask_get(spu, 0);
	stat = spu_int_stat_get(spu, 0);

	stat &= mask;

	if (stat & 1) /* invalid MFC DMA */
		__spu_trap_invalid_dma(spu);

	if (stat & 2) /* invalid DMA alignment */
		__spu_trap_dma_align(spu);

	if (stat & 4) /* error on SPU */
		__spu_trap_error(spu);

	spu_int_stat_clear(spu, 0, stat);

	return (stat & 0x7) ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(spu_irq_class_0_bottom);

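/*
 * Class 1 interrupts are MFC address translation faults: segment faults
 * (SLB miss) and mapping faults (page faults), plus the LS compare and
 * suspend events.
 */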
static irqreturn_t
spu_irq_class_1(int irq, void *data, struct pt_regs *regs)
{
	struct spu *spu;
	unsigned long stat, mask, dar, dsisr;

	spu = data;

	/* atomically read & clear class1 status. */
	spin_lock(&spu->register_lock);
	mask  = spu_int_mask_get(spu, 1);
	stat  = spu_int_stat_get(spu, 1) & mask;
	dar   = spu_mfc_dar_get(spu);
	dsisr = spu_mfc_dsisr_get(spu);
	if (stat & 2) /* mapping fault */
		spu_mfc_dsisr_set(spu, 0ul);
	spu_int_stat_clear(spu, 1, stat);
	spin_unlock(&spu->register_lock);
	pr_debug("%s: %lx %lx %lx %lx\n", __FUNCTION__, mask, stat,
			dar, dsisr);

	if (stat & 1) /* segment fault */
		__spu_trap_data_seg(spu, dar);

	if (stat & 2) { /* mapping fault */
		__spu_trap_data_map(spu, dar, dsisr);
	}

	if (stat & 4) /* ls compare & suspend on get */
		;

	if (stat & 8) /* ls compare & suspend on put */
		;

	return stat ? IRQ_HANDLED : IRQ_NONE;
}
EXPORT_SYMBOL_GPL(spu_irq_class_1_bottom);

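/*
 * Class 2 interrupts are application events: PPC core mailbox, SPU
 * stop-and-signal, SPU halt, DMA tag group completion and the SPU
 * mailbox threshold.
 */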
static irqreturn_t
spu_irq_class_2(int irq, void *data, struct pt_regs *regs)
{
	struct spu *spu;
	unsigned long stat;
	unsigned long mask;

	spu = data;
	stat = spu_int_stat_get(spu, 2);
	mask = spu_int_mask_get(spu, 2);

	pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);

	stat &= mask;

	if (stat & 1)  /* PPC core mailbox */
		__spu_trap_mailbox(spu);

	if (stat & 2) /* SPU stop-and-signal */
		__spu_trap_stop(spu);

	if (stat & 4) /* SPU halted */
		__spu_trap_halt(spu);

	if (stat & 8) /* DMA tag group complete */
		__spu_trap_tag_group(spu);

	if (stat & 0x10) /* SPU mailbox threshold */
		__spu_trap_spubox(spu);

	spu_int_stat_clear(spu, 2, stat);
	return stat ? IRQ_HANDLED : IRQ_NONE;
}

static int
spu_request_irqs(struct spu *spu)
{
	int ret;
	int irq_base;

	irq_base = IIC_NODE_STRIDE * spu->node + IIC_SPE_OFFSET;

	snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0", spu->number);
	ret = request_irq(irq_base + spu->isrc,
		 spu_irq_class_0, SA_INTERRUPT, spu->irq_c0, spu);
	if (ret)
		goto out;

	snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1", spu->number);
	ret = request_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc,
		 spu_irq_class_1, SA_INTERRUPT, spu->irq_c1, spu);
	if (ret)
		goto out1;

	snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2", spu->number);
	ret = request_irq(irq_base + 2*IIC_CLASS_STRIDE + spu->isrc,
		 spu_irq_class_2, SA_INTERRUPT, spu->irq_c2, spu);
	if (ret)
		goto out2;
	goto out;

out2:
	free_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, spu);
out1:
	free_irq(irq_base + spu->isrc, spu);
out:
	return ret;
}

static void
spu_free_irqs(struct spu *spu)
{
	int irq_base;

	irq_base = IIC_NODE_STRIDE * spu->node + IIC_SPE_OFFSET;

	free_irq(irq_base + spu->isrc, spu);
	free_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, spu);
	free_irq(irq_base + 2*IIC_CLASS_STRIDE + spu->isrc, spu);
}

static LIST_HEAD(spu_list);
static DEFINE_MUTEX(spu_mutex);

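/*
 * Bring a newly allocated SPU into a known state: zero the listed channels
 * and set sane channel counts through the priv2 channel access registers.
 */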
static void spu_init_channels(struct spu *spu)
{
	static const struct {
		 unsigned channel;
		 unsigned count;
	} zero_list[] = {
		{ 0x00, 1, }, { 0x01, 1, }, { 0x03, 1, }, { 0x04, 1, },
		{ 0x18, 1, }, { 0x19, 1, }, { 0x1b, 1, }, { 0x1d, 1, },
	}, count_list[] = {
		{ 0x00, 0, }, { 0x03, 0, }, { 0x04, 0, }, { 0x15, 16, },
		{ 0x17, 1, }, { 0x18, 0, }, { 0x19, 0, }, { 0x1b, 0, },
		{ 0x1c, 1, }, { 0x1d, 0, }, { 0x1e, 1, },
	};
	struct spu_priv2 __iomem *priv2;
	int i;

	priv2 = spu->priv2;

	/* initialize all channel data to zero */
	for (i = 0; i < ARRAY_SIZE(zero_list); i++) {
		int count;

		out_be64(&priv2->spu_chnlcntptr_RW, zero_list[i].channel);
		for (count = 0; count < zero_list[i].count; count++)
			out_be64(&priv2->spu_chnldata_RW, 0);
	}

	/* initialize channel counts to meaningful values */
	for (i = 0; i < ARRAY_SIZE(count_list); i++) {
		out_be64(&priv2->spu_chnlcntptr_RW, count_list[i].channel);
		out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
	}
}

struct spu *spu_alloc(void)
{
	struct spu *spu;

	mutex_lock(&spu_mutex);
	if (!list_empty(&spu_list)) {
		spu = list_entry(spu_list.next, struct spu, list);
		list_del_init(&spu->list);
		pr_debug("Got SPU %x %d\n", spu->isrc, spu->number);
	} else {
		pr_debug("No SPU left\n");
		spu = NULL;
	}
	mutex_unlock(&spu_mutex);

	if (spu)
		spu_init_channels(spu);

	return spu;
}
EXPORT_SYMBOL_GPL(spu_alloc);

void spu_free(struct spu *spu)
{
	mutex_lock(&spu_mutex);
	list_add_tail(&spu->list, &spu_list);
	mutex_unlock(&spu_mutex);
}
EXPORT_SYMBOL_GPL(spu_free);
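
/*
 * Typical use of spu_alloc()/spu_free() by a client such as spufs
 * (illustrative sketch only, not code from this file):
 *
 *	struct spu *spu;
 *
 *	spu = spu_alloc();
 *	if (!spu)
 *		return -EAGAIN;		... no idle SPU available
 *	... load and run a context on the SPU ...
 *	spu_free(spu);
 */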

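/*
 * Resolve an MFC access fault against the owning process' address space,
 * mirroring the main page fault path: look up the vma, check the access
 * against the MFC DSISR bits and call handle_mm_fault().
 */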
static int spu_handle_mm_fault(struct spu *spu)
{
	struct mm_struct *mm = spu->mm;
	struct vm_area_struct *vma;
	u64 ea, dsisr, is_write;
	int ret;

	ea = spu->dar;
	dsisr = spu->dsisr;
#if 0
	if (!IS_VALID_EA(ea)) {
		return -EFAULT;
	}
#endif /* XXX */
	if (mm == NULL) {
		return -EFAULT;
	}
	if (mm->pgd == NULL) {
		return -EFAULT;
	}

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, ea);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= ea)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
#if 0
	if (expand_stack(vma, ea))
		goto bad_area;
#endif /* XXX */
good_area:
	is_write = dsisr & MFC_DSISR_ACCESS_PUT;
	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (dsisr & MFC_DSISR_ACCESS_DENIED)
			goto bad_area;
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}
	ret = 0;
	switch (handle_mm_fault(mm, vma, ea, is_write)) {
	case VM_FAULT_MINOR:
		current->min_flt++;
		break;
	case VM_FAULT_MAJOR:
		current->maj_flt++;
		break;
	case VM_FAULT_SIGBUS:
		ret = -EFAULT;
		goto bad_area;
	case VM_FAULT_OOM:
		ret = -ENOMEM;
		goto bad_area;
	default:
		BUG();
	}
	up_read(&mm->mmap_sem);
	return ret;

bad_area:
	up_read(&mm->mmap_sem);
	return -EFAULT;
}

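/*
 * Bottom half for class 1 faults, run in process context (it may sleep):
 * try hash_page() first, fall back to the full mm fault path, then either
 * restart the suspended DMA or report the failure as an invalid DMA.
 */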
int spu_irq_class_1_bottom(struct spu *spu)
{
	u64 ea, dsisr, access, error = 0UL;
	int ret = 0;

	ea = spu->dar;
	dsisr = spu->dsisr;
	if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)) {
		u64 flags;

		access = (_PAGE_PRESENT | _PAGE_USER);
		access |= (dsisr & MFC_DSISR_ACCESS_PUT) ? _PAGE_RW : 0UL;
		local_irq_save(flags);
		if (hash_page(ea, access, 0x300) != 0)
			error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
		local_irq_restore(flags);
	}
	if (error & CLASS1_ENABLE_STORAGE_FAULT_INTR) {
		if ((ret = spu_handle_mm_fault(spu)) != 0)
			error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
		else
			error &= ~CLASS1_ENABLE_STORAGE_FAULT_INTR;
	}
	spu->dar = 0UL;
	spu->dsisr = 0UL;
	if (!error) {
		spu_restart_dma(spu);
	} else {
		__spu_trap_invalid_dma(spu);
	}
	return ret;
}

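/*
 * Route all three interrupt classes of this SPU to the internal interrupt
 * controller of the given cpu.
 */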
void spu_irq_setaffinity(struct spu *spu, int cpu)
{
	u64 target = iic_get_target_id(cpu);
	u64 route = target << 48 | target << 32 | target << 16;
	spu_int_route_set(spu, route);
}
EXPORT_SYMBOL_GPL(spu_irq_setaffinity);

static int __init find_spu_node_id(struct device_node *spe)
{
	unsigned int *id;
	struct device_node *cpu;
	cpu = spe->parent->parent;
	id = (unsigned int *)get_property(cpu, "node-id", NULL);
	return id ? *id : 0;
}

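/*
 * Register the memory area described by an SPE address/len property with
 * the memory hotplug code (__add_pages), so that struct pages exist for it
 * before the area gets mapped.
 */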
static int __init cell_spuprop_present(struct spu *spu, struct device_node *spe,
		const char *prop)
{
	static DEFINE_MUTEX(add_spumem_mutex);

	struct address_prop {
		unsigned long address;
		unsigned int len;
	} __attribute__((packed)) *p;
	int proplen;

	unsigned long start_pfn, nr_pages;
	struct pglist_data *pgdata;
	struct zone *zone;
	int ret;

	p = (void*)get_property(spe, prop, &proplen);
	WARN_ON(proplen != sizeof (*p));

	start_pfn = p->address >> PAGE_SHIFT;
	nr_pages = ((unsigned long)p->len + PAGE_SIZE - 1) >> PAGE_SHIFT;

	pgdata = NODE_DATA(spu->nid);
	zone = pgdata->node_zones;

	/* XXX rethink locking here */
	mutex_lock(&add_spumem_mutex);
	ret = __add_pages(zone, start_pfn, nr_pages);
	mutex_unlock(&add_spumem_mutex);

	return ret;
}

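/*
 * Look up an address/len property on the SPE node, register the area and
 * ioremap it.
 */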
static void __iomem * __init map_spe_prop(struct spu *spu,
		struct device_node *n, const char *name)
{
	struct address_prop {
		unsigned long address;
		unsigned int len;
	} __attribute__((packed)) *prop;

	void *p;
	int proplen;
	void* ret = NULL;
	int err = 0;

	p = get_property(n, name, &proplen);
	if (proplen != sizeof (struct address_prop))
		return NULL;

	prop = p;

	err = cell_spuprop_present(spu, n, name);
	if (err && (err != -EEXIST))
		goto out;

	ret = ioremap(prop->address, prop->len);

 out:
	return ret;
}

static void spu_unmap(struct spu *spu)
{
	iounmap(spu->priv2);
	iounmap(spu->priv1);
	iounmap(spu->problem);
	iounmap((u8 __iomem *)spu->local_store);
}

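/*
 * Fill in a struct spu from its device tree node: interrupt source, name,
 * and mappings for local store, problem state, priv1 (may be absent when
 * running under a hypervisor) and priv2.
 */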
static int __init spu_map_device(struct spu *spu, struct device_node *node)
{
	char *prop;
	int ret;

	ret = -ENODEV;
	prop = get_property(node, "isrc", NULL);
	if (!prop)
		goto out;
	spu->isrc = *(unsigned int *)prop;

	spu->name = get_property(node, "name", NULL);
	if (!spu->name)
		goto out;

	prop = get_property(node, "local-store", NULL);
	if (!prop)
		goto out;
	spu->local_store_phys = *(unsigned long *)prop;

	/* we use local store as ram, not io memory */
	spu->local_store = (void __force *)
		map_spe_prop(spu, node, "local-store");
	if (!spu->local_store)
		goto out;

	prop = get_property(node, "problem", NULL);
	if (!prop)
		goto out_unmap;
	spu->problem_phys = *(unsigned long *)prop;

	spu->problem= map_spe_prop(spu, node, "problem");
	if (!spu->problem)
		goto out_unmap;

	spu->priv1= map_spe_prop(spu, node, "priv1");
	/* priv1 is not available on a hypervisor */

	spu->priv2= map_spe_prop(spu, node, "priv2");
	if (!spu->priv2)
		goto out_unmap;
	ret = 0;
	goto out;

out_unmap:
	spu_unmap(spu);
out:
	return ret;
}

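/* sysfs glue: every SPU is registered as a system device of class "spu". */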
struct sysdev_class spu_sysdev_class = {
	set_kset_name("spu")
};

static ssize_t spu_show_isrc(struct sys_device *sysdev, char *buf)
{
	struct spu *spu = container_of(sysdev, struct spu, sysdev);
	return sprintf(buf, "%d\n", spu->isrc);

}
static SYSDEV_ATTR(isrc, 0400, spu_show_isrc, NULL);

extern int attach_sysdev_to_node(struct sys_device *dev, int nid);

static int spu_create_sysdev(struct spu *spu)
{
	int ret;

	spu->sysdev.id = spu->number;
	spu->sysdev.cls = &spu_sysdev_class;
	ret = sysdev_register(&spu->sysdev);
	if (ret) {
		printk(KERN_ERR "Can't register SPU %d with sysfs\n",
				spu->number);
		return ret;
	}

	sysdev_create_file(&spu->sysdev, &attr_isrc);
	sysfs_add_device_to_node(&spu->sysdev, spu->nid);

	return 0;
}

static void spu_destroy_sysdev(struct spu *spu)
{
	sysdev_remove_file(&spu->sysdev, &attr_isrc);
	sysfs_remove_device_from_node(&spu->sysdev, spu->nid);
	sysdev_unregister(&spu->sysdev);
}

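/*
 * Create one SPU from its device tree node: map its resources, initialize
 * the structure, request the three interrupt lines, register the sysdev
 * and add the new SPU to the global list.
 */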
static int __init create_spu(struct device_node *spe)
{
	struct spu *spu;
	int ret;
	static int number;

	ret = -ENOMEM;
	spu = kmalloc(sizeof (*spu), GFP_KERNEL);
	if (!spu)
		goto out;

	ret = spu_map_device(spu, spe);
	if (ret)
		goto out_free;

	spu->node = find_spu_node_id(spe);
	spu->nid = of_node_to_nid(spe);
	if (spu->nid == -1)
		spu->nid = 0;

	spu->stop_code = 0;
	spu->slb_replace = 0;
	spu->mm = NULL;
	spu->ctx = NULL;
	spu->rq = NULL;
	spu->pid = 0;
	spu->class_0_pending = 0;
	spu->flags = 0UL;
	spu->dar = 0UL;
	spu->dsisr = 0UL;
	spin_lock_init(&spu->register_lock);

	spu_mfc_sdr_set(spu, mfspr(SPRN_SDR1));
	spu_mfc_sr1_set(spu, 0x33);
	spu->ibox_callback = NULL;
	spu->wbox_callback = NULL;
	spu->stop_callback = NULL;
	spu->mfc_callback = NULL;

	mutex_lock(&spu_mutex);
	spu->number = number++;
	ret = spu_request_irqs(spu);
	if (ret)
		goto out_unmap;

	ret = spu_create_sysdev(spu);
	if (ret)
		goto out_free_irqs;

	list_add(&spu->list, &spu_list);
	mutex_unlock(&spu_mutex);

	pr_debug("Using SPE %s %02x %p %p %p %p %d\n",
		spu->name, spu->isrc, spu->local_store,
		spu->problem, spu->priv1, spu->priv2, spu->number);
	goto out;

out_free_irqs:
	spu_free_irqs(spu);

out_unmap:
	mutex_unlock(&spu_mutex);
	spu_unmap(spu);
out_free:
	kfree(spu);
out:
	return ret;
}

static void destroy_spu(struct spu *spu)
{
	list_del_init(&spu->list);

	spu_destroy_sysdev(spu);
	spu_free_irqs(spu);
	spu_unmap(spu);
	kfree(spu);
}

static void cleanup_spu_base(void)
{
	struct spu *spu, *tmp;
	mutex_lock(&spu_mutex);
	list_for_each_entry_safe(spu, tmp, &spu_list, list)
		destroy_spu(spu);
	mutex_unlock(&spu_mutex);
	sysdev_class_unregister(&spu_sysdev_class);
}
module_exit(cleanup_spu_base);

static int __init init_spu_base(void)
{
	struct device_node *node;
	int ret;

	/* create sysdev class for spus */
	ret = sysdev_class_register(&spu_sysdev_class);
	if (ret)
		return ret;

	ret = -ENODEV;
	for (node = of_find_node_by_type(NULL, "spe");
			node; node = of_find_node_by_type(node, "spe")) {
		ret = create_spu(node);
		if (ret) {
			printk(KERN_WARNING "%s: Error initializing %s\n",
				__FUNCTION__, node->name);
			cleanup_spu_base();
			break;
		}
	}
	/* in some old firmware versions, the spe is called 'spc', so we
	   look for that as well */
	for (node = of_find_node_by_type(NULL, "spc");
			node; node = of_find_node_by_type(node, "spc")) {
		ret = create_spu(node);
		if (ret) {
			printk(KERN_WARNING "%s: Error initializing %s\n",
				__FUNCTION__, node->name);
			cleanup_spu_base();
			break;
		}
	}
	return ret;
}
module_init(init_spu_base);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");