/*
 * Low-level SPU handling
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include <asm/firmware.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <linux/mutex.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/mmu_context.h>

#include "interrupt.h"

const struct spu_priv1_ops *spu_priv1_ops;

EXPORT_SYMBOL_GPL(spu_priv1_ops);

static int __spu_trap_invalid_dma(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	spu->dma_callback(spu, SPE_EVENT_INVALID_DMA);
	return 0;
}

static int __spu_trap_dma_align(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	spu->dma_callback(spu, SPE_EVENT_DMA_ALIGNMENT);
	return 0;
}

static int __spu_trap_error(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	spu->dma_callback(spu, SPE_EVENT_SPE_ERROR);
	return 0;
}

static void spu_restart_dma(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags))
		out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
}

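/*
 * An SLB miss from the SPU: build and install an SLB entry for the
 * faulting effective address (user segments only for now), using a
 * simple round-robin replacement over the eight SLB slots, then let
 * the MFC restart its suspended DMA.
 */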
static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	struct mm_struct *mm = spu->mm;
	u64 esid, vsid, llp;

	pr_debug("%s\n", __FUNCTION__);

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
		/* SLBs are pre-loaded for context switch, so
		 * we should never get here!
		 */
		printk("%s: invalid access during switch!\n", __func__);
		return 1;
	}
	if (!mm || (REGION_ID(ea) != USER_REGION_ID)) {
		/* Future: support kernel segments so that drivers
		 * can use SPUs.
		 */
		pr_debug("invalid region access at %016lx\n", ea);
		return 1;
	}

	esid = (ea & ESID_MASK) | SLB_ESID_V;
#ifdef CONFIG_HUGETLB_PAGE
	if (in_hugepage_area(mm->context, ea))
		llp = mmu_psize_defs[mmu_huge_psize].sllp;
	else
#endif
		llp = mmu_psize_defs[mmu_virtual_psize].sllp;
	vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) |
			SLB_VSID_USER | llp;

	out_be64(&priv2->slb_index_W, spu->slb_replace);
	out_be64(&priv2->slb_vsid_RW, vsid);
	out_be64(&priv2->slb_esid_RW, esid);

	spu->slb_replace++;
	if (spu->slb_replace >= 8)
		spu->slb_replace = 0;

	spu_restart_dma(spu);

	return 0;
}

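/*
 * A hash-table (mapping) fault from the SPU: kernel-region faults are
 * resolved right here via hash_page(); user-space faults are saved in
 * spu->dar/dsisr and the stop callback is invoked so the owning context
 * can resolve them later from process context (spu_irq_class_1_bottom).
 */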
extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
{
	pr_debug("%s, %lx, %lx\n", __FUNCTION__, dsisr, ea);

	/* Handle kernel space hash faults immediately.
	   User hash faults need to be deferred to process context. */
	if ((dsisr & MFC_DSISR_PTE_NOT_FOUND)
	    && REGION_ID(ea) != USER_REGION_ID
	    && hash_page(ea, _PAGE_PRESENT, 0x300) == 0) {
		spu_restart_dma(spu);
		return 0;
	}

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
		printk("%s: invalid access during switch!\n", __func__);
		return 1;
	}

	spu->dar = ea;
	spu->dsisr = dsisr;
	mb();
	spu->stop_callback(spu);
	return 0;
}

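/*
 * Class 0 interrupts report error conditions: DMA alignment, invalid
 * MFC DMA and SPU errors.  The hard irq handler only flags the event;
 * decoding and dispatch happen in spu_irq_class_0_bottom().
 */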
static irqreturn_t
spu_irq_class_0(int irq, void *data, struct pt_regs *regs)
{
	struct spu *spu;

	spu = data;
	spu->class_0_pending = 1;
	spu->stop_callback(spu);

	return IRQ_HANDLED;
}

int
spu_irq_class_0_bottom(struct spu *spu)
{
	unsigned long stat, mask;

	spu->class_0_pending = 0;

	mask = spu_int_mask_get(spu, 0);
	stat = spu_int_stat_get(spu, 0);

	stat &= mask;

	if (stat & 1) /* invalid DMA alignment */
		__spu_trap_dma_align(spu);

	if (stat & 2) /* invalid MFC DMA */
		__spu_trap_invalid_dma(spu);

	if (stat & 4) /* error on SPU */
		__spu_trap_error(spu);

	spu_int_stat_clear(spu, 0, stat);

	return (stat & 0x7) ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(spu_irq_class_0_bottom);

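/*
 * Class 1 interrupts are translation faults from the MFC.  The status
 * is read and cleared atomically under register_lock; segment faults
 * are fixed up immediately, mapping faults are handed to
 * __spu_trap_data_map().
 */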
static irqreturn_t
spu_irq_class_1(int irq, void *data, struct pt_regs *regs)
{
	struct spu *spu;
	unsigned long stat, mask, dar, dsisr;

	spu = data;

	/* atomically read & clear class1 status. */
	spin_lock(&spu->register_lock);
	mask  = spu_int_mask_get(spu, 1);
	stat  = spu_int_stat_get(spu, 1) & mask;
	dar   = spu_mfc_dar_get(spu);
	dsisr = spu_mfc_dsisr_get(spu);
	if (stat & 2) /* mapping fault */
		spu_mfc_dsisr_set(spu, 0ul);
	spu_int_stat_clear(spu, 1, stat);
	spin_unlock(&spu->register_lock);
	pr_debug("%s: %lx %lx %lx %lx\n", __FUNCTION__, mask, stat,
			dar, dsisr);

	if (stat & 1) /* segment fault */
		__spu_trap_data_seg(spu, dar);

	if (stat & 2) { /* mapping fault */
		__spu_trap_data_map(spu, dar, dsisr);
	}

	if (stat & 4) /* ls compare & suspend on get */
		;

	if (stat & 8) /* ls compare & suspend on put */
		;

	return stat ? IRQ_HANDLED : IRQ_NONE;
}
EXPORT_SYMBOL_GPL(spu_irq_class_1_bottom);

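/*
 * Class 2 interrupts cover mailbox traffic, stop-and-signal, halt and
 * DMA tag-group completion.  The level-triggered mailbox sources are
 * masked before being acknowledged to avoid retriggering.
 */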
static irqreturn_t
spu_irq_class_2(int irq, void *data, struct pt_regs *regs)
{
	struct spu *spu;
	unsigned long stat;
	unsigned long mask;

	spu = data;
	spin_lock(&spu->register_lock);
	stat = spu_int_stat_get(spu, 2);
	mask = spu_int_mask_get(spu, 2);
	/* ignore interrupts we're not waiting for */
	stat &= mask;
	/*
	 * mailbox interrupts (0x1 and 0x10) are level triggered.
	 * mask them now before acknowledging.
	 */
	if (stat & 0x11)
		spu_int_mask_and(spu, 2, ~(stat & 0x11));
	/* acknowledge all interrupts before the callbacks */
	spu_int_stat_clear(spu, 2, stat);
	spin_unlock(&spu->register_lock);

	pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);

	if (stat & 1)  /* PPC core mailbox */
		spu->ibox_callback(spu);

	if (stat & 2) /* SPU stop-and-signal */
		spu->stop_callback(spu);

	if (stat & 4) /* SPU halted */
		spu->stop_callback(spu);

	if (stat & 8) /* DMA tag group complete */
		spu->mfc_callback(spu);

	if (stat & 0x10) /* SPU mailbox threshold */
		spu->wbox_callback(spu);

	return stat ? IRQ_HANDLED : IRQ_NONE;
}

static int spu_request_irqs(struct spu *spu)
{
	int ret = 0;

	if (spu->irqs[0] != NO_IRQ) {
		snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0",
			 spu->number);
		ret = request_irq(spu->irqs[0], spu_irq_class_0,
				  IRQF_DISABLED,
				  spu->irq_c0, spu);
		if (ret)
			goto bail0;
	}
	if (spu->irqs[1] != NO_IRQ) {
		snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1",
			 spu->number);
		ret = request_irq(spu->irqs[1], spu_irq_class_1,
				  IRQF_DISABLED,
				  spu->irq_c1, spu);
		if (ret)
			goto bail1;
	}
	if (spu->irqs[2] != NO_IRQ) {
		snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2",
			 spu->number);
		ret = request_irq(spu->irqs[2], spu_irq_class_2,
				  IRQF_DISABLED,
				  spu->irq_c2, spu);
		if (ret)
			goto bail2;
	}
	return 0;

bail2:
	if (spu->irqs[1] != NO_IRQ)
		free_irq(spu->irqs[1], spu);
bail1:
	if (spu->irqs[0] != NO_IRQ)
		free_irq(spu->irqs[0], spu);
bail0:
	return ret;
}

static void spu_free_irqs(struct spu *spu)
{
	if (spu->irqs[0] != NO_IRQ)
		free_irq(spu->irqs[0], spu);
	if (spu->irqs[1] != NO_IRQ)
		free_irq(spu->irqs[1], spu);
	if (spu->irqs[2] != NO_IRQ)
		free_irq(spu->irqs[2], spu);
}

static struct list_head spu_list[MAX_NUMNODES];
static DEFINE_MUTEX(spu_mutex);

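/*
 * Bring the SPU channels into a known state before handing the SPU out:
 * zero the data of the listed channels and set their counts to sane
 * initial values through the priv2 channel-access registers.
 */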
static void spu_init_channels(struct spu *spu)
{
	static const struct {
		 unsigned channel;
		 unsigned count;
	} zero_list[] = {
		{ 0x00, 1, }, { 0x01, 1, }, { 0x03, 1, }, { 0x04, 1, },
		{ 0x18, 1, }, { 0x19, 1, }, { 0x1b, 1, }, { 0x1d, 1, },
	}, count_list[] = {
		{ 0x00, 0, }, { 0x03, 0, }, { 0x04, 0, }, { 0x15, 16, },
		{ 0x17, 1, }, { 0x18, 0, }, { 0x19, 0, }, { 0x1b, 0, },
		{ 0x1c, 1, }, { 0x1d, 0, }, { 0x1e, 1, },
	};
	struct spu_priv2 __iomem *priv2;
	int i;

	priv2 = spu->priv2;

	/* initialize all channel data to zero */
	for (i = 0; i < ARRAY_SIZE(zero_list); i++) {
		int count;

		out_be64(&priv2->spu_chnlcntptr_RW, zero_list[i].channel);
		for (count = 0; count < zero_list[i].count; count++)
			out_be64(&priv2->spu_chnldata_RW, 0);
	}

	/* initialize channel counts to meaningful values */
	for (i = 0; i < ARRAY_SIZE(count_list); i++) {
		out_be64(&priv2->spu_chnlcntptr_RW, count_list[i].channel);
		out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
	}
}

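/* Take a free SPU off the given node's list, if one is available. */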
struct spu *spu_alloc_node(int node)
{
	struct spu *spu = NULL;

	mutex_lock(&spu_mutex);
	if (!list_empty(&spu_list[node])) {
		spu = list_entry(spu_list[node].next, struct spu, list);
		list_del_init(&spu->list);
		pr_debug("Got SPU %x %d %d\n",
			 spu->isrc, spu->number, spu->node);
		spu_init_channels(spu);
	}
	mutex_unlock(&spu_mutex);

	return spu;
}
EXPORT_SYMBOL_GPL(spu_alloc_node);

struct spu *spu_alloc(void)
{
	struct spu *spu = NULL;
	int node;

	for (node = 0; node < MAX_NUMNODES; node++) {
		spu = spu_alloc_node(node);
		if (spu)
			break;
	}

	return spu;
}

void spu_free(struct spu *spu)
{
	mutex_lock(&spu_mutex);
	list_add_tail(&spu->list, &spu_list[spu->node]);
	mutex_unlock(&spu_mutex);
}
EXPORT_SYMBOL_GPL(spu_free);

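/*
 * Resolve an SPU-side fault against the owning process' address space:
 * find the VMA for the faulting address, check the access against the
 * DSISR bits and let handle_mm_fault() bring the page in.
 */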
static int spu_handle_mm_fault(struct spu *spu)
{
	struct mm_struct *mm = spu->mm;
	struct vm_area_struct *vma;
	u64 ea, dsisr, is_write;
	int ret;

	ea = spu->dar;
	dsisr = spu->dsisr;
#if 0
	if (!IS_VALID_EA(ea)) {
		return -EFAULT;
	}
#endif /* XXX */
	if (mm == NULL) {
		return -EFAULT;
	}
	if (mm->pgd == NULL) {
		return -EFAULT;
	}

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, ea);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= ea)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
#if 0
	if (expand_stack(vma, ea))
		goto bad_area;
#endif /* XXX */
good_area:
	is_write = dsisr & MFC_DSISR_ACCESS_PUT;
	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (dsisr & MFC_DSISR_ACCESS_DENIED)
			goto bad_area;
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}
	ret = 0;
	switch (handle_mm_fault(mm, vma, ea, is_write)) {
	case VM_FAULT_MINOR:
		current->min_flt++;
		break;
	case VM_FAULT_MAJOR:
		current->maj_flt++;
		break;
	case VM_FAULT_SIGBUS:
		ret = -EFAULT;
		goto bad_area;
	case VM_FAULT_OOM:
		ret = -ENOMEM;
		goto bad_area;
	default:
		BUG();
	}
	up_read(&mm->mmap_sem);
	return ret;

bad_area:
	up_read(&mm->mmap_sem);
	return -EFAULT;
}

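/*
 * Bottom half for class 1 faults, intended to be called from process
 * context by the SPU's owner: retry the hash table via hash_page() first
 * and fall back to spu_handle_mm_fault().  On success the DMA is
 * restarted, otherwise the fault is reported as an invalid DMA.
 */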
int spu_irq_class_1_bottom(struct spu *spu)
{
	u64 ea, dsisr, access, error = 0UL;
	int ret = 0;

	ea = spu->dar;
	dsisr = spu->dsisr;
	if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)) {
		unsigned long flags;

		access = (_PAGE_PRESENT | _PAGE_USER);
		access |= (dsisr & MFC_DSISR_ACCESS_PUT) ? _PAGE_RW : 0UL;
		local_irq_save(flags);
		if (hash_page(ea, access, 0x300) != 0)
			error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
		local_irq_restore(flags);
	}
	if (error & CLASS1_ENABLE_STORAGE_FAULT_INTR) {
		if ((ret = spu_handle_mm_fault(spu)) != 0)
			error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
		else
			error &= ~CLASS1_ENABLE_STORAGE_FAULT_INTR;
	}
	spu->dar = 0UL;
	spu->dsisr = 0UL;
	if (!error) {
		spu_restart_dma(spu);
	} else {
		__spu_trap_invalid_dma(spu);
	}
	return ret;
}

static int __init find_spu_node_id(struct device_node *spe)
{
	const unsigned int *id;
	struct device_node *cpu;
	cpu = spe->parent->parent;
	id = get_property(cpu, "node-id", NULL);
	return id ? *id : 0;
}

static int __init cell_spuprop_present(struct spu *spu, struct device_node *spe,
		const char *prop)
{
	static DEFINE_MUTEX(add_spumem_mutex);

	const struct address_prop {
		unsigned long address;
		unsigned int len;
	} __attribute__((packed)) *p;
	int proplen;

	unsigned long start_pfn, nr_pages;
	struct pglist_data *pgdata;
	struct zone *zone;
	int ret;

	p = get_property(spe, prop, &proplen);
	WARN_ON(proplen != sizeof (*p));

	start_pfn = p->address >> PAGE_SHIFT;
	nr_pages = ((unsigned long)p->len + PAGE_SIZE - 1) >> PAGE_SHIFT;

	pgdata = NODE_DATA(spu->nid);
	zone = pgdata->node_zones;

	/* XXX rethink locking here */
	mutex_lock(&add_spumem_mutex);
	ret = __add_pages(zone, start_pfn, nr_pages);
	mutex_unlock(&add_spumem_mutex);

	return ret;
}

static void __iomem * __init map_spe_prop(struct spu *spu,
		struct device_node *n, const char *name)
{
	const struct address_prop {
		unsigned long address;
		unsigned int len;
	} __attribute__((packed)) *prop;

	const void *p;
	int proplen;
	void __iomem *ret = NULL;
	int err = 0;

	p = get_property(n, name, &proplen);
	if (proplen != sizeof (struct address_prop))
		return NULL;

	prop = p;

	err = cell_spuprop_present(spu, n, name);
	if (err && (err != -EEXIST))
		goto out;

	ret = ioremap(prop->address, prop->len);

 out:
	return ret;
}

static void spu_unmap(struct spu *spu)
{
	iounmap(spu->priv2);
	iounmap(spu->priv1);
	iounmap(spu->problem);
	iounmap((__force u8 __iomem *)spu->local_store);
}

/* This function shall be abstracted for HV platforms */
static int __init spu_map_interrupts_old(struct spu *spu, struct device_node *np)
{
	unsigned int isrc;
	const u32 *tmp;

	/* Get the interrupt source unit from the device-tree */
	tmp = get_property(np, "isrc", NULL);
	if (!tmp)
		return -ENODEV;
	isrc = tmp[0];

	/* Add the node number */
	isrc |= spu->node << IIC_IRQ_NODE_SHIFT;
	spu->isrc = isrc;

	/* Now map interrupts of all 3 classes */
	spu->irqs[0] = irq_create_mapping(NULL, IIC_IRQ_CLASS_0 | isrc);
	spu->irqs[1] = irq_create_mapping(NULL, IIC_IRQ_CLASS_1 | isrc);
	spu->irqs[2] = irq_create_mapping(NULL, IIC_IRQ_CLASS_2 | isrc);

	/* Right now, we only fail if class 2 failed */
	return spu->irqs[2] == NO_IRQ ? -EINVAL : 0;
}

static int __init spu_map_device_old(struct spu *spu, struct device_node *node)
{
	const char *prop;
	int ret;

	ret = -ENODEV;
	spu->name = get_property(node, "name", NULL);
	if (!spu->name)
		goto out;

	prop = get_property(node, "local-store", NULL);
	if (!prop)
		goto out;
	spu->local_store_phys = *(unsigned long *)prop;

	/* we use local store as ram, not io memory */
	spu->local_store = (void __force *)
		map_spe_prop(spu, node, "local-store");
	if (!spu->local_store)
		goto out;

	prop = get_property(node, "problem", NULL);
	if (!prop)
		goto out_unmap;
	spu->problem_phys = *(unsigned long *)prop;

	spu->problem = map_spe_prop(spu, node, "problem");
	if (!spu->problem)
		goto out_unmap;

	spu->priv1 = map_spe_prop(spu, node, "priv1");
	/* priv1 is not available on a hypervisor */

	spu->priv2 = map_spe_prop(spu, node, "priv2");
	if (!spu->priv2)
		goto out_unmap;
	ret = 0;
	goto out;

out_unmap:
	spu_unmap(spu);
out:
	return ret;
}

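/*
 * Newer device trees describe the SPE interrupts and register areas with
 * standard "interrupts" and address entries; the routines below parse
 * those, while the *_old variants above remain as a fallback for the
 * original flat properties (see create_spu()).
 */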
static int __init spu_map_interrupts(struct spu *spu, struct device_node *np)
{
	struct of_irq oirq;
	int ret;
	int i;

	for (i=0; i < 3; i++) {
		ret = of_irq_map_one(np, i, &oirq);
		if (ret)
			goto err;

		ret = -EINVAL;
		spu->irqs[i] = irq_create_of_mapping(oirq.controller,
					oirq.specifier, oirq.size);
		if (spu->irqs[i] == NO_IRQ)
			goto err;
	}
	return 0;

err:
	pr_debug("failed to map irq %x for spu %s\n", *oirq.specifier, spu->name);
	for (; i >= 0; i--) {
		if (spu->irqs[i] != NO_IRQ)
			irq_dispose_mapping(spu->irqs[i]);
	}
	return ret;
}

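/* Map one address entry of the SPE node and optionally return its
 * physical base address. */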
static int spu_map_resource(struct device_node *node, int nr,
		void __iomem** virt, unsigned long *phys)
{
	struct resource resource = { };
	int ret;

	ret = of_address_to_resource(node, nr, &resource);
	if (ret)
		goto out;

	if (phys)
		*phys = resource.start;
	*virt = ioremap(resource.start, resource.end - resource.start + 1);
	if (!*virt)
		ret = -EINVAL;

out:
	return ret;
}

static int __init spu_map_device(struct spu *spu, struct device_node *node)
{
	int ret = -ENODEV;
	spu->name = get_property(node, "name", NULL);
	if (!spu->name)
		goto out;

	ret = spu_map_resource(node, 0, (void __iomem**)&spu->local_store,
					&spu->local_store_phys);
	if (ret)
		goto out;
	ret = spu_map_resource(node, 1, (void __iomem**)&spu->problem,
					&spu->problem_phys);
	if (ret)
		goto out_unmap;
	ret = spu_map_resource(node, 2, (void __iomem**)&spu->priv2,
					NULL);
	if (ret)
		goto out_unmap;

	if (!firmware_has_feature(FW_FEATURE_LPAR))
		ret = spu_map_resource(node, 3, (void __iomem**)&spu->priv1,
					NULL);
	if (ret)
		goto out_unmap;
	return 0;

out_unmap:
	spu_unmap(spu);
out:
	pr_debug("failed to map spe %s: %d\n", spu->name, ret);
	return ret;
}

struct sysdev_class spu_sysdev_class = {
	set_kset_name("spu")
};

static ssize_t spu_show_isrc(struct sys_device *sysdev, char *buf)
{
	struct spu *spu = container_of(sysdev, struct spu, sysdev);
	return sprintf(buf, "%d\n", spu->isrc);

}
static SYSDEV_ATTR(isrc, 0400, spu_show_isrc, NULL);

extern int attach_sysdev_to_node(struct sys_device *dev, int nid);

static int spu_create_sysdev(struct spu *spu)
{
	int ret;

	spu->sysdev.id = spu->number;
	spu->sysdev.cls = &spu_sysdev_class;
	ret = sysdev_register(&spu->sysdev);
	if (ret) {
		printk(KERN_ERR "Can't register SPU %d with sysfs\n",
				spu->number);
		return ret;
	}

	if (spu->isrc != 0)
		sysdev_create_file(&spu->sysdev, &attr_isrc);
	sysfs_add_device_to_node(&spu->sysdev, spu->nid);

	return 0;
}

static void spu_destroy_sysdev(struct spu *spu)
{
	sysdev_remove_file(&spu->sysdev, &attr_isrc);
	sysfs_remove_device_from_node(&spu->sysdev, spu->nid);
	sysdev_unregister(&spu->sysdev);
}

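/*
 * Create and register one SPU from its device tree node: map registers
 * and interrupts (new layout first, old layout as fallback), initialize
 * MFC state, request the interrupt handlers, create the sysdev and put
 * the new SPU on its node's free list.
 */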
static int __init create_spu(struct device_node *spe)
{
	struct spu *spu;
	int ret;
	static int number;

	ret = -ENOMEM;
	spu = kzalloc(sizeof (*spu), GFP_KERNEL);
	if (!spu)
		goto out;

	ret = spu_map_device(spu, spe);
	/* try old method */
	if (ret)
		ret = spu_map_device_old(spu, spe);
	if (ret)
		goto out_free;

	spu->node = find_spu_node_id(spe);
	spu->nid = of_node_to_nid(spe);
	if (spu->nid == -1)
		spu->nid = 0;
	ret = spu_map_interrupts(spu, spe);
	if (ret)
		ret = spu_map_interrupts_old(spu, spe);
	if (ret)
		goto out_unmap;
	spin_lock_init(&spu->register_lock);
	spu_mfc_sdr_set(spu, mfspr(SPRN_SDR1));
	spu_mfc_sr1_set(spu, 0x33);
	mutex_lock(&spu_mutex);

	spu->number = number++;
	ret = spu_request_irqs(spu);
	if (ret)
		goto out_unlock;

	ret = spu_create_sysdev(spu);
	if (ret)
		goto out_free_irqs;

	list_add(&spu->list, &spu_list[spu->node]);
	mutex_unlock(&spu_mutex);

	pr_debug("Using SPE %s %02x %p %p %p %p %d\n",
		spu->name, spu->isrc, spu->local_store,
		spu->problem, spu->priv1, spu->priv2, spu->number);
	goto out;

out_free_irqs:
	spu_free_irqs(spu);
out_unlock:
	mutex_unlock(&spu_mutex);
out_unmap:
	spu_unmap(spu);
out_free:
	kfree(spu);
out:
	return ret;
}

static void destroy_spu(struct spu *spu)
{
	list_del_init(&spu->list);

	spu_destroy_sysdev(spu);
	spu_free_irqs(spu);
	spu_unmap(spu);
	kfree(spu);
}

static void cleanup_spu_base(void)
{
	struct spu *spu, *tmp;
	int node;

	mutex_lock(&spu_mutex);
	for (node = 0; node < MAX_NUMNODES; node++) {
		list_for_each_entry_safe(spu, tmp, &spu_list[node], list)
			destroy_spu(spu);
	}
	mutex_unlock(&spu_mutex);
	sysdev_class_unregister(&spu_sysdev_class);
}
module_exit(cleanup_spu_base);

static int __init init_spu_base(void)
{
	struct device_node *node;
	int i, ret;

	/* create sysdev class for spus */
	ret = sysdev_class_register(&spu_sysdev_class);
	if (ret)
		return ret;

	for (i = 0; i < MAX_NUMNODES; i++)
		INIT_LIST_HEAD(&spu_list[i]);

	ret = -ENODEV;
	for (node = of_find_node_by_type(NULL, "spe");
			node; node = of_find_node_by_type(node, "spe")) {
		ret = create_spu(node);
		if (ret) {
			printk(KERN_WARNING "%s: Error initializing %s\n",
				__FUNCTION__, node->name);
			cleanup_spu_base();
			break;
		}
	}
	return ret;
}
module_init(init_spu_base);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");