/*
 * SPU file system -- file contents
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>

#include <asm/io.h>
#include <asm/semaphore.h>
#include <asm/spu.h>
#include <asm/spu_info.h>
#include <asm/uaccess.h>

#include "spufs.h"

41 42
#define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)

43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156
/* Simple attribute files */
struct spufs_attr {
	int (*get)(void *, u64 *);
	int (*set)(void *, u64);
	char get_buf[24];       /* enough to store a u64 and "\n\0" */
	char set_buf[24];
	void *data;
	const char *fmt;        /* format for read operation */
	struct mutex mutex;     /* protects access to these buffers */
};

/*
 * Allocate the per-open attribute state, wire up the get/set callbacks
 * and the printf format, and stash it in file->private_data.
 */
static int spufs_attr_open(struct inode *inode, struct file *file,
		int (*get)(void *, u64 *), int (*set)(void *, u64),
		const char *fmt)
{
	struct spufs_attr *attr = kmalloc(sizeof(*attr), GFP_KERNEL);

	if (!attr)
		return -ENOMEM;

	mutex_init(&attr->mutex);
	attr->get = get;
	attr->set = set;
	attr->fmt = fmt;
	attr->data = inode->i_private;
	file->private_data = attr;

	return nonseekable_open(inode, file);
}

/* Free the per-open attribute state allocated by spufs_attr_open(). */
static int spufs_attr_release(struct inode *inode, struct file *file)
{
	/* tab-indent to match kernel style (was space-indented) */
	kfree(file->private_data);
	return 0;
}

/*
 * Read an attribute value as formatted text.  On the first read the
 * ->get callback is invoked and its result formatted into get_buf;
 * continued reads (non-zero *ppos) serve the cached string, so the
 * value is sampled only once per read sequence.
 */
static ssize_t spufs_attr_read(struct file *file, char __user *buf,
		size_t len, loff_t *ppos)
{
	struct spufs_attr *attr;
	size_t size;
	ssize_t ret;

	attr = file->private_data;
	/* write-only attribute: no get callback registered */
	if (!attr->get)
		return -EACCES;

	ret = mutex_lock_interruptible(&attr->mutex);
	if (ret)
		return ret;

	if (*ppos) {		/* continued read */
		size = strlen(attr->get_buf);
	} else {		/* first read */
		u64 val;
		ret = attr->get(attr->data, &val);
		if (ret)
			goto out;

		size = scnprintf(attr->get_buf, sizeof(attr->get_buf),
				 attr->fmt, (unsigned long long)val);
	}

	ret = simple_read_from_buffer(buf, len, ppos, attr->get_buf, size);
out:
	mutex_unlock(&attr->mutex);
	return ret;
}

/*
 * Write an attribute: parse the user-supplied text as a number
 * (simple_strtol, base auto-detected) and pass it to the ->set
 * callback.  Input beyond set_buf is silently ignored but the full
 * length is claimed so callers don't loop.
 */
static ssize_t spufs_attr_write(struct file *file, const char __user *buf,
		size_t len, loff_t *ppos)
{
	struct spufs_attr *attr;
	u64 val;
	size_t size;
	ssize_t ret;

	attr = file->private_data;
	/* read-only attribute: no set callback registered */
	if (!attr->set)
		return -EACCES;

	ret = mutex_lock_interruptible(&attr->mutex);
	if (ret)
		return ret;

	ret = -EFAULT;
	size = min(sizeof(attr->set_buf) - 1, len);
	if (copy_from_user(attr->set_buf, buf, size))
		goto out;

	ret = len; /* claim we got the whole input */
	attr->set_buf[size] = '\0';
	val = simple_strtol(attr->set_buf, NULL, 0);
	attr->set(attr->data, val);
out:
	mutex_unlock(&attr->mutex);
	return ret;
}

/*
 * Generate a file_operations (__fops) whose open routine binds the
 * given get/set callbacks through the spufs_attr helpers above.
 * __fmt is the printf format used to render the value on read;
 * __simple_attr_check_format type-checks it at compile time.
 */
#define DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt)	\
static int __fops ## _open(struct inode *inode, struct file *file)	\
{									\
	__simple_attr_check_format(__fmt, 0ull);			\
	return spufs_attr_open(inode, file, __get, __set, __fmt);	\
}									\
static struct file_operations __fops = {				\
	.owner	 = THIS_MODULE,						\
	.open	 = __fops ## _open,					\
	.release = spufs_attr_release,					\
	.read	 = spufs_attr_read,					\
	.write	 = spufs_attr_write,					\
};

157

158 159 160 161
static int
spufs_mem_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
162
	struct spu_context *ctx = i->i_ctx;
163

164
	mutex_lock(&ctx->mapping_lock);
165
	file->private_data = ctx;
166 167
	if (!i->i_openers++)
		ctx->local_store = inode->i_mapping;
168
	mutex_unlock(&ctx->mapping_lock);
169 170 171 172 173 174 175 176 177
	return 0;
}

static int
spufs_mem_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

178
	mutex_lock(&ctx->mapping_lock);
179 180
	if (!--i->i_openers)
		ctx->local_store = NULL;
181
	mutex_unlock(&ctx->mapping_lock);
182 183 184
	return 0;
}

185 186 187 188 189 190 191 192 193
static ssize_t
__spufs_mem_read(struct spu_context *ctx, char __user *buffer,
			size_t size, loff_t *pos)
{
	char *local_store = ctx->ops->get_ls(ctx);
	return simple_read_from_buffer(buffer, size, pos, local_store,
					LS_SIZE);
}

194 195 196 197
static ssize_t
spufs_mem_read(struct file *file, char __user *buffer,
				size_t size, loff_t *pos)
{
198
	struct spu_context *ctx = file->private_data;
199
	ssize_t ret;
200

201
	spu_acquire(ctx);
202
	ret = __spufs_mem_read(ctx, buffer, size, pos);
203
	spu_release(ctx);
204 205 206 207 208
	return ret;
}

/*
 * write() on /mem: copy user data into the local store, clamped to
 * LS_SIZE.  The file offset is only advanced on success.
 */
static ssize_t
spufs_mem_write(struct file *file, const char __user *buffer,
					size_t size, loff_t *ppos)
{
	struct spu_context *ctx = file->private_data;
	char *local_store;
	loff_t pos = *ppos;
	int ret;

	if (pos < 0)
		return -EINVAL;
	if (pos > LS_SIZE)
		return -EFBIG;
	if (size > LS_SIZE - pos)
		size = LS_SIZE - pos;

	spu_acquire(ctx);
	local_store = ctx->ops->get_ls(ctx);
	ret = copy_from_user(local_store + pos, buffer, size);
	spu_release(ctx);

	if (ret)
		return -EFAULT;
	*ppos = pos + size;
	return size;
}

234 235
static unsigned long spufs_mem_mmap_nopfn(struct vm_area_struct *vma,
					  unsigned long address)
236
{
237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254
	struct spu_context *ctx	= vma->vm_file->private_data;
	unsigned long pfn, offset, addr0 = address;
#ifdef CONFIG_SPU_FS_64K_LS
	struct spu_state *csa = &ctx->csa;
	int psize;

	/* Check what page size we are using */
	psize = get_slice_psize(vma->vm_mm, address);

	/* Some sanity checking */
	BUG_ON(csa->use_big_pages != (psize == MMU_PAGE_64K));

	/* Wow, 64K, cool, we need to align the address though */
	if (csa->use_big_pages) {
		BUG_ON(vma->vm_start & 0xffff);
		address &= ~0xfffful;
	}
#endif /* CONFIG_SPU_FS_64K_LS */
255

256
	offset = (address - vma->vm_start) + (vma->vm_pgoff << PAGE_SHIFT);
257 258 259
	if (offset >= LS_SIZE)
		return NOPFN_SIGBUS;

260 261 262
	pr_debug("spufs_mem_mmap_nopfn address=0x%lx -> 0x%lx, offset=0x%lx\n",
		 addr0, address, offset);

263 264
	spu_acquire(ctx);

265 266
	if (ctx->state == SPU_STATE_SAVED) {
		vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
267
							& ~_PAGE_NO_CACHE);
268
		pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset);
269 270
	} else {
		vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
271 272
					     | _PAGE_NO_CACHE);
		pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT;
273
	}
274
	vm_insert_pfn(vma, address, pfn);
275

276
	spu_release(ctx);
277

278
	return NOPFN_REFAULT;
279 280
}

281

282
static struct vm_operations_struct spufs_mem_mmap_vmops = {
283
	.nopfn = spufs_mem_mmap_nopfn,
284 285
};

286
static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
287
{
288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303
#ifdef CONFIG_SPU_FS_64K_LS
	struct spu_context	*ctx = file->private_data;
	struct spu_state	*csa = &ctx->csa;

	/* Sanity check VMA alignment */
	if (csa->use_big_pages) {
		pr_debug("spufs_mem_mmap 64K, start=0x%lx, end=0x%lx,"
			 " pgoff=0x%lx\n", vma->vm_start, vma->vm_end,
			 vma->vm_pgoff);
		if (vma->vm_start & 0xffff)
			return -EINVAL;
		if (vma->vm_pgoff & 0xf)
			return -EINVAL;
	}
#endif /* CONFIG_SPU_FS_64K_LS */

304 305
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;
306

307
	vma->vm_flags |= VM_IO | VM_PFNMAP;
308 309 310 311
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE);

	vma->vm_ops = &spufs_mem_mmap_vmops;
312 313 314
	return 0;
}

#ifdef CONFIG_SPU_FS_64K_LS
/*
 * Pick a mapping address: contexts saved with 64k local-store pages
 * need a 64k slice, everything else falls back to the mm default.
 */
static unsigned long spufs_get_unmapped_area(struct file *file,
		unsigned long addr, unsigned long len, unsigned long pgoff,
		unsigned long flags)
{
	struct spu_context	*ctx = file->private_data;
	struct spu_state	*csa = &ctx->csa;

	/* If not using big pages, fallback to normal MM g_u_a */
	if (!csa->use_big_pages)
		return current->mm->get_unmapped_area(file, addr, len,
						      pgoff, flags);

	/* Else, try to obtain a 64K pages slice */
	return slice_get_unmapped_area(addr, len, flags,
				       MMU_PAGE_64K, 1, 0);
}
#endif /* CONFIG_SPU_FS_64K_LS */

334
static const struct file_operations spufs_mem_fops = {
335 336 337 338 339 340
	.open			= spufs_mem_open,
	.release		= spufs_mem_release,
	.read			= spufs_mem_read,
	.write			= spufs_mem_write,
	.llseek			= generic_file_llseek,
	.mmap			= spufs_mem_mmap,
341 342 343
#ifdef CONFIG_SPU_FS_64K_LS
	.get_unmapped_area	= spufs_get_unmapped_area,
#endif
344 345
};

346
static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
347
				    unsigned long address,
348
				    unsigned long ps_offs,
349
				    unsigned long ps_size)
350 351
{
	struct spu_context *ctx = vma->vm_file->private_data;
352
	unsigned long area, offset = address - vma->vm_start;
353 354

	offset += vma->vm_pgoff << PAGE_SHIFT;
355
	if (offset >= ps_size)
356
		return NOPFN_SIGBUS;
357

358 359 360 361 362 363 364
	/*
	 * We have to wait for context to be loaded before we have
	 * pages to hand out to the user, but we don't want to wait
	 * with the mmap_sem held.
	 * It is possible to drop the mmap_sem here, but then we need
	 * to return NOPFN_REFAULT because the mappings may have
	 * hanged.
365
	 */
366 367 368 369 370 371 372
	spu_acquire(ctx);
	if (ctx->state == SPU_STATE_SAVED) {
		up_read(&current->mm->mmap_sem);
		spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
		down_read(&current->mm->mmap_sem);
		goto out;
	}
373 374

	area = ctx->spu->problem_phys + ps_offs;
375
	vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT);
376 377

out:
378 379
	spu_release(ctx);

380
	return NOPFN_REFAULT;
381 382
}

#if SPUFS_MMAP_4K
/* Fault handler for the problem-state control area. */
static unsigned long spufs_cntl_mmap_nopfn(struct vm_area_struct *vma,
					   unsigned long address)
{
	return spufs_ps_nopfn(vma, address, 0x4000, 0x1000);
}

static struct vm_operations_struct spufs_cntl_mmap_vmops = {
391
	.nopfn = spufs_cntl_mmap_nopfn,
392 393 394 395 396 397 398 399 400 401
};

/*
 * mmap support for problem state control area [0x4000 - 0x4fff].
 */
static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

402
	vma->vm_flags |= VM_IO | VM_PFNMAP;
403
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
404
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);
405 406 407 408

	vma->vm_ops = &spufs_cntl_mmap_vmops;
	return 0;
}
409 410 411
#else /* SPUFS_MMAP_4K */
#define spufs_cntl_mmap NULL
#endif /* !SPUFS_MMAP_4K */
412

413
static int spufs_cntl_get(void *data, u64 *val)
414
{
415
	struct spu_context *ctx = data;
416

417
	spu_acquire(ctx);
418
	*val = ctx->ops->status_read(ctx);
419 420
	spu_release(ctx);

421
	return 0;
422 423
}

424
static int spufs_cntl_set(void *data, u64 val)
425
{
426 427 428 429 430
	struct spu_context *ctx = data;

	spu_acquire(ctx);
	ctx->ops->runcntl_write(ctx, val);
	spu_release(ctx);
431 432

	return 0;
433 434
}

435
static int spufs_cntl_open(struct inode *inode, struct file *file)
436
{
437 438 439
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

440
	mutex_lock(&ctx->mapping_lock);
441
	file->private_data = ctx;
442 443
	if (!i->i_openers++)
		ctx->cntl = inode->i_mapping;
444
	mutex_unlock(&ctx->mapping_lock);
445
	return spufs_attr_open(inode, file, spufs_cntl_get,
446
					spufs_cntl_set, "0x%08lx");
447 448
}

449 450 451 452 453 454
static int
spufs_cntl_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

455
	spufs_attr_release(inode, file);
456

457
	mutex_lock(&ctx->mapping_lock);
458 459
	if (!--i->i_openers)
		ctx->cntl = NULL;
460
	mutex_unlock(&ctx->mapping_lock);
461 462 463
	return 0;
}

464
static const struct file_operations spufs_cntl_fops = {
465
	.open = spufs_cntl_open,
466
	.release = spufs_cntl_release,
467 468
	.read = spufs_attr_read,
	.write = spufs_attr_write,
469 470 471
	.mmap = spufs_cntl_mmap,
};

472 473 474 475 476 477 478 479
static int
spufs_regs_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	file->private_data = i->i_ctx;
	return 0;
}

480 481 482 483 484 485 486 487 488
static ssize_t
__spufs_regs_read(struct spu_context *ctx, char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return simple_read_from_buffer(buffer, size, pos,
				      lscsa->gprs, sizeof lscsa->gprs);
}

489 490 491 492 493
static ssize_t
spufs_regs_read(struct file *file, char __user *buffer,
		size_t size, loff_t *pos)
{
	int ret;
494
	struct spu_context *ctx = file->private_data;
495 496

	spu_acquire_saved(ctx);
497
	ret = __spufs_regs_read(ctx, buffer, size, pos);
498
	spu_release_saved(ctx);
499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519
	return ret;
}

/* write() on /regs: copy user data into the saved GPR image. */
static ssize_t
spufs_regs_write(struct file *file, const char __user *buffer,
		 size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	size = min_t(ssize_t, sizeof lscsa->gprs - *pos, size);
	if (size <= 0)
		return -EFBIG;
	*pos += size;

	spu_acquire_saved(ctx);

	ret = copy_from_user(lscsa->gprs + *pos - size,
			     buffer, size) ? -EFAULT : size;

	spu_release_saved(ctx);
	return ret;
}

524
static const struct file_operations spufs_regs_fops = {
525 526 527
	.open	 = spufs_regs_open,
	.read    = spufs_regs_read,
	.write   = spufs_regs_write,
528 529 530
	.llseek  = generic_file_llseek,
};

531 532 533 534 535 536 537 538 539
static ssize_t
__spufs_fpcr_read(struct spu_context *ctx, char __user * buffer,
			size_t size, loff_t * pos)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return simple_read_from_buffer(buffer, size, pos,
				      &lscsa->fpcr, sizeof(lscsa->fpcr));
}

540 541 542 543 544
static ssize_t
spufs_fpcr_read(struct file *file, char __user * buffer,
		size_t size, loff_t * pos)
{
	int ret;
545
	struct spu_context *ctx = file->private_data;
546 547

	spu_acquire_saved(ctx);
548
	ret = __spufs_fpcr_read(ctx, buffer, size, pos);
549
	spu_release_saved(ctx);
550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570
	return ret;
}

/* write() on /fpcr: copy user data into the saved FPCR image. */
static ssize_t
spufs_fpcr_write(struct file *file, const char __user * buffer,
		 size_t size, loff_t * pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	size = min_t(ssize_t, sizeof(lscsa->fpcr) - *pos, size);
	if (size <= 0)
		return -EFBIG;
	*pos += size;

	spu_acquire_saved(ctx);

	ret = copy_from_user((char *)&lscsa->fpcr + *pos - size,
			     buffer, size) ? -EFAULT : size;

	spu_release_saved(ctx);
	return ret;
}

575
static const struct file_operations spufs_fpcr_fops = {
576 577 578 579 580 581
	.open = spufs_regs_open,
	.read = spufs_fpcr_read,
	.write = spufs_fpcr_write,
	.llseek = generic_file_llseek,
};

582 583 584 585 586 587 588 589 590
/* generic open function for all pipe-like files */
static int spufs_pipe_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	file->private_data = i->i_ctx;

	return nonseekable_open(inode, file);
}

591 592 593 594 595 596 597 598
/*
 * Read as many bytes from the mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - no more data available in the mailbox
 * - end of the user provided buffer
 * - end of the mapped area
 */
599 600 601
static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
602
	struct spu_context *ctx = file->private_data;
603 604
	u32 mbox_data, __user *udata;
	ssize_t count;
605 606 607 608

	if (len < 4)
		return -EINVAL;

609 610 611 612 613
	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	udata = (void __user *)buf;

614
	spu_acquire(ctx);
615
	for (count = 0; (count + 4) <= len; count += 4, udata++) {
616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632
		int ret;
		ret = ctx->ops->mbox_read(ctx, &mbox_data);
		if (ret == 0)
			break;

		/*
		 * at the end of the mapped area, we can fault
		 * but still need to return the data we have
		 * read successfully so far.
		 */
		ret = __put_user(mbox_data, udata);
		if (ret) {
			if (!count)
				count = -EFAULT;
			break;
		}
	}
633
	spu_release(ctx);
634

635 636
	if (!count)
		count = -EAGAIN;
637

638
	return count;
639 640
}

641
static const struct file_operations spufs_mbox_fops = {
642 643 644 645 646 647 648
	.open	= spufs_pipe_open,
	.read	= spufs_mbox_read,
};

/* Return the low byte of the mailbox status word (entry count). */
static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 mbox_stat;

	if (len < 4)
		return -EINVAL;

	spu_acquire(ctx);

	mbox_stat = ctx->ops->mbox_stat_read(ctx) & 0xff;

	spu_release(ctx);

	if (copy_to_user(buf, &mbox_stat, sizeof mbox_stat))
		return -EFAULT;

	return 4;
}

667
static const struct file_operations spufs_mbox_stat_fops = {
668 669 670 671 672
	.open	= spufs_pipe_open,
	.read	= spufs_mbox_stat_read,
};

/* low-level ibox access function */
673
size_t spu_ibox_read(struct spu_context *ctx, u32 *data)
674
{
675 676
	return ctx->ops->ibox_read(ctx, data);
}
677

678 679 680
static int spufs_ibox_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;
681

682
	return fasync_helper(fd, file, on, &ctx->ibox_fasync);
683 684
}

685 686
/* interrupt-level ibox callback function. */
void spufs_ibox_callback(struct spu *spu)
687
{
688 689
	struct spu_context *ctx = spu->ctx;

690 691 692
	if (!ctx)
		return;

693 694
	wake_up_all(&ctx->ibox_wq);
	kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN);
695 696
}

697 698 699 700 701 702 703 704 705 706 707 708
/*
 * Read as many bytes from the interrupt mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - no more data available in the mailbox
 * - end of the user provided buffer
 * - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * any data is available, but return when we have been able to
 * read something.
 */
709 710 711
static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
712
	struct spu_context *ctx = file->private_data;
713 714
	u32 ibox_data, __user *udata;
	ssize_t count;
715 716 717 718

	if (len < 4)
		return -EINVAL;

719 720 721 722 723
	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	udata = (void __user *)buf;

724
	spu_acquire(ctx);
725

726 727
	/* wait only for the first element */
	count = 0;
728
	if (file->f_flags & O_NONBLOCK) {
729
		if (!spu_ibox_read(ctx, &ibox_data))
730
			count = -EAGAIN;
731
	} else {
732
		count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
733
	}
734 735
	if (count)
		goto out;
736

737 738 739 740
	/* if we can't write at all, return -EFAULT */
	count = __put_user(ibox_data, udata);
	if (count)
		goto out;
741

742 743 744 745 746 747 748 749 750 751 752 753 754 755
	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = ctx->ops->ibox_read(ctx, &ibox_data);
		if (ret == 0)
			break;
		/*
		 * at the end of the mapped area, we can fault
		 * but still need to return the data we have
		 * read successfully so far.
		 */
		ret = __put_user(ibox_data, udata);
		if (ret)
			break;
	}
756

757 758
out:
	spu_release(ctx);
759

760
	return count;
761 762 763 764
}

/* poll() on the ibox: readable when the interrupt mailbox has data. */
static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	unsigned int mask;

	poll_wait(file, &ctx->ibox_wq, wait);

	spu_acquire(ctx);
	mask = ctx->ops->mbox_stat_poll(ctx, POLLIN | POLLRDNORM);
	spu_release(ctx);

	return mask;
}

777
static const struct file_operations spufs_ibox_fops = {
778 779 780 781 782 783 784 785 786
	.open	= spufs_pipe_open,
	.read	= spufs_ibox_read,
	.poll	= spufs_ibox_poll,
	.fasync	= spufs_ibox_fasync,
};

/* Return the ibox byte of the mailbox status word (bits 16-23). */
static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 ibox_stat;

	if (len < 4)
		return -EINVAL;

	spu_acquire(ctx);
	ibox_stat = (ctx->ops->mbox_stat_read(ctx) >> 16) & 0xff;
	spu_release(ctx);

	if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat))
		return -EFAULT;

	return 4;
}

803
static const struct file_operations spufs_ibox_stat_fops = {
804 805 806 807 808
	.open	= spufs_pipe_open,
	.read	= spufs_ibox_stat_read,
};

/* low-level mailbox write */
809
size_t spu_wbox_write(struct spu_context *ctx, u32 data)
810
{
811 812
	return ctx->ops->wbox_write(ctx, data);
}
813

814 815 816 817
static int spufs_wbox_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;
	int ret;
818

819
	ret = fasync_helper(fd, file, on, &ctx->wbox_fasync);
820 821 822 823

	return ret;
}

824 825
/* interrupt-level wbox callback function. */
void spufs_wbox_callback(struct spu *spu)
826
{
827 828
	struct spu_context *ctx = spu->ctx;

829 830 831
	if (!ctx)
		return;

832 833
	wake_up_all(&ctx->wbox_wq);
	kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT);
834 835
}

836 837 838 839 840 841 842 843 844 845 846 847
/*
 * Write as many bytes to the interrupt mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - the mailbox is full
 * - end of the user provided buffer
 * - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * space is availabyl, but return when we have been able to
 * write something.
 */
848 849 850
static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
851
	struct spu_context *ctx = file->private_data;
852 853
	u32 wbox_data, __user *udata;
	ssize_t count;
854 855 856 857

	if (len < 4)
		return -EINVAL;

858 859 860 861 862
	udata = (void __user *)buf;
	if (!access_ok(VERIFY_READ, buf, len))
		return -EFAULT;

	if (__get_user(wbox_data, udata))
863 864
		return -EFAULT;

865 866
	spu_acquire(ctx);

867 868 869 870 871
	/*
	 * make sure we can at least write one element, by waiting
	 * in case of !O_NONBLOCK
	 */
	count = 0;
872
	if (file->f_flags & O_NONBLOCK) {
873
		if (!spu_wbox_write(ctx, wbox_data))
874
			count = -EAGAIN;
875
	} else {
876
		count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
877 878
	}

879 880
	if (count)
		goto out;
881

882
	/* write as much as possible */
883 884 885 886 887 888 889 890 891 892 893 894 895 896
	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = __get_user(wbox_data, udata);
		if (ret)
			break;

		ret = spu_wbox_write(ctx, wbox_data);
		if (ret == 0)
			break;
	}

out:
	spu_release(ctx);
	return count;
897 898 899 900
}

/* poll() on the wbox: writable when the mailbox has free space. */
static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	unsigned int mask;

	poll_wait(file, &ctx->wbox_wq, wait);

	spu_acquire(ctx);
	mask = ctx->ops->mbox_stat_poll(ctx, POLLOUT | POLLWRNORM);
	spu_release(ctx);

	return mask;
}

913
static const struct file_operations spufs_wbox_fops = {
914 915 916 917 918 919 920 921 922
	.open	= spufs_pipe_open,
	.write	= spufs_wbox_write,
	.poll	= spufs_wbox_poll,
	.fasync	= spufs_wbox_fasync,
};

/* Return the wbox byte of the mailbox status word (bits 8-15). */
static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 wbox_stat;

	if (len < 4)
		return -EINVAL;

	spu_acquire(ctx);
	wbox_stat = (ctx->ops->mbox_stat_read(ctx) >> 8) & 0xff;
	spu_release(ctx);

	if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat))
		return -EFAULT;

	return 4;
}

939
static const struct file_operations spufs_wbox_stat_fops = {
940 941 942 943
	.open	= spufs_pipe_open,
	.read	= spufs_wbox_stat_read,
};

944 945 946 947
static int spufs_signal1_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;
948

949
	mutex_lock(&ctx->mapping_lock);
950
	file->private_data = ctx;
951 952
	if (!i->i_openers++)
		ctx->signal1 = inode->i_mapping;
953
	mutex_unlock(&ctx->mapping_lock);
954 955 956
	return nonseekable_open(inode, file);
}

957 958 959 960 961 962
static int
spufs_signal1_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

963
	mutex_lock(&ctx->mapping_lock);
964 965
	if (!--i->i_openers)
		ctx->signal1 = NULL;
966
	mutex_unlock(&ctx->mapping_lock);
967 968 969
	return 0;
}

970
static ssize_t __spufs_signal1_read(struct spu_context *ctx, char __user *buf,
971 972
			size_t len, loff_t *pos)
{
973
	int ret = 0;
974 975 976 977 978
	u32 data;

	if (len < 4)
		return -EINVAL;

979 980 981 982
	if (ctx->csa.spu_chnlcnt_RW[3]) {
		data = ctx->csa.spu_chnldata_RW[3];
		ret = 4;
	}
983

984 985 986
	if (!ret)
		goto out;

987 988 989
	if (copy_to_user(buf, &data, 4))
		return -EFAULT;

990 991
out:
	return ret;
992 993
}

994 995 996 997 998 999 1000 1001
static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	spu_acquire_saved(ctx);
	ret = __spufs_signal1_read(ctx, buf, len, pos);
1002
	spu_release_saved(ctx);
1003 1004 1005 1006

	return ret;
}

1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020
static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx;
	u32 data;

	ctx = file->private_data;

	if (len < 4)
		return -EINVAL;

	if (copy_from_user(&data, buf, 4))
		return -EFAULT;

1021 1022 1023
	spu_acquire(ctx);
	ctx->ops->signal1_write(ctx, data);
	spu_release(ctx);
1024 1025 1026 1027

	return 4;
}

/* Fault handler for /signal1 mappings. */
static unsigned long spufs_signal1_mmap_nopfn(struct vm_area_struct *vma,
					      unsigned long address)
{
#if PAGE_SIZE == 0x1000
	return spufs_ps_nopfn(vma, address, 0x14000, 0x1000);
#elif PAGE_SIZE == 0x10000
	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
	 * signal 1 and 2 area
	 */
	return spufs_ps_nopfn(vma, address, 0x10000, 0x10000);
#else
#error unsupported page size
#endif
}

static struct vm_operations_struct spufs_signal1_mmap_vmops = {
1044
	.nopfn = spufs_signal1_mmap_nopfn,
1045 1046 1047 1048 1049 1050 1051
};

static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

1052
	vma->vm_flags |= VM_IO | VM_PFNMAP;
1053
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
1054
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);
1055 1056 1057 1058 1059

	vma->vm_ops = &spufs_signal1_mmap_vmops;
	return 0;
}

1060
static const struct file_operations spufs_signal1_fops = {
1061
	.open = spufs_signal1_open,
1062
	.release = spufs_signal1_release,
1063 1064
	.read = spufs_signal1_read,
	.write = spufs_signal1_write,
1065
	.mmap = spufs_signal1_mmap,
1066 1067
};

1068 1069 1070 1071 1072 1073 1074
static const struct file_operations spufs_signal1_nosched_fops = {
	.open = spufs_signal1_open,
	.release = spufs_signal1_release,
	.write = spufs_signal1_write,
	.mmap = spufs_signal1_mmap,
};

1075 1076 1077 1078
static int spufs_signal2_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;
1079

1080
	mutex_lock(&ctx->mapping_lock);
1081
	file->private_data = ctx;
1082 1083
	if (!i->i_openers++)
		ctx->signal2 = inode->i_mapping;
1084
	mutex_unlock(&ctx->mapping_lock);
1085 1086 1087
	return nonseekable_open(inode, file);
}

1088 1089 1090 1091 1092 1093
static int
spufs_signal2_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

1094
	mutex_lock(&ctx->mapping_lock);
1095 1096
	if (!--i->i_openers)
		ctx->signal2 = NULL;
1097
	mutex_unlock(&ctx->mapping_lock);
1098 1099 1100
	return 0;
}

1101
static ssize_t __spufs_signal2_read(struct spu_context *ctx, char __user *buf,
1102 1103
			size_t len, loff_t *pos)
{
1104
	int ret = 0;
1105 1106 1107 1108 1109
	u32 data;

	if (len < 4)
		return -EINVAL;

1110 1111 1112 1113
	if (ctx->csa.spu_chnlcnt_RW[4]) {
		data =  ctx->csa.spu_chnldata_RW[4];
		ret = 4;
	}
1114

1115 1116 1117
	if (!ret)
		goto out;

1118 1119 1120
	if (copy_to_user(buf, &data, 4))
		return -EFAULT;

1121
out:
1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132
	return ret;
}

/* read() on /signal2: force the context into saved state, then copy out. */
static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	spu_acquire_saved(ctx);
	ret = __spufs_signal2_read(ctx, buf, len, pos);
	spu_release_saved(ctx);

	return ret;
}

/* write() on /signal2: deliver one 32-bit value to the notification register. */
static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx;
	u32 data;

	ctx = file->private_data;

	if (len < 4)
		return -EINVAL;

	if (copy_from_user(&data, buf, 4))
		return -EFAULT;

	spu_acquire(ctx);
	ctx->ops->signal2_write(ctx, data);
	spu_release(ctx);

	return 4;
}

#if SPUFS_MMAP_4K
/* Fault handler for /signal2 mappings. */
static unsigned long spufs_signal2_mmap_nopfn(struct vm_area_struct *vma,
					      unsigned long address)
{
#if PAGE_SIZE == 0x1000
	return spufs_ps_nopfn(vma, address, 0x1c000, 0x1000);
#elif PAGE_SIZE == 0x10000
	/* For 64k pages, both signal1 and signal2 can be used to mmap the whole
	 * signal 1 and 2 area
	 */
	return spufs_ps_nopfn(vma, address, 0x10000, 0x10000);
#else
#error unsupported page size
#endif
}

static struct vm_operations_struct spufs_signal2_mmap_vmops = {
1176
	.nopfn = spufs_signal2_mmap_nopfn,
1177 1178 1179 1180 1181 1182 1183
};

static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

1184
	vma->vm_flags |= VM_IO | VM_PFNMAP;
1185
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
1186
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);
1187 1188 1189 1190

	vma->vm_ops = &spufs_signal2_mmap_vmops;
	return 0;
}
1191 1192 1193
#else /* SPUFS_MMAP_4K */
#define spufs_signal2_mmap NULL
#endif /* !SPUFS_MMAP_4K */
1194

1195
static const struct file_operations spufs_signal2_fops = {
1196
	.open = spufs_signal2_open,
1197
	.release = spufs_signal2_release,
1198 1199
	.read = spufs_signal2_read,
	.write = spufs_signal2_write,
1200
	.mmap = spufs_signal2_mmap,
1201 1202
};

1203 1204 1205 1206 1207 1208 1209
static const struct file_operations spufs_signal2_nosched_fops = {
	.open = spufs_signal2_open,
	.release = spufs_signal2_release,
	.write = spufs_signal2_write,
	.mmap = spufs_signal2_mmap,
};

1210 1211 1212 1213 1214 1215 1216 1217 1218 1219
/*
 * This is a wrapper around DEFINE_SIMPLE_ATTRIBUTE which does the
 * work of acquiring (or not) the SPU context before calling through
 * to the actual get routine. The set routine is called directly.
 */
#define SPU_ATTR_NOACQUIRE	0
#define SPU_ATTR_ACQUIRE	1
#define SPU_ATTR_ACQUIRE_SAVED	2

/* Generates __<getter> (locking wrapper) and the <name> attribute ops. */
#define DEFINE_SPUFS_ATTRIBUTE(__name, __get, __set, __fmt, __acquire)	\
static int __##__get(void *data, u64 *val)				\
{									\
	struct spu_context *ctx = data;					\
									\
	if (__acquire == SPU_ATTR_ACQUIRE) {				\
		spu_acquire(ctx);					\
		*val = __get(ctx);					\
		spu_release(ctx);					\
	} else if (__acquire == SPU_ATTR_ACQUIRE_SAVED)	{		\
		spu_acquire_saved(ctx);					\
		*val = __get(ctx);					\
		spu_release_saved(ctx);					\
	} else								\
		*val = __get(ctx);					\
									\
	return 0;							\
}									\
DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__name, __##__get, __set, __fmt);
1238

1239
/* Set the signal 1 channel type through the backing ops. */
static int spufs_signal1_type_set(void *data, u64 val)
{
	struct spu_context *ctx = data;

	spu_acquire(ctx);
	ctx->ops->signal1_type_set(ctx, val);
	spu_release(ctx);

	return 0;
}

/* Read the signal 1 channel type; locking done by the attribute wrapper. */
static u64 spufs_signal1_type_get(struct spu_context *ctx)
{
	return ctx->ops->signal1_type_get(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get,
		       spufs_signal1_type_set, "%llu", SPU_ATTR_ACQUIRE);
1256

1257

1258
/* Set the signal 2 channel type through the backing ops. */
static int spufs_signal2_type_set(void *data, u64 val)
{
	struct spu_context *ctx = data;

	spu_acquire(ctx);
	ctx->ops->signal2_type_set(ctx, val);
	spu_release(ctx);

	return 0;
}

/* Read the signal 2 channel type; locking done by the attribute wrapper. */
static u64 spufs_signal2_type_get(struct spu_context *ctx)
{
	return ctx->ops->signal2_type_get(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
		       spufs_signal2_type_set, "%llu", SPU_ATTR_ACQUIRE);
1275

1276
#if SPUFS_MMAP_4K
/* Fault handler for the 4k mss problem-state page at offset 0. */
static unsigned long spufs_mss_mmap_nopfn(struct vm_area_struct *vma,
					  unsigned long address)
{
	return spufs_ps_nopfn(vma, address, 0x0000, 0x1000);
}

static struct vm_operations_struct spufs_mss_mmap_vmops = {
	.nopfn = spufs_mss_mmap_nopfn,
};

/*
 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
 */
static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_mss_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_mss_mmap NULL
#endif /* !SPUFS_MMAP_4K */

/* Record the address_space on first open so context switch code can
 * unmap it; i_openers counts concurrent opens. */
static int spufs_mss_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	file->private_data = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!i->i_openers++)
		ctx->mss = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_mss_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	/* last close drops the cached mapping pointer */
	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->mss = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static const struct file_operations spufs_mss_fops = {
	.open	 = spufs_mss_open,
	.release = spufs_mss_release,
	.mmap	 = spufs_mss_mmap,
};

1339 1340
/* Fault handler covering the full 128k problem-state area. */
static unsigned long spufs_psmap_mmap_nopfn(struct vm_area_struct *vma,
					    unsigned long address)
{
	return spufs_ps_nopfn(vma, address, 0x0000, 0x20000);
}

static struct vm_operations_struct spufs_psmap_mmap_vmops = {
	.nopfn = spufs_psmap_mmap_nopfn,
};

/*
 * mmap support for full problem state area [0x00000 - 0x1ffff].
 */
static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_psmap_mmap_vmops;
	return 0;
}

/* Record the address_space on first open; see spufs_mss_open. */
static int spufs_psmap_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = i->i_ctx;
	if (!i->i_openers++)
		ctx->psmap = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_psmap_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	/* last close drops the cached mapping pointer */
	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->psmap = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

static const struct file_operations spufs_psmap_fops = {
	.open	 = spufs_psmap_open,
	.release = spufs_psmap_release,
	.mmap	 = spufs_psmap_mmap,
};


1398
#if SPUFS_MMAP_4K
1399 1400
static unsigned long spufs_mfc_mmap_nopfn(struct vm_area_struct *vma,
					  unsigned long address)
1401
{
1402
	return spufs_ps_nopfn(vma, address, 0x3000, 0x1000);
1403 1404 1405
}

static struct vm_operations_struct spufs_mfc_mmap_vmops = {
1406
	.nopfn = spufs_mfc_mmap_nopfn,
1407 1408 1409 1410 1411 1412 1413 1414 1415 1416
};

/*
 * mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
 */
static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

1417
	vma->vm_flags |= VM_IO | VM_PFNMAP;
1418
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
1419
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);
1420 1421 1422 1423

	vma->vm_ops = &spufs_mfc_mmap_vmops;
	return 0;
}
1424 1425 1426
#else /* SPUFS_MMAP_4K */
#define spufs_mfc_mmap NULL
#endif /* !SPUFS_MMAP_4K */
1427 1428 1429 1430 1431 1432 1433 1434 1435 1436 1437 1438 1439

/*
 * Open the mfc file.  Restricted to the owning mm and to a single
 * opener (checked via the inode reference count) since DMA commands
 * act on the caller's address space.
 */
static int spufs_mfc_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	/* we don't want to deal with DMA into other processes */
	if (ctx->owner != current->mm)
		return -EINVAL;

	if (atomic_read(&inode->i_count) != 1)
		return -EBUSY;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->mfc = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return nonseekable_open(inode, file);
}

static int
spufs_mfc_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	/* last close drops the cached mapping pointer */
	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->mfc = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

1461 1462 1463 1464 1465
/* interrupt-level mfc callback function. */
void spufs_mfc_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	/* SPU may have been freed/unbound already */
	if (!ctx)
		return;

	wake_up_all(&ctx->mfc_wq);

	pr_debug("%s %s\n", __FUNCTION__, spu->name);
	if (ctx->mfc_fasync) {
		u32 free_elements, tagstatus;
		unsigned int mask;

		/* no need for spu_acquire in interrupt context */
		free_elements = ctx->ops->get_mfc_free_elements(ctx);
		tagstatus = ctx->ops->read_mfc_tagstatus(ctx);

		/* report writability (queue space) and readability
		 * (completed tag groups) to async waiters */
		mask = 0;
		if (free_elements & 0xffff)
			mask |= POLLOUT;
		if (tagstatus & ctx->tagwait)
			mask |= POLLIN;

		kill_fasync(&ctx->mfc_fasync, SIGIO, mask);
	}
}

/* Returns 1 (and clears the completed tags from tagwait) if any waited-on
 * tag group completed, else re-arms the completion interrupt and returns 0. */
static int spufs_read_mfc_tagstatus(struct spu_context *ctx, u32 *status)
{
	/* See if there is one tag group is complete */
	/* FIXME we need locking around tagwait */
	*status = ctx->ops->read_mfc_tagstatus(ctx) & ctx->tagwait;
	ctx->tagwait &= ~*status;
	if (*status)
		return 1;

	/* enable interrupt waiting for any tag group,
	   may silently fail if interrupts are already enabled */
	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
	return 0;
}

/*
 * Read the 32-bit completion status of the waited-on tag groups.
 * Non-blocking readers get -EAGAIN if nothing completed yet;
 * blocking readers sleep on mfc_wq until a tag group completes.
 */
static ssize_t spufs_mfc_read(struct file *file, char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret = -EINVAL;
	u32 status;

	if (size != 4)
		goto out;

	spu_acquire(ctx);
	if (file->f_flags & O_NONBLOCK) {
		status = ctx->ops->read_mfc_tagstatus(ctx);
		if (!(status & ctx->tagwait))
			ret = -EAGAIN;
		else
			/* only clear the tags we actually report */
			ctx->tagwait &= ~status;
	} else {
		ret = spufs_wait(ctx->mfc_wq,
			   spufs_read_mfc_tagstatus(ctx, &status));
	}
	spu_release(ctx);

	if (ret)
		goto out;

	ret = 4;
	if (copy_to_user(buffer, &status, 4))
		ret = -EFAULT;

out:
	return ret;
}

/*
 * Validate a user-supplied MFC DMA command: opcode, local-store/EA
 * alignment, transfer size, tag range and class.  Returns 0 if the
 * command may be queued, -EIO otherwise.
 */
static int spufs_check_valid_dma(struct mfc_dma_command *cmd)
{
	pr_debug("queueing DMA %x %lx %x %x %x\n", cmd->lsa,
		 cmd->ea, cmd->size, cmd->tag, cmd->cmd);

	switch (cmd->cmd) {
	case MFC_PUT_CMD:
	case MFC_PUTF_CMD:
	case MFC_PUTB_CMD:
	case MFC_GET_CMD:
	case MFC_GETF_CMD:
	case MFC_GETB_CMD:
		break;
	default:
		pr_debug("invalid DMA opcode %x\n", cmd->cmd);
		return -EIO;
	}

	/* lsa and ea must agree in their low 4 bits */
	if ((cmd->lsa & 0xf) != (cmd->ea &0xf)) {
		pr_debug("invalid DMA alignment, ea %lx lsa %x\n",
				cmd->ea, cmd->lsa);
		return -EIO;
	}

	/* lsa must be naturally aligned to the transfer granule; the
	 * error: label deliberately falls through into default */
	switch (cmd->size & 0xf) {
	case 1:
		break;
	case 2:
		if (cmd->lsa & 1)
			goto error;
		break;
	case 4:
		if (cmd->lsa & 3)
			goto error;
		break;
	case 8:
		if (cmd->lsa & 7)
			goto error;
		break;
	case 0:
		if (cmd->lsa & 15)
			goto error;
		break;
	error:
	default:
		pr_debug("invalid DMA alignment %x for size %x\n",
			cmd->lsa & 0xf, cmd->size);
		return -EIO;
	}

	if (cmd->size > 16 * 1024) {
		pr_debug("invalid DMA size %x\n", cmd->size);
		return -EIO;
	}

	if (cmd->tag & 0xfff0) {
		/* we reserve the higher tag numbers for kernel use */
		pr_debug("invalid DMA tag\n");
		return -EIO;
	}

	if (cmd->class) {
		/* not supported in this version */
		pr_debug("invalid DMA class\n");
		return -EIO;
	}

	return 0;
}

/*
 * Try to queue one MFC command, retrying once after arming the
 * tag-group completion query if the queue was full.  Returns 0
 * (meaning "keep waiting") only when the queue is still full after
 * the retry; the submit status is passed back through *error.
 */
static int spu_send_mfc_command(struct spu_context *ctx,
				struct mfc_dma_command cmd,
				int *error)
{
	*error = ctx->ops->send_mfc_command(ctx, &cmd);
	if (*error == -EAGAIN) {
		/* wait for any tag group to complete
		   so we have space for the new command */
		ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1);
		/* try again, because the queue might be
		   empty again */
		*error = ctx->ops->send_mfc_command(ctx, &cmd);
		if (*error == -EAGAIN)
			return 0;
	}
	return 1;
}

/*
 * Queue a DMA command from user space.  The command is validated,
 * then submitted once the context is runnable; on success the
 * command's tag is added to tagwait so completion can be waited on.
 */
static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct mfc_dma_command cmd;
	int ret = -EINVAL;

	if (size != sizeof cmd)
		goto out;

	ret = -EFAULT;
	if (copy_from_user(&cmd, buffer, sizeof cmd))
		goto out;

	ret = spufs_check_valid_dma(&cmd);
	if (ret)
		goto out;

	spu_acquire(ctx);
	ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
	/* NOTE(review): this error path skips spu_release() — presumably
	 * spufs_wait drops the context lock itself on failure; confirm. */
	if (ret)
		goto out;

	if (file->f_flags & O_NONBLOCK) {
		ret = ctx->ops->send_mfc_command(ctx, &cmd);
	} else {
		int status;
		ret = spufs_wait(ctx->mfc_wq,
				 spu_send_mfc_command(ctx, cmd, &status));
		if (status)
			ret = status;
	}

	if (ret)
		goto out_unlock;

	ctx->tagwait |= 1 << cmd.tag;
	ret = size;

out_unlock:
	spu_release(ctx);
out:
	return ret;
}

/*
 * poll on the mfc file: POLLOUT when there is queue space for a new
 * command, POLLIN when a waited-on tag group has completed.
 */
static unsigned int spufs_mfc_poll(struct file *file,poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	u32 free_elements, tagstatus;
	unsigned int mask;

	poll_wait(file, &ctx->mfc_wq, wait);

	/* arm the tag query, then sample queue space and completion
	 * status under the context lock */
	spu_acquire(ctx);
	ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2);
	free_elements = ctx->ops->get_mfc_free_elements(ctx);
	tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
	spu_release(ctx);

	mask = 0;
	if (free_elements & 0xffff)
		mask |= POLLOUT | POLLWRNORM;
	if (tagstatus & ctx->tagwait)
		mask |= POLLIN | POLLRDNORM;

	pr_debug("%s: free %d tagstatus %d tagwait %d\n", __FUNCTION__,
		free_elements, tagstatus, ctx->tagwait);

	return mask;
}

/* flush: would drain outstanding DMA; the real wait is disabled below. */
static int spufs_mfc_flush(struct file *file, fl_owner_t id)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	spu_acquire(ctx);
#if 0
/* this currently hangs */
	ret = spufs_wait(ctx->mfc_wq,
			 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2));
	if (ret)
		goto out;
	ret = spufs_wait(ctx->mfc_wq,
			 ctx->ops->read_mfc_tagstatus(ctx) == ctx->tagwait);
out:
#else
	ret = 0;
#endif
	spu_release(ctx);

	return ret;
}

/* fsync on mfc is just a flush; datasync is ignored */
static int spufs_mfc_fsync(struct file *file, struct dentry *dentry,
			   int datasync)
{
	return spufs_mfc_flush(file, NULL);
}

static int spufs_mfc_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;

	return fasync_helper(fd, file, on, &ctx->mfc_fasync);
}

1734
/* File operations for the per-context "mfc" file. */
static const struct file_operations spufs_mfc_fops = {
	.open	 = spufs_mfc_open,
	.release = spufs_mfc_release,
	.read	 = spufs_mfc_read,
	.write	 = spufs_mfc_write,
	.poll	 = spufs_mfc_poll,
	.flush	 = spufs_mfc_flush,
	.fsync	 = spufs_mfc_fsync,
	.fasync	 = spufs_mfc_fasync,
	.mmap	 = spufs_mfc_mmap,
};

1746
/* Write the next program counter through the backing ops. */
static int spufs_npc_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	spu_acquire(ctx);
	ctx->ops->npc_write(ctx, val);
	spu_release(ctx);

	return 0;
}

/* Read the next program counter; locking done by the attribute wrapper. */
static u64 spufs_npc_get(struct spu_context *ctx)
{
	return ctx->ops->npc_read(ctx);
}
DEFINE_SPUFS_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set,
		       "0x%llx\n", SPU_ATTR_ACQUIRE);
1762

1763
/* Store a new decrementer value into the saved local-store context area. */
static int spufs_decr_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	spu_acquire_saved(ctx);
	lscsa->decr.slot[0] = (u32) val;
	spu_release_saved(ctx);

	return 0;
}

/* Read the saved decrementer value; wrapper holds the saved state. */
static u64 spufs_decr_get(struct spu_context *ctx)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return lscsa->decr.slot[0];
}
DEFINE_SPUFS_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,
		       "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED);
1781

1782
static int spufs_decr_status_set(void *data, u64 val)
1783 1784 1785
{
	struct spu_context *ctx = data;
	spu_acquire_saved(ctx);
1786 1787 1788 1789
	if (val)
		ctx->csa.priv2.mfc_control_RW |= MFC_CNTL_DECREMENTER_RUNNING;
	else
		ctx->csa.priv2.mfc_control_RW &= ~MFC_CNTL_DECREMENTER_RUNNING;
1790
	spu_release_saved(ctx);
1791 1792

	return 0;
1793 1794
}

1795
static u64 spufs_decr_status_get(struct spu_context *ctx)
1796
{
1797 1798 1799 1800
	if (ctx->csa.priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING)
		return SPU_DECR_STATUS_RUNNING;
	else
		return 0;
1801
}
1802 1803 1804
DEFINE_SPUFS_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get,
		       spufs_decr_status_set, "0x%llx\n",
		       SPU_ATTR_ACQUIRE_SAVED);
1805

1806
/* Store a new event mask into the saved local-store context area. */
static int spufs_event_mask_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	spu_acquire_saved(ctx);
	lscsa->event_mask.slot[0] = (u32) val;
	spu_release_saved(ctx);

	return 0;
}

/* Read the saved event mask; wrapper holds the saved state. */
static u64 spufs_event_mask_get(struct spu_context *ctx)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return lscsa->event_mask.slot[0];
}

DEFINE_SPUFS_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get,
		       spufs_event_mask_set, "0x%llx\n",
		       SPU_ATTR_ACQUIRE_SAVED);
1826

1827
static u64 spufs_event_status_get(struct spu_context *ctx)
1828 1829 1830 1831 1832
{
	struct spu_state *state = &ctx->csa;
	u64 stat;
	stat = state->spu_chnlcnt_RW[0];
	if (stat)
1833 1834 1835
		return state->spu_chnldata_RW[0];
	return 0;
}
1836 1837
DEFINE_SPUFS_ATTRIBUTE(spufs_event_status_ops, spufs_event_status_get,
		       NULL, "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)
1838

1839
/* Store a new SRR0 (interrupt return address) into the saved state. */
static int spufs_srr0_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	spu_acquire_saved(ctx);
	lscsa->srr0.slot[0] = (u32) val;
	spu_release_saved(ctx);

	return 0;
}

/* Read the saved SRR0; wrapper holds the saved state. */
static u64 spufs_srr0_get(struct spu_context *ctx)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	return lscsa->srr0.slot[0];
}
DEFINE_SPUFS_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set,
		       "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED)
1857

1858
static u64 spufs_id_get(struct spu_context *ctx)
1859 1860 1861 1862 1863 1864 1865 1866 1867 1868
{
	u64 num;

	if (ctx->state == SPU_STATE_RUNNABLE)
		num = ctx->spu->number;
	else
		num = (unsigned int)-1;

	return num;
}
1869 1870
DEFINE_SPUFS_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n",
		       SPU_ATTR_ACQUIRE)
1871

1872
static u64 spufs_object_id_get(struct spu_context *ctx)
1873 1874
{
	/* FIXME: Should there really be no locking here? */
1875
	return ctx->object_id;
1876 1877
}

1878
static int spufs_object_id_set(void *data, u64 id)
1879 1880 1881
{
	struct spu_context *ctx = data;
	ctx->object_id = id;
1882 1883

	return 0;
1884 1885
}

1886 1887
DEFINE_SPUFS_ATTRIBUTE(spufs_object_id_ops, spufs_object_id_get,
		       spufs_object_id_set, "0x%llx\n", SPU_ATTR_NOACQUIRE);
1888

1889
/* Read-only local store limit register from the saved privileged state. */
static u64 spufs_lslr_get(struct spu_context *ctx)
{
	return ctx->csa.priv2.spu_lslr_RW;
}
DEFINE_SPUFS_ATTRIBUTE(spufs_lslr_ops, spufs_lslr_get, NULL, "0x%llx\n",
		       SPU_ATTR_ACQUIRE_SAVED);
1895 1896 1897 1898 1899 1900 1901 1902 1903

/* Common open for the *_info files: stash the context pointer. */
static int spufs_info_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;
	file->private_data = ctx;
	return 0;
}

/* Show one capability per line, based on the context creation flags. */
static int spufs_caps_show(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;

	if (!(ctx->flags & SPU_CREATE_NOSCHED))
		seq_puts(s, "sched\n");
	if (!(ctx->flags & SPU_CREATE_ISOLATE))
		seq_puts(s, "step\n");
	return 0;
}

static int spufs_caps_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_caps_show, SPUFS_I(inode)->i_ctx);
}

static const struct file_operations spufs_caps_fops = {
	.open		= spufs_caps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

1927 1928 1929 1930 1931 1932 1933 1934 1935 1936 1937 1938 1939 1940
/*
 * Copy the saved PU mailbox contents to user space.
 *
 * Caller must hold the saved context (spu_acquire_saved) and the
 * csa register lock.  Fix: `data` was copied to user space even when
 * the mailbox was empty, leaking uninitialized kernel stack bytes;
 * it is now zero-initialized.
 */
static ssize_t __spufs_mbox_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	u32 mbox_stat;
	u32 data = 0;	/* avoid leaking stack garbage when mbox is empty */

	mbox_stat = ctx->csa.prob.mb_stat_R;
	if (mbox_stat & 0x0000ff) {
		data = ctx->csa.prob.pu_mb_R;
	}

	return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
}

1941 1942 1943
/* Read the saved mailbox state; locks the saved context and registers. */
static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	spu_acquire_saved(ctx);
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_mbox_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_mbox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_mbox_info_read,
	.llseek  = generic_file_llseek,
};

1965 1966 1967 1968 1969 1970 1971 1972 1973 1974 1975 1976 1977 1978
/*
 * Copy the saved interrupt mailbox contents to user space.
 *
 * Caller must hold the saved context and the csa register lock.
 * Fix: `data` was copied to user space even when the ibox was empty,
 * leaking uninitialized kernel stack bytes; it is now zero-initialized.
 */
static ssize_t __spufs_ibox_info_read(struct spu_context *ctx,
				char __user *buf, size_t len, loff_t *pos)
{
	u32 ibox_stat;
	u32 data = 0;	/* avoid leaking stack garbage when ibox is empty */

	ibox_stat = ctx->csa.prob.mb_stat_R;
	if (ibox_stat & 0xff0000) {
		data = ctx->csa.priv2.puint_mb_R;
	}

	return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
}

1979 1980 1981 1982
/* Read the saved ibox state; locks the saved context and registers. */
static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	spu_acquire_saved(ctx);
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_ibox_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_ibox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_ibox_info_read,
	.llseek  = generic_file_llseek,
};

2003 2004
/*
 * Copy the queued (not yet delivered) SPU-to-PU wbox entries to user
 * space.  Caller holds the saved context and the csa register lock.
 */
static ssize_t __spufs_wbox_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	int i, cnt;
	u32 data[4];
	u32 wbox_stat;

	/* bits 8-15 of mb_stat_R hold the free-slot count; the queue
	 * depth is 4, so 4 - free = number of valid entries */
	wbox_stat = ctx->csa.prob.mb_stat_R;
	cnt = 4 - ((wbox_stat & 0x00ff00) >> 8);
	for (i = 0; i < cnt; i++) {
		data[i] = ctx->csa.spu_mailbox_data[i];
	}

	return simple_read_from_buffer(buf, len, pos, &data,
				cnt * sizeof(u32));
}

/* Read the saved wbox state; locks the saved context and registers. */
static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	spu_acquire_saved(ctx);
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_wbox_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_wbox_info_fops = {
	.open = spufs_info_open,
	.read = spufs_wbox_info_read,
	.llseek  = generic_file_llseek,
};

2044 2045
/*
 * Assemble a spu_dma_info snapshot (tag status query, tag mask, stall
 * and atomic command state, and the 16 SPU command-queue entries) from
 * the saved context and copy it to user space.  Caller holds the saved
 * context and the csa register lock.
 */
static ssize_t __spufs_dma_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	struct spu_dma_info info;
	struct mfc_cq_sr *qp, *spuqp;
	int i;

	info.dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
	info.dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
	info.dma_info_status = ctx->csa.spu_chnldata_RW[24];
	info.dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
	info.dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
	for (i = 0; i < 16; i++) {
		qp = &info.dma_info_command_data[i];
		spuqp = &ctx->csa.priv2.spuq[i];

		qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW;
		qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW;
		qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW;
		qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW;
	}

	return simple_read_from_buffer(buf, len, pos, &info,
				sizeof info);
}

/* Read the saved DMA state; locks the saved context and registers. */
static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
			      size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	spu_acquire_saved(ctx);
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_dma_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_dma_info_fops = {
	.open = spufs_info_open,
	.read = spufs_dma_info_read,
};

2093 2094
/*
 * Assemble a spu_proxydma_info snapshot (query type/mask/status and
 * the 8 PU command-queue entries) from the saved context and copy it
 * to user space.  Caller holds the saved context and register lock.
 */
static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx,
			char __user *buf, size_t len, loff_t *pos)
{
	struct spu_proxydma_info info;
	struct mfc_cq_sr *qp, *puqp;
	int ret = sizeof info;
	int i;

	/* unlike the other *_info files, partial reads are rejected */
	if (len < ret)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	info.proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
	info.proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
	info.proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;
	for (i = 0; i < 8; i++) {
		qp = &info.proxydma_info_command_data[i];
		puqp = &ctx->csa.priv2.puq[i];

		qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;
		qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;
		qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;
		qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;
	}

	return simple_read_from_buffer(buf, len, pos, &info,
				sizeof info);
}

/* Read the saved proxy DMA state; locks the saved context and registers. */
static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf,
				   size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	spu_acquire_saved(ctx);
	spin_lock(&ctx->csa.register_lock);
	ret = __spufs_proxydma_info_read(ctx, buf, len, pos);
	spin_unlock(&ctx->csa.register_lock);
	spu_release_saved(ctx);

	return ret;
}

static const struct file_operations spufs_proxydma_info_fops = {
	.open = spufs_info_open,
	.read = spufs_proxydma_info_read,
};

2144 2145 2146 2147 2148 2149 2150 2151 2152 2153 2154 2155 2156 2157 2158 2159 2160 2161 2162 2163
/* Show the thread id recorded for this context. */
static int spufs_show_tid(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;

	seq_printf(s, "%d\n", ctx->tid);
	return 0;
}

static int spufs_tid_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_show_tid, SPUFS_I(inode)->i_ctx);
}

static const struct file_operations spufs_tid_fops = {
	.open		= spufs_tid_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

2164 2165 2166 2167 2168
/* Labels for enum spu_utilization_state, printed by the stat file. */
static const char *ctx_state_names[] = {
	"user", "system", "iowait", "loaded"
};

/* Accumulated time (in milliseconds) the context spent in @state. */
static unsigned long long spufs_acct_time(struct spu_context *ctx,
		enum spu_utilization_state state)
{
	struct timespec ts;
	unsigned long long time = ctx->stats.times[state];

	/*
	 * In general, utilization statistics are updated by the controlling
	 * thread as the spu context moves through various well defined
	 * state transitions, but if the context is lazily loaded its
	 * utilization statistics are not updated as the controlling thread
	 * is not tightly coupled with the execution of the spu context.  We
	 * calculate and apply the time delta from the last recorded state
	 * of the spu context.
	 */
	if (ctx->spu && ctx->stats.util_state == state) {
		ktime_get_ts(&ts);
		time += timespec_to_ns(&ts) - ctx->stats.tstamp;
	}

	return time / NSEC_PER_MSEC;
}

/* Context SLB faults, plus the live SPU's delta while loaded. */
static unsigned long long spufs_slb_flts(struct spu_context *ctx)
{
	unsigned long long count = ctx->stats.slb_flt;

	if (ctx->state == SPU_STATE_RUNNABLE)
		count += ctx->spu->stats.slb_flt - ctx->stats.slb_flt_base;
	return count;
}

/* Context class 2 interrupts, plus the live SPU's delta while loaded. */
static unsigned long long spufs_class2_intrs(struct spu_context *ctx)
{
	unsigned long long count = ctx->stats.class2_intr;

	if (ctx->state == SPU_STATE_RUNNABLE)
		count += ctx->spu->stats.class2_intr -
			 ctx->stats.class2_intr_base;
	return count;
}


/* Emit one line of per-context utilization and fault statistics. */
static int spufs_show_stat(struct seq_file *s, void *private)
{
	struct spu_context *ctx = s->private;

	spu_acquire(ctx);
	seq_printf(s, "%s %llu %llu %llu %llu "
		      "%llu %llu %llu %llu %llu %llu %llu %llu\n",
		ctx_state_names[ctx->stats.util_state],
		spufs_acct_time(ctx, SPU_UTIL_USER),
		spufs_acct_time(ctx, SPU_UTIL_SYSTEM),
		spufs_acct_time(ctx, SPU_UTIL_IOWAIT),
		spufs_acct_time(ctx, SPU_UTIL_IDLE_LOADED),
		ctx->stats.vol_ctx_switch,
		ctx->stats.invol_ctx_switch,
		spufs_slb_flts(ctx),
		ctx->stats.hash_flt,
		ctx->stats.min_flt,
		ctx->stats.maj_flt,
		spufs_class2_intrs(ctx),
		ctx->stats.libassist);
	spu_release(ctx);
	return 0;
}

static int spufs_stat_open(struct inode *inode, struct file *file)
{
	return single_open(file, spufs_show_stat, SPUFS_I(inode)->i_ctx);
}

static const struct file_operations spufs_stat_fops = {
	.open		= spufs_stat_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};


2253
/* Directory contents of a normal (schedulable) spufs context. */
struct tree_descr spufs_dir_contents[] = {
	{ "capabilities", &spufs_caps_fops, 0444, },
	{ "mem",  &spufs_mem_fops,  0666, },
	{ "regs", &spufs_regs_fops,  0666, },
	{ "mbox", &spufs_mbox_fops, 0444, },
	{ "ibox", &spufs_ibox_fops, 0444, },
	{ "wbox", &spufs_wbox_fops, 0222, },
	{ "mbox_stat", &spufs_mbox_stat_fops, 0444, },
	{ "ibox_stat", &spufs_ibox_stat_fops, 0444, },
	{ "wbox_stat", &spufs_wbox_stat_fops, 0444, },
	{ "signal1", &spufs_signal1_fops, 0666, },
	{ "signal2", &spufs_signal2_fops, 0666, },
	{ "signal1_type", &spufs_signal1_type, 0666, },
	{ "signal2_type", &spufs_signal2_type, 0666, },
	{ "cntl", &spufs_cntl_fops,  0666, },
	{ "fpcr", &spufs_fpcr_fops, 0666, },
	{ "lslr", &spufs_lslr_ops, 0444, },
	{ "mfc", &spufs_mfc_fops, 0666, },
	{ "mss", &spufs_mss_fops, 0666, },
	{ "npc", &spufs_npc_ops, 0666, },
	{ "srr0", &spufs_srr0_ops, 0666, },
	{ "decr", &spufs_decr_ops, 0666, },
	{ "decr_status", &spufs_decr_status_ops, 0666, },
	{ "event_mask", &spufs_event_mask_ops, 0666, },
	{ "event_status", &spufs_event_status_ops, 0444, },
	{ "psmap", &spufs_psmap_fops, 0666, },
	{ "phys-id", &spufs_id_ops, 0666, },
	{ "object-id", &spufs_object_id_ops, 0666, },
	{ "mbox_info", &spufs_mbox_info_fops, 0444, },
	{ "ibox_info", &spufs_ibox_info_fops, 0444, },
	{ "wbox_info", &spufs_wbox_info_fops, 0444, },
	{ "dma_info", &spufs_dma_info_fops, 0444, },
	{ "proxydma_info", &spufs_proxydma_info_fops, 0444, },
	{ "tid", &spufs_tid_fops, 0444, },
	{ "stat", &spufs_stat_fops, 0444, },
	{},
};
2290 2291

struct tree_descr spufs_dir_nosched_contents[] = {
2292
	{ "capabilities", &spufs_caps_fops, 0444, },
2293 2294 2295 2296 2297 2298 2299
	{ "mem",  &spufs_mem_fops,  0666, },
	{ "mbox", &spufs_mbox_fops, 0444, },
	{ "ibox", &spufs_ibox_fops, 0444, },
	{ "wbox", &spufs_wbox_fops, 0222, },
	{ "mbox_stat", &spufs_mbox_stat_fops, 0444, },
	{ "ibox_stat", &spufs_ibox_stat_fops, 0444, },
	{ "wbox_stat", &spufs_wbox_stat_fops, 0444, },
2300 2301
	{ "signal1", &spufs_signal1_nosched_fops, 0222, },
	{ "signal2", &spufs_signal2_nosched_fops, 0222, },
2302 2303 2304 2305 2306 2307 2308 2309 2310
	{ "signal1_type", &spufs_signal1_type, 0666, },
	{ "signal2_type", &spufs_signal2_type, 0666, },
	{ "mss", &spufs_mss_fops, 0666, },
	{ "mfc", &spufs_mfc_fops, 0666, },
	{ "cntl", &spufs_cntl_fops,  0666, },
	{ "npc", &spufs_npc_ops, 0666, },
	{ "psmap", &spufs_psmap_fops, 0666, },
	{ "phys-id", &spufs_id_ops, 0666, },
	{ "object-id", &spufs_object_id_ops, 0666, },
2311
	{ "tid", &spufs_tid_fops, 0444, },
2312
	{ "stat", &spufs_stat_fops, 0444, },
2313 2314
	{},
};
2315 2316

struct spufs_coredump_reader spufs_coredump_read[] = {
2317 2318
	{ "regs", __spufs_regs_read, NULL, sizeof(struct spu_reg128[128])},
	{ "fpcr", __spufs_fpcr_read, NULL, sizeof(struct spu_reg128) },
2319 2320 2321
	{ "lslr", NULL, spufs_lslr_get, 19 },
	{ "decr", NULL, spufs_decr_get, 19 },
	{ "decr_status", NULL, spufs_decr_status_get, 19 },
2322 2323
	{ "mem", __spufs_mem_read, NULL, LS_SIZE, },
	{ "signal1", __spufs_signal1_read, NULL, sizeof(u32) },
2324
	{ "signal1_type", NULL, spufs_signal1_type_get, 19 },
2325
	{ "signal2", __spufs_signal2_read, NULL, sizeof(u32) },
2326 2327 2328
	{ "signal2_type", NULL, spufs_signal2_type_get, 19 },
	{ "event_mask", NULL, spufs_event_mask_get, 19 },
	{ "event_status", NULL, spufs_event_status_get, 19 },
2329 2330 2331 2332 2333 2334
	{ "mbox_info", __spufs_mbox_info_read, NULL, sizeof(u32) },
	{ "ibox_info", __spufs_ibox_info_read, NULL, sizeof(u32) },
	{ "wbox_info", __spufs_wbox_info_read, NULL, 4 * sizeof(u32)},
	{ "dma_info", __spufs_dma_info_read, NULL, sizeof(struct spu_dma_info)},
	{ "proxydma_info", __spufs_proxydma_info_read,
			   NULL, sizeof(struct spu_proxydma_info)},
2335 2336
	{ "object-id", NULL, spufs_object_id_get, 19 },
	{ "npc", NULL, spufs_npc_get, 19 },
2337
	{ NULL },
2338
};