/* fuc microcode for nvc0 PGRAPH/HUB
 *
 * Copyright 2011 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#ifdef INCLUDE_DATA
// bounds of the HUB mmio context list (see hub_mmio_list_base below)
hub_mmio_list_head:	.b32 #hub_mmio_list_base
hub_mmio_list_tail:	.b32 #hub_mmio_list_next

// GPC/ROP unit counts, filled in by init from 0x409604
gpc_count:		.b32 0
rop_count:		.b32 0
cmd_queue:		queue_init

// pointer (vram address >> 8) of the current channel's PGRAPH context
ctx_current:		.b32 0

.align 256
chan_data:
chan_mmio_count:	.b32 0
chan_mmio_address:	.b32 0

// scratch buffer for xdld/xdst transfers
.align 256
xfer_data: 		.skip 256

hub_mmio_list_base:
.b32 0x0417e91c // 0x17e91c, 2
hub_mmio_list_next:
#endif

#ifdef INCLUDE_CODE
// reports an exception to the host
//
// In: $r15 error code (see os.h)
//
// Writes the code to CC_SCRATCH[5] and raises an INTR_UP so the
// host driver notices.  Clobbers $r15.
//
error:
	nv_iowr(NV_PGRAPH_FECS_CC_SCRATCH_VAL(5), 0, $r15)
	mov $r15 1
	nv_iowr(NV_PGRAPH_FECS_INTR_UP_SET, 0, $r15)
	ret

// HUB fuc initialisation, executed by triggering ucode start, will
// fall through to main loop after completion.
//
// Output:
//   CC_SCRATCH[0]:
//	     31:31: set to signal completion
//   CC_SCRATCH[1]:
//	      31:0: total PGRAPH context size
//
init:
	clear b32 $r0
	mov $xdbase $r0

	// setup stack
	nv_iord($r1, NV_PGRAPH_FECS_CAPS, 0)
	extr $r1 $r1 9:17
	shl b32 $r1 8
	mov $sp $r1

	// enable fifo access
	mov $r2 NV_PGRAPH_FECS_ACCESS_FIFO
	nv_iowr(NV_PGRAPH_FECS_ACCESS, 0, $r2)

	// setup i0 handler, and route all interrupts to it
	mov $r1 #ih
	mov $iv0 $r1

	clear b32 $r2
	nv_iowr(NV_PGRAPH_FECS_INTR_ROUTE, 0, $r2)

	// route HUB_CHSW_PULSE to fuc interrupt 8
	mov $r2 0x2003		// { HUB_CHSW_PULSE, ZERO } -> intr 8
	nv_iowr(NV_PGRAPH_FECS_IROUTE, 0, $r2)

	// not sure what these are, route them because NVIDIA does, and
	// the IRQ handler will signal the host if we ever get one.. we
	// may find out if/why we need to handle these if so..
	//
	mov $r2 0x2004		// { 0x04, ZERO } -> intr 9
	nv_iowr(NV_PGRAPH_FECS_IROUTE, 1, $r2)
	mov $r2 0x200b		// { HUB_FIRMWARE_MTHD, ZERO } -> intr 10
	nv_iowr(NV_PGRAPH_FECS_IROUTE, 2, $r2)
	mov $r2 0x200c		// { 0x0c, ZERO } -> intr 15
	nv_iowr(NV_PGRAPH_FECS_IROUTE, 7, $r2)

	// enable all INTR_UP interrupts
	sub b32 $r3 $r0 1	// $r3 = 0xffffffff
	nv_iowr(NV_PGRAPH_FECS_INTR_UP_EN, 0, $r3)

	// enable fifo, ctxsw, 9, fwmthd, 15 interrupts
	imm32($r2, 0x8704)
	nv_iowr(NV_PGRAPH_FECS_INTR_EN_SET, 0, $r2)

	// fifo level triggered, rest edge
	mov $r2 NV_PGRAPH_FECS_INTR_MODE_FIFO_LEVEL
	nv_iowr(NV_PGRAPH_FECS_INTR_MODE, 0, $r2)

	// enable interrupts
	bset $flags ie0

	// fetch enabled GPC/ROP counts
	nv_rd32($r14, 0x409604)
	extr $r1 $r15 16:20
	st b32 D[$r0 + #rop_count] $r1
	and $r15 0x1f
	st b32 D[$r0 + #gpc_count] $r15

	// set BAR_REQMASK to GPC mask
	mov $r1 1
	shl b32 $r1 $r15
	sub b32 $r1 1
	nv_iowr(NV_PGRAPH_FECS_BAR_MASK0, 0, $r1)
	nv_iowr(NV_PGRAPH_FECS_BAR_MASK1, 0, $r1)

	// context size calculation, reserve first 256 bytes for use by fuc
	mov $r1 256

	//
	mov $r15 2
	call(ctx_4170s)
	call(ctx_4170w)
	mov $r15 0x10
	call(ctx_86c)

	// calculate size of mmio context data
	ld b32 $r14 D[$r0 + #hub_mmio_list_head]
	ld b32 $r15 D[$r0 + #hub_mmio_list_tail]
	call(mmctx_size)

	// set mmctx base addresses now so we don't have to do it later,
	// they don't (currently) ever change
	shr b32 $r4 $r1 8
	nv_iowr(NV_PGRAPH_FECS_MMCTX_SAVE_SWBASE, 0, $r4)
	nv_iowr(NV_PGRAPH_FECS_MMCTX_LOAD_SWBASE, 0, $r4)
	add b32 $r3 0x1300
	add b32 $r1 $r15
	shr b32 $r15 2
	nv_iowr(NV_PGRAPH_FECS_MMCTX_LOAD_COUNT, 0, $r15) // wtf??

	// strands, base offset needs to be aligned to 256 bytes
	shr b32 $r1 8
	add b32 $r1 1
	shl b32 $r1 8
	mov b32 $r15 $r1
	call(strand_ctx_init)
	add b32 $r1 $r15

	// initialise each GPC in sequence by passing in the offset of its
	// context data in GPCn_CC_SCRATCH[1], and starting its FUC (which
	// has previously been uploaded by the host) running.
	//
	// the GPC fuc init sequence will set GPCn_CC_SCRATCH[0] bit 31
	// when it has completed, and return the size of its context data
	// in GPCn_CC_SCRATCH[1]
	//
	ld b32 $r3 D[$r0 + #gpc_count]
	imm32($r4, 0x502000)		// $r4 = GPC0 falcon mmio base
	init_gpc:
		// setup, and start GPC ucode running
		add b32 $r14 $r4 0x804
		mov b32 $r15 $r1
		call(nv_wr32)			// CC_SCRATCH[1] = ctx offset
		add b32 $r14 $r4 0x10c
		clear b32 $r15
		call(nv_wr32)
		add b32 $r14 $r4 0x104
		call(nv_wr32)			// ENTRY
		add b32 $r14 $r4 0x100
		mov $r15 2			// CTRL_START_TRIGGER
		call(nv_wr32)			// CTRL

		// wait for it to complete, and adjust context size
		add b32 $r14 $r4 0x800
		init_gpc_wait:
			call(nv_rd32)
			xbit $r15 $r15 31
			bra e #init_gpc_wait
		add b32 $r14 $r4 0x804
		call(nv_rd32)
		add b32 $r1 $r15

		// next!
		add b32 $r4 0x8000
		sub b32 $r3 1
		bra ne #init_gpc

	//
	mov $r15 0
	call(ctx_86c)
	mov $r15 0
	call(ctx_4170s)

	// save context size, and tell host we're ready
	nv_iowr(NV_PGRAPH_FECS_CC_SCRATCH_VAL(1), 0, $r1)
	clear b32 $r1
	bset $r1 31
	nv_iowr(NV_PGRAPH_FECS_CC_SCRATCH_SET(0), 0, $r1)

// Main program loop, very simple, sleeps until woken up by the interrupt
// handler, pulls a command from the queue and executes its handler
//
main:
	// sleep until we have something to do
	bset $flags $p0
	sleep $p0
	mov $r13 #cmd_queue
	call(queue_get)
	bra $p1 #main		// queue empty, go back to sleep

	// context switch, requested by GPU?
	cmpu b32 $r14 0x4001
	bra ne #main_not_ctx_switch
		trace_set(T_AUTO)
		nv_iord($r1, NV_PGRAPH_FECS_CHAN_ADDR, 0)
		nv_iord($r2, NV_PGRAPH_FECS_CHAN_NEXT, 0)

		// bit 31 of CHAN_ADDR/CHAN_NEXT = channel valid
		xbit $r3 $r1 31
		bra e #chsw_no_prev
			xbit $r3 $r2 31
			bra e #chsw_prev_no_next
				// save previous channel, then load next
				push $r2
				mov b32 $r2 $r1
				trace_set(T_SAVE)
				bclr $flags $p1
				bset $flags $p2
				call(ctx_xfer)
				trace_clr(T_SAVE);
				pop $r2
				trace_set(T_LOAD);
				bset $flags $p1
				call(ctx_xfer)
				trace_clr(T_LOAD);
				bra #chsw_done
			chsw_prev_no_next:
				// save previous channel only
				push $r2
				mov b32 $r2 $r1
				bclr $flags $p1
				bclr $flags $p2
				call(ctx_xfer)
				pop $r2
				nv_iowr(NV_PGRAPH_FECS_CHAN_ADDR, 0, $r2)
				bra #chsw_done
		chsw_no_prev:
			xbit $r3 $r2 31
			bra e #chsw_done
				// load next channel only
				bset $flags $p1
				bclr $flags $p2
				call(ctx_xfer)

		// ack the context switch request
		chsw_done:
		mov $r2 NV_PGRAPH_FECS_CHSW_ACK
		nv_iowr(NV_PGRAPH_FECS_CHSW, 0, $r2)
		trace_clr(T_AUTO)
		bra #main

	// request to set current channel? (*not* a context switch)
	main_not_ctx_switch:
	cmpu b32 $r14 0x0001
	bra ne #main_not_ctx_chan
		mov b32 $r2 $r15
		call(ctx_chan)
		bra #main_done

	// request to store current channel context?
	main_not_ctx_chan:
	cmpu b32 $r14 0x0002
	bra ne #main_not_ctx_save
		trace_set(T_SAVE)
		bclr $flags $p1
		bclr $flags $p2
		call(ctx_xfer)
		trace_clr(T_SAVE)
		bra #main_done

	// unknown command, report it to the host
	main_not_ctx_save:
		shl b32 $r15 $r14 16
		or $r15 E_BAD_COMMAND
		call(error)
		bra #main

	// signal completion to host via CC_SCRATCH[0] bit 31
	main_done:
	clear b32 $r2
	bset $r2 31
	nv_iowr(NV_PGRAPH_FECS_CC_SCRATCH_SET(0), 0, $r2)
	bra #main

// interrupt handler
//
// Decodes NV_PGRAPH_FECS_INTR, queues work for main(), forwards anything
// unhandled to the host, then acks.  All registers used are saved and
// restored; returns via iret.
//
ih:
	push $r8
	mov $r8 $flags
	push $r8
	push $r9
	push $r10
	push $r11
	push $r13
	push $r14
	push $r15
	clear b32 $r0

	// incoming fifo command?
	nv_iord($r10, NV_PGRAPH_FECS_INTR, 0)
	and $r11 $r10 NV_PGRAPH_FECS_INTR_FIFO
	bra e #ih_no_fifo
		// queue incoming fifo command for later processing
		mov $r13 #cmd_queue
		nv_iord($r14, NV_PGRAPH_FECS_FIFO_CMD, 0)
		nv_iord($r15, NV_PGRAPH_FECS_FIFO_DATA, 0)
		call(queue_put)
		add b32 $r11 0x400
		mov $r14 1
		nv_iowr(NV_PGRAPH_FECS_FIFO_ACK, 0, $r14)

	// context switch request?
	ih_no_fifo:
	and $r11 $r10 NV_PGRAPH_FECS_INTR_CHSW
	bra e #ih_no_ctxsw
		// enqueue a context switch for later processing
		mov $r13 #cmd_queue
		mov $r14 0x4001
		call(queue_put)

	// firmware method?
	ih_no_ctxsw:
	and $r11 $r10 NV_PGRAPH_FECS_INTR_FWMTHD
	bra e #ih_no_fwmthd
		// none we handle; report to host and ack
		nv_rd32($r15, NV_PGRAPH_TRAPPED_DATA_LO)
		nv_iowr(NV_PGRAPH_FECS_CC_SCRATCH_VAL(4), 0, $r15)
		nv_rd32($r15, NV_PGRAPH_TRAPPED_ADDR)
		nv_iowr(NV_PGRAPH_FECS_CC_SCRATCH_VAL(3), 0, $r15)
		extr $r14 $r15 16:18
		shl b32 $r14 $r14 2
		imm32($r15, NV_PGRAPH_FE_OBJECT_TABLE(0))
		add b32 $r14 $r15
		call(nv_rd32)
		nv_iowr(NV_PGRAPH_FECS_CC_SCRATCH_VAL(2), 0, $r15)
		mov $r15 E_BAD_FWMTHD
		call(error)
		mov $r11 0x100
		nv_wr32(0x400144, $r11)

	// anything we didn't handle, bring it to the host's attention
	ih_no_fwmthd:
	mov $r11 0x504 // FIFO | CHSW | FWMTHD
	not b32 $r11
	and $r11 $r10 $r11
	bra e #ih_no_other
		nv_iowr(NV_PGRAPH_FECS_INTR_UP_SET, 0, $r11)

	// ack, and wake up main()
	ih_no_other:
	nv_iowr(NV_PGRAPH_FECS_INTR_ACK, 0, $r10)

	pop $r15
	pop $r14
	pop $r13
	pop $r11
	pop $r10
	pop $r9
	pop $r8
	mov $flags $r8
	pop $r8
	bclr $flags $p0
	iret

#if CHIPSET < GK100
// Not real sure, but, MEM_CMD 7 will hang forever if this isn't done
//
// Sets 0x404160 bit 0 and spins until bit 4 reads back set.
// Clobbers $r15.
ctx_4160s:
	mov $r15 1
	nv_wr32(0x404160, $r15)
	ctx_4160s_wait:
		nv_rd32($r15, 0x404160)
		xbit $r15 $r15 4
		bra e #ctx_4160s_wait
	ret

// Without clearing again at end of xfer, some things cause PGRAPH
// to hang with STATUS=0x00000007 until it's cleared.. fbcon can
// still function with it set however...
//
// Clobbers $r15.
ctx_4160c:
	clear b32 $r15
	nv_wr32(0x404160, $r15)
	ret
#endif

// Again, not real sure
//
// In: $r15 value to set 0x404170 to (bit 4 is forced on)
//
ctx_4170s:
	or $r15 0x10
	nv_wr32(0x404170, $r15)
	ret

// Waits for a ctx_4170s() call to complete
//
// Spins until 0x404170 bit 4 reads back clear.  Clobbers $r15.
//
ctx_4170w:
	nv_rd32($r15, 0x404170)
	and $r15 0x10
	bra ne #ctx_4170w
	ret

// Disables various things, waits a bit, and re-enables them..
//
// Not sure how exactly this helps, perhaps "ENABLE" is not such a
// good description for the bits we turn off?  Anyways, without this,
// funny things happen.
//
// Clobbers $r14, $r15.
//
ctx_redswitch:
	mov $r14 NV_PGRAPH_FECS_RED_SWITCH_ENABLE_GPC
	or  $r14 NV_PGRAPH_FECS_RED_SWITCH_POWER_ROP
	or  $r14 NV_PGRAPH_FECS_RED_SWITCH_POWER_GPC
	or  $r14 NV_PGRAPH_FECS_RED_SWITCH_POWER_MAIN
	nv_iowr(NV_PGRAPH_FECS_RED_SWITCH, 0, $r14)
	mov $r15 8
	ctx_redswitch_delay:		// short busy-wait before re-enable
		sub b32 $r15 1
		bra ne #ctx_redswitch_delay
	or  $r14 NV_PGRAPH_FECS_RED_SWITCH_ENABLE_ROP
	or  $r14 NV_PGRAPH_FECS_RED_SWITCH_ENABLE_MAIN
	nv_iowr(NV_PGRAPH_FECS_RED_SWITCH, 0, $r14)
	ret

// Not a clue what this is for, except that unless the value is 0x10, the
// strand context is saved (and presumably restored) incorrectly..
//
// In: $r15 value to set to (0x00/0x10 are used)
//
// Written to the FECS copy, 0x408a14, and the GPC broadcast copy.
//
ctx_86c:
	nv_iowr(NV_PGRAPH_FECS_UNK86C, 0, $r15)
	nv_wr32(0x408a14, $r15)
	nv_wr32(NV_PGRAPH_GPCX_GPCCS_UNK86C, $r15)
	ret

// Issue a memory command and wait for it to complete
//
// In: $r15 NV_PGRAPH_FECS_MEM_CMD_*
//
// Spins until MEM_CMD reads back zero.  Clobbers $r15.
ctx_mem:
	nv_iowr(NV_PGRAPH_FECS_MEM_CMD, 0, $r15)
	ctx_mem_wait:
		nv_iord($r15, NV_PGRAPH_FECS_MEM_CMD, 0)
		or $r15 $r15
		bra ne #ctx_mem_wait
	ret

// ctx_load - load's a channel's ctxctl data, and selects its vm
//
// In: $r2 channel address
//
// Clobbers $r1, $r2, $r10, $r15; updates #ctx_current.
//
ctx_load:
	trace_set(T_CHAN)

	// switch to channel, somewhat magic in parts..
	mov $r10 12		// DONE_UNK12
	call(wait_donez)
	clear b32 $r15
	nv_iowr(0x409a24, 0, $r15)
	nv_iowr(NV_PGRAPH_FECS_CHAN_NEXT, 0, $r2)
	nv_iowr(NV_PGRAPH_FECS_MEM_CHAN, 0, $r2)
	mov $r15 NV_PGRAPH_FECS_MEM_CMD_LOAD_CHAN
	call(ctx_mem)
	nv_iowr(NV_PGRAPH_FECS_CHAN_ADDR, 0, $r2)

	// load channel header, fetch PGRAPH context pointer
	mov $xtargets $r0
	bclr $r2 31
	shl b32 $r2 4
	add b32 $r2 2

	trace_set(T_LCHAN)
	nv_iowr(NV_PGRAPH_FECS_MEM_BASE, 0, $r2)
	imm32($r2, NV_PGRAPH_FECS_MEM_TARGET_UNK31)
	or  $r2 NV_PGRAPH_FECS_MEM_TARGET_AS_VRAM
	nv_iowr(NV_PGRAPH_FECS_MEM_TARGET, 0, $r2)
	mov $r1 0x10			// chan + 0x0210
	mov $r2 #xfer_data
	sethi $r2 0x00020000		// 16 bytes
	xdld $r1 $r2
	xdwait
	trace_clr(T_LCHAN)

	// update current context
	ld b32 $r1 D[$r0 + #xfer_data + 4]
	shl b32 $r1 24
	ld b32 $r2 D[$r0 + #xfer_data + 0]
	shr b32 $r2 8
	or $r1 $r2
	st b32 D[$r0 + #ctx_current] $r1

	// set transfer base to start of context, and fetch context header
	trace_set(T_LCTXH)
	nv_iowr(NV_PGRAPH_FECS_MEM_BASE, 0, $r1)
	mov $r2 NV_PGRAPH_FECS_MEM_TARGET_AS_VM
	nv_iowr(NV_PGRAPH_FECS_MEM_TARGET, 0, $r2)
	mov $r1 #chan_data
	sethi $r1 0x00060000		// 256 bytes
	xdld $r0 $r1
	xdwait
	trace_clr(T_LCTXH)

	trace_clr(T_CHAN)
	ret

// ctx_chan - handler for HUB_SET_CHAN command, will set a channel as
//            the active channel for ctxctl, but not actually transfer
//            any context data.  intended for use only during initial
//            context construction.
//
// In: $r2 channel address
//
ctx_chan:
#if CHIPSET < GK100
	call(ctx_4160s)
#endif
	call(ctx_load)
	mov $r10 12			// DONE_UNK12
	call(wait_donez)
	mov $r15 5 // MEM_CMD 5 ???
	call(ctx_mem)
#if CHIPSET < GK100
	call(ctx_4160c)
#endif
	ret

// Execute per-context state overrides list
//
// Only executed on the first load of a channel.  Might want to look into
// removing this and having the host directly modify the channel's context
// to change this state...  The nouveau DRM already builds this list as
// it's definitely needed for NVIDIA's, so we may as well use it for now
//
// Input: $r1 mmio list length
//
// List entries are 8 bytes: { register, value }.  Clobbers $r1, $r3-$r5,
// $r14, $r15; clears #chan_mmio_count when done.
//
ctx_mmio_exec:
	// set transfer base to be the mmio list
	ld b32 $r3 D[$r0 + #chan_mmio_address]
	nv_iowr(NV_PGRAPH_FECS_MEM_BASE, 0, $r3)

	clear b32 $r3
	ctx_mmio_loop:
		// fetch next 256 bytes of mmio list if necessary
		and $r4 $r3 0xff
		bra ne #ctx_mmio_pull
			mov $r5 #xfer_data
			sethi $r5 0x00060000	// 256 bytes
			xdld $r3 $r5
			xdwait

		// execute a single list entry
		ctx_mmio_pull:
		ld b32 $r14 D[$r4 + #xfer_data + 0x00]
		ld b32 $r15 D[$r4 + #xfer_data + 0x04]
		call(nv_wr32)

		// next!
		add b32 $r3 8
		sub b32 $r1 1
		bra ne #ctx_mmio_loop

	// set transfer base back to the current context
	ctx_mmio_done:
	ld b32 $r3 D[$r0 + #ctx_current]
	nv_iowr(NV_PGRAPH_FECS_MEM_BASE, 0, $r3)

	// disable the mmio list now, we don't need/want to execute it again
	st b32 D[$r0 + #chan_mmio_count] $r0
	mov $r1 #chan_data
	sethi $r1 0x00060000		// 256 bytes
	xdst $r0 $r1
	xdwait
	ret

// Transfer HUB context data between GPU and storage area
//
// In: $r2 channel address
//     $p1 clear on save, set on load
//     $p2 set if opposite direction done/will be done, so:
//		on save it means: "a load will follow this save"
//		on load it means: "a save preceded this load"
//
ctx_xfer:
	// according to mwk, some kind of wait for idle
	mov $r14 4
	nv_iowr(0x409c08, 0, $r14)
	ctx_xfer_idle:
		nv_iord($r14, 0x409c00, 0)
		and $r14 0x2000
		bra ne #ctx_xfer_idle

	bra not $p1 #ctx_xfer_pre
	bra $p2 #ctx_xfer_pre_load
	ctx_xfer_pre:
		mov $r15 0x10
		call(ctx_86c)
#if CHIPSET < GK100
		call(ctx_4160s)
#endif
		bra not $p1 #ctx_xfer_exec

	ctx_xfer_pre_load:
		mov $r15 2
		call(ctx_4170s)
		call(ctx_4170w)
		call(ctx_redswitch)
		clear b32 $r15
		call(ctx_4170s)
		call(ctx_load)

	// fetch context pointer, and initiate xfer on all GPCs
	ctx_xfer_exec:
	ld b32 $r1 D[$r0 + #ctx_current]

	clear b32 $r2
	nv_iowr(NV_PGRAPH_FECS_BAR, 0, $r2)

	nv_wr32(0x41a500, $r1)	// GPC_BCAST_WRCMD_DATA = ctx pointer
	xbit $r15 $flags $p1
	xbit $r2 $flags $p2
	shl b32 $r2 1
	or $r15 $r2
	nv_wr32(0x41a504, $r15)	// GPC_BCAST_WRCMD_CMD = GPC_XFER(type)

	// strands
	call(strand_pre)
	clear b32 $r2
	nv_iowr(NV_PGRAPH_FECS_STRAND_SELECT, 0x3f, $r2)
	xbit $r2 $flags $p1	// SAVE/LOAD
	add b32 $r2 NV_PGRAPH_FECS_STRAND_CMD_SAVE
	nv_iowr(NV_PGRAPH_FECS_STRAND_CMD, 0x3f, $r2)

	// mmio context
	xbit $r10 $flags $p1	// direction
	or $r10 6		// first, last
	mov $r11 0		// base = 0
	ld b32 $r12 D[$r0 + #hub_mmio_list_head]
	ld b32 $r13 D[$r0 + #hub_mmio_list_tail]
	mov $r14 0		// not multi
	call(mmctx_xfer)

	// wait for GPCs to all complete
	mov $r10 8		// DONE_BAR
	call(wait_doneo)

	// wait for strand xfer to complete
	call(strand_wait)

	// post-op
	bra $p1 #ctx_xfer_post
		mov $r10 12		// DONE_UNK12
		call(wait_donez)
		mov $r15 5 // MEM_CMD 5 ???
		call(ctx_mem)

	bra $p2 #ctx_xfer_done
	ctx_xfer_post:
		mov $r15 2
		call(ctx_4170s)
		clear b32 $r15
		call(ctx_86c)
		call(strand_post)
		call(ctx_4170w)
		clear b32 $r15
		call(ctx_4170s)

		// execute the channel's mmio override list on first load
		bra not $p1 #ctx_xfer_no_post_mmio
		ld b32 $r1 D[$r0 + #chan_mmio_count]
		or $r1 $r1
		bra e #ctx_xfer_no_post_mmio
			call(ctx_mmio_exec)

		ctx_xfer_no_post_mmio:
#if CHIPSET < GK100
		call(ctx_4160c)
#endif

	ctx_xfer_done:
	ret
#endif