/*
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/**
 * DOC: Shader validator for VC4.
 *
 * Since the VC4 has no IOMMU between it and system memory, a user
 * with access to execute shaders could escalate privilege by
 * overwriting system memory (using the VPM write address register in
 * the general-purpose DMA mode) or reading system memory it shouldn't
 * (reading it as a texture, uniform data, or direct-addressed TMU
 * lookup).
 *
 * The shader validator walks over a shader's BO, ensuring that its
 * accesses are appropriately bounded, and recording where texture
 * accesses are made so that we can do relocations for them in the
 * uniform stream.
 *
 * Shader BOs are immutable for their lifetimes (enforced by not
 * allowing mmaps, GEM prime export, or rendering to them from a CL),
 * so this validation is only performed at BO creation time.
 */

#include "vc4_drv.h"
#include "vc4_qpu_defines.h"

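/* Size of the live-register tracking space: indices 0-31 are regfile A,
 * 32-63 are regfile B, and 64-67 are the accumulators r0-r3 (see
 * waddr_to_live_reg_index() below).
 */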
#define LIVE_REG_COUNT (32 + 32 + 4)

struct vc4_shader_validation_state {
	/* Current IP being validated. */
	uint32_t ip;

	/* IP at the end of the BO; do not read shader[max_ip]. */
	uint32_t max_ip;

	uint64_t *shader;

	struct vc4_texture_sample_info tmu_setup[2];
	int tmu_write_count[2];

	/* For registers that were last written to by a MIN instruction with
	 * one argument being a uniform, the address of the uniform.
	 * Otherwise, ~0.
	 *
	 * This is used for the validation of direct address memory reads.
	 */
	uint32_t live_min_clamp_offsets[LIVE_REG_COUNT];
	bool live_max_clamp_regs[LIVE_REG_COUNT];
	uint32_t live_immediates[LIVE_REG_COUNT];

	/* Bitfield of which IPs are used as branch targets.
	 *
	 * Used to validate that the uniform stream is updated at the right
	 * points, and to clear the texturing/clamping state.
	 */
	unsigned long *branch_targets;

	/* Set when entering a basic block, and cleared when the uniform
	 * address update is found.  This is used to make sure that we don't
	 * read uniforms when the address is undefined.
	 */
	bool needs_uniform_address_update;

	/* Set when we find a backwards branch.  If the branch is backwards,
	 * the target is probably doing an address reset to read uniforms,
	 * and so we need to be sure that a uniform address is present in the
	 * stream, even if the shader didn't need to read uniforms in later
	 * basic blocks.
	 */
	bool needs_uniform_address_for_loop;

	/* Set when we find an instruction writing the top half of the
	 * register files.  If we allowed writing the unusable regs in
	 * a threaded shader, then the clamp validation of the other
	 * shader running on our QPU would be invalid.
	 */
	bool all_registers_used;
};

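/* Maps a write address onto the live-register index space described
 * above, or ~0 if the destination is not one we track.
 */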
static uint32_t
waddr_to_live_reg_index(uint32_t waddr, bool is_b)
{
	if (waddr < 32) {
		if (is_b)
			return 32 + waddr;
		else
			return waddr;
	} else if (waddr <= QPU_W_ACC3) {
		return 64 + waddr - QPU_W_ACC0;
	} else {
		return ~0;
	}
}

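/* Returns the live-register index feeding the ADD op's first (A)
 * argument, or ~0 if it doesn't come from a tracked register.
 */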
static uint32_t
raddr_add_a_to_live_reg_index(uint64_t inst)
{
	uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
	uint32_t add_a = QPU_GET_FIELD(inst, QPU_ADD_A);
	uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
	uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);

	if (add_a == QPU_MUX_A)
		return raddr_a;
	else if (add_a == QPU_MUX_B && sig != QPU_SIG_SMALL_IMM)
		return 32 + raddr_b;
	else if (add_a <= QPU_MUX_R3)
		return 64 + add_a;
	else
		return ~0;
}

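/* In a threaded shader, each of the two threads may only use the lower
 * half (rows 0-15) of each register file.
 */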
static bool
live_reg_is_upper_half(uint32_t lri)
{
	return	(lri >= 16 && lri < 32) ||
		(lri >= 32 + 16 && lri < 32 + 32);
}

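/* Writing the TMU's S coordinate register is what actually submits the
 * texture lookup, so it terminates a sequence of TMU parameter writes.
 */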
static bool
is_tmu_submit(uint32_t waddr)
{
	return (waddr == QPU_W_TMU0_S ||
		waddr == QPU_W_TMU1_S);
}

static bool
is_tmu_write(uint32_t waddr)
{
	return (waddr >= QPU_W_TMU0_S &&
		waddr <= QPU_W_TMU1_B);
}

static bool
record_texture_sample(struct vc4_validated_shader_info *validated_shader,
		      struct vc4_shader_validation_state *validation_state,
		      int tmu)
{
	uint32_t s = validated_shader->num_texture_samples;
	int i;
	struct vc4_texture_sample_info *temp_samples;

	temp_samples = krealloc(validated_shader->texture_samples,
				(s + 1) * sizeof(*temp_samples),
				GFP_KERNEL);
	if (!temp_samples)
		return false;

	memcpy(&temp_samples[s],
	       &validation_state->tmu_setup[tmu],
	       sizeof(*temp_samples));

	validated_shader->num_texture_samples = s + 1;
	validated_shader->texture_samples = temp_samples;

	for (i = 0; i < 4; i++)
		validation_state->tmu_setup[tmu].p_offset[i] = ~0;

	return true;
}

static bool
check_tmu_write(struct vc4_validated_shader_info *validated_shader,
		struct vc4_shader_validation_state *validation_state,
		bool is_mul)
{
	uint64_t inst = validation_state->shader[validation_state->ip];
	uint32_t waddr = (is_mul ?
			  QPU_GET_FIELD(inst, QPU_WADDR_MUL) :
			  QPU_GET_FIELD(inst, QPU_WADDR_ADD));
	uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
	uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
	int tmu = waddr > QPU_W_TMU0_B;
	bool submit = is_tmu_submit(waddr);
	bool is_direct = submit && validation_state->tmu_write_count[tmu] == 0;
	uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);

	if (is_direct) {
		uint32_t add_b = QPU_GET_FIELD(inst, QPU_ADD_B);
		uint32_t clamp_reg, clamp_offset;

		if (sig == QPU_SIG_SMALL_IMM) {
			DRM_DEBUG("direct TMU read used small immediate\n");
			return false;
		}

		/* Make sure that this texture load is an add of the base
		 * address of the UBO to a clamped offset within the UBO.
		 */
		if (is_mul ||
		    QPU_GET_FIELD(inst, QPU_OP_ADD) != QPU_A_ADD) {
			DRM_DEBUG("direct TMU load wasn't an add\n");
			return false;
		}

		/* We assert that the clamped address is the first
		 * argument, and the UBO base address is the second argument.
		 * This is arbitrary, but simpler than supporting flipping the
		 * two either way.
		 */
		clamp_reg = raddr_add_a_to_live_reg_index(inst);
		if (clamp_reg == ~0) {
			DRM_DEBUG("direct TMU load wasn't clamped\n");
			return false;
		}

		clamp_offset = validation_state->live_min_clamp_offsets[clamp_reg];
		if (clamp_offset == ~0) {
			DRM_DEBUG("direct TMU load wasn't clamped\n");
			return false;
		}

		/* Store the clamp value's offset in p1 (see reloc_tex() in
		 * vc4_validate.c).
		 */
		validation_state->tmu_setup[tmu].p_offset[1] =
			clamp_offset;

		if (!(add_b == QPU_MUX_A && raddr_a == QPU_R_UNIF) &&
		    !(add_b == QPU_MUX_B && raddr_b == QPU_R_UNIF)) {
			DRM_DEBUG("direct TMU load didn't add to a uniform\n");
			return false;
		}

		validation_state->tmu_setup[tmu].is_direct = true;
	} else {
		if (raddr_a == QPU_R_UNIF || (sig != QPU_SIG_SMALL_IMM &&
					      raddr_b == QPU_R_UNIF)) {
			DRM_DEBUG("uniform read in the same instruction as "
				  "texture setup.\n");
			return false;
		}
	}

	if (validation_state->tmu_write_count[tmu] >= 4) {
		DRM_DEBUG("TMU%d got too many parameters before dispatch\n",
			  tmu);
		return false;
	}
	validation_state->tmu_setup[tmu].p_offset[validation_state->tmu_write_count[tmu]] =
		validated_shader->uniforms_size;
	validation_state->tmu_write_count[tmu]++;
	/* Since direct uses a RADDR uniform reference, it will get counted in
	 * check_instruction_reads()
	 */
	if (!is_direct) {
		if (validation_state->needs_uniform_address_update) {
			DRM_DEBUG("Texturing with undefined uniform address\n");
			return false;
		}

		validated_shader->uniforms_size += 4;
	}

	if (submit) {
		if (!record_texture_sample(validated_shader,
					   validation_state, tmu)) {
			return false;
		}

		validation_state->tmu_write_count[tmu] = 0;
	}

	return true;
}

static bool require_uniform_address_uniform(struct vc4_validated_shader_info *validated_shader)
{
	uint32_t o = validated_shader->num_uniform_addr_offsets;
	uint32_t num_uniforms = validated_shader->uniforms_size / 4;

	validated_shader->uniform_addr_offsets =
		krealloc(validated_shader->uniform_addr_offsets,
			 (o + 1) *
			 sizeof(*validated_shader->uniform_addr_offsets),
			 GFP_KERNEL);
	if (!validated_shader->uniform_addr_offsets)
		return false;

	validated_shader->uniform_addr_offsets[o] = num_uniforms;
	validated_shader->num_uniform_addr_offsets++;

	return true;
}

static bool
validate_uniform_address_write(struct vc4_validated_shader_info *validated_shader,
			       struct vc4_shader_validation_state *validation_state,
			       bool is_mul)
{
	uint64_t inst = validation_state->shader[validation_state->ip];
	u32 add_b = QPU_GET_FIELD(inst, QPU_ADD_B);
	u32 raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
	u32 raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
	u32 add_lri = raddr_add_a_to_live_reg_index(inst);
	/* We want our reset to be pointing at whatever uniform follows the
	 * uniforms base address.
	 */
	u32 expected_offset = validated_shader->uniforms_size + 4;

	/* We only support absolute uniform address changes, and we
	 * require that they be in the current basic block before any
	 * of its uniform reads.
	 *
	 * One could potentially emit more efficient QPU code, by
	 * noticing that (say) an if statement does uniform control
	 * flow for all threads and that the if reads the same number
	 * of uniforms on each side.  However, this scheme is easy to
	 * validate so it's all we allow for now.
	 */
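	/* The only pattern we accept is therefore, sketched in illustrative
	 * QPU assembly (the register name is arbitrary):
	 *
	 *     load_imm  r2, <offset of the uniform after the address uniform>
	 *     add       unif_addr, r2, unif
	 *
	 * which the checks below verify piece by piece.
	 */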
	switch (QPU_GET_FIELD(inst, QPU_SIG)) {
	case QPU_SIG_NONE:
	case QPU_SIG_SCOREBOARD_UNLOCK:
	case QPU_SIG_COLOR_LOAD:
	case QPU_SIG_LOAD_TMU0:
	case QPU_SIG_LOAD_TMU1:
		break;
	default:
		DRM_DEBUG("uniforms address change must be "
			  "normal math\n");
		return false;
	}

	if (is_mul || QPU_GET_FIELD(inst, QPU_OP_ADD) != QPU_A_ADD) {
		DRM_DEBUG("Uniform address reset must be an ADD.\n");
		return false;
	}

	if (QPU_GET_FIELD(inst, QPU_COND_ADD) != QPU_COND_ALWAYS) {
		DRM_DEBUG("Uniform address reset must be unconditional.\n");
		return false;
	}

	if (QPU_GET_FIELD(inst, QPU_PACK) != QPU_PACK_A_NOP &&
	    !(inst & QPU_PM)) {
		DRM_DEBUG("No packing allowed on uniforms reset\n");
		return false;
	}

	if (add_lri == -1) {
		DRM_DEBUG("First argument of uniform address write must be "
			  "an immediate value.\n");
		return false;
	}

	if (validation_state->live_immediates[add_lri] != expected_offset) {
		DRM_DEBUG("Resetting uniforms with offset %db instead of %db\n",
			  validation_state->live_immediates[add_lri],
			  expected_offset);
		return false;
	}

	if (!(add_b == QPU_MUX_A && raddr_a == QPU_R_UNIF) &&
	    !(add_b == QPU_MUX_B && raddr_b == QPU_R_UNIF)) {
		DRM_DEBUG("Second argument of uniform address write must be "
			  "a uniform.\n");
		return false;
	}

	validation_state->needs_uniform_address_update = false;
	validation_state->needs_uniform_address_for_loop = false;
	return require_uniform_address_uniform(validated_shader);
}

static bool
check_reg_write(struct vc4_validated_shader_info *validated_shader,
		struct vc4_shader_validation_state *validation_state,
		bool is_mul)
{
	uint64_t inst = validation_state->shader[validation_state->ip];
	uint32_t waddr = (is_mul ?
			  QPU_GET_FIELD(inst, QPU_WADDR_MUL) :
			  QPU_GET_FIELD(inst, QPU_WADDR_ADD));
	uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
	bool ws = inst & QPU_WS;
	bool is_b = is_mul ^ ws;
	u32 lri = waddr_to_live_reg_index(waddr, is_b);

	if (lri != -1) {
		uint32_t cond_add = QPU_GET_FIELD(inst, QPU_COND_ADD);
		uint32_t cond_mul = QPU_GET_FIELD(inst, QPU_COND_MUL);

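		/* Track unconditional LOAD_IMM writes, so that
		 * validate_uniform_address_write() can later verify the
		 * immediate used for a uniforms address reset.
		 */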
		if (sig == QPU_SIG_LOAD_IMM &&
		    QPU_GET_FIELD(inst, QPU_PACK) == QPU_PACK_A_NOP &&
		    ((is_mul && cond_mul == QPU_COND_ALWAYS) ||
		     (!is_mul && cond_add == QPU_COND_ALWAYS))) {
			validation_state->live_immediates[lri] =
				QPU_GET_FIELD(inst, QPU_LOAD_IMM);
		} else {
			validation_state->live_immediates[lri] = ~0;
		}

		if (live_reg_is_upper_half(lri))
			validation_state->all_registers_used = true;
	}

	switch (waddr) {
	case QPU_W_UNIFORMS_ADDRESS:
		if (is_b) {
			DRM_DEBUG("relative uniforms address change "
				  "unsupported\n");
			return false;
		}

		return validate_uniform_address_write(validated_shader,
						      validation_state,
						      is_mul);

	case QPU_W_TLB_COLOR_MS:
	case QPU_W_TLB_COLOR_ALL:
	case QPU_W_TLB_Z:
		/* These only interact with the tile buffer, not main memory,
		 * so they're safe.
		 */
		return true;

	case QPU_W_TMU0_S:
	case QPU_W_TMU0_T:
	case QPU_W_TMU0_R:
	case QPU_W_TMU0_B:
	case QPU_W_TMU1_S:
	case QPU_W_TMU1_T:
	case QPU_W_TMU1_R:
	case QPU_W_TMU1_B:
		return check_tmu_write(validated_shader, validation_state,
				       is_mul);

	case QPU_W_HOST_INT:
	case QPU_W_TMU_NOSWAP:
	case QPU_W_TLB_ALPHA_MASK:
	case QPU_W_MUTEX_RELEASE:
		/* XXX: I haven't thought about these, so don't support them
		 * for now.
		 */
		DRM_DEBUG("Unsupported waddr %d\n", waddr);
		return false;

	case QPU_W_VPM_ADDR:
		DRM_DEBUG("General VPM DMA unsupported\n");
		return false;

	case QPU_W_VPM:
	case QPU_W_VPMVCD_SETUP:
		/* We allow VPM setup in general, even including VPM DMA
		 * configuration setup, because the (unsafe) DMA can only be
		 * triggered by QPU_W_VPM_ADDR writes.
		 */
		return true;

	case QPU_W_TLB_STENCIL_SETUP:
		return true;
	}

	return true;
}

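/* Tracks the MAX(x, 0)/MIN(x, unif) sequences that make a direct-addressed
 * TMU read safe.  A minimal sketch of the pattern being recognized, in
 * illustrative QPU assembly (register names are arbitrary):
 *
 *     max  ra0, ra1, 0        ; live_max_clamp_regs[ra0] = true
 *     min  ra2, ra0, unif     ; live_min_clamp_offsets[ra2] = unif offset
 *     add  tmu0_s, ra2, unif  ; validated by check_tmu_write()
 */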
static void
track_live_clamps(struct vc4_validated_shader_info *validated_shader,
		  struct vc4_shader_validation_state *validation_state)
{
	uint64_t inst = validation_state->shader[validation_state->ip];
	uint32_t op_add = QPU_GET_FIELD(inst, QPU_OP_ADD);
	uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD);
	uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL);
	uint32_t cond_add = QPU_GET_FIELD(inst, QPU_COND_ADD);
	uint32_t add_a = QPU_GET_FIELD(inst, QPU_ADD_A);
	uint32_t add_b = QPU_GET_FIELD(inst, QPU_ADD_B);
	uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
	uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
	uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
	bool ws = inst & QPU_WS;
	uint32_t lri_add_a, lri_add, lri_mul;
	bool add_a_is_min_0;

	/* Check whether OP_ADD's A argument comes from a live MAX(x, 0),
	 * before we clear previous live state.
	 */
	lri_add_a = raddr_add_a_to_live_reg_index(inst);
	add_a_is_min_0 = (lri_add_a != ~0 &&
			  validation_state->live_max_clamp_regs[lri_add_a]);

	/* Clear live state for registers written by our instruction. */
	lri_add = waddr_to_live_reg_index(waddr_add, ws);
	lri_mul = waddr_to_live_reg_index(waddr_mul, !ws);
	if (lri_mul != ~0) {
		validation_state->live_max_clamp_regs[lri_mul] = false;
		validation_state->live_min_clamp_offsets[lri_mul] = ~0;
	}
	if (lri_add != ~0) {
		validation_state->live_max_clamp_regs[lri_add] = false;
		validation_state->live_min_clamp_offsets[lri_add] = ~0;
	} else {
		/* Nothing further to do for live tracking, since only ADDs
		 * generate new live clamp registers.
		 */
		return;
	}

	/* Now, handle remaining live clamp tracking for the ADD operation. */

	if (cond_add != QPU_COND_ALWAYS)
		return;

	if (op_add == QPU_A_MAX) {
		/* Track live clamps of a value to a minimum of 0 (in either
		 * arg).
		 */
		if (sig != QPU_SIG_SMALL_IMM || raddr_b != 0 ||
		    (add_a != QPU_MUX_B && add_b != QPU_MUX_B)) {
			return;
		}

		validation_state->live_max_clamp_regs[lri_add] = true;
	} else if (op_add == QPU_A_MIN) {
		/* Track live clamps of a value clamped to a minimum of 0 and
		 * a maximum of some uniform's offset.
		 */
		if (!add_a_is_min_0)
			return;

		if (!(add_b == QPU_MUX_A && raddr_a == QPU_R_UNIF) &&
		    !(add_b == QPU_MUX_B && raddr_b == QPU_R_UNIF &&
		      sig != QPU_SIG_SMALL_IMM)) {
			return;
		}

		validation_state->live_min_clamp_offsets[lri_add] =
			validated_shader->uniforms_size;
	}
}

static bool
check_instruction_writes(struct vc4_validated_shader_info *validated_shader,
			 struct vc4_shader_validation_state *validation_state)
{
	uint64_t inst = validation_state->shader[validation_state->ip];
	uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD);
	uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL);
	bool ok;

	if (is_tmu_write(waddr_add) && is_tmu_write(waddr_mul)) {
		DRM_DEBUG("ADD and MUL both set up textures\n");
		return false;
	}

	ok = (check_reg_write(validated_shader, validation_state, false) &&
	      check_reg_write(validated_shader, validation_state, true));

	track_live_clamps(validated_shader, validation_state);

	return ok;
}

static bool
check_branch(uint64_t inst,
	     struct vc4_validated_shader_info *validated_shader,
	     struct vc4_shader_validation_state *validation_state,
	     int ip)
{
	int32_t branch_imm = QPU_GET_FIELD(inst, QPU_BRANCH_TARGET);
	uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD);
	uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL);

	if ((int)branch_imm < 0)
		validation_state->needs_uniform_address_for_loop = true;

	/* We don't want to have to worry about validation of this, and
	 * there's no need for it.
	 */
	if (waddr_add != QPU_W_NOP || waddr_mul != QPU_W_NOP) {
		DRM_DEBUG("branch instruction at %d wrote a register.\n",
			  validation_state->ip);
		return false;
	}

	return true;
}

static bool
check_instruction_reads(struct vc4_validated_shader_info *validated_shader,
			struct vc4_shader_validation_state *validation_state)
{
	uint64_t inst = validation_state->shader[validation_state->ip];
	uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
	uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
	uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);

	if (raddr_a == QPU_R_UNIF ||
	    (raddr_b == QPU_R_UNIF && sig != QPU_SIG_SMALL_IMM)) {
		/* This can't overflow the uint32_t, because we're reading 8
		 * bytes of instruction to increment by 4 here, so we'd
		 * already be OOM.
		 */
		validated_shader->uniforms_size += 4;

		if (validation_state->needs_uniform_address_update) {
			DRM_DEBUG("Uniform read with undefined uniform "
				  "address\n");
			return false;
		}
	}

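	/* Reads of the upper half of either register file mean the shader
	 * can't safely be paired with another thread (checked at the end of
	 * vc4_validate_shader()).
	 */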
	if ((raddr_a >= 16 && raddr_a < 32) ||
	    (raddr_b >= 16 && raddr_b < 32 && sig != QPU_SIG_SMALL_IMM)) {
		validation_state->all_registers_used = true;
	}

	return true;
}

/* Make sure that all branches are absolute and point within the shader, and
 * note their targets for later.
 */
static bool
vc4_validate_branches(struct vc4_shader_validation_state *validation_state)
{
	uint32_t max_branch_target = 0;
	int ip;
	int last_branch = -2;

	for (ip = 0; ip < validation_state->max_ip; ip++) {
		uint64_t inst = validation_state->shader[ip];
		int32_t branch_imm = QPU_GET_FIELD(inst, QPU_BRANCH_TARGET);
		uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
		uint32_t after_delay_ip = ip + 4;
		uint32_t branch_target_ip;

		if (sig == QPU_SIG_PROG_END) {
			/* There are two delay slots after program end is
			 * signaled that are still executed, then we're
			 * finished.  validation_state->max_ip is the
			 * instruction after the last valid instruction in the
			 * program.
			 */
			validation_state->max_ip = ip + 3;
			continue;
		}

		if (sig != QPU_SIG_BRANCH)
			continue;

		if (ip - last_branch < 4) {
			DRM_DEBUG("Branch at %d during delay slots\n", ip);
			return false;
		}
		last_branch = ip;

		if (inst & QPU_BRANCH_REG) {
			DRM_DEBUG("branching from register relative "
				  "not supported\n");
			return false;
		}

		if (!(inst & QPU_BRANCH_REL)) {
			DRM_DEBUG("relative branching required\n");
			return false;
		}

		/* The actual branch target is the instruction after the delay
		 * slots, plus whatever byte offset is in the low 32 bits of
		 * the instruction.  Make sure we're not branching beyond the
		 * end of the shader object.
		 */
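		/* (For example: instructions are 8 bytes, so a branch at
		 * ip 10 with branch_imm 16 resolves to ip 10 + 4 + 16/8 = 16.)
		 */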
		if (branch_imm % sizeof(inst) != 0) {
			DRM_DEBUG("branch target not aligned\n");
			return false;
		}

		branch_target_ip = after_delay_ip + (branch_imm >> 3);
		if (branch_target_ip >= validation_state->max_ip) {
			DRM_DEBUG("Branch at %d outside of shader (ip %d/%d)\n",
				  ip, branch_target_ip,
				  validation_state->max_ip);
			return false;
		}
		set_bit(branch_target_ip, validation_state->branch_targets);

		/* Make sure that the non-branching path is also not outside
		 * the shader.
		 */
		if (after_delay_ip >= validation_state->max_ip) {
			DRM_DEBUG("Branch at %d continues past shader end "
				  "(%d/%d)\n",
				  ip, after_delay_ip, validation_state->max_ip);
			return false;
		}
		set_bit(after_delay_ip, validation_state->branch_targets);
		max_branch_target = max(max_branch_target, after_delay_ip);
	}

	if (max_branch_target > validation_state->max_ip - 3) {
		DRM_DEBUG("Branch landed after QPU_SIG_PROG_END\n");
		return false;
	}

	return true;
}

/* Resets any known state for the shader, used when we may be branched to from
 * multiple locations in the program (or at shader start).
 */
static void
reset_validation_state(struct vc4_shader_validation_state *validation_state)
{
	int i;

	for (i = 0; i < 8; i++)
		validation_state->tmu_setup[i / 4].p_offset[i % 4] = ~0;

	for (i = 0; i < LIVE_REG_COUNT; i++) {
		validation_state->live_min_clamp_offsets[i] = ~0;
		validation_state->live_max_clamp_regs[i] = false;
		validation_state->live_immediates[i] = ~0;
	}
}

static bool
texturing_in_progress(struct vc4_shader_validation_state *validation_state)
{
	return (validation_state->tmu_write_count[0] != 0 ||
		validation_state->tmu_write_count[1] != 0);
}

static bool
vc4_handle_branch_target(struct vc4_shader_validation_state *validation_state)
{
	uint32_t ip = validation_state->ip;

	if (!test_bit(ip, validation_state->branch_targets))
		return true;

	if (texturing_in_progress(validation_state)) {
		DRM_DEBUG("Branch target landed during TMU setup\n");
		return false;
	}

	/* Reset our live values tracking, since this instruction may have
	 * multiple predecessors.
	 *
	 * One could potentially do analysis to determine that, for
	 * example, all predecessors have a live max clamp in the same
	 * register, but we don't bother with that.
	 */
	reset_validation_state(validation_state);

	/* Since we've entered a basic block from potentially multiple
	 * predecessors, we need the uniforms address to be updated before any
	 * uniforms are read.  We require that after any branch point, the next
	 * uniform to be loaded is a uniform address offset.  That uniform's
	 * offset will be marked by the uniform address register write
	 * validation, or by a one-off check at the end of the program.
	 */
	validation_state->needs_uniform_address_update = true;

	return true;
}

struct vc4_validated_shader_info *
vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
{
	bool found_shader_end = false;
	int shader_end_ip = 0;
	uint32_t last_thread_switch_ip = -3;
	uint32_t ip;
	struct vc4_validated_shader_info *validated_shader = NULL;
	struct vc4_shader_validation_state validation_state;

	memset(&validation_state, 0, sizeof(validation_state));
	validation_state.shader = shader_obj->vaddr;
	validation_state.max_ip = shader_obj->base.size / sizeof(uint64_t);

	reset_validation_state(&validation_state);

	validation_state.branch_targets =
		kcalloc(BITS_TO_LONGS(validation_state.max_ip),
			sizeof(unsigned long), GFP_KERNEL);
	if (!validation_state.branch_targets)
		goto fail;

	validated_shader = kcalloc(1, sizeof(*validated_shader), GFP_KERNEL);
	if (!validated_shader)
		goto fail;

	if (!vc4_validate_branches(&validation_state))
		goto fail;

	for (ip = 0; ip < validation_state.max_ip; ip++) {
		uint64_t inst = validation_state.shader[ip];
		uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);

		validation_state.ip = ip;

		if (!vc4_handle_branch_target(&validation_state))
			goto fail;

		if (ip == last_thread_switch_ip + 3) {
			/* Reset r0-r3 live clamp data */
			int i;

			for (i = 64; i < LIVE_REG_COUNT; i++) {
				validation_state.live_min_clamp_offsets[i] = ~0;
				validation_state.live_max_clamp_regs[i] = false;
				validation_state.live_immediates[i] = ~0;
			}
		}

		switch (sig) {
		case QPU_SIG_NONE:
		case QPU_SIG_WAIT_FOR_SCOREBOARD:
		case QPU_SIG_SCOREBOARD_UNLOCK:
		case QPU_SIG_COLOR_LOAD:
		case QPU_SIG_LOAD_TMU0:
		case QPU_SIG_LOAD_TMU1:
		case QPU_SIG_PROG_END:
		case QPU_SIG_SMALL_IMM:
		case QPU_SIG_THREAD_SWITCH:
		case QPU_SIG_LAST_THREAD_SWITCH:
			if (!check_instruction_writes(validated_shader,
						      &validation_state)) {
				DRM_DEBUG("Bad write at ip %d\n", ip);
				goto fail;
			}

			if (!check_instruction_reads(validated_shader,
						     &validation_state))
				goto fail;

			if (sig == QPU_SIG_PROG_END) {
				found_shader_end = true;
				shader_end_ip = ip;
			}

			if (sig == QPU_SIG_THREAD_SWITCH ||
			    sig == QPU_SIG_LAST_THREAD_SWITCH) {
				validated_shader->is_threaded = true;

				if (ip < last_thread_switch_ip + 3) {
					DRM_DEBUG("Thread switch too soon after "
						  "last switch at ip %d\n", ip);
					goto fail;
				}
				last_thread_switch_ip = ip;
			}

			break;

		case QPU_SIG_LOAD_IMM:
			if (!check_instruction_writes(validated_shader,
						      &validation_state)) {
				DRM_DEBUG("Bad LOAD_IMM write at ip %d\n", ip);
				goto fail;
			}
			break;

		case QPU_SIG_BRANCH:
			if (!check_branch(inst, validated_shader,
					  &validation_state, ip))
				goto fail;

			if (ip < last_thread_switch_ip + 3) {
				DRM_DEBUG("Branch in thread switch at ip %d\n",
					  ip);
				goto fail;
			}

			break;
		default:
			DRM_DEBUG("Unsupported QPU signal %d at "
				  "instruction %d\n", sig, ip);
			goto fail;
		}

		/* There are two delay slots after program end is signaled
		 * that are still executed, then we're finished.
		 */
		if (found_shader_end && ip == shader_end_ip + 2)
			break;
	}

	if (ip == validation_state.max_ip) {
		DRM_DEBUG("shader failed to terminate before "
			  "shader BO end at %zd\n",
			  shader_obj->base.size);
		goto fail;
	}

	/* Might corrupt other thread */
	if (validated_shader->is_threaded &&
	    validation_state.all_registers_used) {
		DRM_DEBUG("Shader uses threading, but uses the upper "
			  "half of the registers, too\n");
		goto fail;
	}

	/* If we did a backwards branch and we haven't emitted a uniforms
	 * reset since then, we still need the uniforms stream to have the
	 * uniforms address available so that the backwards branch can do its
	 * uniforms reset.
	 *
	 * We could potentially prove that the backwards branch doesn't
	 * contain any uses of uniforms until program exit, but that doesn't
	 * seem to be worth the trouble.
	 */
	if (validation_state.needs_uniform_address_for_loop) {
		if (!require_uniform_address_uniform(validated_shader))
			goto fail;
		validated_shader->uniforms_size += 4;
	}

	/* Again, no chance of integer overflow here because the worst case
	 * scenario is 8 bytes of uniforms plus handles per 8-byte
	 * instruction.
	 */
	validated_shader->uniforms_src_size =
		(validated_shader->uniforms_size +
		 4 * validated_shader->num_texture_samples);

	kfree(validation_state.branch_targets);

	return validated_shader;

fail:
	kfree(validation_state.branch_targets);
	if (validated_shader) {
		kfree(validated_shader->uniform_addr_offsets);
		kfree(validated_shader->texture_samples);
		kfree(validated_shader);
	}
	return NULL;
}