/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/kernel.h>
#include <drm/drmP.h>
#include "radeon.h"
#include "r600d.h"
#include "r600_reg_safe.h"

static int r600_nomm;
extern void r600_cs_legacy_get_tiling_conf(struct drm_device *dev, u32 *npipes, u32 *nbanks, u32 *group_size);


struct r600_cs_track {
	/* configuration we mirror so that we use the same code between kms/ums */
	u32			group_size;
	u32			nbanks;
	u32			npipes;
	/* value we track */
	u32			sq_config;
	u32			log_nsamples;
	u32			nsamples;
	u32			cb_color_base_last[8];
	struct radeon_bo	*cb_color_bo[8];
	u64			cb_color_bo_mc[8];
	u64			cb_color_bo_offset[8];
	struct radeon_bo	*cb_color_frag_bo[8];
	u64			cb_color_frag_offset[8];
	struct radeon_bo	*cb_color_tile_bo[8];
	u64			cb_color_tile_offset[8];
	u32			cb_color_mask[8];
	u32			cb_color_info[8];
	u32			cb_color_view[8];
	u32			cb_color_size_idx[8]; /* unused */
	u32			cb_target_mask;
	u32			cb_shader_mask;  /* unused */
	bool			is_resolve;
	u32			cb_color_size[8];
	u32			vgt_strmout_en;
	u32			vgt_strmout_buffer_en;
	struct radeon_bo	*vgt_strmout_bo[4];
	u64			vgt_strmout_bo_mc[4]; /* unused */
	u32			vgt_strmout_bo_offset[4];
	u32			vgt_strmout_size[4];
	u32			db_depth_control;
	u32			db_depth_info;
	u32			db_depth_size_idx;
	u32			db_depth_view;
	u32			db_depth_size;
	u32			db_offset;
	struct radeon_bo	*db_bo;
	u64			db_bo_mc;
	bool			sx_misc_kill_all_prims;
	bool			cb_dirty;
	bool			db_dirty;
	bool			streamout_dirty;
	struct radeon_bo	*htile_bo;
	u64			htile_offset;
	u32			htile_surface;
};

#define FMT_8_BIT(fmt, vc)   [fmt] = { 1, 1, 1, vc, CHIP_R600 }
#define FMT_16_BIT(fmt, vc)  [fmt] = { 1, 1, 2, vc, CHIP_R600 }
#define FMT_24_BIT(fmt)      [fmt] = { 1, 1, 4,  0, CHIP_R600 }
#define FMT_32_BIT(fmt, vc)  [fmt] = { 1, 1, 4, vc, CHIP_R600 }
#define FMT_48_BIT(fmt)      [fmt] = { 1, 1, 8,  0, CHIP_R600 }
#define FMT_64_BIT(fmt, vc)  [fmt] = { 1, 1, 8, vc, CHIP_R600 }
#define FMT_96_BIT(fmt)      [fmt] = { 1, 1, 12, 0, CHIP_R600 }
#define FMT_128_BIT(fmt, vc) [fmt] = { 1, 1, 16, vc, CHIP_R600 }

struct gpu_formats {
	unsigned blockwidth;
	unsigned blockheight;
	unsigned blocksize;
	unsigned valid_color;
	enum radeon_family min_family;
};

static const struct gpu_formats color_formats_table[] = {
	/* 8 bit */
	FMT_8_BIT(V_038004_COLOR_8, 1),
	FMT_8_BIT(V_038004_COLOR_4_4, 1),
	FMT_8_BIT(V_038004_COLOR_3_3_2, 1),
	FMT_8_BIT(V_038004_FMT_1, 0),

	/* 16-bit */
	FMT_16_BIT(V_038004_COLOR_16, 1),
	FMT_16_BIT(V_038004_COLOR_16_FLOAT, 1),
	FMT_16_BIT(V_038004_COLOR_8_8, 1),
	FMT_16_BIT(V_038004_COLOR_5_6_5, 1),
	FMT_16_BIT(V_038004_COLOR_6_5_5, 1),
	FMT_16_BIT(V_038004_COLOR_1_5_5_5, 1),
	FMT_16_BIT(V_038004_COLOR_4_4_4_4, 1),
	FMT_16_BIT(V_038004_COLOR_5_5_5_1, 1),

	/* 24-bit */
	FMT_24_BIT(V_038004_FMT_8_8_8),

	/* 32-bit */
	FMT_32_BIT(V_038004_COLOR_32, 1),
	FMT_32_BIT(V_038004_COLOR_32_FLOAT, 1),
	FMT_32_BIT(V_038004_COLOR_16_16, 1),
	FMT_32_BIT(V_038004_COLOR_16_16_FLOAT, 1),
	FMT_32_BIT(V_038004_COLOR_8_24, 1),
	FMT_32_BIT(V_038004_COLOR_8_24_FLOAT, 1),
	FMT_32_BIT(V_038004_COLOR_24_8, 1),
	FMT_32_BIT(V_038004_COLOR_24_8_FLOAT, 1),
	FMT_32_BIT(V_038004_COLOR_10_11_11, 1),
	FMT_32_BIT(V_038004_COLOR_10_11_11_FLOAT, 1),
	FMT_32_BIT(V_038004_COLOR_11_11_10, 1),
	FMT_32_BIT(V_038004_COLOR_11_11_10_FLOAT, 1),
	FMT_32_BIT(V_038004_COLOR_2_10_10_10, 1),
	FMT_32_BIT(V_038004_COLOR_8_8_8_8, 1),
	FMT_32_BIT(V_038004_COLOR_10_10_10_2, 1),
	FMT_32_BIT(V_038004_FMT_5_9_9_9_SHAREDEXP, 0),
	FMT_32_BIT(V_038004_FMT_32_AS_8, 0),
	FMT_32_BIT(V_038004_FMT_32_AS_8_8, 0),

	/* 48-bit */
	FMT_48_BIT(V_038004_FMT_16_16_16),
	FMT_48_BIT(V_038004_FMT_16_16_16_FLOAT),

	/* 64-bit */
	FMT_64_BIT(V_038004_COLOR_X24_8_32_FLOAT, 1),
	FMT_64_BIT(V_038004_COLOR_32_32, 1),
	FMT_64_BIT(V_038004_COLOR_32_32_FLOAT, 1),
	FMT_64_BIT(V_038004_COLOR_16_16_16_16, 1),
	FMT_64_BIT(V_038004_COLOR_16_16_16_16_FLOAT, 1),

	FMT_96_BIT(V_038004_FMT_32_32_32),
	FMT_96_BIT(V_038004_FMT_32_32_32_FLOAT),

	/* 128-bit */
	FMT_128_BIT(V_038004_COLOR_32_32_32_32, 1),
	FMT_128_BIT(V_038004_COLOR_32_32_32_32_FLOAT, 1),

	[V_038004_FMT_GB_GR] = { 2, 1, 4, 0 },
	[V_038004_FMT_BG_RG] = { 2, 1, 4, 0 },

	/* block compressed formats */
	[V_038004_FMT_BC1] = { 4, 4, 8, 0 },
	[V_038004_FMT_BC2] = { 4, 4, 16, 0 },
	[V_038004_FMT_BC3] = { 4, 4, 16, 0 },
	[V_038004_FMT_BC4] = { 4, 4, 8, 0 },
	[V_038004_FMT_BC5] = { 4, 4, 16, 0},
	[V_038004_FMT_BC6] = { 4, 4, 16, 0, CHIP_CEDAR}, /* Evergreen-only */
	[V_038004_FMT_BC7] = { 4, 4, 16, 0, CHIP_CEDAR}, /* Evergreen-only */

	/* The other Evergreen formats */
	[V_038004_FMT_32_AS_32_32_32_32] = { 1, 1, 4, 0, CHIP_CEDAR},
};

bool r600_fmt_is_valid_color(u32 format)
{
	if (format >= ARRAY_SIZE(color_formats_table))
		return false;

	if (color_formats_table[format].valid_color)
		return true;

	return false;
}

bool r600_fmt_is_valid_texture(u32 format, enum radeon_family family)
{
	if (format >= ARRAY_SIZE(color_formats_table))
		return false;

	if (family < color_formats_table[format].min_family)
		return false;

	if (color_formats_table[format].blockwidth > 0)
		return true;

	return false;
}

int r600_fmt_get_blocksize(u32 format)
{
	if (format >= ARRAY_SIZE(color_formats_table))
		return 0;

	return color_formats_table[format].blocksize;
}

int r600_fmt_get_nblocksx(u32 format, u32 w)
{
	unsigned bw;

	if (format >= ARRAY_SIZE(color_formats_table))
		return 0;

	bw = color_formats_table[format].blockwidth;
	if (bw == 0)
		return 0;

	return (w + bw - 1) / bw;
}

int r600_fmt_get_nblocksy(u32 format, u32 h)
{
	unsigned bh;

	if (format >= ARRAY_SIZE(color_formats_table))
		return 0;

	bh = color_formats_table[format].blockheight;
	if (bh == 0)
		return 0;

	return (h + bh - 1) / bh;
}
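/*
 * Worked example for the block helpers above (illustrative numbers, not
 * taken from any caller): V_038004_FMT_BC1 is listed in
 * color_formats_table as 4x4-pixel blocks of 8 bytes, so a 256x128 BC1
 * image spans (256 + 3) / 4 = 64 blocks horizontally and
 * (128 + 3) / 4 = 32 blocks vertically, i.e. 64 * 32 * 8 = 16384 bytes.
 * This is the kind of bound the checkers below compare against the
 * backing BO size.
 */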

struct array_mode_checker {
	int array_mode;
	u32 group_size;
	u32 nbanks;
	u32 npipes;
	u32 nsamples;
	u32 blocksize;
};

/* returns alignment in pixels for pitch/height/depth and bytes for base */
static int r600_get_array_mode_alignment(struct array_mode_checker *values,
						u32 *pitch_align,
						u32 *height_align,
						u32 *depth_align,
						u64 *base_align)
{
	u32 tile_width = 8;
	u32 tile_height = 8;
	u32 macro_tile_width = values->nbanks;
	u32 macro_tile_height = values->npipes;
	u32 tile_bytes = tile_width * tile_height * values->blocksize * values->nsamples;
	u32 macro_tile_bytes = macro_tile_width * macro_tile_height * tile_bytes;

	switch (values->array_mode) {
	case ARRAY_LINEAR_GENERAL:
		/* technically tile_width/_height for pitch/height */
		*pitch_align = 1; /* tile_width */
		*height_align = 1; /* tile_height */
		*depth_align = 1;
		*base_align = 1;
		break;
	case ARRAY_LINEAR_ALIGNED:
		*pitch_align = max((u32)64, (u32)(values->group_size / values->blocksize));
		*height_align = 1;
		*depth_align = 1;
		*base_align = values->group_size;
		break;
	case ARRAY_1D_TILED_THIN1:
		*pitch_align = max((u32)tile_width,
				   (u32)(values->group_size /
					 (tile_height * values->blocksize * values->nsamples)));
		*height_align = tile_height;
		*depth_align = 1;
		*base_align = values->group_size;
		break;
	case ARRAY_2D_TILED_THIN1:
		*pitch_align = max((u32)macro_tile_width * tile_width,
				(u32)((values->group_size * values->nbanks) /
				(values->blocksize * values->nsamples * tile_width)));
		*height_align = macro_tile_height * tile_height;
		*depth_align = 1;
		*base_align = max(macro_tile_bytes,
				  (*pitch_align) * values->blocksize * (*height_align) * values->nsamples);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
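/*
 * Worked example for the alignment code above (illustrative values):
 * for ARRAY_2D_TILED_THIN1 with nbanks = 8, npipes = 2,
 * group_size = 256, blocksize = 4 and nsamples = 1 this gives
 *   pitch_align  = max(8 * 8, (256 * 8) / (4 * 1 * 8)) = 64 pixels,
 *   height_align = 2 * 8 = 16 pixels,
 *   base_align   = max(8 * 2 * 256, 64 * 4 * 16 * 1) = 4096 bytes.
 */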

static void r600_cs_track_init(struct r600_cs_track *track)
{
	int i;

	/* assume DX9 mode */
	track->sq_config = DX9_CONSTS;
	for (i = 0; i < 8; i++) {
		track->cb_color_base_last[i] = 0;
		track->cb_color_size[i] = 0;
		track->cb_color_size_idx[i] = 0;
		track->cb_color_info[i] = 0;
		track->cb_color_view[i] = 0xFFFFFFFF;
		track->cb_color_bo[i] = NULL;
		track->cb_color_bo_offset[i] = 0xFFFFFFFF;
		track->cb_color_bo_mc[i] = 0xFFFFFFFF;
		track->cb_color_frag_bo[i] = NULL;
		track->cb_color_frag_offset[i] = 0xFFFFFFFF;
		track->cb_color_tile_bo[i] = NULL;
		track->cb_color_tile_offset[i] = 0xFFFFFFFF;
		track->cb_color_mask[i] = 0xFFFFFFFF;
	}
	track->is_resolve = false;
	track->nsamples = 16;
	track->log_nsamples = 4;
	track->cb_target_mask = 0xFFFFFFFF;
	track->cb_shader_mask = 0xFFFFFFFF;
	track->cb_dirty = true;
	track->db_bo = NULL;
	track->db_bo_mc = 0xFFFFFFFF;
	/* assume the biggest format and that htile is enabled */
	track->db_depth_info = 7 | (1 << 25);
	track->db_depth_view = 0xFFFFC000;
	track->db_depth_size = 0xFFFFFFFF;
	track->db_depth_size_idx = 0;
	track->db_depth_control = 0xFFFFFFFF;
	track->db_dirty = true;
	track->htile_bo = NULL;
	track->htile_offset = 0xFFFFFFFF;
	track->htile_surface = 0;

	for (i = 0; i < 4; i++) {
		track->vgt_strmout_size[i] = 0;
		track->vgt_strmout_bo[i] = NULL;
		track->vgt_strmout_bo_offset[i] = 0xFFFFFFFF;
		track->vgt_strmout_bo_mc[i] = 0xFFFFFFFF;
	}
	track->streamout_dirty = true;
	track->sx_misc_kill_all_prims = false;
}

static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
{
	struct r600_cs_track *track = p->track;
	u32 slice_tile_max, size, tmp;
	u32 height, height_align, pitch, pitch_align, depth_align;
	u64 base_offset, base_align;
	struct array_mode_checker array_check;
	volatile u32 *ib = p->ib.ptr;
	unsigned array_mode;
	u32 format;
	/* When resolve is used, the second colorbuffer always has 1 sample. */
	unsigned nsamples = track->is_resolve && i == 1 ? 1 : track->nsamples;

	size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i];
	format = G_0280A0_FORMAT(track->cb_color_info[i]);
	if (!r600_fmt_is_valid_color(format)) {
		dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n",
			 __func__, __LINE__, format,
			i, track->cb_color_info[i]);
		return -EINVAL;
	}
	/* pitch in pixels */
	pitch = (G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1) * 8;
	slice_tile_max = G_028060_SLICE_TILE_MAX(track->cb_color_size[i]) + 1;
	slice_tile_max *= 64;
	height = slice_tile_max / pitch;
	if (height > 8192)
		height = 8192;
	array_mode = G_0280A0_ARRAY_MODE(track->cb_color_info[i]);

	base_offset = track->cb_color_bo_mc[i] + track->cb_color_bo_offset[i];
	array_check.array_mode = array_mode;
	array_check.group_size = track->group_size;
	array_check.nbanks = track->nbanks;
	array_check.npipes = track->npipes;
	array_check.nsamples = nsamples;
	array_check.blocksize = r600_fmt_get_blocksize(format);
	if (r600_get_array_mode_alignment(&array_check,
					  &pitch_align, &height_align, &depth_align, &base_align)) {
		dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
			 G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
			 track->cb_color_info[i]);
		return -EINVAL;
	}
	switch (array_mode) {
	case V_0280A0_ARRAY_LINEAR_GENERAL:
		break;
	case V_0280A0_ARRAY_LINEAR_ALIGNED:
		break;
	case V_0280A0_ARRAY_1D_TILED_THIN1:
		/* avoid breaking userspace */
		if (height > 7)
			height &= ~0x7;
		break;
	case V_0280A0_ARRAY_2D_TILED_THIN1:
		break;
	default:
		dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
			G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
			track->cb_color_info[i]);
		return -EINVAL;
	}

	if (!IS_ALIGNED(pitch, pitch_align)) {
		dev_warn(p->dev, "%s:%d cb pitch (%d, 0x%x, %d) invalid\n",
			 __func__, __LINE__, pitch, pitch_align, array_mode);
		return -EINVAL;
	}
	if (!IS_ALIGNED(height, height_align)) {
		dev_warn(p->dev, "%s:%d cb height (%d, 0x%x, %d) invalid\n",
			 __func__, __LINE__, height, height_align, array_mode);
		return -EINVAL;
	}
	if (!IS_ALIGNED(base_offset, base_align)) {
		dev_warn(p->dev, "%s offset[%d] 0x%llx 0x%llx, %d not aligned\n", __func__, i,
			 base_offset, base_align, array_mode);
		return -EINVAL;
	}

	/* check offset */
	tmp = r600_fmt_get_nblocksy(format, height) * r600_fmt_get_nblocksx(format, pitch) *
	      r600_fmt_get_blocksize(format) * nsamples;
	switch (array_mode) {
	default:
	case V_0280A0_ARRAY_LINEAR_GENERAL:
	case V_0280A0_ARRAY_LINEAR_ALIGNED:
		tmp += track->cb_color_view[i] & 0xFF;
		break;
	case V_0280A0_ARRAY_1D_TILED_THIN1:
	case V_0280A0_ARRAY_2D_TILED_THIN1:
		tmp += G_028080_SLICE_MAX(track->cb_color_view[i]) * tmp;
		break;
	}
	if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) {
		if (array_mode == V_0280A0_ARRAY_LINEAR_GENERAL) {
			/* the initial DDX does bad things with the CB size occasionally */
			/* it rounds up height too far for slice tile max but the BO is smaller */
			/* r600c,g also seem to flush at bad times in some apps resulting in
			 * bogus values here. So for linear just allow anything to avoid breaking
			 * broken userspace.
			 */
		} else {
			dev_warn(p->dev, "%s offset[%d] %d %llu %d %lu too big (%d %d) (%d %d %d)\n",
				 __func__, i, array_mode,
				 track->cb_color_bo_offset[i], tmp,
				 radeon_bo_size(track->cb_color_bo[i]),
				 pitch, height, r600_fmt_get_nblocksx(format, pitch),
				 r600_fmt_get_nblocksy(format, height),
				 r600_fmt_get_blocksize(format));
			return -EINVAL;
		}
	}
	/* limit max tile */
	tmp = (height * pitch) >> 6;
	if (tmp < slice_tile_max)
		slice_tile_max = tmp;
	tmp = S_028060_PITCH_TILE_MAX((pitch / 8) - 1) |
		S_028060_SLICE_TILE_MAX(slice_tile_max - 1);
	ib[track->cb_color_size_idx[i]] = tmp;

	/* FMASK/CMASK */
	switch (G_0280A0_TILE_MODE(track->cb_color_info[i])) {
	case V_0280A0_TILE_DISABLE:
		break;
	case V_0280A0_FRAG_ENABLE:
		if (track->nsamples > 1) {
			uint32_t tile_max = G_028100_FMASK_TILE_MAX(track->cb_color_mask[i]);
			/* the tile size is 8x8, but the size is in units of bits.
			 * for bytes, do just * 8. */
			uint32_t bytes = track->nsamples * track->log_nsamples * 8 * (tile_max + 1);

			if (bytes + track->cb_color_frag_offset[i] >
			    radeon_bo_size(track->cb_color_frag_bo[i])) {
				dev_warn(p->dev, "%s FMASK_TILE_MAX too large "
					 "(tile_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n",
					 __func__, tile_max, bytes,
					 track->cb_color_frag_offset[i],
					 radeon_bo_size(track->cb_color_frag_bo[i]));
				return -EINVAL;
			}
		}
		/* fall through */
	case V_0280A0_CLEAR_ENABLE:
	{
		uint32_t block_max = G_028100_CMASK_BLOCK_MAX(track->cb_color_mask[i]);
		/* One block = 128x128 pixels, one 8x8 tile has 4 bits..
		 * (128*128) / (8*8) / 2 = 128 bytes per block. */
		uint32_t bytes = (block_max + 1) * 128;

		if (bytes + track->cb_color_tile_offset[i] >
		    radeon_bo_size(track->cb_color_tile_bo[i])) {
			dev_warn(p->dev, "%s CMASK_BLOCK_MAX too large "
				 "(block_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n",
				 __func__, block_max, bytes,
				 track->cb_color_tile_offset[i],
				 radeon_bo_size(track->cb_color_tile_bo[i]));
			return -EINVAL;
		}
		break;
	}
	default:
		dev_warn(p->dev, "%s invalid tile mode\n", __func__);
		return -EINVAL;
	}
	return 0;
}

static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
{
	struct r600_cs_track *track = p->track;
	u32 nviews, bpe, ntiles, size, slice_tile_max, tmp;
	u32 height_align, pitch_align, depth_align;
	u32 pitch = 8192;
	u32 height = 8192;
	u64 base_offset, base_align;
	struct array_mode_checker array_check;
	int array_mode;
	volatile u32 *ib = p->ib.ptr;


	if (track->db_bo == NULL) {
		dev_warn(p->dev, "z/stencil with no depth buffer\n");
		return -EINVAL;
	}
	switch (G_028010_FORMAT(track->db_depth_info)) {
	case V_028010_DEPTH_16:
		bpe = 2;
		break;
	case V_028010_DEPTH_X8_24:
	case V_028010_DEPTH_8_24:
	case V_028010_DEPTH_X8_24_FLOAT:
	case V_028010_DEPTH_8_24_FLOAT:
	case V_028010_DEPTH_32_FLOAT:
		bpe = 4;
		break;
	case V_028010_DEPTH_X24_8_32_FLOAT:
		bpe = 8;
		break;
	default:
		dev_warn(p->dev, "z/stencil with invalid format %d\n", G_028010_FORMAT(track->db_depth_info));
		return -EINVAL;
	}
	if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
		if (!track->db_depth_size_idx) {
			dev_warn(p->dev, "z/stencil buffer size not set\n");
			return -EINVAL;
		}
		tmp = radeon_bo_size(track->db_bo) - track->db_offset;
		tmp = (tmp / bpe) >> 6;
		if (!tmp) {
			dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %ld)\n",
					track->db_depth_size, bpe, track->db_offset,
					radeon_bo_size(track->db_bo));
			return -EINVAL;
		}
		ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF);
	} else {
		size = radeon_bo_size(track->db_bo);
		/* pitch in pixels */
		pitch = (G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1) * 8;
		slice_tile_max = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
		slice_tile_max *= 64;
		height = slice_tile_max / pitch;
		if (height > 8192)
			height = 8192;
		base_offset = track->db_bo_mc + track->db_offset;
		array_mode = G_028010_ARRAY_MODE(track->db_depth_info);
		array_check.array_mode = array_mode;
		array_check.group_size = track->group_size;
		array_check.nbanks = track->nbanks;
		array_check.npipes = track->npipes;
		array_check.nsamples = track->nsamples;
		array_check.blocksize = bpe;
		if (r600_get_array_mode_alignment(&array_check,
					&pitch_align, &height_align, &depth_align, &base_align)) {
			dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
					G_028010_ARRAY_MODE(track->db_depth_info),
					track->db_depth_info);
			return -EINVAL;
		}
		switch (array_mode) {
		case V_028010_ARRAY_1D_TILED_THIN1:
			/* don't break userspace */
			height &= ~0x7;
			break;
		case V_028010_ARRAY_2D_TILED_THIN1:
			break;
		default:
			dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
					G_028010_ARRAY_MODE(track->db_depth_info),
					track->db_depth_info);
			return -EINVAL;
		}

		if (!IS_ALIGNED(pitch, pitch_align)) {
			dev_warn(p->dev, "%s:%d db pitch (%d, 0x%x, %d) invalid\n",
					__func__, __LINE__, pitch, pitch_align, array_mode);
			return -EINVAL;
		}
		if (!IS_ALIGNED(height, height_align)) {
			dev_warn(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n",
					__func__, __LINE__, height, height_align, array_mode);
			return -EINVAL;
		}
		if (!IS_ALIGNED(base_offset, base_align)) {
			dev_warn(p->dev, "%s offset 0x%llx, 0x%llx, %d not aligned\n", __func__,
					base_offset, base_align, array_mode);
			return -EINVAL;
		}

		ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
		nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
		tmp = ntiles * bpe * 64 * nviews * track->nsamples;
		if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
			dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n",
					array_mode,
					track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
					radeon_bo_size(track->db_bo));
			return -EINVAL;
		}
	}

	/* hyperz */
	if (G_028010_TILE_SURFACE_ENABLE(track->db_depth_info)) {
		unsigned long size;
		unsigned nbx, nby;

		if (track->htile_bo == NULL) {
			dev_warn(p->dev, "%s:%d htile enabled without htile surface 0x%08x\n",
				 __func__, __LINE__, track->db_depth_info);
			return -EINVAL;
		}
		if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
			dev_warn(p->dev, "%s:%d htile can't be enabled with bogus db_depth_size 0x%08x\n",
				 __func__, __LINE__, track->db_depth_size);
			return -EINVAL;
		}

		nbx = pitch;
		nby = height;
		if (G_028D24_LINEAR(track->htile_surface)) {
			/* nbx must be 16 htiles aligned == 16 * 8 pixel aligned */
			nbx = round_up(nbx, 16 * 8);
			/* nby is npipes htiles aligned == npipes * 8 pixel aligned */
			nby = round_up(nby, track->npipes * 8);
		} else {
			/* always assume 8x8 htile */
			/* align is htile align * 8, htile align vary according to
			 * number of pipe and tile width and nby
			 */
			switch (track->npipes) {
			case 8:
				/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
				nbx = round_up(nbx, 64 * 8);
				nby = round_up(nby, 64 * 8);
				break;
			case 4:
				/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
				nbx = round_up(nbx, 64 * 8);
				nby = round_up(nby, 32 * 8);
				break;
			case 2:
				/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
				nbx = round_up(nbx, 32 * 8);
				nby = round_up(nby, 32 * 8);
				break;
			case 1:
				/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
				nbx = round_up(nbx, 32 * 8);
				nby = round_up(nby, 16 * 8);
				break;
			default:
				dev_warn(p->dev, "%s:%d invalid num pipes %d\n",
					 __func__, __LINE__, track->npipes);
				return -EINVAL;
			}
		}
		/* compute number of htile */
		nbx = nbx >> 3;
		nby = nby >> 3;
		/* size must be aligned on npipes * 2K boundary */
		size = roundup(nbx * nby * 4, track->npipes * (2 << 10));
		size += track->htile_offset;

		if (size > radeon_bo_size(track->htile_bo)) {
			dev_warn(p->dev, "%s:%d htile surface too small %ld for %ld (%d %d)\n",
				 __func__, __LINE__, radeon_bo_size(track->htile_bo),
				 size, nbx, nby);
			return -EINVAL;
		}
	}

	track->db_dirty = false;
	return 0;
}
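/*
 * Worked example for the hyperz sizing above (illustrative values): with
 * npipes = 2, a non-linear htile surface and a 1024x768 depth buffer,
 * nbx and nby are rounded up to 32 * 8 = 256 pixels (leaving 1024 and
 * 768), which gives 128 x 96 htiles after the >> 3, and
 * size = roundup(128 * 96 * 4, 2 * 2048) = 49152 bytes before
 * htile_offset is added and checked against the htile BO size.
 */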

static int r600_cs_track_check(struct radeon_cs_parser *p)
{
	struct r600_cs_track *track = p->track;
	u32 tmp;
	int r, i;

	/* on legacy kernel we don't perform advanced check */
	if (p->rdev == NULL)
		return 0;

	/* check streamout */
	if (track->streamout_dirty && track->vgt_strmout_en) {
		for (i = 0; i < 4; i++) {
			if (track->vgt_strmout_buffer_en & (1 << i)) {
				if (track->vgt_strmout_bo[i]) {
					u64 offset = (u64)track->vgt_strmout_bo_offset[i] +
						(u64)track->vgt_strmout_size[i];
					if (offset > radeon_bo_size(track->vgt_strmout_bo[i])) {
						DRM_ERROR("streamout %d bo too small: 0x%llx, 0x%lx\n",
							  i, offset,
							  radeon_bo_size(track->vgt_strmout_bo[i]));
						return -EINVAL;
					}
				} else {
					dev_warn(p->dev, "No buffer for streamout %d\n", i);
					return -EINVAL;
				}
			}
		}
		track->streamout_dirty = false;
	}

	if (track->sx_misc_kill_all_prims)
		return 0;

	/* check that we have a cb for each enabled target, we don't check
	 * shader_mask because it seems mesa isn't always setting it :(
	 */
	if (track->cb_dirty) {
		tmp = track->cb_target_mask;

		/* We must check both colorbuffers for RESOLVE. */
		if (track->is_resolve) {
			tmp |= 0xff;
		}

		for (i = 0; i < 8; i++) {
			u32 format = G_0280A0_FORMAT(track->cb_color_info[i]);

			if (format != V_0280A0_COLOR_INVALID &&
			    (tmp >> (i * 4)) & 0xF) {
				/* at least one component is enabled */
				if (track->cb_color_bo[i] == NULL) {
					dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
						__func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
					return -EINVAL;
				}
				/* perform rewrite of CB_COLOR[0-7]_SIZE */
				r = r600_cs_track_validate_cb(p, i);
				if (r)
					return r;
			}
		}
		track->cb_dirty = false;
	}

	/* Check depth buffer */
	if (track->db_dirty &&
	    G_028010_FORMAT(track->db_depth_info) != V_028010_DEPTH_INVALID &&
	    (G_028800_STENCIL_ENABLE(track->db_depth_control) ||
	     G_028800_Z_ENABLE(track->db_depth_control))) {
		r = r600_cs_track_validate_db(p);
		if (r)
			return r;
	}

	return 0;
}

/**
 * r600_cs_packet_parse_vline() - parse userspace VLINE packet
 * @parser:		parser structure holding parsing context.
 *
 * This is an R600-specific function for parsing VLINE packets.
 * Real work is done by r600_cs_common_vline_parse function.
 * Here we just set up ASIC-specific register table and call
 * the common implementation function.
 */
static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
{
	static uint32_t vline_start_end[2] = {AVIVO_D1MODE_VLINE_START_END,
					      AVIVO_D2MODE_VLINE_START_END};
	static uint32_t vline_status[2] = {AVIVO_D1MODE_VLINE_STATUS,
					   AVIVO_D2MODE_VLINE_STATUS};

	return r600_cs_common_vline_parse(p, vline_start_end, vline_status);
}

/**
 * r600_cs_common_vline_parse() - common vline parser
 * @parser:		parser structure holding parsing context.
 * @vline_start_end:    table of vline_start_end registers
 * @vline_status:       table of vline_status registers
 *
 * Userspace sends a special sequence for VLINE waits.
 * PACKET0 - VLINE_START_END + value
 * PACKET3 - WAIT_REG_MEM poll vline status reg
 * RELOC (P3) - crtc_id in reloc.
 *
 * This function parses this and relocates the VLINE START END
 * and WAIT_REG_MEM packets to the correct crtc.
 * It also detects a switched off crtc and nulls out the
 * wait in that case. This function is common for all ASICs that
 * are R600 and newer. The parsing algorithm is the same, and only
 * differs in which registers are used.
 *
 * Caller is the ASIC-specific function which passes the parser
 * context and ASIC-specific register table
 */
int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
			       uint32_t *vline_start_end,
			       uint32_t *vline_status)
{
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	struct radeon_cs_packet p3reloc, wait_reg_mem;
	int crtc_id;
	int r;
	uint32_t header, h_idx, reg, wait_reg_mem_info;
	volatile uint32_t *ib;

	ib = p->ib.ptr;

	/* parse the WAIT_REG_MEM */
	r = radeon_cs_packet_parse(p, &wait_reg_mem, p->idx);
	if (r)
		return r;

	/* check its a WAIT_REG_MEM */
	if (wait_reg_mem.type != RADEON_PACKET_TYPE3 ||
	    wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
		DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
		return -EINVAL;
	}

	wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
	/* bit 4 is reg (0) or mem (1) */
	if (wait_reg_mem_info & 0x10) {
		DRM_ERROR("vline WAIT_REG_MEM waiting on MEM instead of REG\n");
		return -EINVAL;
	}
	/* bit 8 is me (0) or pfp (1) */
	if (wait_reg_mem_info & 0x100) {
		DRM_ERROR("vline WAIT_REG_MEM waiting on PFP instead of ME\n");
		return -EINVAL;
	}
	/* waiting for value to be equal */
	if ((wait_reg_mem_info & 0x7) != 0x3) {
		DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
		return -EINVAL;
	}
	if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != vline_status[0]) {
		DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
		return -EINVAL;
	}

	if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != RADEON_VLINE_STAT) {
		DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
		return -EINVAL;
	}

	/* jump over the NOP */
	r = radeon_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
	if (r)
		return r;

	h_idx = p->idx - 2;
	p->idx += wait_reg_mem.count + 2;
	p->idx += p3reloc.count + 2;

	header = radeon_get_ib_value(p, h_idx);
	crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
	reg = R600_CP_PACKET0_GET_REG(header);

	crtc = drm_crtc_find(p->rdev->ddev, crtc_id);
	if (!crtc) {
		DRM_ERROR("cannot find crtc %d\n", crtc_id);
		return -ENOENT;
	}
	radeon_crtc = to_radeon_crtc(crtc);
	crtc_id = radeon_crtc->crtc_id;

	if (!crtc->enabled) {
		/* CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
		ib[h_idx + 2] = PACKET2(0);
		ib[h_idx + 3] = PACKET2(0);
		ib[h_idx + 4] = PACKET2(0);
		ib[h_idx + 5] = PACKET2(0);
		ib[h_idx + 6] = PACKET2(0);
		ib[h_idx + 7] = PACKET2(0);
		ib[h_idx + 8] = PACKET2(0);
	} else if (reg == vline_start_end[0]) {
		header &= ~R600_CP_PACKET0_REG_MASK;
		header |= vline_start_end[crtc_id] >> 2;
		ib[h_idx] = header;
		ib[h_idx + 4] = vline_status[crtc_id] >> 2;
	} else {
		DRM_ERROR("unknown crtc reloc\n");
		return -EINVAL;
	}
	return 0;
}

static int r600_packet0_check(struct radeon_cs_parser *p,
				struct radeon_cs_packet *pkt,
				unsigned idx, unsigned reg)
{
	int r;

	switch (reg) {
	case AVIVO_D1MODE_VLINE_START_END:
		r = r600_cs_packet_parse_vline(p);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
					idx, reg);
			return r;
		}
		break;
	default:
		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
		       reg, idx);
		return -EINVAL;
	}
	return 0;
}

static int r600_cs_parse_packet0(struct radeon_cs_parser *p,
				struct radeon_cs_packet *pkt)
{
	unsigned reg, i;
	unsigned idx;
	int r;

	idx = pkt->idx + 1;
	reg = pkt->reg;
	for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
		r = r600_packet0_check(p, pkt, idx, reg);
		if (r) {
			return r;
		}
	}
	return 0;
}

/**
 * r600_cs_check_reg() - check if register is authorized or not
 * @parser: parser structure holding parsing context
 * @reg: register we are testing
 * @idx: index into the cs buffer
 *
 * This function will test against r600_reg_safe_bm and return 0
 * if the register is safe.  If the register is not flagged as safe
 * this function will test it against a list of registers needing
 * special handling.
 */
static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
{
	struct r600_cs_track *track = (struct r600_cs_track *)p->track;
	struct radeon_bo_list *reloc;
	u32 m, i, tmp, *ib;
	int r;

	i = (reg >> 7);
	if (i >= ARRAY_SIZE(r600_reg_safe_bm)) {
		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
		return -EINVAL;
	}
	m = 1 << ((reg >> 2) & 31);
	if (!(r600_reg_safe_bm[i] & m))
		return 0;
	ib = p->ib.ptr;
	switch (reg) {
	/* force following reg to 0 in an attempt to disable out buffer
	 * which will need us to better understand how it works to perform
	 * security check on it (Jerome)
	 */
	case R_0288A8_SQ_ESGS_RING_ITEMSIZE:
	case R_008C44_SQ_ESGS_RING_SIZE:
	case R_0288B0_SQ_ESTMP_RING_ITEMSIZE:
	case R_008C54_SQ_ESTMP_RING_SIZE:
	case R_0288C0_SQ_FBUF_RING_ITEMSIZE:
	case R_008C74_SQ_FBUF_RING_SIZE:
	case R_0288B4_SQ_GSTMP_RING_ITEMSIZE:
	case R_008C5C_SQ_GSTMP_RING_SIZE:
	case R_0288AC_SQ_GSVS_RING_ITEMSIZE:
	case R_008C4C_SQ_GSVS_RING_SIZE:
	case R_0288BC_SQ_PSTMP_RING_ITEMSIZE:
	case R_008C6C_SQ_PSTMP_RING_SIZE:
	case R_0288C4_SQ_REDUC_RING_ITEMSIZE:
	case R_008C7C_SQ_REDUC_RING_SIZE:
	case R_0288B8_SQ_VSTMP_RING_ITEMSIZE:
	case R_008C64_SQ_VSTMP_RING_SIZE:
	case R_0288C8_SQ_GS_VERT_ITEMSIZE:
		/* get value to populate the IB don't remove */
		/*tmp =radeon_get_ib_value(p, idx);
		  ib[idx] = 0;*/
		break;
	case SQ_ESGS_RING_BASE:
	case SQ_GSVS_RING_BASE:
	case SQ_ESTMP_RING_BASE:
	case SQ_GSTMP_RING_BASE:
	case SQ_PSTMP_RING_BASE:
	case SQ_VSTMP_RING_BASE:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
		break;
	case SQ_CONFIG:
		track->sq_config = radeon_get_ib_value(p, idx);
		break;
	case R_028800_DB_DEPTH_CONTROL:
		track->db_depth_control = radeon_get_ib_value(p, idx);
		track->db_dirty = true;
		break;
	case R_028010_DB_DEPTH_INFO:
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
		    radeon_cs_packet_next_is_pkt3_nop(p)) {
			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
			if (r) {
				dev_warn(p->dev, "bad SET_CONTEXT_REG "
					 "0x%04X\n", reg);
				return -EINVAL;
			}
			track->db_depth_info = radeon_get_ib_value(p, idx);
			ib[idx] &= C_028010_ARRAY_MODE;
			track->db_depth_info &= C_028010_ARRAY_MODE;
			if (reloc->tiling_flags & RADEON_TILING_MACRO) {
				ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1);
				track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1);
			} else {
				ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1);
				track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1);
			}
		} else {
			track->db_depth_info = radeon_get_ib_value(p, idx);
		}
		track->db_dirty = true;
		break;
	case R_028004_DB_DEPTH_VIEW:
		track->db_depth_view = radeon_get_ib_value(p, idx);
		track->db_dirty = true;
		break;
	case R_028000_DB_DEPTH_SIZE:
		track->db_depth_size = radeon_get_ib_value(p, idx);
		track->db_depth_size_idx = idx;
		track->db_dirty = true;
		break;
	case R_028AB0_VGT_STRMOUT_EN:
		track->vgt_strmout_en = radeon_get_ib_value(p, idx);
		track->streamout_dirty = true;
		break;
	case R_028B20_VGT_STRMOUT_BUFFER_EN:
		track->vgt_strmout_buffer_en = radeon_get_ib_value(p, idx);
		track->streamout_dirty = true;
		break;
	case VGT_STRMOUT_BUFFER_BASE_0:
	case VGT_STRMOUT_BUFFER_BASE_1:
	case VGT_STRMOUT_BUFFER_BASE_2:
	case VGT_STRMOUT_BUFFER_BASE_3:
		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16;
		track->vgt_strmout_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
		track->vgt_strmout_bo[tmp] = reloc->robj;
		track->vgt_strmout_bo_mc[tmp] = reloc->gpu_offset;
		track->streamout_dirty = true;
		break;
	case VGT_STRMOUT_BUFFER_SIZE_0:
	case VGT_STRMOUT_BUFFER_SIZE_1:
	case VGT_STRMOUT_BUFFER_SIZE_2:
	case VGT_STRMOUT_BUFFER_SIZE_3:
		tmp = (reg - VGT_STRMOUT_BUFFER_SIZE_0) / 16;
		/* size in register is DWs, convert to bytes */
		track->vgt_strmout_size[tmp] = radeon_get_ib_value(p, idx) * 4;
		track->streamout_dirty = true;
		break;
	case CP_COHER_BASE:
		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
		if (r) {
			dev_warn(p->dev, "missing reloc for CP_COHER_BASE "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
		break;
	case R_028238_CB_TARGET_MASK:
		track->cb_target_mask = radeon_get_ib_value(p, idx);
		track->cb_dirty = true;
		break;
	case R_02823C_CB_SHADER_MASK:
		track->cb_shader_mask = radeon_get_ib_value(p, idx);
		break;
	case R_028C04_PA_SC_AA_CONFIG:
		tmp = G_028C04_MSAA_NUM_SAMPLES(radeon_get_ib_value(p, idx));
		track->log_nsamples = tmp;
		track->nsamples = 1 << tmp;
		track->cb_dirty = true;
		break;
	case R_028808_CB_COLOR_CONTROL:
		tmp = G_028808_SPECIAL_OP(radeon_get_ib_value(p, idx));
		track->is_resolve = tmp == V_028808_SPECIAL_RESOLVE_BOX;
		track->cb_dirty = true;
		break;
	case R_0280A0_CB_COLOR0_INFO:
	case R_0280A4_CB_COLOR1_INFO:
	case R_0280A8_CB_COLOR2_INFO:
	case R_0280AC_CB_COLOR3_INFO:
	case R_0280B0_CB_COLOR4_INFO:
	case R_0280B4_CB_COLOR5_INFO:
	case R_0280B8_CB_COLOR6_INFO:
	case R_0280BC_CB_COLOR7_INFO:
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
		     radeon_cs_packet_next_is_pkt3_nop(p)) {
			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
			if (r) {
				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
				return -EINVAL;
			}
			tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
			track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
			if (reloc->tiling_flags & RADEON_TILING_MACRO) {
				ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1);
				track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1);
			} else if (reloc->tiling_flags & RADEON_TILING_MICRO) {
				ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1);
				track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1);
			}
		} else {
			tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
			track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
		}
		track->cb_dirty = true;
		break;
	case R_028080_CB_COLOR0_VIEW:
	case R_028084_CB_COLOR1_VIEW:
	case R_028088_CB_COLOR2_VIEW:
	case R_02808C_CB_COLOR3_VIEW:
	case R_028090_CB_COLOR4_VIEW:
	case R_028094_CB_COLOR5_VIEW:
	case R_028098_CB_COLOR6_VIEW:
	case R_02809C_CB_COLOR7_VIEW:
		tmp = (reg - R_028080_CB_COLOR0_VIEW) / 4;
		track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
		track->cb_dirty = true;
		break;
	case R_028060_CB_COLOR0_SIZE:
	case R_028064_CB_COLOR1_SIZE:
	case R_028068_CB_COLOR2_SIZE:
	case R_02806C_CB_COLOR3_SIZE:
	case R_028070_CB_COLOR4_SIZE:
	case R_028074_CB_COLOR5_SIZE:
	case R_028078_CB_COLOR6_SIZE:
	case R_02807C_CB_COLOR7_SIZE:
		tmp = (reg - R_028060_CB_COLOR0_SIZE) / 4;
		track->cb_color_size[tmp] = radeon_get_ib_value(p, idx);
		track->cb_color_size_idx[tmp] = idx;
		track->cb_dirty = true;
		break;
		/* These registers were added late; there is userspace
		 * which does provide a relocation for them but sets a
		 * 0 offset. In order to avoid breaking old userspace
		 * we detect this and set the address to point to the last
		 * CB_COLOR0_BASE. Note that if userspace doesn't set
		 * CB_COLOR0_BASE before these registers we will report
		 * an error. Old userspace always set CB_COLOR0_BASE
		 * before any of this.
		 */
	case R_0280E0_CB_COLOR0_FRAG:
	case R_0280E4_CB_COLOR1_FRAG:
	case R_0280E8_CB_COLOR2_FRAG:
	case R_0280EC_CB_COLOR3_FRAG:
	case R_0280F0_CB_COLOR4_FRAG:
	case R_0280F4_CB_COLOR5_FRAG:
	case R_0280F8_CB_COLOR6_FRAG:
	case R_0280FC_CB_COLOR7_FRAG:
		tmp = (reg - R_0280E0_CB_COLOR0_FRAG) / 4;
		if (!radeon_cs_packet_next_is_pkt3_nop(p)) {
			if (!track->cb_color_base_last[tmp]) {
				dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
				return -EINVAL;
			}
			track->cb_color_frag_bo[tmp] = track->cb_color_bo[tmp];
			track->cb_color_frag_offset[tmp] = track->cb_color_bo_offset[tmp];
			ib[idx] = track->cb_color_base_last[tmp];
		} else {
			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
			if (r) {
				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
				return -EINVAL;
			}
			track->cb_color_frag_bo[tmp] = reloc->robj;
			track->cb_color_frag_offset[tmp] = (u64)ib[idx] << 8;
			ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
		}
		if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
			track->cb_dirty = true;
		}
		break;
	case R_0280C0_CB_COLOR0_TILE:
	case R_0280C4_CB_COLOR1_TILE:
	case R_0280C8_CB_COLOR2_TILE:
	case R_0280CC_CB_COLOR3_TILE:
	case R_0280D0_CB_COLOR4_TILE:
	case R_0280D4_CB_COLOR5_TILE:
	case R_0280D8_CB_COLOR6_TILE:
	case R_0280DC_CB_COLOR7_TILE:
		tmp = (reg - R_0280C0_CB_COLOR0_TILE) / 4;
		if (!radeon_cs_packet_next_is_pkt3_nop(p)) {
			if (!track->cb_color_base_last[tmp]) {
				dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
				return -EINVAL;
			}
			track->cb_color_tile_bo[tmp] = track->cb_color_bo[tmp];
			track->cb_color_tile_offset[tmp] = track->cb_color_bo_offset[tmp];
			ib[idx] = track->cb_color_base_last[tmp];
		} else {
			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
			if (r) {
				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
				return -EINVAL;
			}
			track->cb_color_tile_bo[tmp] = reloc->robj;
			track->cb_color_tile_offset[tmp] = (u64)ib[idx] << 8;
			ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
		}
		if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
			track->cb_dirty = true;
		}
		break;
	case R_028100_CB_COLOR0_MASK:
	case R_028104_CB_COLOR1_MASK:
	case R_028108_CB_COLOR2_MASK:
	case R_02810C_CB_COLOR3_MASK:
	case R_028110_CB_COLOR4_MASK:
	case R_028114_CB_COLOR5_MASK:
	case R_028118_CB_COLOR6_MASK:
	case R_02811C_CB_COLOR7_MASK:
		tmp = (reg - R_028100_CB_COLOR0_MASK) / 4;
		track->cb_color_mask[tmp] = radeon_get_ib_value(p, idx);
		if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
			track->cb_dirty = true;
		}
		break;
	case CB_COLOR0_BASE:
	case CB_COLOR1_BASE:
	case CB_COLOR2_BASE:
	case CB_COLOR3_BASE:
	case CB_COLOR4_BASE:
	case CB_COLOR5_BASE:
	case CB_COLOR6_BASE:
	case CB_COLOR7_BASE:
		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = (reg - CB_COLOR0_BASE) / 4;
		track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
		track->cb_color_base_last[tmp] = ib[idx];
		track->cb_color_bo[tmp] = reloc->robj;
		track->cb_color_bo_mc[tmp] = reloc->gpu_offset;
		track->cb_dirty = true;
		break;
	case DB_DEPTH_BASE:
		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		track->db_offset = radeon_get_ib_value(p, idx) << 8;
		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
		track->db_bo = reloc->robj;
		track->db_bo_mc = reloc->gpu_offset;
		track->db_dirty = true;
		break;
	case DB_HTILE_DATA_BASE:
		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		track->htile_offset = radeon_get_ib_value(p, idx) << 8;
		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
		track->htile_bo = reloc->robj;
		track->db_dirty = true;
		break;
	case DB_HTILE_SURFACE:
		track->htile_surface = radeon_get_ib_value(p, idx);
		/* force 8x8 htile width and height */
		ib[idx] |= 3;
		track->db_dirty = true;
		break;
	case SQ_PGM_START_FS:
	case SQ_PGM_START_ES:
	case SQ_PGM_START_VS:
	case SQ_PGM_START_GS:
	case SQ_PGM_START_PS:
	case SQ_ALU_CONST_CACHE_GS_0:
	case SQ_ALU_CONST_CACHE_GS_1:
	case SQ_ALU_CONST_CACHE_GS_2:
	case SQ_ALU_CONST_CACHE_GS_3:
	case SQ_ALU_CONST_CACHE_GS_4:
	case SQ_ALU_CONST_CACHE_GS_5:
	case SQ_ALU_CONST_CACHE_GS_6:
	case SQ_ALU_CONST_CACHE_GS_7:
	case SQ_ALU_CONST_CACHE_GS_8:
	case SQ_ALU_CONST_CACHE_GS_9:
	case SQ_ALU_CONST_CACHE_GS_10:
	case SQ_ALU_CONST_CACHE_GS_11:
	case SQ_ALU_CONST_CACHE_GS_12:
	case SQ_ALU_CONST_CACHE_GS_13:
	case SQ_ALU_CONST_CACHE_GS_14:
	case SQ_ALU_CONST_CACHE_GS_15:
	case SQ_ALU_CONST_CACHE_PS_0:
	case SQ_ALU_CONST_CACHE_PS_1:
	case SQ_ALU_CONST_CACHE_PS_2:
	case SQ_ALU_CONST_CACHE_PS_3:
	case SQ_ALU_CONST_CACHE_PS_4:
	case SQ_ALU_CONST_CACHE_PS_5:
	case SQ_ALU_CONST_CACHE_PS_6:
	case SQ_ALU_CONST_CACHE_PS_7:
	case SQ_ALU_CONST_CACHE_PS_8:
	case SQ_ALU_CONST_CACHE_PS_9:
	case SQ_ALU_CONST_CACHE_PS_10:
	case SQ_ALU_CONST_CACHE_PS_11:
	case SQ_ALU_CONST_CACHE_PS_12:
	case SQ_ALU_CONST_CACHE_PS_13:
	case SQ_ALU_CONST_CACHE_PS_14:
	case SQ_ALU_CONST_CACHE_PS_15:
	case SQ_ALU_CONST_CACHE_VS_0:
	case SQ_ALU_CONST_CACHE_VS_1:
	case SQ_ALU_CONST_CACHE_VS_2:
	case SQ_ALU_CONST_CACHE_VS_3:
	case SQ_ALU_CONST_CACHE_VS_4:
	case SQ_ALU_CONST_CACHE_VS_5:
	case SQ_ALU_CONST_CACHE_VS_6:
	case SQ_ALU_CONST_CACHE_VS_7:
	case SQ_ALU_CONST_CACHE_VS_8:
	case SQ_ALU_CONST_CACHE_VS_9:
	case SQ_ALU_CONST_CACHE_VS_10:
	case SQ_ALU_CONST_CACHE_VS_11:
	case SQ_ALU_CONST_CACHE_VS_12:
	case SQ_ALU_CONST_CACHE_VS_13:
	case SQ_ALU_CONST_CACHE_VS_14:
	case SQ_ALU_CONST_CACHE_VS_15:
		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
		break;
	case SX_MEMORY_EXPORT_BASE:
		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
		if (r) {
			dev_warn(p->dev, "bad SET_CONFIG_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
		break;
	case SX_MISC:
		track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0;
		break;
	default:
		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
		return -EINVAL;
	}
	return 0;
}

unsigned r600_mip_minify(unsigned size, unsigned level)
{
	unsigned val;

	val = max(1U, size >> level);
	if (level > 0)
		val = roundup_pow_of_two(val);
	return val;
}
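/*
 * For example (illustrative values): r600_mip_minify(100, 0) returns 100,
 * while r600_mip_minify(100, 2) returns roundup_pow_of_two(25) = 32,
 * since non-zero mip levels are rounded up to a power of two.
 */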

static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel,
			      unsigned w0, unsigned h0, unsigned d0, unsigned nsamples, unsigned format,
			      unsigned block_align, unsigned height_align, unsigned base_align,
			      unsigned *l0_size, unsigned *mipmap_size)
{
	unsigned offset, i, level;
	unsigned width, height, depth, size;
	unsigned blocksize;
	unsigned nbx, nby;
	unsigned nlevels = llevel - blevel + 1;

	*l0_size = -1;
	blocksize = r600_fmt_get_blocksize(format);

	w0 = r600_mip_minify(w0, 0);
	h0 = r600_mip_minify(h0, 0);
	d0 = r600_mip_minify(d0, 0);
	for (i = 0, offset = 0, level = blevel; i < nlevels; i++, level++) {
		width = r600_mip_minify(w0, i);
		nbx = r600_fmt_get_nblocksx(format, width);

		nbx = round_up(nbx, block_align);

		height = r600_mip_minify(h0, i);
		nby = r600_fmt_get_nblocksy(format, height);
		nby = round_up(nby, height_align);

		depth = r600_mip_minify(d0, i);

		size = nbx * nby * blocksize * nsamples;
		if (nfaces)
			size *= nfaces;
		else
			size *= depth;

		if (i == 0)
			*l0_size = size;

		if (i == 0 || i == 1)
			offset = round_up(offset, base_align);

		offset += size;
	}
	*mipmap_size = offset;
	if (llevel == 0)
		*mipmap_size = *l0_size;
	if (!blevel)
		*mipmap_size -= *l0_size;
}

/**
 * r600_check_texture_resource() - check if a texture resource is valid
 * @p: parser structure holding parsing context
 * @idx: index into the cs buffer
 * @texture: texture's bo structure
 * @mipmap: mipmap's bo structure
 *
 * This function will check that the resource has valid fields and that
 * the texture and mipmap bo objects are big enough to cover this resource.
 */
static int r600_check_texture_resource(struct radeon_cs_parser *p,  u32 idx,
					      struct radeon_bo *texture,
					      struct radeon_bo *mipmap,
					      u64 base_offset,
					      u64 mip_offset,
					      u32 tiling_flags)
{
	struct r600_cs_track *track = p->track;
	u32 dim, nfaces, llevel, blevel, w0, h0, d0;
	u32 word0, word1, l0_size, mipmap_size, word2, word3, word4, word5;
	u32 height_align, pitch, pitch_align, depth_align;
	u32 barray, larray;
	u64 base_align;
	struct array_mode_checker array_check;
	u32 format;
	bool is_array;

	/* on legacy kernel we don't perform advanced check */
	if (p->rdev == NULL)
		return 0;

	/* convert to bytes */
	base_offset <<= 8;
	mip_offset <<= 8;

	word0 = radeon_get_ib_value(p, idx + 0);
	if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
		if (tiling_flags & RADEON_TILING_MACRO)
			word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
		else if (tiling_flags & RADEON_TILING_MICRO)
			word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
	}
	word1 = radeon_get_ib_value(p, idx + 1);
	word2 = radeon_get_ib_value(p, idx + 2) << 8;
	word3 = radeon_get_ib_value(p, idx + 3) << 8;
	word4 = radeon_get_ib_value(p, idx + 4);
	word5 = radeon_get_ib_value(p, idx + 5);
	dim = G_038000_DIM(word0);
	w0 = G_038000_TEX_WIDTH(word0) + 1;
	pitch = (G_038000_PITCH(word0) + 1) * 8;
	h0 = G_038004_TEX_HEIGHT(word1) + 1;
	d0 = G_038004_TEX_DEPTH(word1);
	format = G_038004_DATA_FORMAT(word1);
	blevel = G_038010_BASE_LEVEL(word4);
	llevel = G_038014_LAST_LEVEL(word5);
	/* pitch in texels */
	array_check.array_mode = G_038000_TILE_MODE(word0);
	array_check.group_size = track->group_size;
	array_check.nbanks = track->nbanks;
	array_check.npipes = track->npipes;
	array_check.nsamples = 1;
	array_check.blocksize = r600_fmt_get_blocksize(format);
	nfaces = 1;
	is_array = false;
	switch (dim) {
	case V_038000_SQ_TEX_DIM_1D:
	case V_038000_SQ_TEX_DIM_2D:
	case V_038000_SQ_TEX_DIM_3D:
		break;
	case V_038000_SQ_TEX_DIM_CUBEMAP:
		if (p->family >= CHIP_RV770)
			nfaces = 8;
		else
			nfaces = 6;
		break;
	case V_038000_SQ_TEX_DIM_1D_ARRAY:
	case V_038000_SQ_TEX_DIM_2D_ARRAY:
		is_array = true;
		break;
	case V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA:
		is_array = true;
		/* fall through */
	case V_038000_SQ_TEX_DIM_2D_MSAA:
		array_check.nsamples = 1 << llevel;
		llevel = 0;
		break;
	default:
		dev_warn(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0));
		return -EINVAL;
	}
	if (!r600_fmt_is_valid_texture(format, p->family)) {
		dev_warn(p->dev, "%s:%d texture invalid format %d\n",
			 __func__, __LINE__, format);
		return -EINVAL;
	}

	if (r600_get_array_mode_alignment(&array_check,
					  &pitch_align, &height_align, &depth_align, &base_align)) {
		dev_warn(p->dev, "%s:%d tex array mode (%d) invalid\n",
			 __func__, __LINE__, G_038000_TILE_MODE(word0));
		return -EINVAL;
	}

	/* XXX check height as well... */

	if (!IS_ALIGNED(pitch, pitch_align)) {
		dev_warn(p->dev, "%s:%d tex pitch (%d, 0x%x, %d) invalid\n",
			 __func__, __LINE__, pitch, pitch_align, G_038000_TILE_MODE(word0));
		return -EINVAL;
	}
	if (!IS_ALIGNED(base_offset, base_align)) {
		dev_warn(p->dev, "%s:%d tex base offset (0x%llx, 0x%llx, %d) invalid\n",
			 __func__, __LINE__, base_offset, base_align, G_038000_TILE_MODE(word0));
		return -EINVAL;
	}
	if (!IS_ALIGNED(mip_offset, base_align)) {
		dev_warn(p->dev, "%s:%d tex mip offset (0x%llx, 0x%llx, %d) invalid\n",
			 __func__, __LINE__, mip_offset, base_align, G_038000_TILE_MODE(word0));
		return -EINVAL;
	}

	if (blevel > llevel) {
		dev_warn(p->dev, "texture blevel %d > llevel %d\n",
			 blevel, llevel);
	}
	if (is_array) {
		barray = G_038014_BASE_ARRAY(word5);
		larray = G_038014_LAST_ARRAY(word5);

		nfaces = larray - barray + 1;
	}
	r600_texture_size(nfaces, blevel, llevel, w0, h0, d0, array_check.nsamples, format,
			  pitch_align, height_align, base_align,
			  &l0_size, &mipmap_size);
	/* using get ib will give us the offset into the texture bo */
	if ((l0_size + word2) > radeon_bo_size(texture)) {
		dev_warn(p->dev, "texture bo too small ((%d %d) (%d %d) %d %d %d -> %d have %ld)\n",
			 w0, h0, pitch_align, height_align,
			 array_check.array_mode, format, word2,
			 l0_size, radeon_bo_size(texture));
		dev_warn(p->dev, "alignments %d %d %d %lld\n", pitch, pitch_align, height_align, base_align);
		return -EINVAL;
	}
	/* using get ib will give us the offset into the mipmap bo */
	if ((mipmap_size + word3) > radeon_bo_size(mipmap)) {
		/*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n",
		  w0, h0, format, blevel, nlevels, word3, mipmap_size, radeon_bo_size(texture));*/
	}
	return 0;
}

static bool r600_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
{
	u32 m, i;

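	/* r600_reg_safe_bm has one bit per dword register: reg >> 7 selects
	 * the 32-bit word (each word covers 128 bytes of register space) and
	 * (reg >> 2) & 31 selects the bit.  A clear bit means the register
	 * may be written directly from the command stream.
	 */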
	i = (reg >> 7);
	if (i >= ARRAY_SIZE(r600_reg_safe_bm)) {
		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
		return false;
	}
	m = 1 << ((reg >> 2) & 31);
	if (!(r600_reg_safe_bm[i] & m))
		return true;
	dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
	return false;
}

static int r600_packet3_check(struct radeon_cs_parser *p,
				struct radeon_cs_packet *pkt)
{
	struct radeon_bo_list *reloc;
	struct r600_cs_track *track;
	volatile u32 *ib;
	unsigned idx;
	unsigned i;
	unsigned start_reg, end_reg, reg;
	int r;
	u32 idx_value;

	track = (struct r600_cs_track *)p->track;
	ib = p->ib.ptr;
	idx = pkt->idx + 1;
	idx_value = radeon_get_ib_value(p, idx);

	switch (pkt->opcode) {
	case PACKET3_SET_PREDICATION:
	{
		int pred_op;
		int tmp;
		uint64_t offset;

		if (pkt->count != 1) {
			DRM_ERROR("bad SET PREDICATION\n");
			return -EINVAL;
		}

		tmp = radeon_get_ib_value(p, idx + 1);
		pred_op = (tmp >> 16) & 0x7;

		/* for the clear predicate operation */
		if (pred_op == 0)
			return 0;

		if (pred_op > 2) {
			DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op);
			return -EINVAL;
		}

		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
		if (r) {
			DRM_ERROR("bad SET PREDICATION\n");
			return -EINVAL;
		}

		offset = reloc->gpu_offset +
		         (idx_value & 0xfffffff0) +
		         ((u64)(tmp & 0xff) << 32);

		ib[idx + 0] = offset;
		ib[idx + 1] = (tmp & 0xffffff00) | (upper_32_bits(offset) & 0xff);
	}
	break;

	case PACKET3_START_3D_CMDBUF:
		if (p->family >= CHIP_RV770 || pkt->count) {
			DRM_ERROR("bad START_3D\n");
			return -EINVAL;
		}
		break;
	case PACKET3_CONTEXT_CONTROL:
		if (pkt->count != 1) {
			DRM_ERROR("bad CONTEXT_CONTROL\n");
			return -EINVAL;
		}
		break;
	case PACKET3_INDEX_TYPE:
	case PACKET3_NUM_INSTANCES:
		if (pkt->count) {
			DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES\n");
			return -EINVAL;
		}
		break;
	case PACKET3_DRAW_INDEX:
	{
		uint64_t offset;
		if (pkt->count != 3) {
			DRM_ERROR("bad DRAW_INDEX\n");
			return -EINVAL;
		}
		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
		if (r) {
			DRM_ERROR("bad DRAW_INDEX\n");
			return -EINVAL;
		}

		offset = reloc->gpu_offset +
		         idx_value +
		         ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);

		ib[idx+0] = offset;
		ib[idx+1] = upper_32_bits(offset) & 0xff;

		r = r600_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	}
	case PACKET3_DRAW_INDEX_AUTO:
		if (pkt->count != 1) {
			DRM_ERROR("bad DRAW_INDEX_AUTO\n");
			return -EINVAL;
		}
		r = r600_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_IMMD_BE:
	case PACKET3_DRAW_INDEX_IMMD:
		if (pkt->count < 2) {
			DRM_ERROR("bad DRAW_INDEX_IMMD\n");
			return -EINVAL;
		}
		r = r600_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_WAIT_REG_MEM:
		if (pkt->count != 5) {
			DRM_ERROR("bad WAIT_REG_MEM\n");
			return -EINVAL;
		}
		/* bit 4 is reg (0) or mem (1) */
		if (idx_value & 0x10) {
			uint64_t offset;

			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
			if (r) {
				DRM_ERROR("bad WAIT_REG_MEM\n");
				return -EINVAL;
			}

			offset = reloc->gpu_offset +
			         (radeon_get_ib_value(p, idx+1) & 0xfffffff0) +
			         ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);

			ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffff0);
			ib[idx+2] = upper_32_bits(offset) & 0xff;
		} else if (idx_value & 0x100) {
			DRM_ERROR("cannot use PFP on REG wait\n");
			return -EINVAL;
		}
		break;
	case PACKET3_CP_DMA:
	{
		u32 command, size;
		u64 offset, tmp;
		if (pkt->count != 4) {
			DRM_ERROR("bad CP DMA\n");
			return -EINVAL;
		}
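		/* COMMAND dword: bits [20:0] hold the transfer size, which is
		 * checked in bytes against the src and dst bos below */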
		command = radeon_get_ib_value(p, idx+4);
		size = command & 0x1fffff;
		if (command & PACKET3_CP_DMA_CMD_SAS) {
			/* src address space is register */
			DRM_ERROR("CP DMA SAS not supported\n");
			return -EINVAL;
		} else {
			if (command & PACKET3_CP_DMA_CMD_SAIC) {
				DRM_ERROR("CP DMA SAIC only supported for registers\n");
				return -EINVAL;
			}
			/* src address space is memory */
			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
			if (r) {
				DRM_ERROR("bad CP DMA SRC\n");
				return -EINVAL;
			}

			tmp = radeon_get_ib_value(p, idx) +
				((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);

			offset = reloc->gpu_offset + tmp;

			if ((tmp + size) > radeon_bo_size(reloc->robj)) {
				dev_warn(p->dev, "CP DMA src buffer too small (%llu %lu)\n",
					 tmp + size, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}

			ib[idx] = offset;
			ib[idx+1] = (ib[idx+1] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
		}
		if (command & PACKET3_CP_DMA_CMD_DAS) {
			/* dst address space is register */
			DRM_ERROR("CP DMA DAS not supported\n");
			return -EINVAL;
		} else {
			/* dst address space is memory */
			if (command & PACKET3_CP_DMA_CMD_DAIC) {
				DRM_ERROR("CP DMA DAIC only supported for registers\n");
				return -EINVAL;
			}
			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
			if (r) {
				DRM_ERROR("bad CP DMA DST\n");
				return -EINVAL;
			}

			tmp = radeon_get_ib_value(p, idx+2) +
				((u64)(radeon_get_ib_value(p, idx+3) & 0xff) << 32);

			offset = reloc->gpu_offset + tmp;

			if ((tmp + size) > radeon_bo_size(reloc->robj)) {
				dev_warn(p->dev, "CP DMA dst buffer too small (%llu %lu)\n",
					 tmp + size, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}

			ib[idx+2] = offset;
			ib[idx+3] = upper_32_bits(offset) & 0xff;
		}
		break;
	}
	case PACKET3_SURFACE_SYNC:
		if (pkt->count != 3) {
			DRM_ERROR("bad SURFACE_SYNC\n");
			return -EINVAL;
		}
		/* 0xffffffff/0x0 is flush all cache flag */
		if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
		    radeon_get_ib_value(p, idx + 2) != 0) {
			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
			if (r) {
				DRM_ERROR("bad SURFACE_SYNC\n");
				return -EINVAL;
			}
			ib[idx+2] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
		}
		break;
	case PACKET3_EVENT_WRITE:
		if (pkt->count != 2 && pkt->count != 0) {
			DRM_ERROR("bad EVENT_WRITE\n");
			return -EINVAL;
		}
		if (pkt->count) {
			uint64_t offset;

			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
			if (r) {
				DRM_ERROR("bad EVENT_WRITE\n");
				return -EINVAL;
			}
			offset = reloc->gpu_offset +
			         (radeon_get_ib_value(p, idx+1) & 0xfffffff8) +
			         ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);

			ib[idx+1] = offset & 0xfffffff8;
			ib[idx+2] = upper_32_bits(offset) & 0xff;
		}
		break;
	case PACKET3_EVENT_WRITE_EOP:
	{
		uint64_t offset;

		if (pkt->count != 4) {
			DRM_ERROR("bad EVENT_WRITE_EOP\n");
			return -EINVAL;
		}
		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
		if (r) {
			DRM_ERROR("bad EVENT_WRITE\n");
			return -EINVAL;
		}

		offset = reloc->gpu_offset +
		         (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
		         ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);

		ib[idx+1] = offset & 0xfffffffc;
		ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
		break;
	}
	case PACKET3_SET_CONFIG_REG:
		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONFIG_REG_OFFSET) ||
		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			r = r600_cs_check_reg(p, reg, idx+1+i);
			if (r)
				return r;
		}
		break;
	case PACKET3_SET_CONTEXT_REG:
		start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONTEXT_REG_OFFSET) ||
		    (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
		    (end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			r = r600_cs_check_reg(p, reg, idx+1+i);
			if (r)
				return r;
		}
		break;
	case PACKET3_SET_RESOURCE:
		if (pkt->count % 7) {
			DRM_ERROR("bad SET_RESOURCE\n");
			return -EINVAL;
		}
		start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_RESOURCE_OFFSET) ||
		    (start_reg >= PACKET3_SET_RESOURCE_END) ||
		    (end_reg >= PACKET3_SET_RESOURCE_END)) {
			DRM_ERROR("bad SET_RESOURCE\n");
			return -EINVAL;
		}
		for (i = 0; i < (pkt->count / 7); i++) {
			struct radeon_bo *texture, *mipmap;
			u32 size, offset, base_offset, mip_offset;

			switch (G__SQ_VTX_CONSTANT_TYPE(radeon_get_ib_value(p, idx+(i*7)+6+1))) {
			case SQ_TEX_VTX_VALID_TEXTURE:
				/* tex base */
				r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE\n");
					return -EINVAL;
				}
				base_offset = (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
				if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
					if (reloc->tiling_flags & RADEON_TILING_MACRO)
						ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
					else if (reloc->tiling_flags & RADEON_TILING_MICRO)
						ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
				}
				texture = reloc->robj;
				/* tex mip base */
				r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE\n");
					return -EINVAL;
				}
				mip_offset = (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
				mipmap = reloc->robj;
				r = r600_check_texture_resource(p,  idx+(i*7)+1,
								texture, mipmap,
								base_offset + radeon_get_ib_value(p, idx+1+(i*7)+2),
								mip_offset + radeon_get_ib_value(p, idx+1+(i*7)+3),
								reloc->tiling_flags);
				if (r)
					return r;
				ib[idx+1+(i*7)+2] += base_offset;
				ib[idx+1+(i*7)+3] += mip_offset;
				break;
			case SQ_TEX_VTX_VALID_BUFFER:
			{
				uint64_t offset64;
				/* vtx base */
				r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE\n");
					return -EINVAL;
				}
				offset = radeon_get_ib_value(p, idx+1+(i*7)+0);
				size = radeon_get_ib_value(p, idx+1+(i*7)+1) + 1;
				if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
					/* force size to size of the buffer */
					dev_warn(p->dev, "vbo resource seems too big (%d) for the bo (%ld)\n",
						 size + offset, radeon_bo_size(reloc->robj));
					ib[idx+1+(i*7)+1] = radeon_bo_size(reloc->robj) - offset;
				}

				offset64 = reloc->gpu_offset + offset;
				ib[idx+1+(i*7)+0] = offset64;
				ib[idx+1+(i*7)+2] = (ib[idx+1+(i*7)+2] & 0xffffff00) |
						    (upper_32_bits(offset64) & 0xff);
				break;
			}
			case SQ_TEX_VTX_INVALID_TEXTURE:
			case SQ_TEX_VTX_INVALID_BUFFER:
			default:
				DRM_ERROR("bad SET_RESOURCE\n");
				return -EINVAL;
			}
		}
		break;
	case PACKET3_SET_ALU_CONST:
		if (track->sq_config & DX9_CONSTS) {
			start_reg = (idx_value << 2) + PACKET3_SET_ALU_CONST_OFFSET;
			end_reg = 4 * pkt->count + start_reg - 4;
			if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) ||
			    (start_reg >= PACKET3_SET_ALU_CONST_END) ||
			    (end_reg >= PACKET3_SET_ALU_CONST_END)) {
				DRM_ERROR("bad SET_ALU_CONST\n");
				return -EINVAL;
			}
		}
		break;
	case PACKET3_SET_BOOL_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_BOOL_CONST_OFFSET) ||
		    (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
		    (end_reg >= PACKET3_SET_BOOL_CONST_END)) {
			DRM_ERROR("bad SET_BOOL_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_LOOP_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_LOOP_CONST_OFFSET) ||
		    (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
		    (end_reg >= PACKET3_SET_LOOP_CONST_END)) {
			DRM_ERROR("bad SET_LOOP_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_CTL_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CTL_CONST_OFFSET) ||
		    (start_reg >= PACKET3_SET_CTL_CONST_END) ||
		    (end_reg >= PACKET3_SET_CTL_CONST_END)) {
			DRM_ERROR("bad SET_CTL_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_SAMPLER:
		if (pkt->count % 3) {
			DRM_ERROR("bad SET_SAMPLER\n");
			return -EINVAL;
		}
		start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_SAMPLER_OFFSET) ||
		    (start_reg >= PACKET3_SET_SAMPLER_END) ||
		    (end_reg >= PACKET3_SET_SAMPLER_END)) {
			DRM_ERROR("bad SET_SAMPLER\n");
			return -EINVAL;
		}
		break;
	case PACKET3_STRMOUT_BASE_UPDATE:
		/* RS780 and RS880 also need this */
		if (p->family < CHIP_RS780) {
			DRM_ERROR("STRMOUT_BASE_UPDATE only supported on 7xx\n");
			return -EINVAL;
		}
		if (pkt->count != 1) {
			DRM_ERROR("bad STRMOUT_BASE_UPDATE packet count\n");
			return -EINVAL;
		}
		if (idx_value > 3) {
			DRM_ERROR("bad STRMOUT_BASE_UPDATE index\n");
			return -EINVAL;
		}
		{
			u64 offset;

			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
			if (r) {
				DRM_ERROR("bad STRMOUT_BASE_UPDATE reloc\n");
				return -EINVAL;
			}

			if (reloc->robj != track->vgt_strmout_bo[idx_value]) {
				DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo does not match\n");
				return -EINVAL;
			}

			offset = radeon_get_ib_value(p, idx+1) << 8;
			if (offset != track->vgt_strmout_bo_offset[idx_value]) {
				DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo offset does not match: 0x%llx, 0x%x\n",
					  offset, track->vgt_strmout_bo_offset[idx_value]);
				return -EINVAL;
			}

			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad STRMOUT_BASE_UPDATE bo too small: 0x%llx, 0x%lx\n",
					  offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			ib[idx+1] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
		}
		break;
	case PACKET3_SURFACE_BASE_UPDATE:
		if (p->family >= CHIP_RV770 || p->family == CHIP_R600) {
			DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
			return -EINVAL;
		}
		if (pkt->count) {
			DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
			return -EINVAL;
		}
		break;
	case PACKET3_STRMOUT_BUFFER_UPDATE:
		if (pkt->count != 4) {
			DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (invalid count)\n");
			return -EINVAL;
		}
		/* Updating memory at DST_ADDRESS. */
		if (idx_value & 0x1) {
			u64 offset;
			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
			if (r) {
				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+1);
			offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%llx, 0x%lx\n",
					  offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			offset += reloc->gpu_offset;
			ib[idx+1] = offset;
			ib[idx+2] = upper_32_bits(offset) & 0xff;
		}
		/* Reading data from SRC_ADDRESS. */
		if (((idx_value >> 1) & 0x3) == 2) {
			u64 offset;
			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
			if (r) {
				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+3);
			offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%llx, 0x%lx\n",
					  offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			offset += reloc->gpu_offset;
			ib[idx+3] = offset;
			ib[idx+4] = upper_32_bits(offset) & 0xff;
		}
		break;
	case PACKET3_MEM_WRITE:
	{
		u64 offset;

		if (pkt->count != 3) {
			DRM_ERROR("bad MEM_WRITE (invalid count)\n");
			return -EINVAL;
		}
		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
		if (r) {
			DRM_ERROR("bad MEM_WRITE (missing reloc)\n");
			return -EINVAL;
		}
		offset = radeon_get_ib_value(p, idx+0);
		offset += ((u64)(radeon_get_ib_value(p, idx+1) & 0xff)) << 32UL;
		if (offset & 0x7) {
			DRM_ERROR("bad MEM_WRITE (address not qwords aligned)\n");
			return -EINVAL;
		}
		if ((offset + 8) > radeon_bo_size(reloc->robj)) {
			DRM_ERROR("bad MEM_WRITE bo too small: 0x%llx, 0x%lx\n",
				  offset + 8, radeon_bo_size(reloc->robj));
			return -EINVAL;
		}
		offset += reloc->gpu_offset;
		ib[idx+0] = offset;
		ib[idx+1] = upper_32_bits(offset) & 0xff;
		break;
	}
	case PACKET3_COPY_DW:
		if (pkt->count != 4) {
			DRM_ERROR("bad COPY_DW (invalid count)\n");
			return -EINVAL;
		}
		if (idx_value & 0x1) {
			u64 offset;
			/* SRC is memory. */
			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
			if (r) {
				DRM_ERROR("bad COPY_DW (missing src reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+1);
			offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad COPY_DW src bo too small: 0x%llx, 0x%lx\n",
					  offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			offset += reloc->gpu_offset;
			ib[idx+1] = offset;
			ib[idx+2] = upper_32_bits(offset) & 0xff;
		} else {
			/* SRC is a reg. */
			reg = radeon_get_ib_value(p, idx+1) << 2;
			if (!r600_is_safe_reg(p, reg, idx+1))
				return -EINVAL;
		}
		if (idx_value & 0x2) {
			u64 offset;
			/* DST is memory. */
			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
			if (r) {
				DRM_ERROR("bad COPY_DW (missing dst reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+3);
			offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad COPY_DW dst bo too small: 0x%llx, 0x%lx\n",
					  offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			offset += reloc->gpu_offset;
			ib[idx+3] = offset;
			ib[idx+4] = upper_32_bits(offset) & 0xff;
		} else {
			/* DST is a reg. */
			reg = radeon_get_ib_value(p, idx+3) << 2;
			if (!r600_is_safe_reg(p, reg, idx+3))
				return -EINVAL;
		}
		break;
	case PACKET3_NOP:
		break;
	default:
		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}

int r600_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;
	struct r600_cs_track *track;
	int r;

	if (p->track == NULL) {
		/* initialize tracker, we are in kms */
		track = kzalloc(sizeof(*track), GFP_KERNEL);
		if (track == NULL)
			return -ENOMEM;
		r600_cs_track_init(track);
		if (p->rdev->family < CHIP_RV770) {
			track->npipes = p->rdev->config.r600.tiling_npipes;
			track->nbanks = p->rdev->config.r600.tiling_nbanks;
			track->group_size = p->rdev->config.r600.tiling_group_size;
		} else if (p->rdev->family <= CHIP_RV740) {
			track->npipes = p->rdev->config.rv770.tiling_npipes;
			track->nbanks = p->rdev->config.rv770.tiling_nbanks;
			track->group_size = p->rdev->config.rv770.tiling_group_size;
		}
		p->track = track;
	}
	do {
		r = radeon_cs_packet_parse(p, &pkt, p->idx);
		if (r) {
			kfree(p->track);
			p->track = NULL;
			return r;
		}
		p->idx += pkt.count + 2;
		switch (pkt.type) {
		case RADEON_PACKET_TYPE0:
			r = r600_cs_parse_packet0(p, &pkt);
			break;
		case RADEON_PACKET_TYPE2:
			break;
		case RADEON_PACKET_TYPE3:
			r = r600_packet3_check(p, &pkt);
			break;
		default:
			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
			kfree(p->track);
			p->track = NULL;
			return -EINVAL;
		}
		if (r) {
			kfree(p->track);
			p->track = NULL;
			return r;
		}
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
#if 0
	for (r = 0; r < p->ib.length_dw; r++) {
		printk(KERN_INFO "%05d  0x%08X\n", r, p->ib.ptr[r]);
		mdelay(1);
	}
#endif
	kfree(p->track);
	p->track = NULL;
	return 0;
}

#ifdef CONFIG_DRM_RADEON_UMS

/**
 * r600_cs_parser_fini() - clean parser states
 * @parser:	parser structure holding parsing context.
 * @error:	error number
 *
 * If error is set then unvalidate buffer, otherwise just free memory
 * used by parsing context.
 **/
static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
{
	unsigned i;

	kfree(parser->relocs);
	for (i = 0; i < parser->nchunks; i++)
		drm_free_large(parser->chunks[i].kdata);
	kfree(parser->chunks);
	kfree(parser->chunks_array);
}

static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p)
{
	if (p->chunk_relocs_idx == -1) {
		return 0;
	}
	p->relocs = kzalloc(sizeof(struct radeon_bo_list), GFP_KERNEL);
	if (p->relocs == NULL) {
		return -ENOMEM;
	}
	return 0;
}

int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
			unsigned family, u32 *ib, int *l)
{
	struct radeon_cs_parser parser;
	struct radeon_cs_chunk *ib_chunk;
	struct r600_cs_track *track;
	int r;

	/* initialize tracker */
	track = kzalloc(sizeof(*track), GFP_KERNEL);
	if (track == NULL)
		return -ENOMEM;
	r600_cs_track_init(track);
	r600_cs_legacy_get_tiling_conf(dev, &track->npipes, &track->nbanks, &track->group_size);
	/* initialize parser */
	memset(&parser, 0, sizeof(struct radeon_cs_parser));
	parser.filp = filp;
	parser.dev = &dev->pdev->dev;
	parser.rdev = NULL;
	parser.family = family;
	parser.track = track;
	parser.ib.ptr = ib;
	r = radeon_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	r = r600_cs_parser_relocs_legacy(&parser);
	if (r) {
		DRM_ERROR("Failed to parse relocation !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	/* Copy the packet into the IB, the parser will read from the
	 * input memory (cached) and write to the IB (which can be
	 * uncached). */
	ib_chunk = &parser.chunks[parser.chunk_ib_idx];
	parser.ib.length_dw = ib_chunk->length_dw;
	*l = parser.ib.length_dw;
	if (copy_from_user(ib, ib_chunk->user_ptr, ib_chunk->length_dw * 4)) {
		r = -EFAULT;
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	r = r600_cs_parse(&parser);
	if (r) {
		DRM_ERROR("Invalid command stream !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	r600_cs_parser_fini(&parser, r);
	return r;
}

void r600_cs_legacy_init(void)
{
	r600_nomm = 1;
}

#endif

/*
 *  DMA
 */
/**
 * r600_dma_cs_next_reloc() - parse next reloc
 * @p:		parser structure holding parsing context.
 * @cs_reloc:		reloc information
 *
 * Return the next reloc, do bo validation and compute
 * GPU offset using the provided start.
 **/
int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
			   struct radeon_bo_list **cs_reloc)
{
	struct radeon_cs_chunk *relocs_chunk;
	unsigned idx;

	*cs_reloc = NULL;
	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	idx = p->dma_reloc_idx;
	if (idx >= p->nrelocs) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, p->nrelocs);
		return -EINVAL;
	}
	*cs_reloc = &p->relocs[idx];
	p->dma_reloc_idx++;
	return 0;
}

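/* DMA packet header fields: [31:28] opcode, [23] tiled flag, [15:0] dword count */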
#define GET_DMA_CMD(h) (((h) & 0xf0000000) >> 28)
#define GET_DMA_COUNT(h) ((h) & 0x0000ffff)
#define GET_DMA_T(h) (((h) & 0x00800000) >> 23)

/**
 * r600_dma_cs_parse() - parse the DMA IB
 * @p:		parser structure holding parsing context.
 *
 * Parses the DMA IB from the CS ioctl and updates
 * the GPU addresses based on the reloc information and
 * checks for errors. (R6xx-R7xx)
 * Returns 0 for success and an error on failure.
 **/
int r600_dma_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
	struct radeon_bo_list *src_reloc, *dst_reloc;
	u32 header, cmd, count, tiled;
	volatile u32 *ib = p->ib.ptr;
	u32 idx, idx_value;
	u64 src_offset, dst_offset;
	int r;

	do {
		if (p->idx >= ib_chunk->length_dw) {
			DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
				  p->idx, ib_chunk->length_dw);
			return -EINVAL;
		}
		idx = p->idx;
		header = radeon_get_ib_value(p, idx);
		cmd = GET_DMA_CMD(header);
		count = GET_DMA_COUNT(header);
		tiled = GET_DMA_T(header);

		switch (cmd) {
		case DMA_PACKET_WRITE:
			r = r600_dma_cs_next_reloc(p, &dst_reloc);
			if (r) {
				DRM_ERROR("bad DMA_PACKET_WRITE\n");
				return -EINVAL;
			}
			if (tiled) {
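				/* tiled writes carry the dst address in 256-byte
				 * units, so the bo offset is applied >> 8 */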
				dst_offset = radeon_get_ib_value(p, idx+1);
				dst_offset <<= 8;

				ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
				p->idx += count + 5;
			} else {
				dst_offset = radeon_get_ib_value(p, idx+1);
				dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;

				ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
				ib[idx+2] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
				p->idx += count + 3;
			}
			if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
				dev_warn(p->dev, "DMA write buffer too small (%llu %lu)\n",
					 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
				return -EINVAL;
			}
			break;
		case DMA_PACKET_COPY:
			r = r600_dma_cs_next_reloc(p, &src_reloc);
			if (r) {
				DRM_ERROR("bad DMA_PACKET_COPY\n");
				return -EINVAL;
			}
			r = r600_dma_cs_next_reloc(p, &dst_reloc);
			if (r) {
				DRM_ERROR("bad DMA_PACKET_COPY\n");
				return -EINVAL;
			}
			if (tiled) {
				idx_value = radeon_get_ib_value(p, idx + 2);
				/* detile bit */
				if (idx_value & (1 << 31)) {
					/* tiled src, linear dst */
					src_offset = radeon_get_ib_value(p, idx+1);
					src_offset <<= 8;
					ib[idx+1] += (u32)(src_reloc->gpu_offset >> 8);

					dst_offset = radeon_get_ib_value(p, idx+5);
					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
					ib[idx+5] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
					ib[idx+6] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
				} else {
					/* linear src, tiled dst */
					src_offset = radeon_get_ib_value(p, idx+5);
					src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
					ib[idx+5] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
					ib[idx+6] += upper_32_bits(src_reloc->gpu_offset) & 0xff;

					dst_offset = radeon_get_ib_value(p, idx+1);
					dst_offset <<= 8;
					ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
				}
				p->idx += 7;
			} else {
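				/* linear copy: r7xx packets carry the dst/src high
				 * address bits in dwords 3 and 4, r6xx packs both
				 * into dword 3 (src in [7:0], dst in [23:16]) */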
				if (p->family >= CHIP_RV770) {
					src_offset = radeon_get_ib_value(p, idx+2);
					src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
					dst_offset = radeon_get_ib_value(p, idx+1);
					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;

					ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
					ib[idx+2] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
					ib[idx+3] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
					ib[idx+4] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
					p->idx += 5;
				} else {
					src_offset = radeon_get_ib_value(p, idx+2);
					src_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
					dst_offset = radeon_get_ib_value(p, idx+1);
					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff0000)) << 16;

					ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
					ib[idx+2] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
					ib[idx+3] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
					ib[idx+3] += (upper_32_bits(dst_reloc->gpu_offset) & 0xff) << 16;
					p->idx += 4;
				}
			}
			if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
				dev_warn(p->dev, "DMA copy src buffer too small (%llu %lu)\n",
					 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
				return -EINVAL;
			}
			if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
				dev_warn(p->dev, "DMA write dst buffer too small (%llu %lu)\n",
					 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
				return -EINVAL;
			}
			break;
		case DMA_PACKET_CONSTANT_FILL:
			if (p->family < CHIP_RV770) {
				DRM_ERROR("Constant Fill is 7xx only !\n");
				return -EINVAL;
			}
			r = r600_dma_cs_next_reloc(p, &dst_reloc);
			if (r) {
				DRM_ERROR("bad DMA_PACKET_WRITE\n");
				return -EINVAL;
			}
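			/* the fill dst high address bits are packed into bits [23:16] of dword 3 */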
			dst_offset = radeon_get_ib_value(p, idx+1);
			dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16;
			if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
				dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
					 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
				return -EINVAL;
			}
			ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
			ib[idx+3] += (upper_32_bits(dst_reloc->gpu_offset) << 16) & 0x00ff0000;
			p->idx += 4;
			break;
		case DMA_PACKET_NOP:
			p->idx += 1;
			break;
		default:
			DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
			return -EINVAL;
		}
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
#if 0
	for (r = 0; r < p->ib.length_dw; r++) {
		printk(KERN_INFO "%05d  0x%08X\n", r, p->ib.ptr[r]);
		mdelay(1);
	}
#endif
	return 0;
}