/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/kernel.h>
#include <drm/drmP.h>
#include "radeon.h"
#include "r600d.h"
#include "r600_reg_safe.h"

static int r600_nomm;
extern void r600_cs_legacy_get_tiling_conf(struct drm_device *dev, u32 *npipes, u32 *nbanks, u32 *group_size);


struct r600_cs_track {
	/* configuration we mirror so that we use the same code between kms/ums */
	u32			group_size;
	u32			nbanks;
	u32			npipes;
	/* value we track */
	u32			sq_config;
	u32			log_nsamples;
	u32			nsamples;
	u32			cb_color_base_last[8];
	struct radeon_bo	*cb_color_bo[8];
	u64			cb_color_bo_mc[8];
	u64			cb_color_bo_offset[8];
	struct radeon_bo	*cb_color_frag_bo[8];
	u64			cb_color_frag_offset[8];
	struct radeon_bo	*cb_color_tile_bo[8];
	u64			cb_color_tile_offset[8];
	u32			cb_color_mask[8];
	u32			cb_color_info[8];
	u32			cb_color_view[8];
	u32			cb_color_size_idx[8]; /* unused */
	u32			cb_target_mask;
	u32			cb_shader_mask;  /* unused */
	bool			is_resolve;
	u32			cb_color_size[8];
	u32			vgt_strmout_en;
	u32			vgt_strmout_buffer_en;
	struct radeon_bo	*vgt_strmout_bo[4];
	u64			vgt_strmout_bo_mc[4]; /* unused */
	u32			vgt_strmout_bo_offset[4];
	u32			vgt_strmout_size[4];
	u32			db_depth_control;
	u32			db_depth_info;
	u32			db_depth_size_idx;
	u32			db_depth_view;
	u32			db_depth_size;
	u32			db_offset;
	struct radeon_bo	*db_bo;
	u64			db_bo_mc;
	bool			sx_misc_kill_all_prims;
	bool			cb_dirty;
	bool			db_dirty;
	bool			streamout_dirty;
	struct radeon_bo	*htile_bo;
	u64			htile_offset;
	u32			htile_surface;
};

#define FMT_8_BIT(fmt, vc)   [fmt] = { 1, 1, 1, vc, CHIP_R600 }
#define FMT_16_BIT(fmt, vc)  [fmt] = { 1, 1, 2, vc, CHIP_R600 }
#define FMT_24_BIT(fmt)      [fmt] = { 1, 1, 4,  0, CHIP_R600 }
#define FMT_32_BIT(fmt, vc)  [fmt] = { 1, 1, 4, vc, CHIP_R600 }
#define FMT_48_BIT(fmt)      [fmt] = { 1, 1, 8,  0, CHIP_R600 }
#define FMT_64_BIT(fmt, vc)  [fmt] = { 1, 1, 8, vc, CHIP_R600 }
#define FMT_96_BIT(fmt)      [fmt] = { 1, 1, 12, 0, CHIP_R600 }
#define FMT_128_BIT(fmt, vc) [fmt] = { 1, 1, 16, vc, CHIP_R600 }

struct gpu_formats {
	unsigned blockwidth;
	unsigned blockheight;
	unsigned blocksize;
	unsigned valid_color;
	enum radeon_family min_family;
};
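
/*
 * Illustrative note (not from the original source): each FMT_* macro above
 * fills in one entry of the table below.  For example,
 *   FMT_32_BIT(V_038004_COLOR_8_8_8_8, 1)
 * expands to
 *   [V_038004_COLOR_8_8_8_8] = { 1, 1, 4, 1, CHIP_R600 },
 * i.e. a 1x1 texel block of 4 bytes that is a valid colorbuffer format.
 */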

static const struct gpu_formats color_formats_table[] = {
	/* 8 bit */
	FMT_8_BIT(V_038004_COLOR_8, 1),
	FMT_8_BIT(V_038004_COLOR_4_4, 1),
	FMT_8_BIT(V_038004_COLOR_3_3_2, 1),
	FMT_8_BIT(V_038004_FMT_1, 0),

	/* 16-bit */
	FMT_16_BIT(V_038004_COLOR_16, 1),
	FMT_16_BIT(V_038004_COLOR_16_FLOAT, 1),
	FMT_16_BIT(V_038004_COLOR_8_8, 1),
	FMT_16_BIT(V_038004_COLOR_5_6_5, 1),
	FMT_16_BIT(V_038004_COLOR_6_5_5, 1),
	FMT_16_BIT(V_038004_COLOR_1_5_5_5, 1),
	FMT_16_BIT(V_038004_COLOR_4_4_4_4, 1),
	FMT_16_BIT(V_038004_COLOR_5_5_5_1, 1),

	/* 24-bit */
	FMT_24_BIT(V_038004_FMT_8_8_8),

	/* 32-bit */
	FMT_32_BIT(V_038004_COLOR_32, 1),
	FMT_32_BIT(V_038004_COLOR_32_FLOAT, 1),
	FMT_32_BIT(V_038004_COLOR_16_16, 1),
	FMT_32_BIT(V_038004_COLOR_16_16_FLOAT, 1),
	FMT_32_BIT(V_038004_COLOR_8_24, 1),
	FMT_32_BIT(V_038004_COLOR_8_24_FLOAT, 1),
	FMT_32_BIT(V_038004_COLOR_24_8, 1),
	FMT_32_BIT(V_038004_COLOR_24_8_FLOAT, 1),
	FMT_32_BIT(V_038004_COLOR_10_11_11, 1),
	FMT_32_BIT(V_038004_COLOR_10_11_11_FLOAT, 1),
	FMT_32_BIT(V_038004_COLOR_11_11_10, 1),
	FMT_32_BIT(V_038004_COLOR_11_11_10_FLOAT, 1),
	FMT_32_BIT(V_038004_COLOR_2_10_10_10, 1),
	FMT_32_BIT(V_038004_COLOR_8_8_8_8, 1),
	FMT_32_BIT(V_038004_COLOR_10_10_10_2, 1),
	FMT_32_BIT(V_038004_FMT_5_9_9_9_SHAREDEXP, 0),
	FMT_32_BIT(V_038004_FMT_32_AS_8, 0),
	FMT_32_BIT(V_038004_FMT_32_AS_8_8, 0),

	/* 48-bit */
	FMT_48_BIT(V_038004_FMT_16_16_16),
	FMT_48_BIT(V_038004_FMT_16_16_16_FLOAT),

	/* 64-bit */
	FMT_64_BIT(V_038004_COLOR_X24_8_32_FLOAT, 1),
	FMT_64_BIT(V_038004_COLOR_32_32, 1),
	FMT_64_BIT(V_038004_COLOR_32_32_FLOAT, 1),
	FMT_64_BIT(V_038004_COLOR_16_16_16_16, 1),
	FMT_64_BIT(V_038004_COLOR_16_16_16_16_FLOAT, 1),

	FMT_96_BIT(V_038004_FMT_32_32_32),
	FMT_96_BIT(V_038004_FMT_32_32_32_FLOAT),

	/* 128-bit */
	FMT_128_BIT(V_038004_COLOR_32_32_32_32, 1),
	FMT_128_BIT(V_038004_COLOR_32_32_32_32_FLOAT, 1),

	[V_038004_FMT_GB_GR] = { 2, 1, 4, 0 },
	[V_038004_FMT_BG_RG] = { 2, 1, 4, 0 },

	/* block compressed formats */
	[V_038004_FMT_BC1] = { 4, 4, 8, 0 },
	[V_038004_FMT_BC2] = { 4, 4, 16, 0 },
	[V_038004_FMT_BC3] = { 4, 4, 16, 0 },
	[V_038004_FMT_BC4] = { 4, 4, 8, 0 },
	[V_038004_FMT_BC5] = { 4, 4, 16, 0},
	[V_038004_FMT_BC6] = { 4, 4, 16, 0, CHIP_CEDAR}, /* Evergreen-only */
	[V_038004_FMT_BC7] = { 4, 4, 16, 0, CHIP_CEDAR}, /* Evergreen-only */

	/* The other Evergreen formats */
	[V_038004_FMT_32_AS_32_32_32_32] = { 1, 1, 4, 0, CHIP_CEDAR},
};

bool r600_fmt_is_valid_color(u32 format)
{
	if (format >= ARRAY_SIZE(color_formats_table))
		return false;

	if (color_formats_table[format].valid_color)
		return true;

	return false;
}

bool r600_fmt_is_valid_texture(u32 format, enum radeon_family family)
{
	if (format >= ARRAY_SIZE(color_formats_table))
		return false;

	if (family < color_formats_table[format].min_family)
		return false;

	if (color_formats_table[format].blockwidth > 0)
		return true;

	return false;
}

int r600_fmt_get_blocksize(u32 format)
{
	if (format >= ARRAY_SIZE(color_formats_table))
		return 0;

	return color_formats_table[format].blocksize;
}

int r600_fmt_get_nblocksx(u32 format, u32 w)
{
	unsigned bw;

	if (format >= ARRAY_SIZE(color_formats_table))
		return 0;

	bw = color_formats_table[format].blockwidth;
	if (bw == 0)
		return 0;

	return (w + bw - 1) / bw;
}

int r600_fmt_get_nblocksy(u32 format, u32 h)
{
	unsigned bh;

	if (format >= ARRAY_SIZE(color_formats_table))
		return 0;

	bh = color_formats_table[format].blockheight;
	if (bh == 0)
		return 0;

	return (h + bh - 1) / bh;
}
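
/*
 * Worked example (illustrative only): for BC1 the table above gives
 * blockwidth = blockheight = 4 and blocksize = 8, so a 10x7 texel image is
 * r600_fmt_get_nblocksx() = (10 + 3) / 4 = 3 blocks wide and
 * r600_fmt_get_nblocksy() = (7 + 3) / 4 = 2 blocks high,
 * i.e. 3 * 2 * 8 = 48 bytes per slice.
 */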

struct array_mode_checker {
	int array_mode;
	u32 group_size;
	u32 nbanks;
	u32 npipes;
	u32 nsamples;
	u32 blocksize;
};

/* returns alignment in pixels for pitch/height/depth and bytes for base */
static int r600_get_array_mode_alignment(struct array_mode_checker *values,
						u32 *pitch_align,
						u32 *height_align,
						u32 *depth_align,
						u64 *base_align)
{
	u32 tile_width = 8;
	u32 tile_height = 8;
	u32 macro_tile_width = values->nbanks;
	u32 macro_tile_height = values->npipes;
	u32 tile_bytes = tile_width * tile_height * values->blocksize * values->nsamples;
	u32 macro_tile_bytes = macro_tile_width * macro_tile_height * tile_bytes;

	switch (values->array_mode) {
	case ARRAY_LINEAR_GENERAL:
		/* technically tile_width/_height for pitch/height */
		*pitch_align = 1; /* tile_width */
		*height_align = 1; /* tile_height */
		*depth_align = 1;
		*base_align = 1;
		break;
	case ARRAY_LINEAR_ALIGNED:
		*pitch_align = max((u32)64, (u32)(values->group_size / values->blocksize));
		*height_align = 1;
		*depth_align = 1;
		*base_align = values->group_size;
		break;
	case ARRAY_1D_TILED_THIN1:
		*pitch_align = max((u32)tile_width,
				   (u32)(values->group_size /
					 (tile_height * values->blocksize * values->nsamples)));
		*height_align = tile_height;
		*depth_align = 1;
		*base_align = values->group_size;
		break;
	case ARRAY_2D_TILED_THIN1:
		*pitch_align = max((u32)macro_tile_width * tile_width,
				(u32)((values->group_size * values->nbanks) /
				(values->blocksize * values->nsamples * tile_width)));
		*height_align = macro_tile_height * tile_height;
		*depth_align = 1;
		*base_align = max(macro_tile_bytes,
				  (*pitch_align) * values->blocksize * (*height_align) * values->nsamples);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
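
/*
 * Worked example (hypothetical values, for illustration only): with
 * ARRAY_2D_TILED_THIN1, group_size = 256, nbanks = 4, npipes = 2,
 * blocksize = 4 and nsamples = 1 the code above yields
 *   pitch_align  = max(4 * 8, (256 * 4) / (4 * 1 * 8)) = 32 pixels,
 *   height_align = 2 * 8 = 16 pixels,
 *   base_align   = max(4 * 2 * 8 * 8 * 4, 32 * 4 * 16 * 1) = 2048 bytes.
 */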

static void r600_cs_track_init(struct r600_cs_track *track)
{
	int i;

	/* assume DX9 mode */
	track->sq_config = DX9_CONSTS;
	for (i = 0; i < 8; i++) {
		track->cb_color_base_last[i] = 0;
		track->cb_color_size[i] = 0;
		track->cb_color_size_idx[i] = 0;
		track->cb_color_info[i] = 0;
		track->cb_color_view[i] = 0xFFFFFFFF;
		track->cb_color_bo[i] = NULL;
		track->cb_color_bo_offset[i] = 0xFFFFFFFF;
		track->cb_color_bo_mc[i] = 0xFFFFFFFF;
		track->cb_color_frag_bo[i] = NULL;
		track->cb_color_frag_offset[i] = 0xFFFFFFFF;
		track->cb_color_tile_bo[i] = NULL;
		track->cb_color_tile_offset[i] = 0xFFFFFFFF;
		track->cb_color_mask[i] = 0xFFFFFFFF;
	}
	track->is_resolve = false;
	track->nsamples = 16;
	track->log_nsamples = 4;
	track->cb_target_mask = 0xFFFFFFFF;
	track->cb_shader_mask = 0xFFFFFFFF;
	track->cb_dirty = true;
	track->db_bo = NULL;
	track->db_bo_mc = 0xFFFFFFFF;
	/* assume the biggest format and that htile is enabled */
	track->db_depth_info = 7 | (1 << 25);
	track->db_depth_view = 0xFFFFC000;
	track->db_depth_size = 0xFFFFFFFF;
	track->db_depth_size_idx = 0;
	track->db_depth_control = 0xFFFFFFFF;
	track->db_dirty = true;
	track->htile_bo = NULL;
	track->htile_offset = 0xFFFFFFFF;
	track->htile_surface = 0;

	for (i = 0; i < 4; i++) {
		track->vgt_strmout_size[i] = 0;
		track->vgt_strmout_bo[i] = NULL;
		track->vgt_strmout_bo_offset[i] = 0xFFFFFFFF;
		track->vgt_strmout_bo_mc[i] = 0xFFFFFFFF;
	}
	track->streamout_dirty = true;
	track->sx_misc_kill_all_prims = false;
}

static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
{
	struct r600_cs_track *track = p->track;
	u32 slice_tile_max, size, tmp;
	u32 height, height_align, pitch, pitch_align, depth_align;
	u64 base_offset, base_align;
	struct array_mode_checker array_check;
	volatile u32 *ib = p->ib.ptr;
	unsigned array_mode;
	u32 format;
	/* When resolve is used, the second colorbuffer always has 1 sample. */
	unsigned nsamples = track->is_resolve && i == 1 ? 1 : track->nsamples;

	size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i];
	format = G_0280A0_FORMAT(track->cb_color_info[i]);
	if (!r600_fmt_is_valid_color(format)) {
		dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n",
			 __func__, __LINE__, format,
			i, track->cb_color_info[i]);
		return -EINVAL;
	}
	/* pitch in pixels */
	pitch = (G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1) * 8;
	slice_tile_max = G_028060_SLICE_TILE_MAX(track->cb_color_size[i]) + 1;
	slice_tile_max *= 64;
	height = slice_tile_max / pitch;
	if (height > 8192)
		height = 8192;
	array_mode = G_0280A0_ARRAY_MODE(track->cb_color_info[i]);

	base_offset = track->cb_color_bo_mc[i] + track->cb_color_bo_offset[i];
	array_check.array_mode = array_mode;
	array_check.group_size = track->group_size;
	array_check.nbanks = track->nbanks;
	array_check.npipes = track->npipes;
	array_check.nsamples = nsamples;
	array_check.blocksize = r600_fmt_get_blocksize(format);
	if (r600_get_array_mode_alignment(&array_check,
					  &pitch_align, &height_align, &depth_align, &base_align)) {
		dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
			 G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
			 track->cb_color_info[i]);
		return -EINVAL;
	}
	switch (array_mode) {
	case V_0280A0_ARRAY_LINEAR_GENERAL:
		break;
	case V_0280A0_ARRAY_LINEAR_ALIGNED:
		break;
	case V_0280A0_ARRAY_1D_TILED_THIN1:
		/* avoid breaking userspace */
		if (height > 7)
			height &= ~0x7;
		break;
	case V_0280A0_ARRAY_2D_TILED_THIN1:
		break;
	default:
		dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
			G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
			track->cb_color_info[i]);
		return -EINVAL;
	}

	if (!IS_ALIGNED(pitch, pitch_align)) {
		dev_warn(p->dev, "%s:%d cb pitch (%d, 0x%x, %d) invalid\n",
			 __func__, __LINE__, pitch, pitch_align, array_mode);
		return -EINVAL;
	}
	if (!IS_ALIGNED(height, height_align)) {
		dev_warn(p->dev, "%s:%d cb height (%d, 0x%x, %d) invalid\n",
			 __func__, __LINE__, height, height_align, array_mode);
		return -EINVAL;
	}
	if (!IS_ALIGNED(base_offset, base_align)) {
		dev_warn(p->dev, "%s offset[%d] 0x%llx 0x%llx, %d not aligned\n", __func__, i,
			 base_offset, base_align, array_mode);
		return -EINVAL;
	}

	/* check offset */
	tmp = r600_fmt_get_nblocksy(format, height) * r600_fmt_get_nblocksx(format, pitch) *
	      r600_fmt_get_blocksize(format) * nsamples;
	switch (array_mode) {
	default:
	case V_0280A0_ARRAY_LINEAR_GENERAL:
	case V_0280A0_ARRAY_LINEAR_ALIGNED:
		tmp += track->cb_color_view[i] & 0xFF;
		break;
	case V_0280A0_ARRAY_1D_TILED_THIN1:
	case V_0280A0_ARRAY_2D_TILED_THIN1:
		tmp += G_028080_SLICE_MAX(track->cb_color_view[i]) * tmp;
		break;
	}
	if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) {
		if (array_mode == V_0280A0_ARRAY_LINEAR_GENERAL) {
			/* the initial DDX does bad things with the CB size occasionally */
			/* it rounds up height too far for slice tile max but the BO is smaller */
			/* r600c,g also seem to flush at bad times in some apps resulting in
			 * bogus values here. So for linear just allow anything to avoid breaking
			 * broken userspace.
			 */
		} else {
			dev_warn(p->dev, "%s offset[%d] %d %llu %d %lu too big (%d %d) (%d %d %d)\n",
				 __func__, i, array_mode,
				 track->cb_color_bo_offset[i], tmp,
				 radeon_bo_size(track->cb_color_bo[i]),
				 pitch, height, r600_fmt_get_nblocksx(format, pitch),
				 r600_fmt_get_nblocksy(format, height),
				 r600_fmt_get_blocksize(format));
			return -EINVAL;
		}
	}
	/* limit max tile */
	tmp = (height * pitch) >> 6;
	if (tmp < slice_tile_max)
		slice_tile_max = tmp;
	tmp = S_028060_PITCH_TILE_MAX((pitch / 8) - 1) |
		S_028060_SLICE_TILE_MAX(slice_tile_max - 1);
	ib[track->cb_color_size_idx[i]] = tmp;

	/* FMASK/CMASK */
	switch (G_0280A0_TILE_MODE(track->cb_color_info[i])) {
	case V_0280A0_TILE_DISABLE:
		break;
	case V_0280A0_FRAG_ENABLE:
		if (track->nsamples > 1) {
			uint32_t tile_max = G_028100_FMASK_TILE_MAX(track->cb_color_mask[i]);
			/* the tile size is 8x8, but the size is in units of bits.
			 * for bytes, do just * 8. */
			uint32_t bytes = track->nsamples * track->log_nsamples * 8 * (tile_max + 1);

			if (bytes + track->cb_color_frag_offset[i] >
			    radeon_bo_size(track->cb_color_frag_bo[i])) {
				dev_warn(p->dev, "%s FMASK_TILE_MAX too large "
					 "(tile_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n",
					 __func__, tile_max, bytes,
					 track->cb_color_frag_offset[i],
					 radeon_bo_size(track->cb_color_frag_bo[i]));
				return -EINVAL;
			}
		}
		/* fall through */
	case V_0280A0_CLEAR_ENABLE:
	{
		uint32_t block_max = G_028100_CMASK_BLOCK_MAX(track->cb_color_mask[i]);
		/* One block = 128x128 pixels, one 8x8 tile has 4 bits..
		 * (128*128) / (8*8) / 2 = 128 bytes per block. */
		uint32_t bytes = (block_max + 1) * 128;

		if (bytes + track->cb_color_tile_offset[i] >
		    radeon_bo_size(track->cb_color_tile_bo[i])) {
			dev_warn(p->dev, "%s CMASK_BLOCK_MAX too large "
				 "(block_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n",
				 __func__, block_max, bytes,
				 track->cb_color_tile_offset[i],
				 radeon_bo_size(track->cb_color_tile_bo[i]));
			return -EINVAL;
		}
		break;
	}
	default:
		dev_warn(p->dev, "%s invalid tile mode\n", __func__);
		return -EINVAL;
	}
	return 0;
}

static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
{
	struct r600_cs_track *track = p->track;
	u32 nviews, bpe, ntiles, size, slice_tile_max, tmp;
	u32 height_align, pitch_align, depth_align;
	u32 pitch = 8192;
	u32 height = 8192;
	u64 base_offset, base_align;
	struct array_mode_checker array_check;
	int array_mode;
	volatile u32 *ib = p->ib.ptr;


	if (track->db_bo == NULL) {
		dev_warn(p->dev, "z/stencil with no depth buffer\n");
		return -EINVAL;
	}
	switch (G_028010_FORMAT(track->db_depth_info)) {
	case V_028010_DEPTH_16:
		bpe = 2;
		break;
	case V_028010_DEPTH_X8_24:
	case V_028010_DEPTH_8_24:
	case V_028010_DEPTH_X8_24_FLOAT:
	case V_028010_DEPTH_8_24_FLOAT:
	case V_028010_DEPTH_32_FLOAT:
		bpe = 4;
		break;
	case V_028010_DEPTH_X24_8_32_FLOAT:
		bpe = 8;
		break;
	default:
		dev_warn(p->dev, "z/stencil with invalid format %d\n", G_028010_FORMAT(track->db_depth_info));
		return -EINVAL;
	}
	if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
		if (!track->db_depth_size_idx) {
			dev_warn(p->dev, "z/stencil buffer size not set\n");
			return -EINVAL;
		}
		tmp = radeon_bo_size(track->db_bo) - track->db_offset;
		tmp = (tmp / bpe) >> 6;
		if (!tmp) {
			dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %ld)\n",
					track->db_depth_size, bpe, track->db_offset,
					radeon_bo_size(track->db_bo));
			return -EINVAL;
		}
		ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF);
	} else {
		size = radeon_bo_size(track->db_bo);
		/* pitch in pixels */
		pitch = (G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1) * 8;
		slice_tile_max = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
		slice_tile_max *= 64;
		height = slice_tile_max / pitch;
		if (height > 8192)
			height = 8192;
		base_offset = track->db_bo_mc + track->db_offset;
		array_mode = G_028010_ARRAY_MODE(track->db_depth_info);
		array_check.array_mode = array_mode;
		array_check.group_size = track->group_size;
		array_check.nbanks = track->nbanks;
		array_check.npipes = track->npipes;
		array_check.nsamples = track->nsamples;
		array_check.blocksize = bpe;
		if (r600_get_array_mode_alignment(&array_check,
					&pitch_align, &height_align, &depth_align, &base_align)) {
			dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
					G_028010_ARRAY_MODE(track->db_depth_info),
					track->db_depth_info);
			return -EINVAL;
		}
		switch (array_mode) {
		case V_028010_ARRAY_1D_TILED_THIN1:
			/* don't break userspace */
			height &= ~0x7;
			break;
		case V_028010_ARRAY_2D_TILED_THIN1:
			break;
		default:
			dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
					G_028010_ARRAY_MODE(track->db_depth_info),
					track->db_depth_info);
			return -EINVAL;
		}

		if (!IS_ALIGNED(pitch, pitch_align)) {
			dev_warn(p->dev, "%s:%d db pitch (%d, 0x%x, %d) invalid\n",
					__func__, __LINE__, pitch, pitch_align, array_mode);
			return -EINVAL;
		}
		if (!IS_ALIGNED(height, height_align)) {
			dev_warn(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n",
					__func__, __LINE__, height, height_align, array_mode);
			return -EINVAL;
		}
		if (!IS_ALIGNED(base_offset, base_align)) {
			dev_warn(p->dev, "%s offset 0x%llx, 0x%llx, %d not aligned\n", __func__,
					base_offset, base_align, array_mode);
			return -EINVAL;
		}

		ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
		nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
		tmp = ntiles * bpe * 64 * nviews * track->nsamples;
		if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
			dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n",
					array_mode,
					track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
					radeon_bo_size(track->db_bo));
			return -EINVAL;
		}
	}

	/* hyperz */
	if (G_028010_TILE_SURFACE_ENABLE(track->db_depth_info)) {
		unsigned long size;
		unsigned nbx, nby;

		if (track->htile_bo == NULL) {
			dev_warn(p->dev, "%s:%d htile enabled without htile surface 0x%08x\n",
				 __func__, __LINE__, track->db_depth_info);
			return -EINVAL;
		}
		if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
			dev_warn(p->dev, "%s:%d htile can't be enabled with bogus db_depth_size 0x%08x\n",
				 __func__, __LINE__, track->db_depth_size);
			return -EINVAL;
		}

		nbx = pitch;
		nby = height;
		if (G_028D24_LINEAR(track->htile_surface)) {
			/* nbx must be 16 htiles aligned == 16 * 8 pixel aligned */
			nbx = round_up(nbx, 16 * 8);
			/* nby is npipes htiles aligned == npipes * 8 pixel aligned */
			nby = round_up(nby, track->npipes * 8);
		} else {
			/* always assume 8x8 htile */
	/* alignment is htile align * 8; the htile alignment varies according to
	 * the number of pipes, the tile width and nby
			 */
			switch (track->npipes) {
			case 8:
				/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
				nbx = round_up(nbx, 64 * 8);
				nby = round_up(nby, 64 * 8);
				break;
			case 4:
				/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
				nbx = round_up(nbx, 64 * 8);
				nby = round_up(nby, 32 * 8);
				break;
			case 2:
				/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
				nbx = round_up(nbx, 32 * 8);
				nby = round_up(nby, 32 * 8);
				break;
			case 1:
				/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
				nbx = round_up(nbx, 32 * 8);
				nby = round_up(nby, 16 * 8);
				break;
			default:
				dev_warn(p->dev, "%s:%d invalid num pipes %d\n",
					 __func__, __LINE__, track->npipes);
				return -EINVAL;
			}
		}
		/* compute number of htile */
		nbx = nbx >> 3;
		nby = nby >> 3;
		/* size must be aligned on npipes * 2K boundary */
		size = roundup(nbx * nby * 4, track->npipes * (2 << 10));
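		/* Illustrative sizing (hypothetical values): a 1024x1024 depth
		 * buffer on a 2-pipe part rounds nbx and nby up to multiples
		 * of 32 * 8 pixels, giving 128 x 128 htiles and
		 * 128 * 128 * 4 = 64 KiB, already a multiple of npipes * 2 KiB.
		 */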
		size += track->htile_offset;

		if (size > radeon_bo_size(track->htile_bo)) {
			dev_warn(p->dev, "%s:%d htile surface too small %ld for %ld (%d %d)\n",
				 __func__, __LINE__, radeon_bo_size(track->htile_bo),
				 size, nbx, nby);
			return -EINVAL;
		}
	}

	track->db_dirty = false;
	return 0;
}

static int r600_cs_track_check(struct radeon_cs_parser *p)
{
	struct r600_cs_track *track = p->track;
	u32 tmp;
	int r, i;

	/* on legacy kernel we don't perform advanced check */
	if (p->rdev == NULL)
		return 0;

	/* check streamout */
	if (track->streamout_dirty && track->vgt_strmout_en) {
		for (i = 0; i < 4; i++) {
			if (track->vgt_strmout_buffer_en & (1 << i)) {
				if (track->vgt_strmout_bo[i]) {
					u64 offset = (u64)track->vgt_strmout_bo_offset[i] +
						(u64)track->vgt_strmout_size[i];
					if (offset > radeon_bo_size(track->vgt_strmout_bo[i])) {
						DRM_ERROR("streamout %d bo too small: 0x%llx, 0x%lx\n",
							  i, offset,
							  radeon_bo_size(track->vgt_strmout_bo[i]));
						return -EINVAL;
					}
				} else {
					dev_warn(p->dev, "No buffer for streamout %d\n", i);
					return -EINVAL;
				}
			}
		}
		track->streamout_dirty = false;
	}

	if (track->sx_misc_kill_all_prims)
		return 0;

	/* check that we have a cb for each enabled target; we don't check
	 * shader_mask because it seems mesa isn't always setting it :(
	 */
	if (track->cb_dirty) {
		tmp = track->cb_target_mask;

		/* We must check both colorbuffers for RESOLVE. */
		if (track->is_resolve) {
			tmp |= 0xff;
		}

		for (i = 0; i < 8; i++) {
			if ((tmp >> (i * 4)) & 0xF) {
				/* at least one component is enabled */
				if (track->cb_color_bo[i] == NULL) {
					dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
						__func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
					return -EINVAL;
				}
				/* perform rewrite of CB_COLOR[0-7]_SIZE */
				r = r600_cs_track_validate_cb(p, i);
				if (r)
					return r;
			}
		}
		track->cb_dirty = false;
	}

	/* Check depth buffer */
	if (track->db_dirty &&
	    G_028010_FORMAT(track->db_depth_info) != V_028010_DEPTH_INVALID &&
	    (G_028800_STENCIL_ENABLE(track->db_depth_control) ||
	     G_028800_Z_ENABLE(track->db_depth_control))) {
		r = r600_cs_track_validate_db(p);
		if (r)
			return r;
	}

	return 0;
}

/**
 * r600_cs_packet_parse_vline() - parse userspace VLINE packet
 * @parser:		parser structure holding parsing context.
 *
 * This is an R600-specific function for parsing VLINE packets.
 * Real work is done by r600_cs_common_vline_parse function.
 * Here we just set up ASIC-specific register table and call
 * the common implementation function.
 */
static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
{
	static uint32_t vline_start_end[2] = {AVIVO_D1MODE_VLINE_START_END,
					      AVIVO_D2MODE_VLINE_START_END};
	static uint32_t vline_status[2] = {AVIVO_D1MODE_VLINE_STATUS,
					   AVIVO_D2MODE_VLINE_STATUS};

	return r600_cs_common_vline_parse(p, vline_start_end, vline_status);
}

/**
 * r600_cs_common_vline_parse() - common vline parser
 * @parser:		parser structure holding parsing context.
 * @vline_start_end:    table of vline_start_end registers
 * @vline_status:       table of vline_status registers
 *
 * Userspace sends a special sequence for VLINE waits.
 * PACKET0 - VLINE_START_END + value
 * PACKET3 - WAIT_REG_MEM poll vline status reg
 * RELOC (P3) - crtc_id in reloc.
 *
 * This function parses this and relocates the VLINE START END
 * and WAIT_REG_MEM packets to the correct crtc.
 * It also detects a switched off crtc and nulls out the
 * wait in that case. This function is common for all ASICs that
 * are R600 and newer. The parsing algorithm is the same, and only
 * differs in which registers are used.
 *
 * Caller is the ASIC-specific function which passes the parser
 * context and ASIC-specific register table
 */
int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
			       uint32_t *vline_start_end,
			       uint32_t *vline_status)
{
	struct drm_mode_object *obj;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	struct radeon_cs_packet p3reloc, wait_reg_mem;
	int crtc_id;
	int r;
	uint32_t header, h_idx, reg, wait_reg_mem_info;
	volatile uint32_t *ib;

	ib = p->ib.ptr;
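	/* Expected sequence (sketch, as implied by the checks below): a PACKET0
	 * write of VLINE_START_END sits two dwords before p->idx, p->idx itself
	 * points at a PACKET3 WAIT_REG_MEM polling the vline status register,
	 * and the following PACKET3 NOP carries the crtc_id reloc.
	 */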

	/* parse the WAIT_REG_MEM */
	r = radeon_cs_packet_parse(p, &wait_reg_mem, p->idx);
	if (r)
		return r;

	/* check its a WAIT_REG_MEM */
	if (wait_reg_mem.type != RADEON_PACKET_TYPE3 ||
	    wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
		DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
		return -EINVAL;
	}

	wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
	/* bit 4 is reg (0) or mem (1) */
	if (wait_reg_mem_info & 0x10) {
		DRM_ERROR("vline WAIT_REG_MEM waiting on MEM instead of REG\n");
		return -EINVAL;
	}
	/* bit 8 is me (0) or pfp (1) */
	if (wait_reg_mem_info & 0x100) {
		DRM_ERROR("vline WAIT_REG_MEM waiting on PFP instead of ME\n");
		return -EINVAL;
	}
	/* waiting for value to be equal */
	if ((wait_reg_mem_info & 0x7) != 0x3) {
		DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
		return -EINVAL;
	}
	if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != vline_status[0]) {
		DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
		return -EINVAL;
	}

	if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != RADEON_VLINE_STAT) {
		DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
		return -EINVAL;
	}

	/* jump over the NOP */
	r = radeon_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
	if (r)
		return r;

	h_idx = p->idx - 2;
	p->idx += wait_reg_mem.count + 2;
	p->idx += p3reloc.count + 2;

	header = radeon_get_ib_value(p, h_idx);
	crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
	reg = R600_CP_PACKET0_GET_REG(header);

	obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
	if (!obj) {
		DRM_ERROR("cannot find crtc %d\n", crtc_id);
		return -EINVAL;
	}
	crtc = obj_to_crtc(obj);
	radeon_crtc = to_radeon_crtc(crtc);
	crtc_id = radeon_crtc->crtc_id;

	if (!crtc->enabled) {
		/* CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
		ib[h_idx + 2] = PACKET2(0);
		ib[h_idx + 3] = PACKET2(0);
		ib[h_idx + 4] = PACKET2(0);
		ib[h_idx + 5] = PACKET2(0);
		ib[h_idx + 6] = PACKET2(0);
		ib[h_idx + 7] = PACKET2(0);
		ib[h_idx + 8] = PACKET2(0);
	} else if (reg == vline_start_end[0]) {
		header &= ~R600_CP_PACKET0_REG_MASK;
		header |= vline_start_end[crtc_id] >> 2;
		ib[h_idx] = header;
		ib[h_idx + 4] = vline_status[crtc_id] >> 2;
	} else {
		DRM_ERROR("unknown crtc reloc\n");
		return -EINVAL;
	}
	return 0;
}

static int r600_packet0_check(struct radeon_cs_parser *p,
				struct radeon_cs_packet *pkt,
				unsigned idx, unsigned reg)
{
	int r;

	switch (reg) {
	case AVIVO_D1MODE_VLINE_START_END:
		r = r600_cs_packet_parse_vline(p);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
					idx, reg);
			return r;
		}
		break;
	default:
		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
		       reg, idx);
		return -EINVAL;
	}
	return 0;
}

static int r600_cs_parse_packet0(struct radeon_cs_parser *p,
				struct radeon_cs_packet *pkt)
{
	unsigned reg, i;
	unsigned idx;
	int r;

	idx = pkt->idx + 1;
	reg = pkt->reg;
	for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
		r = r600_packet0_check(p, pkt, idx, reg);
		if (r) {
			return r;
		}
	}
	return 0;
}

/**
 * r600_cs_check_reg() - check if register is authorized or not
 * @parser: parser structure holding parsing context
 * @reg: register we are testing
 * @idx: index into the cs buffer
 *
 * This function will test against r600_reg_safe_bm and return 0
 * if the register is safe. If the register is not flagged as safe, this function
 * will test it against a list of registers needing special handling.
 */
static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
{
	struct r600_cs_track *track = (struct r600_cs_track *)p->track;
	struct radeon_cs_reloc *reloc;
	u32 m, i, tmp, *ib;
	int r;

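	/* The safe-register bitmap stores one bit per register dword.  For
	 * illustration, reg 0x028800 (DB_DEPTH_CONTROL) gives
	 * i = 0x028800 >> 7 = 1296 and bit (0x028800 >> 2) & 31 = 0, i.e. it is
	 * looked up as bit 0 of r600_reg_safe_bm[1296].
	 */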
	i = (reg >> 7);
	if (i >= ARRAY_SIZE(r600_reg_safe_bm)) {
		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
		return -EINVAL;
	}
	m = 1 << ((reg >> 2) & 31);
	if (!(r600_reg_safe_bm[i] & m))
		return 0;
	ib = p->ib.ptr;
	switch (reg) {
	/* force following reg to 0 in an attempt to disable out buffer
	 * which will need us to better understand how it works to perform
	 * security check on it (Jerome)
	 */
	case R_0288A8_SQ_ESGS_RING_ITEMSIZE:
	case R_008C44_SQ_ESGS_RING_SIZE:
	case R_0288B0_SQ_ESTMP_RING_ITEMSIZE:
	case R_008C54_SQ_ESTMP_RING_SIZE:
	case R_0288C0_SQ_FBUF_RING_ITEMSIZE:
	case R_008C74_SQ_FBUF_RING_SIZE:
	case R_0288B4_SQ_GSTMP_RING_ITEMSIZE:
	case R_008C5C_SQ_GSTMP_RING_SIZE:
	case R_0288AC_SQ_GSVS_RING_ITEMSIZE:
	case R_008C4C_SQ_GSVS_RING_SIZE:
	case R_0288BC_SQ_PSTMP_RING_ITEMSIZE:
	case R_008C6C_SQ_PSTMP_RING_SIZE:
	case R_0288C4_SQ_REDUC_RING_ITEMSIZE:
	case R_008C7C_SQ_REDUC_RING_SIZE:
	case R_0288B8_SQ_VSTMP_RING_ITEMSIZE:
	case R_008C64_SQ_VSTMP_RING_SIZE:
	case R_0288C8_SQ_GS_VERT_ITEMSIZE:
		/* get value to populate the IB don't remove */
		tmp = radeon_get_ib_value(p, idx);
		ib[idx] = 0;
		break;
	case SQ_CONFIG:
		track->sq_config = radeon_get_ib_value(p, idx);
		break;
	case R_028800_DB_DEPTH_CONTROL:
		track->db_depth_control = radeon_get_ib_value(p, idx);
		track->db_dirty = true;
		break;
	case R_028010_DB_DEPTH_INFO:
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
		    radeon_cs_packet_next_is_pkt3_nop(p)) {
			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
			if (r) {
				dev_warn(p->dev, "bad SET_CONTEXT_REG "
					 "0x%04X\n", reg);
				return -EINVAL;
			}
			track->db_depth_info = radeon_get_ib_value(p, idx);
			ib[idx] &= C_028010_ARRAY_MODE;
			track->db_depth_info &= C_028010_ARRAY_MODE;
			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
				ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1);
				track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1);
			} else {
				ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1);
				track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1);
			}
		} else {
			track->db_depth_info = radeon_get_ib_value(p, idx);
		}
		track->db_dirty = true;
		break;
	case R_028004_DB_DEPTH_VIEW:
		track->db_depth_view = radeon_get_ib_value(p, idx);
		track->db_dirty = true;
		break;
	case R_028000_DB_DEPTH_SIZE:
		track->db_depth_size = radeon_get_ib_value(p, idx);
		track->db_depth_size_idx = idx;
		track->db_dirty = true;
		break;
	case R_028AB0_VGT_STRMOUT_EN:
		track->vgt_strmout_en = radeon_get_ib_value(p, idx);
		track->streamout_dirty = true;
		break;
	case R_028B20_VGT_STRMOUT_BUFFER_EN:
		track->vgt_strmout_buffer_en = radeon_get_ib_value(p, idx);
		track->streamout_dirty = true;
		break;
	case VGT_STRMOUT_BUFFER_BASE_0:
	case VGT_STRMOUT_BUFFER_BASE_1:
	case VGT_STRMOUT_BUFFER_BASE_2:
	case VGT_STRMOUT_BUFFER_BASE_3:
		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16;
		track->vgt_strmout_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->vgt_strmout_bo[tmp] = reloc->robj;
		track->vgt_strmout_bo_mc[tmp] = reloc->lobj.gpu_offset;
		track->streamout_dirty = true;
		break;
	case VGT_STRMOUT_BUFFER_SIZE_0:
	case VGT_STRMOUT_BUFFER_SIZE_1:
	case VGT_STRMOUT_BUFFER_SIZE_2:
	case VGT_STRMOUT_BUFFER_SIZE_3:
		tmp = (reg - VGT_STRMOUT_BUFFER_SIZE_0) / 16;
		/* size in register is DWs, convert to bytes */
		track->vgt_strmout_size[tmp] = radeon_get_ib_value(p, idx) * 4;
		track->streamout_dirty = true;
		break;
	case CP_COHER_BASE:
		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
		if (r) {
			dev_warn(p->dev, "missing reloc for CP_COHER_BASE "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		break;
	case R_028238_CB_TARGET_MASK:
		track->cb_target_mask = radeon_get_ib_value(p, idx);
		track->cb_dirty = true;
		break;
	case R_02823C_CB_SHADER_MASK:
		track->cb_shader_mask = radeon_get_ib_value(p, idx);
		break;
	case R_028C04_PA_SC_AA_CONFIG:
		tmp = G_028C04_MSAA_NUM_SAMPLES(radeon_get_ib_value(p, idx));
		track->log_nsamples = tmp;
		track->nsamples = 1 << tmp;
		track->cb_dirty = true;
		break;
	case R_028808_CB_COLOR_CONTROL:
		tmp = G_028808_SPECIAL_OP(radeon_get_ib_value(p, idx));
		track->is_resolve = tmp == V_028808_SPECIAL_RESOLVE_BOX;
		track->cb_dirty = true;
		break;
	case R_0280A0_CB_COLOR0_INFO:
	case R_0280A4_CB_COLOR1_INFO:
	case R_0280A8_CB_COLOR2_INFO:
	case R_0280AC_CB_COLOR3_INFO:
	case R_0280B0_CB_COLOR4_INFO:
	case R_0280B4_CB_COLOR5_INFO:
	case R_0280B8_CB_COLOR6_INFO:
	case R_0280BC_CB_COLOR7_INFO:
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
		     radeon_cs_packet_next_is_pkt3_nop(p)) {
			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
			if (r) {
				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
				return -EINVAL;
			}
			tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
			track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
				ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1);
				track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1);
			} else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
				ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1);
				track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1);
			}
		} else {
			tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
			track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
		}
		track->cb_dirty = true;
		break;
	case R_028080_CB_COLOR0_VIEW:
	case R_028084_CB_COLOR1_VIEW:
	case R_028088_CB_COLOR2_VIEW:
	case R_02808C_CB_COLOR3_VIEW:
	case R_028090_CB_COLOR4_VIEW:
	case R_028094_CB_COLOR5_VIEW:
	case R_028098_CB_COLOR6_VIEW:
	case R_02809C_CB_COLOR7_VIEW:
		tmp = (reg - R_028080_CB_COLOR0_VIEW) / 4;
		track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
		track->cb_dirty = true;
		break;
	case R_028060_CB_COLOR0_SIZE:
	case R_028064_CB_COLOR1_SIZE:
	case R_028068_CB_COLOR2_SIZE:
	case R_02806C_CB_COLOR3_SIZE:
	case R_028070_CB_COLOR4_SIZE:
	case R_028074_CB_COLOR5_SIZE:
	case R_028078_CB_COLOR6_SIZE:
	case R_02807C_CB_COLOR7_SIZE:
		tmp = (reg - R_028060_CB_COLOR0_SIZE) / 4;
		track->cb_color_size[tmp] = radeon_get_ib_value(p, idx);
		track->cb_color_size_idx[tmp] = idx;
		track->cb_dirty = true;
		break;
		/* These registers were added late; there is userspace
		 * which does provide relocations for them but sets a
		 * 0 offset. In order to avoid breaking old userspace
		 * we detect this and set the address to point to the last
		 * CB_COLOR0_BASE. Note that if userspace doesn't set
		 * CB_COLOR0_BASE before this register we will report an
		 * error. Old userspace always sets CB_COLOR0_BASE
		 * before any of this.
		 */
	case R_0280E0_CB_COLOR0_FRAG:
	case R_0280E4_CB_COLOR1_FRAG:
	case R_0280E8_CB_COLOR2_FRAG:
	case R_0280EC_CB_COLOR3_FRAG:
	case R_0280F0_CB_COLOR4_FRAG:
	case R_0280F4_CB_COLOR5_FRAG:
	case R_0280F8_CB_COLOR6_FRAG:
	case R_0280FC_CB_COLOR7_FRAG:
		tmp = (reg - R_0280E0_CB_COLOR0_FRAG) / 4;
		if (!radeon_cs_packet_next_is_pkt3_nop(p)) {
			if (!track->cb_color_base_last[tmp]) {
				dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
				return -EINVAL;
			}
			track->cb_color_frag_bo[tmp] = track->cb_color_bo[tmp];
			track->cb_color_frag_offset[tmp] = track->cb_color_bo_offset[tmp];
			ib[idx] = track->cb_color_base_last[tmp];
		} else {
			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
			if (r) {
				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
				return -EINVAL;
			}
			track->cb_color_frag_bo[tmp] = reloc->robj;
			track->cb_color_frag_offset[tmp] = (u64)ib[idx] << 8;
			ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		}
		if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
			track->cb_dirty = true;
		}
		break;
	case R_0280C0_CB_COLOR0_TILE:
	case R_0280C4_CB_COLOR1_TILE:
	case R_0280C8_CB_COLOR2_TILE:
	case R_0280CC_CB_COLOR3_TILE:
	case R_0280D0_CB_COLOR4_TILE:
	case R_0280D4_CB_COLOR5_TILE:
	case R_0280D8_CB_COLOR6_TILE:
	case R_0280DC_CB_COLOR7_TILE:
		tmp = (reg - R_0280C0_CB_COLOR0_TILE) / 4;
		if (!radeon_cs_packet_next_is_pkt3_nop(p)) {
			if (!track->cb_color_base_last[tmp]) {
				dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
				return -EINVAL;
			}
			track->cb_color_tile_bo[tmp] = track->cb_color_bo[tmp];
			track->cb_color_tile_offset[tmp] = track->cb_color_bo_offset[tmp];
			ib[idx] = track->cb_color_base_last[tmp];
		} else {
			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
			if (r) {
				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
				return -EINVAL;
			}
			track->cb_color_tile_bo[tmp] = reloc->robj;
			track->cb_color_tile_offset[tmp] = (u64)ib[idx] << 8;
			ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		}
		if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
			track->cb_dirty = true;
		}
		break;
	case R_028100_CB_COLOR0_MASK:
	case R_028104_CB_COLOR1_MASK:
	case R_028108_CB_COLOR2_MASK:
	case R_02810C_CB_COLOR3_MASK:
	case R_028110_CB_COLOR4_MASK:
	case R_028114_CB_COLOR5_MASK:
	case R_028118_CB_COLOR6_MASK:
	case R_02811C_CB_COLOR7_MASK:
		tmp = (reg - R_028100_CB_COLOR0_MASK) / 4;
		track->cb_color_mask[tmp] = radeon_get_ib_value(p, idx);
		if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
			track->cb_dirty = true;
		}
		break;
	case CB_COLOR0_BASE:
	case CB_COLOR1_BASE:
	case CB_COLOR2_BASE:
	case CB_COLOR3_BASE:
	case CB_COLOR4_BASE:
	case CB_COLOR5_BASE:
	case CB_COLOR6_BASE:
	case CB_COLOR7_BASE:
		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = (reg - CB_COLOR0_BASE) / 4;
		track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->cb_color_base_last[tmp] = ib[idx];
		track->cb_color_bo[tmp] = reloc->robj;
		track->cb_color_bo_mc[tmp] = reloc->lobj.gpu_offset;
		track->cb_dirty = true;
		break;
	case DB_DEPTH_BASE:
		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		track->db_offset = radeon_get_ib_value(p, idx) << 8;
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->db_bo = reloc->robj;
		track->db_bo_mc = reloc->lobj.gpu_offset;
		track->db_dirty = true;
		break;
	case DB_HTILE_DATA_BASE:
		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		track->htile_offset = radeon_get_ib_value(p, idx) << 8;
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->htile_bo = reloc->robj;
		track->db_dirty = true;
		break;
	case DB_HTILE_SURFACE:
		track->htile_surface = radeon_get_ib_value(p, idx);
		/* force 8x8 htile width and height */
		ib[idx] |= 3;
		track->db_dirty = true;
		break;
	case SQ_PGM_START_FS:
	case SQ_PGM_START_ES:
	case SQ_PGM_START_VS:
	case SQ_PGM_START_GS:
	case SQ_PGM_START_PS:
	case SQ_ALU_CONST_CACHE_GS_0:
	case SQ_ALU_CONST_CACHE_GS_1:
	case SQ_ALU_CONST_CACHE_GS_2:
	case SQ_ALU_CONST_CACHE_GS_3:
	case SQ_ALU_CONST_CACHE_GS_4:
	case SQ_ALU_CONST_CACHE_GS_5:
	case SQ_ALU_CONST_CACHE_GS_6:
	case SQ_ALU_CONST_CACHE_GS_7:
	case SQ_ALU_CONST_CACHE_GS_8:
	case SQ_ALU_CONST_CACHE_GS_9:
	case SQ_ALU_CONST_CACHE_GS_10:
	case SQ_ALU_CONST_CACHE_GS_11:
	case SQ_ALU_CONST_CACHE_GS_12:
	case SQ_ALU_CONST_CACHE_GS_13:
	case SQ_ALU_CONST_CACHE_GS_14:
	case SQ_ALU_CONST_CACHE_GS_15:
	case SQ_ALU_CONST_CACHE_PS_0:
	case SQ_ALU_CONST_CACHE_PS_1:
	case SQ_ALU_CONST_CACHE_PS_2:
	case SQ_ALU_CONST_CACHE_PS_3:
	case SQ_ALU_CONST_CACHE_PS_4:
	case SQ_ALU_CONST_CACHE_PS_5:
	case SQ_ALU_CONST_CACHE_PS_6:
	case SQ_ALU_CONST_CACHE_PS_7:
	case SQ_ALU_CONST_CACHE_PS_8:
	case SQ_ALU_CONST_CACHE_PS_9:
	case SQ_ALU_CONST_CACHE_PS_10:
	case SQ_ALU_CONST_CACHE_PS_11:
	case SQ_ALU_CONST_CACHE_PS_12:
	case SQ_ALU_CONST_CACHE_PS_13:
	case SQ_ALU_CONST_CACHE_PS_14:
	case SQ_ALU_CONST_CACHE_PS_15:
	case SQ_ALU_CONST_CACHE_VS_0:
	case SQ_ALU_CONST_CACHE_VS_1:
	case SQ_ALU_CONST_CACHE_VS_2:
	case SQ_ALU_CONST_CACHE_VS_3:
	case SQ_ALU_CONST_CACHE_VS_4:
	case SQ_ALU_CONST_CACHE_VS_5:
	case SQ_ALU_CONST_CACHE_VS_6:
	case SQ_ALU_CONST_CACHE_VS_7:
	case SQ_ALU_CONST_CACHE_VS_8:
	case SQ_ALU_CONST_CACHE_VS_9:
	case SQ_ALU_CONST_CACHE_VS_10:
	case SQ_ALU_CONST_CACHE_VS_11:
	case SQ_ALU_CONST_CACHE_VS_12:
	case SQ_ALU_CONST_CACHE_VS_13:
	case SQ_ALU_CONST_CACHE_VS_14:
	case SQ_ALU_CONST_CACHE_VS_15:
		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		break;
	case SX_MEMORY_EXPORT_BASE:
		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
		if (r) {
			dev_warn(p->dev, "bad SET_CONFIG_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		break;
	case SX_MISC:
		track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0;
		break;
	default:
		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
		return -EINVAL;
	}
	return 0;
}

unsigned r600_mip_minify(unsigned size, unsigned level)
{
	unsigned val;

	val = max(1U, size >> level);
	if (level > 0)
		val = roundup_pow_of_two(val);
	return val;
}
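
/*
 * For example (illustrative, not from the original source),
 * r600_mip_minify(13, 2) returns roundup_pow_of_two(max(1U, 13 >> 2)) = 4:
 * non-power-of-two dimensions snap to the next power of two for every level
 * above the base level.
 */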

static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel,
			      unsigned w0, unsigned h0, unsigned d0, unsigned nsamples, unsigned format,
			      unsigned block_align, unsigned height_align, unsigned base_align,
			      unsigned *l0_size, unsigned *mipmap_size)
{
	unsigned offset, i, level;
	unsigned width, height, depth, size;
	unsigned blocksize;
	unsigned nbx, nby;
	unsigned nlevels = llevel - blevel + 1;

	*l0_size = -1;
	blocksize = r600_fmt_get_blocksize(format);

	w0 = r600_mip_minify(w0, 0);
	h0 = r600_mip_minify(h0, 0);
	d0 = r600_mip_minify(d0, 0);
	for (i = 0, offset = 0, level = blevel; i < nlevels; i++, level++) {
		width = r600_mip_minify(w0, i);
		nbx = r600_fmt_get_nblocksx(format, width);

		nbx = round_up(nbx, block_align);

		height = r600_mip_minify(h0, i);
		nby = r600_fmt_get_nblocksy(format, height);
		nby = round_up(nby, height_align);

		depth = r600_mip_minify(d0, i);

		size = nbx * nby * blocksize * nsamples;
		if (nfaces)
			size *= nfaces;
		else
			size *= depth;

		if (i == 0)
			*l0_size = size;

		if (i == 0 || i == 1)
			offset = round_up(offset, base_align);

		offset += size;
	}
	*mipmap_size = offset;
	if (llevel == 0)
		*mipmap_size = *l0_size;
	if (!blevel)
		*mipmap_size -= *l0_size;
}

/**
 * r600_check_texture_resource() - check if register is authorized or not
 * @p: parser structure holding parsing context
 * @idx: index into the cs buffer
 * @texture: texture's bo structure
 * @mipmap: mipmap's bo structure
 *
 * This function will check that the resource has valid field and that
 * the texture and mipmap bo object are big enough to cover this resource.
 */
static int r600_check_texture_resource(struct radeon_cs_parser *p,  u32 idx,
					      struct radeon_bo *texture,
					      struct radeon_bo *mipmap,
					      u64 base_offset,
					      u64 mip_offset,
					      u32 tiling_flags)
{
	struct r600_cs_track *track = p->track;
	u32 dim, nfaces, llevel, blevel, w0, h0, d0;
	u32 word0, word1, l0_size, mipmap_size, word2, word3, word4, word5;
	u32 height_align, pitch, pitch_align, depth_align;
	u32 barray, larray;
	u64 base_align;
	struct array_mode_checker array_check;
	u32 format;
	bool is_array;

	/* on legacy kernel we don't perform advanced check */
	if (p->rdev == NULL)
		return 0;

	/* convert to bytes */
	base_offset <<= 8;
	mip_offset <<= 8;

	word0 = radeon_get_ib_value(p, idx + 0);
	if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
		if (tiling_flags & RADEON_TILING_MACRO)
			word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
		else if (tiling_flags & RADEON_TILING_MICRO)
			word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
	}
	word1 = radeon_get_ib_value(p, idx + 1);
	word2 = radeon_get_ib_value(p, idx + 2) << 8;
	word3 = radeon_get_ib_value(p, idx + 3) << 8;
	word4 = radeon_get_ib_value(p, idx + 4);
	word5 = radeon_get_ib_value(p, idx + 5);
	dim = G_038000_DIM(word0);
	w0 = G_038000_TEX_WIDTH(word0) + 1;
	pitch = (G_038000_PITCH(word0) + 1) * 8;
	h0 = G_038004_TEX_HEIGHT(word1) + 1;
	d0 = G_038004_TEX_DEPTH(word1);
	format = G_038004_DATA_FORMAT(word1);
	blevel = G_038010_BASE_LEVEL(word4);
	llevel = G_038014_LAST_LEVEL(word5);
	/* pitch in texels */
	array_check.array_mode = G_038000_TILE_MODE(word0);
	array_check.group_size = track->group_size;
	array_check.nbanks = track->nbanks;
	array_check.npipes = track->npipes;
	array_check.nsamples = 1;
	array_check.blocksize = r600_fmt_get_blocksize(format);
	nfaces = 1;
	is_array = false;
	switch (dim) {
	case V_038000_SQ_TEX_DIM_1D:
	case V_038000_SQ_TEX_DIM_2D:
	case V_038000_SQ_TEX_DIM_3D:
		break;
	case V_038000_SQ_TEX_DIM_CUBEMAP:
		if (p->family >= CHIP_RV770)
			nfaces = 8;
		else
			nfaces = 6;
		break;
	case V_038000_SQ_TEX_DIM_1D_ARRAY:
	case V_038000_SQ_TEX_DIM_2D_ARRAY:
		is_array = true;
		break;
	case V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA:
		is_array = true;
		/* fall through */
	case V_038000_SQ_TEX_DIM_2D_MSAA:
		array_check.nsamples = 1 << llevel;
		llevel = 0;
		break;
	default:
		dev_warn(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0));
		return -EINVAL;
	}
	if (!r600_fmt_is_valid_texture(format, p->family)) {
		dev_warn(p->dev, "%s:%d texture invalid format %d\n",
			 __func__, __LINE__, format);
		return -EINVAL;
	}

	if (r600_get_array_mode_alignment(&array_check,
					  &pitch_align, &height_align, &depth_align, &base_align)) {
		dev_warn(p->dev, "%s:%d tex array mode (%d) invalid\n",
			 __func__, __LINE__, G_038000_TILE_MODE(word0));
		return -EINVAL;
	}

	/* XXX check height as well... */

	if (!IS_ALIGNED(pitch, pitch_align)) {
		dev_warn(p->dev, "%s:%d tex pitch (%d, 0x%x, %d) invalid\n",
			 __func__, __LINE__, pitch, pitch_align, G_038000_TILE_MODE(word0));
		return -EINVAL;
	}
	if (!IS_ALIGNED(base_offset, base_align)) {
		dev_warn(p->dev, "%s:%d tex base offset (0x%llx, 0x%llx, %d) invalid\n",
			 __func__, __LINE__, base_offset, base_align, G_038000_TILE_MODE(word0));
		return -EINVAL;
	}
	if (!IS_ALIGNED(mip_offset, base_align)) {
		dev_warn(p->dev, "%s:%d tex mip offset (0x%llx, 0x%llx, %d) invalid\n",
			 __func__, __LINE__, mip_offset, base_align, G_038000_TILE_MODE(word0));
		return -EINVAL;
	}

	if (blevel > llevel) {
		dev_warn(p->dev, "texture blevel %d > llevel %d\n",
			 blevel, llevel);
	}
	if (is_array) {
		barray = G_038014_BASE_ARRAY(word5);
		larray = G_038014_LAST_ARRAY(word5);

		nfaces = larray - barray + 1;
	}
	r600_texture_size(nfaces, blevel, llevel, w0, h0, d0, array_check.nsamples, format,
			  pitch_align, height_align, base_align,
			  &l0_size, &mipmap_size);
	/* using get ib will give us the offset into the texture bo */
	if ((l0_size + word2) > radeon_bo_size(texture)) {
		dev_warn(p->dev, "texture bo too small ((%d %d) (%d %d) %d %d %d -> %d have %ld)\n",
			 w0, h0, pitch_align, height_align,
			 array_check.array_mode, format, word2,
			 l0_size, radeon_bo_size(texture));
		dev_warn(p->dev, "alignments %d %d %d %lld\n", pitch, pitch_align, height_align, base_align);
		return -EINVAL;
	}
	/* using get ib will give us the offset into the mipmap bo */
	if ((mipmap_size + word3) > radeon_bo_size(mipmap)) {
		/*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n",
		  w0, h0, format, blevel, nlevels, word3, mipmap_size, radeon_bo_size(texture));*/
	}
	return 0;
}

static bool r600_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
{
	u32 m, i;

	i = (reg >> 7);
	if (i >= ARRAY_SIZE(r600_reg_safe_bm)) {
		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
		return false;
	}
	m = 1 << ((reg >> 2) & 31);
	if (!(r600_reg_safe_bm[i] & m))
		return true;
	dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
	return false;
}

static int r600_packet3_check(struct radeon_cs_parser *p,
				struct radeon_cs_packet *pkt)
{
	struct radeon_cs_reloc *reloc;
	struct r600_cs_track *track;
	volatile u32 *ib;
	unsigned idx;
	unsigned i;
	unsigned start_reg, end_reg, reg;
	int r;
	u32 idx_value;

	track = (struct r600_cs_track *)p->track;
	ib = p->ib.ptr;
	idx = pkt->idx + 1;
	idx_value = radeon_get_ib_value(p, idx);

	switch (pkt->opcode) {
	case PACKET3_SET_PREDICATION:
	{
		int pred_op;
		int tmp;
		uint64_t offset;

		if (pkt->count != 1) {
			DRM_ERROR("bad SET PREDICATION\n");
			return -EINVAL;
		}

		tmp = radeon_get_ib_value(p, idx + 1);
		pred_op = (tmp >> 16) & 0x7;

		/* for the clear predicate operation */
		if (pred_op == 0)
			return 0;

		if (pred_op > 2) {
			DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op);
			return -EINVAL;
		}

		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
		if (r) {
			DRM_ERROR("bad SET PREDICATION\n");
			return -EINVAL;
		}

		offset = reloc->lobj.gpu_offset +
		         (idx_value & 0xfffffff0) +
		         ((u64)(tmp & 0xff) << 32);

		ib[idx + 0] = offset;
		ib[idx + 1] = (tmp & 0xffffff00) | (upper_32_bits(offset) & 0xff);
	}
	break;

	case PACKET3_START_3D_CMDBUF:
		if (p->family >= CHIP_RV770 || pkt->count) {
			DRM_ERROR("bad START_3D\n");
			return -EINVAL;
		}
		break;
	case PACKET3_CONTEXT_CONTROL:
		if (pkt->count != 1) {
			DRM_ERROR("bad CONTEXT_CONTROL\n");
			return -EINVAL;
		}
		break;
	case PACKET3_INDEX_TYPE:
	case PACKET3_NUM_INSTANCES:
		if (pkt->count) {
			DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES\n");
			return -EINVAL;
		}
		break;
	case PACKET3_DRAW_INDEX:
	{
		uint64_t offset;
		if (pkt->count != 3) {
			DRM_ERROR("bad DRAW_INDEX\n");
			return -EINVAL;
		}
		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
		if (r) {
			DRM_ERROR("bad DRAW_INDEX\n");
			return -EINVAL;
		}

		offset = reloc->lobj.gpu_offset +
		         idx_value +
		         ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);

		ib[idx+0] = offset;
		ib[idx+1] = upper_32_bits(offset) & 0xff;

		r = r600_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	}
	case PACKET3_DRAW_INDEX_AUTO:
		if (pkt->count != 1) {
			DRM_ERROR("bad DRAW_INDEX_AUTO\n");
			return -EINVAL;
		}
		r = r600_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_IMMD_BE:
	case PACKET3_DRAW_INDEX_IMMD:
		if (pkt->count < 2) {
			DRM_ERROR("bad DRAW_INDEX_IMMD\n");
			return -EINVAL;
		}
		r = r600_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_WAIT_REG_MEM:
		if (pkt->count != 5) {
			DRM_ERROR("bad WAIT_REG_MEM\n");
			return -EINVAL;
		}
		/* bit 4 is reg (0) or mem (1) */
		if (idx_value & 0x10) {
			uint64_t offset;

			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
			if (r) {
				DRM_ERROR("bad WAIT_REG_MEM\n");
				return -EINVAL;
			}

			offset = reloc->lobj.gpu_offset +
			         (radeon_get_ib_value(p, idx+1) & 0xfffffff0) +
			         ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);

			ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffff0);
			ib[idx+2] = upper_32_bits(offset) & 0xff;
		} else if (idx_value & 0x100) {
			DRM_ERROR("cannot use PFP on REG wait\n");
			return -EINVAL;
		}
		break;
	case PACKET3_CP_DMA:
	{
		u32 command, size;
		u64 offset, tmp;
		if (pkt->count != 4) {
			DRM_ERROR("bad CP DMA\n");
			return -EINVAL;
		}
		command = radeon_get_ib_value(p, idx+4);
		size = command & 0x1fffff;
		if (command & PACKET3_CP_DMA_CMD_SAS) {
			/* src address space is register */
			DRM_ERROR("CP DMA SAS not supported\n");
			return -EINVAL;
		} else {
			if (command & PACKET3_CP_DMA_CMD_SAIC) {
				DRM_ERROR("CP DMA SAIC only supported for registers\n");
				return -EINVAL;
			}
			/* src address space is memory */
			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
			if (r) {
				DRM_ERROR("bad CP DMA SRC\n");
				return -EINVAL;
			}

			tmp = radeon_get_ib_value(p, idx) +
				((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);

			offset = reloc->lobj.gpu_offset + tmp;

			if ((tmp + size) > radeon_bo_size(reloc->robj)) {
				dev_warn(p->dev, "CP DMA src buffer too small (%llu %lu)\n",
					 tmp + size, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}

			ib[idx] = offset;
			ib[idx+1] = (ib[idx+1] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
		}
		if (command & PACKET3_CP_DMA_CMD_DAS) {
			/* dst address space is register */
			DRM_ERROR("CP DMA DAS not supported\n");
			return -EINVAL;
		} else {
			/* dst address space is memory */
			if (command & PACKET3_CP_DMA_CMD_DAIC) {
				DRM_ERROR("CP DMA DAIC only supported for registers\n");
				return -EINVAL;
			}
			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
			if (r) {
				DRM_ERROR("bad CP DMA DST\n");
				return -EINVAL;
			}

			tmp = radeon_get_ib_value(p, idx+2) +
				((u64)(radeon_get_ib_value(p, idx+3) & 0xff) << 32);

			offset = reloc->lobj.gpu_offset + tmp;

			if ((tmp + size) > radeon_bo_size(reloc->robj)) {
				dev_warn(p->dev, "CP DMA dst buffer too small (%llu %lu)\n",
					 tmp + size, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}

			ib[idx+2] = offset;
			ib[idx+3] = upper_32_bits(offset) & 0xff;
		}
		break;
	}
	case PACKET3_SURFACE_SYNC:
		if (pkt->count != 3) {
			DRM_ERROR("bad SURFACE_SYNC\n");
			return -EINVAL;
		}
		/* 0xffffffff/0x0 is flush all cache flag */
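		/* anything else needs a reloc; the base dword (idx+2) is
		 * patched with the bo address in 256-byte units (>> 8) */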
		if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
		    radeon_get_ib_value(p, idx + 2) != 0) {
			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
			if (r) {
				DRM_ERROR("bad SURFACE_SYNC\n");
				return -EINVAL;
			}
			ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		}
		break;
	case PACKET3_EVENT_WRITE:
		if (pkt->count != 2 && pkt->count != 0) {
			DRM_ERROR("bad EVENT_WRITE\n");
			return -EINVAL;
		}
		if (pkt->count) {
			uint64_t offset;

			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
			if (r) {
				DRM_ERROR("bad EVENT_WRITE\n");
				return -EINVAL;
			}
			offset = reloc->lobj.gpu_offset +
			         (radeon_get_ib_value(p, idx+1) & 0xfffffff8) +
			         ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);

			ib[idx+1] = offset & 0xfffffff8;
			ib[idx+2] = upper_32_bits(offset) & 0xff;
		}
		break;
	case PACKET3_EVENT_WRITE_EOP:
	{
		uint64_t offset;

		if (pkt->count != 4) {
			DRM_ERROR("bad EVENT_WRITE_EOP\n");
			return -EINVAL;
		}
		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
		if (r) {
			DRM_ERROR("bad EVENT_WRITE\n");
			return -EINVAL;
		}

		offset = reloc->lobj.gpu_offset +
		         (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
		         ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);

		ib[idx+1] = offset & 0xfffffffc;
		ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
		break;
	}
	case PACKET3_SET_CONFIG_REG:
		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_OFFSET;
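		/* pkt->count register values follow the single offset dword,
		 * so end_reg is the byte offset of the last register written */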
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONFIG_REG_OFFSET) ||
		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			r = r600_cs_check_reg(p, reg, idx+1+i);
			if (r)
				return r;
		}
		break;
	case PACKET3_SET_CONTEXT_REG:
		start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONTEXT_REG_OFFSET) ||
		    (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
		    (end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			r = r600_cs_check_reg(p, reg, idx+1+i);
			if (r)
				return r;
		}
		break;
	case PACKET3_SET_RESOURCE:
		if (pkt->count % 7) {
			DRM_ERROR("bad SET_RESOURCE\n");
			return -EINVAL;
		}
		start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_RESOURCE_OFFSET) ||
		    (start_reg >= PACKET3_SET_RESOURCE_END) ||
		    (end_reg >= PACKET3_SET_RESOURCE_END)) {
			DRM_ERROR("bad SET_RESOURCE\n");
			return -EINVAL;
		}
		for (i = 0; i < (pkt->count / 7); i++) {
			struct radeon_bo *texture, *mipmap;
			u32 size, offset, base_offset, mip_offset;

			switch (G__SQ_VTX_CONSTANT_TYPE(radeon_get_ib_value(p, idx+(i*7)+6+1))) {
			case SQ_TEX_VTX_VALID_TEXTURE:
				/* tex base */
				r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE\n");
					return -EINVAL;
				}
				base_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
				if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
					if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
						ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
					else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
						ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
				}
				texture = reloc->robj;
				/* tex mip base */
				r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE\n");
					return -EINVAL;
				}
				mip_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
				mipmap = reloc->robj;
				r = r600_check_texture_resource(p,  idx+(i*7)+1,
								texture, mipmap,
								base_offset + radeon_get_ib_value(p, idx+1+(i*7)+2),
								mip_offset + radeon_get_ib_value(p, idx+1+(i*7)+3),
								reloc->lobj.tiling_flags);
				if (r)
					return r;
				ib[idx+1+(i*7)+2] += base_offset;
				ib[idx+1+(i*7)+3] += mip_offset;
				break;
			case SQ_TEX_VTX_VALID_BUFFER:
			{
				uint64_t offset64;
				/* vtx base */
				r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE\n");
					return -EINVAL;
				}
				offset = radeon_get_ib_value(p, idx+1+(i*7)+0);
				size = radeon_get_ib_value(p, idx+1+(i*7)+1) + 1;
				if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
					/* force size to size of the buffer */
					dev_warn(p->dev, "vbo resource seems too big (%d) for the bo (%ld)\n",
						 size + offset, radeon_bo_size(reloc->robj));
					ib[idx+1+(i*7)+1] = radeon_bo_size(reloc->robj) - offset;
				}

				offset64 = reloc->lobj.gpu_offset + offset;
				ib[idx+1+(i*8)+0] = offset64;
				ib[idx+1+(i*8)+2] = (ib[idx+1+(i*8)+2] & 0xffffff00) |
						    (upper_32_bits(offset64) & 0xff);
				break;
			}
			case SQ_TEX_VTX_INVALID_TEXTURE:
			case SQ_TEX_VTX_INVALID_BUFFER:
			default:
				DRM_ERROR("bad SET_RESOURCE\n");
				return -EINVAL;
			}
		}
		break;
	case PACKET3_SET_ALU_CONST:
		if (track->sq_config & DX9_CONSTS) {
			start_reg = (idx_value << 2) + PACKET3_SET_ALU_CONST_OFFSET;
			end_reg = 4 * pkt->count + start_reg - 4;
			if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) ||
			    (start_reg >= PACKET3_SET_ALU_CONST_END) ||
			    (end_reg >= PACKET3_SET_ALU_CONST_END)) {
				DRM_ERROR("bad SET_ALU_CONST\n");
				return -EINVAL;
			}
		}
		break;
	case PACKET3_SET_BOOL_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_BOOL_CONST_OFFSET) ||
		    (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
		    (end_reg >= PACKET3_SET_BOOL_CONST_END)) {
			DRM_ERROR("bad SET_BOOL_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_LOOP_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_LOOP_CONST_OFFSET) ||
		    (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
		    (end_reg >= PACKET3_SET_LOOP_CONST_END)) {
			DRM_ERROR("bad SET_LOOP_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_CTL_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CTL_CONST_OFFSET) ||
		    (start_reg >= PACKET3_SET_CTL_CONST_END) ||
		    (end_reg >= PACKET3_SET_CTL_CONST_END)) {
			DRM_ERROR("bad SET_CTL_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_SAMPLER:
		if (pkt->count % 3) {
			DRM_ERROR("bad SET_SAMPLER\n");
			return -EINVAL;
		}
		start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_SAMPLER_OFFSET) ||
		    (start_reg >= PACKET3_SET_SAMPLER_END) ||
		    (end_reg >= PACKET3_SET_SAMPLER_END)) {
			DRM_ERROR("bad SET_SAMPLER\n");
			return -EINVAL;
		}
		break;
	case PACKET3_STRMOUT_BASE_UPDATE:
		/* RS780 and RS880 also need this */
		if (p->family < CHIP_RS780) {
			DRM_ERROR("STRMOUT_BASE_UPDATE only supported on 7xx\n");
			return -EINVAL;
		}
		if (pkt->count != 1) {
			DRM_ERROR("bad STRMOUT_BASE_UPDATE packet count\n");
			return -EINVAL;
		}
		if (idx_value > 3) {
			DRM_ERROR("bad STRMOUT_BASE_UPDATE index\n");
			return -EINVAL;
		}
		{
			u64 offset;

			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
			if (r) {
				DRM_ERROR("bad STRMOUT_BASE_UPDATE reloc\n");
				return -EINVAL;
			}

			if (reloc->robj != track->vgt_strmout_bo[idx_value]) {
				DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo does not match\n");
				return -EINVAL;
			}

			offset = radeon_get_ib_value(p, idx+1) << 8;
			if (offset != track->vgt_strmout_bo_offset[idx_value]) {
				DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo offset does not match: 0x%llx, 0x%x\n",
					  offset, track->vgt_strmout_bo_offset[idx_value]);
				return -EINVAL;
			}

			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad STRMOUT_BASE_UPDATE bo too small: 0x%llx, 0x%lx\n",
					  offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			ib[idx+1] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		}
		break;
	case PACKET3_SURFACE_BASE_UPDATE:
		if (p->family >= CHIP_RV770 || p->family == CHIP_R600) {
			DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
			return -EINVAL;
		}
		if (pkt->count) {
			DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
			return -EINVAL;
		}
		break;
	case PACKET3_STRMOUT_BUFFER_UPDATE:
		if (pkt->count != 4) {
			DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (invalid count)\n");
			return -EINVAL;
		}
		/* Updating memory at DST_ADDRESS. */
		if (idx_value & 0x1) {
			u64 offset;
			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
			if (r) {
				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+1);
			offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%llx, 0x%lx\n",
					  offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			offset += reloc->lobj.gpu_offset;
			ib[idx+1] = offset;
			ib[idx+2] = upper_32_bits(offset) & 0xff;
		}
		/* Reading data from SRC_ADDRESS. */
		if (((idx_value >> 1) & 0x3) == 2) {
			u64 offset;
			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
			if (r) {
				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+3);
			offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%llx, 0x%lx\n",
					  offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			offset += reloc->lobj.gpu_offset;
			ib[idx+3] = offset;
			ib[idx+4] = upper_32_bits(offset) & 0xff;
		}
		break;
	case PACKET3_MEM_WRITE:
	{
		u64 offset;

		if (pkt->count != 3) {
			DRM_ERROR("bad MEM_WRITE (invalid count)\n");
			return -EINVAL;
		}
		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
		if (r) {
			DRM_ERROR("bad MEM_WRITE (missing reloc)\n");
			return -EINVAL;
		}
		offset = radeon_get_ib_value(p, idx+0);
		offset += ((u64)(radeon_get_ib_value(p, idx+1) & 0xff)) << 32UL;
		if (offset & 0x7) {
			DRM_ERROR("bad MEM_WRITE (address not qwords aligned)\n");
			return -EINVAL;
		}
		if ((offset + 8) > radeon_bo_size(reloc->robj)) {
			DRM_ERROR("bad MEM_WRITE bo too small: 0x%llx, 0x%lx\n",
				  offset + 8, radeon_bo_size(reloc->robj));
			return -EINVAL;
		}
		offset += reloc->lobj.gpu_offset;
		ib[idx+0] = offset;
		ib[idx+1] = upper_32_bits(offset) & 0xff;
		break;
	}
	case PACKET3_COPY_DW:
		if (pkt->count != 4) {
			DRM_ERROR("bad COPY_DW (invalid count)\n");
			return -EINVAL;
		}
		if (idx_value & 0x1) {
			u64 offset;
			/* SRC is memory. */
			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
			if (r) {
				DRM_ERROR("bad COPY_DW (missing src reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+1);
			offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad COPY_DW src bo too small: 0x%llx, 0x%lx\n",
					  offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			offset += reloc->lobj.gpu_offset;
			ib[idx+1] = offset;
			ib[idx+2] = upper_32_bits(offset) & 0xff;
		} else {
			/* SRC is a reg. */
			reg = radeon_get_ib_value(p, idx+1) << 2;
			if (!r600_is_safe_reg(p, reg, idx+1))
				return -EINVAL;
		}
		if (idx_value & 0x2) {
			u64 offset;
			/* DST is memory. */
			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
			if (r) {
				DRM_ERROR("bad COPY_DW (missing dst reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+3);
			offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad COPY_DW dst bo too small: 0x%llx, 0x%lx\n",
					  offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			offset += reloc->lobj.gpu_offset;
			ib[idx+3] = offset;
			ib[idx+4] = upper_32_bits(offset) & 0xff;
		} else {
			/* DST is a reg. */
			reg = radeon_get_ib_value(p, idx+3) << 2;
			if (!r600_is_safe_reg(p, reg, idx+3))
				return -EINVAL;
		}
		break;
	case PACKET3_NOP:
		break;
	default:
		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}

int r600_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;
	struct r600_cs_track *track;
	int r;

	if (p->track == NULL) {
		/* initialize tracker, we are in kms */
		track = kzalloc(sizeof(*track), GFP_KERNEL);
		if (track == NULL)
			return -ENOMEM;
		r600_cs_track_init(track);
		if (p->rdev->family < CHIP_RV770) {
			track->npipes = p->rdev->config.r600.tiling_npipes;
			track->nbanks = p->rdev->config.r600.tiling_nbanks;
			track->group_size = p->rdev->config.r600.tiling_group_size;
		} else if (p->rdev->family <= CHIP_RV740) {
			track->npipes = p->rdev->config.rv770.tiling_npipes;
			track->nbanks = p->rdev->config.rv770.tiling_nbanks;
			track->group_size = p->rdev->config.rv770.tiling_group_size;
		}
		p->track = track;
	}
	do {
		r = radeon_cs_packet_parse(p, &pkt, p->idx);
		if (r) {
			kfree(p->track);
			p->track = NULL;
			return r;
		}
		p->idx += pkt.count + 2;
		switch (pkt.type) {
		case RADEON_PACKET_TYPE0:
			r = r600_cs_parse_packet0(p, &pkt);
			break;
		case RADEON_PACKET_TYPE2:
			break;
		case RADEON_PACKET_TYPE3:
			r = r600_packet3_check(p, &pkt);
			break;
		default:
			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
			kfree(p->track);
			p->track = NULL;
			return -EINVAL;
		}
		if (r) {
			kfree(p->track);
			p->track = NULL;
			return r;
		}
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
#if 0
	for (r = 0; r < p->ib.length_dw; r++) {
		printk(KERN_INFO "%05d  0x%08X\n", r, p->ib.ptr[r]);
		mdelay(1);
	}
#endif
	kfree(p->track);
	p->track = NULL;
	return 0;
}

#ifdef CONFIG_DRM_RADEON_UMS

/**
 * r600_cs_parser_fini() - clean parser states
 * @parser:	parser structure holding parsing context.
 * @error:	error number
 *
 * If error is set, then unvalidate buffers, otherwise just free memory
 * used by parsing context.
 **/
static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
{
	unsigned i;

	kfree(parser->relocs);
	for (i = 0; i < parser->nchunks; i++)
		drm_free_large(parser->chunks[i].kdata);
	kfree(parser->chunks);
	kfree(parser->chunks_array);
}

static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p)
{
	if (p->chunk_relocs_idx == -1) {
		return 0;
	}
	p->relocs = kzalloc(sizeof(struct radeon_cs_reloc), GFP_KERNEL);
	if (p->relocs == NULL) {
		return -ENOMEM;
	}
	return 0;
}

int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
			unsigned family, u32 *ib, int *l)
{
	struct radeon_cs_parser parser;
	struct radeon_cs_chunk *ib_chunk;
	struct r600_cs_track *track;
	int r;

	/* initialize tracker */
	track = kzalloc(sizeof(*track), GFP_KERNEL);
	if (track == NULL)
		return -ENOMEM;
	r600_cs_track_init(track);
	r600_cs_legacy_get_tiling_conf(dev, &track->npipes, &track->nbanks, &track->group_size);
	/* initialize parser */
	memset(&parser, 0, sizeof(struct radeon_cs_parser));
	parser.filp = filp;
	parser.dev = &dev->pdev->dev;
	parser.rdev = NULL;
	parser.family = family;
	parser.track = track;
	parser.ib.ptr = ib;
	r = radeon_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	r = r600_cs_parser_relocs_legacy(&parser);
	if (r) {
		DRM_ERROR("Failed to parse relocation !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	/* Copy the packet into the IB, the parser will read from the
	 * input memory (cached) and write to the IB (which can be
	 * uncached). */
	ib_chunk = &parser.chunks[parser.chunk_ib_idx];
	parser.ib.length_dw = ib_chunk->length_dw;
	*l = parser.ib.length_dw;
	if (DRM_COPY_FROM_USER(ib, ib_chunk->user_ptr, ib_chunk->length_dw * 4)) {
		r = -EFAULT;
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	r = r600_cs_parse(&parser);
	if (r) {
		DRM_ERROR("Invalid command stream !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	r600_cs_parser_fini(&parser, r);
	return r;
}

void r600_cs_legacy_init(void)
{
	r600_nomm = 1;
}

#endif

/*
 *  DMA
 */
/**
 * r600_dma_cs_next_reloc() - parse next reloc
 * @p:		parser structure holding parsing context.
 * @cs_reloc:		reloc information
 *
 * Return the next reloc from the relocation chunk and advance
 * the DMA reloc index.
 **/
int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
			   struct radeon_cs_reloc **cs_reloc)
{
	struct radeon_cs_chunk *relocs_chunk;
	unsigned idx;

	*cs_reloc = NULL;
	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	idx = p->dma_reloc_idx;
	if (idx >= p->nrelocs) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, p->nrelocs);
		return -EINVAL;
	}
	*cs_reloc = p->relocs_ptr[idx];
	p->dma_reloc_idx++;
	return 0;
}

#define GET_DMA_CMD(h) (((h) & 0xf0000000) >> 28)
#define GET_DMA_COUNT(h) ((h) & 0x0000ffff)
#define GET_DMA_T(h) (((h) & 0x00800000) >> 23)
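/* i.e. a DMA packet header is: bits 31:28 command, bit 23 tiled flag,
 * bits 15:0 dword count */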

/**
 * r600_dma_cs_parse() - parse the DMA IB
 * @p:		parser structure holding parsing context.
 *
 * Parses the DMA IB from the CS ioctl and updates
 * the GPU addresses based on the reloc information and
 * checks for errors. (R6xx-R7xx)
 * Returns 0 for success and an error on failure.
 **/
int r600_dma_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
	struct radeon_cs_reloc *src_reloc, *dst_reloc;
	u32 header, cmd, count, tiled;
	volatile u32 *ib = p->ib.ptr;
	u32 idx, idx_value;
	u64 src_offset, dst_offset;
	int r;

	do {
		if (p->idx >= ib_chunk->length_dw) {
			DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
				  p->idx, ib_chunk->length_dw);
			return -EINVAL;
		}
		idx = p->idx;
		header = radeon_get_ib_value(p, idx);
		cmd = GET_DMA_CMD(header);
		count = GET_DMA_COUNT(header);
		tiled = GET_DMA_T(header);

		switch (cmd) {
		case DMA_PACKET_WRITE:
			r = r600_dma_cs_next_reloc(p, &dst_reloc);
			if (r) {
				DRM_ERROR("bad DMA_PACKET_WRITE\n");
				return -EINVAL;
			}
			if (tiled) {
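				/* tiled write: dword 1 holds the dst address in
				 * 256-byte units, hence the << 8 / >> 8 below */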
				dst_offset = radeon_get_ib_value(p, idx+1);
				dst_offset <<= 8;

				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
				p->idx += count + 5;
			} else {
				dst_offset = radeon_get_ib_value(p, idx+1);
				dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;

				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
				ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
				p->idx += count + 3;
			}
			if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
				dev_warn(p->dev, "DMA write buffer too small (%llu %lu)\n",
					 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
				return -EINVAL;
			}
			break;
		case DMA_PACKET_COPY:
			r = r600_dma_cs_next_reloc(p, &src_reloc);
			if (r) {
				DRM_ERROR("bad DMA_PACKET_COPY\n");
				return -EINVAL;
			}
			r = r600_dma_cs_next_reloc(p, &dst_reloc);
			if (r) {
				DRM_ERROR("bad DMA_PACKET_COPY\n");
				return -EINVAL;
			}
			if (tiled) {
				idx_value = radeon_get_ib_value(p, idx + 2);
				/* detile bit */
				if (idx_value & (1 << 31)) {
					/* tiled src, linear dst */
					src_offset = radeon_get_ib_value(p, idx+1);
					src_offset <<= 8;
					ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);

					dst_offset = radeon_get_ib_value(p, idx+5);
					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
					ib[idx+5] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
					ib[idx+6] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
				} else {
					/* linear src, tiled dst */
					src_offset = radeon_get_ib_value(p, idx+5);
					src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
					ib[idx+5] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
					ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;

					dst_offset = radeon_get_ib_value(p, idx+1);
					dst_offset <<= 8;
					ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
				}
				p->idx += 7;
			} else {
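				/* non-tiled copy: on RV770+ the high address bits sit
				 * in dwords 3 (dst) and 4 (src); on R6xx both live in
				 * dword 3, src in bits 7:0 and dst in bits 23:16 */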
				if (p->family >= CHIP_RV770) {
					src_offset = radeon_get_ib_value(p, idx+2);
					src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
					dst_offset = radeon_get_ib_value(p, idx+1);
					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;

					ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
					ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
					ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
					ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
					p->idx += 5;
				} else {
					src_offset = radeon_get_ib_value(p, idx+2);
					src_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
					dst_offset = radeon_get_ib_value(p, idx+1);
					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff0000)) << 16;

					ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
					ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
					ib[idx+3] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
					ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff) << 16;
					p->idx += 4;
				}
			}
			if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
				dev_warn(p->dev, "DMA copy src buffer too small (%llu %lu)\n",
					 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
				return -EINVAL;
			}
			if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
				dev_warn(p->dev, "DMA write dst buffer too small (%llu %lu)\n",
					 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
				return -EINVAL;
			}
			break;
		case DMA_PACKET_CONSTANT_FILL:
			if (p->family < CHIP_RV770) {
				DRM_ERROR("Constant Fill is 7xx only !\n");
				return -EINVAL;
			}
			r = r600_dma_cs_next_reloc(p, &dst_reloc);
			if (r) {
				DRM_ERROR("bad DMA_PACKET_WRITE\n");
				return -EINVAL;
			}
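			/* dword 1 holds the low 32 dst address bits; bits 23:16 of
			 * dword 3 hold the upper 8 bits */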
			dst_offset = radeon_get_ib_value(p, idx+1);
			dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16;
			if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
				dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
					 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
				return -EINVAL;
			}
			ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
			ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) << 16) & 0x00ff0000;
			p->idx += 4;
			break;
		case DMA_PACKET_NOP:
			p->idx += 1;
			break;
		default:
			DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
			return -EINVAL;
		}
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
#if 0
	for (r = 0; r < p->ib.length_dw; r++) {
		printk(KERN_INFO "%05d  0x%08X\n", r, p->ib.ptr[r]);
		mdelay(1);
	}
#endif
	return 0;
}