/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/kernel.h>
#include "drmP.h"
#include "radeon.h"
#include "r600d.h"
#include "r600_reg_safe.h"

static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
					struct radeon_cs_reloc **cs_reloc);
static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
					struct radeon_cs_reloc **cs_reloc);
typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**);
static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm;
extern void r600_cs_legacy_get_tiling_conf(struct drm_device *dev, u32 *npipes, u32 *nbanks, u32 *group_size);

struct r600_cs_track {
	/* configuration we mirror so that we use the same code between kms/ums */
	u32			group_size;
	u32			nbanks;
	u32			npipes;
	/* value we track */
	u32			sq_config;
	u32			nsamples;
	u32			cb_color_base_last[8];
	struct radeon_bo	*cb_color_bo[8];
	u64			cb_color_bo_mc[8];
	u32			cb_color_bo_offset[8];
	struct radeon_bo	*cb_color_frag_bo[8];
	struct radeon_bo	*cb_color_tile_bo[8];
	u32			cb_color_info[8];
	u32			cb_color_view[8];
	u32			cb_color_size_idx[8];
	u32			cb_target_mask;
	u32			cb_shader_mask;
	u32			cb_color_size[8];
	u32			vgt_strmout_en;
	u32			vgt_strmout_buffer_en;
	struct radeon_bo	*vgt_strmout_bo[4];
	u64			vgt_strmout_bo_mc[4];
	u32			vgt_strmout_bo_offset[4];
	u32			vgt_strmout_size[4];
	u32			db_depth_control;
	u32			db_depth_info;
	u32			db_depth_size_idx;
	u32			db_depth_view;
	u32			db_depth_size;
	u32			db_offset;
	struct radeon_bo	*db_bo;
	u64			db_bo_mc;
};

#define FMT_8_BIT(fmt, vc)   [fmt] = { 1, 1, 1, vc, CHIP_R600 }
#define FMT_16_BIT(fmt, vc)  [fmt] = { 1, 1, 2, vc, CHIP_R600 }
#define FMT_24_BIT(fmt)      [fmt] = { 1, 1, 4,  0, CHIP_R600 }
#define FMT_32_BIT(fmt, vc)  [fmt] = { 1, 1, 4, vc, CHIP_R600 }
#define FMT_48_BIT(fmt)      [fmt] = { 1, 1, 8,  0, CHIP_R600 }
#define FMT_64_BIT(fmt, vc)  [fmt] = { 1, 1, 8, vc, CHIP_R600 }
#define FMT_96_BIT(fmt)      [fmt] = { 1, 1, 12, 0, CHIP_R600 }
#define FMT_128_BIT(fmt, vc) [fmt] = { 1, 1, 16, vc, CHIP_R600 }

struct gpu_formats {
	unsigned blockwidth;
	unsigned blockheight;
	unsigned blocksize;
	unsigned valid_color;
	enum radeon_family min_family;
};

static const struct gpu_formats color_formats_table[] = {
	/* 8-bit */
	FMT_8_BIT(V_038004_COLOR_8, 1),
	FMT_8_BIT(V_038004_COLOR_4_4, 1),
	FMT_8_BIT(V_038004_COLOR_3_3_2, 1),
	FMT_8_BIT(V_038004_FMT_1, 0),

	/* 16-bit */
	FMT_16_BIT(V_038004_COLOR_16, 1),
	FMT_16_BIT(V_038004_COLOR_16_FLOAT, 1),
	FMT_16_BIT(V_038004_COLOR_8_8, 1),
	FMT_16_BIT(V_038004_COLOR_5_6_5, 1),
	FMT_16_BIT(V_038004_COLOR_6_5_5, 1),
	FMT_16_BIT(V_038004_COLOR_1_5_5_5, 1),
	FMT_16_BIT(V_038004_COLOR_4_4_4_4, 1),
	FMT_16_BIT(V_038004_COLOR_5_5_5_1, 1),

	/* 24-bit */
	FMT_24_BIT(V_038004_FMT_8_8_8),
	/* 32-bit */
	FMT_32_BIT(V_038004_COLOR_32, 1),
	FMT_32_BIT(V_038004_COLOR_32_FLOAT, 1),
	FMT_32_BIT(V_038004_COLOR_16_16, 1),
	FMT_32_BIT(V_038004_COLOR_16_16_FLOAT, 1),
	FMT_32_BIT(V_038004_COLOR_8_24, 1),
	FMT_32_BIT(V_038004_COLOR_8_24_FLOAT, 1),
	FMT_32_BIT(V_038004_COLOR_24_8, 1),
	FMT_32_BIT(V_038004_COLOR_24_8_FLOAT, 1),
	FMT_32_BIT(V_038004_COLOR_10_11_11, 1),
	FMT_32_BIT(V_038004_COLOR_10_11_11_FLOAT, 1),
	FMT_32_BIT(V_038004_COLOR_11_11_10, 1),
	FMT_32_BIT(V_038004_COLOR_11_11_10_FLOAT, 1),
	FMT_32_BIT(V_038004_COLOR_2_10_10_10, 1),
	FMT_32_BIT(V_038004_COLOR_8_8_8_8, 1),
	FMT_32_BIT(V_038004_COLOR_10_10_10_2, 1),
	FMT_32_BIT(V_038004_FMT_5_9_9_9_SHAREDEXP, 0),
	FMT_32_BIT(V_038004_FMT_32_AS_8, 0),
	FMT_32_BIT(V_038004_FMT_32_AS_8_8, 0),

	/* 48-bit */
	FMT_48_BIT(V_038004_FMT_16_16_16),
	FMT_48_BIT(V_038004_FMT_16_16_16_FLOAT),

	/* 64-bit */
	FMT_64_BIT(V_038004_COLOR_X24_8_32_FLOAT, 1),
	FMT_64_BIT(V_038004_COLOR_32_32, 1),
	FMT_64_BIT(V_038004_COLOR_32_32_FLOAT, 1),
	FMT_64_BIT(V_038004_COLOR_16_16_16_16, 1),
	FMT_64_BIT(V_038004_COLOR_16_16_16_16_FLOAT, 1),

	FMT_96_BIT(V_038004_FMT_32_32_32),
	FMT_96_BIT(V_038004_FMT_32_32_32_FLOAT),

	/* 128-bit */
	FMT_128_BIT(V_038004_COLOR_32_32_32_32, 1),
	FMT_128_BIT(V_038004_COLOR_32_32_32_32_FLOAT, 1),

	[V_038004_FMT_GB_GR] = { 2, 1, 4, 0 },
	[V_038004_FMT_BG_RG] = { 2, 1, 4, 0 },

	/* block compressed formats */
	[V_038004_FMT_BC1] = { 4, 4, 8, 0 },
	[V_038004_FMT_BC2] = { 4, 4, 16, 0 },
	[V_038004_FMT_BC3] = { 4, 4, 16, 0 },
	[V_038004_FMT_BC4] = { 4, 4, 8, 0 },
	[V_038004_FMT_BC5] = { 4, 4, 16, 0},
	[V_038004_FMT_BC6] = { 4, 4, 16, 0, CHIP_CEDAR}, /* Evergreen-only */
	[V_038004_FMT_BC7] = { 4, 4, 16, 0, CHIP_CEDAR}, /* Evergreen-only */

	/* The other Evergreen formats */
	[V_038004_FMT_32_AS_32_32_32_32] = { 1, 1, 4, 0, CHIP_CEDAR},
};

bool r600_fmt_is_valid_color(u32 format)
{
	if (format >= ARRAY_SIZE(color_formats_table))
		return false;

	if (color_formats_table[format].valid_color)
		return true;

	return false;
}

bool r600_fmt_is_valid_texture(u32 format, enum radeon_family family)
{
	if (format >= ARRAY_SIZE(color_formats_table))
		return false;

	if (family < color_formats_table[format].min_family)
		return false;

	if (color_formats_table[format].blockwidth > 0)
		return true;

	return false;
}

int r600_fmt_get_blocksize(u32 format)
{
	if (format >= ARRAY_SIZE(color_formats_table))
		return 0;

	return color_formats_table[format].blocksize;
}

int r600_fmt_get_nblocksx(u32 format, u32 w)
{
	unsigned bw;

	if (format >= ARRAY_SIZE(color_formats_table))
		return 0;

	bw = color_formats_table[format].blockwidth;
	if (bw == 0)
		return 0;

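	/* round up to whole blocks: e.g. a 70-texel-wide BC1 surface
	 * (blockwidth 4) needs (70 + 4 - 1) / 4 = 18 blocks
	 */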
	return (w + bw - 1) / bw;
}

int r600_fmt_get_nblocksy(u32 format, u32 h)
{
	unsigned bh;

	if (format >= ARRAY_SIZE(color_formats_table))
		return 0;

	bh = color_formats_table[format].blockheight;
	if (bh == 0)
		return 0;

	return (h + bh - 1) / bh;
}

struct array_mode_checker {
	int array_mode;
	u32 group_size;
	u32 nbanks;
	u32 npipes;
	u32 nsamples;
	u32 blocksize;
};

/* returns alignment in pixels for pitch/height/depth and bytes for base */
static int r600_get_array_mode_alignment(struct array_mode_checker *values,
						u32 *pitch_align,
						u32 *height_align,
						u32 *depth_align,
						u64 *base_align)
{
	u32 tile_width = 8;
	u32 tile_height = 8;
	u32 macro_tile_width = values->nbanks;
	u32 macro_tile_height = values->npipes;
	u32 tile_bytes = tile_width * tile_height * values->blocksize * values->nsamples;
	u32 macro_tile_bytes = macro_tile_width * macro_tile_height * tile_bytes;

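	/* a micro tile is 8x8 elements; the alignments below are chosen so
	 * that each array mode addresses whole (macro) tiles. For example,
	 * with a 256-byte group, 4-byte blocks and one sample, 1D tiling
	 * yields a pitch alignment of 256 / (8 * 4 * 1) = 8 elements.
	 */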
	switch (values->array_mode) {
	case ARRAY_LINEAR_GENERAL:
		/* technically tile_width/_height for pitch/height */
		*pitch_align = 1; /* tile_width */
		*height_align = 1; /* tile_height */
		*depth_align = 1;
		*base_align = 1;
		break;
	case ARRAY_LINEAR_ALIGNED:
		*pitch_align = max((u32)64, (u32)(values->group_size / values->blocksize));
		*height_align = 1;
		*depth_align = 1;
		*base_align = values->group_size;
		break;
	case ARRAY_1D_TILED_THIN1:
		*pitch_align = max((u32)tile_width,
				   (u32)(values->group_size /
					 (tile_height * values->blocksize * values->nsamples)));
		*height_align = tile_height;
		*depth_align = 1;
		*base_align = values->group_size;
		break;
	case ARRAY_2D_TILED_THIN1:
		*pitch_align = max((u32)macro_tile_width * tile_width,
				(u32)((values->group_size * values->nbanks) /
				(values->blocksize * values->nsamples * tile_width)));
		*height_align = macro_tile_height * tile_height;
		*depth_align = 1;
		*base_align = max(macro_tile_bytes,
				  (*pitch_align) * values->blocksize * (*height_align) * values->nsamples);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void r600_cs_track_init(struct r600_cs_track *track)
{
	int i;

	/* assume DX9 mode */
	track->sq_config = DX9_CONSTS;
	for (i = 0; i < 8; i++) {
		track->cb_color_base_last[i] = 0;
		track->cb_color_size[i] = 0;
		track->cb_color_size_idx[i] = 0;
		track->cb_color_info[i] = 0;
		track->cb_color_view[i] = 0xFFFFFFFF;
		track->cb_color_bo[i] = NULL;
		track->cb_color_bo_offset[i] = 0xFFFFFFFF;
		track->cb_color_bo_mc[i] = 0xFFFFFFFF;
	}
	track->cb_target_mask = 0xFFFFFFFF;
	track->cb_shader_mask = 0xFFFFFFFF;
	track->db_bo = NULL;
	track->db_bo_mc = 0xFFFFFFFF;
	/* assume the biggest format and that htile is enabled */
	track->db_depth_info = 7 | (1 << 25);
	track->db_depth_view = 0xFFFFC000;
	track->db_depth_size = 0xFFFFFFFF;
	track->db_depth_size_idx = 0;
	track->db_depth_control = 0xFFFFFFFF;

	for (i = 0; i < 4; i++) {
		track->vgt_strmout_size[i] = 0;
		track->vgt_strmout_bo[i] = NULL;
		track->vgt_strmout_bo_offset[i] = 0xFFFFFFFF;
		track->vgt_strmout_bo_mc[i] = 0xFFFFFFFF;
	}
}

static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
{
	struct r600_cs_track *track = p->track;
	u32 slice_tile_max, size, tmp;
	u32 height, height_align, pitch, pitch_align, depth_align;
	u64 base_offset, base_align;
	struct array_mode_checker array_check;
	volatile u32 *ib = p->ib->ptr;
	unsigned array_mode;
	u32 format;

	if (G_0280A0_TILE_MODE(track->cb_color_info[i])) {
		dev_warn(p->dev, "FMASK or CMASK buffer are not supported by this kernel\n");
		return -EINVAL;
	}
	size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i];
	format = G_0280A0_FORMAT(track->cb_color_info[i]);
	if (!r600_fmt_is_valid_color(format)) {
		dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n",
			 __func__, __LINE__, format,
			i, track->cb_color_info[i]);
		return -EINVAL;
	}
	/* pitch in pixels */
	pitch = (G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1) * 8;
	slice_tile_max = G_028060_SLICE_TILE_MAX(track->cb_color_size[i]) + 1;
	slice_tile_max *= 64;
	height = slice_tile_max / pitch;
	if (height > 8192)
		height = 8192;
	array_mode = G_0280A0_ARRAY_MODE(track->cb_color_info[i]);

	base_offset = track->cb_color_bo_mc[i] + track->cb_color_bo_offset[i];
	array_check.array_mode = array_mode;
	array_check.group_size = track->group_size;
	array_check.nbanks = track->nbanks;
	array_check.npipes = track->npipes;
	array_check.nsamples = track->nsamples;
	array_check.blocksize = r600_fmt_get_blocksize(format);
	if (r600_get_array_mode_alignment(&array_check,
					  &pitch_align, &height_align, &depth_align, &base_align)) {
		dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
			 G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
			 track->cb_color_info[i]);
		return -EINVAL;
	}
	switch (array_mode) {
	case V_0280A0_ARRAY_LINEAR_GENERAL:
		break;
	case V_0280A0_ARRAY_LINEAR_ALIGNED:
		break;
	case V_0280A0_ARRAY_1D_TILED_THIN1:
		/* avoid breaking userspace */
		if (height > 7)
			height &= ~0x7;
		break;
	case V_0280A0_ARRAY_2D_TILED_THIN1:
		break;
	default:
		dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
			G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
			track->cb_color_info[i]);
		return -EINVAL;
	}

	if (!IS_ALIGNED(pitch, pitch_align)) {
		dev_warn(p->dev, "%s:%d cb pitch (%d, 0x%x, %d) invalid\n",
			 __func__, __LINE__, pitch, pitch_align, array_mode);
		return -EINVAL;
	}
	if (!IS_ALIGNED(height, height_align)) {
		dev_warn(p->dev, "%s:%d cb height (%d, 0x%x, %d) invalid\n",
			 __func__, __LINE__, height, height_align, array_mode);
		return -EINVAL;
	}
	if (!IS_ALIGNED(base_offset, base_align)) {
		dev_warn(p->dev, "%s offset[%d] 0x%llx 0x%llx, %d not aligned\n", __func__, i,
			 base_offset, base_align, array_mode);
		return -EINVAL;
	}

	/* check offset */
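	/* worst-case bytes for one slice: blocks_y * blocks_x * block size */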
	tmp = r600_fmt_get_nblocksy(format, height) * r600_fmt_get_nblocksx(format, pitch) * r600_fmt_get_blocksize(format);
	switch (array_mode) {
	default:
	case V_0280A0_ARRAY_LINEAR_GENERAL:
	case V_0280A0_ARRAY_LINEAR_ALIGNED:
		tmp += track->cb_color_view[i] & 0xFF;
		break;
	case V_0280A0_ARRAY_1D_TILED_THIN1:
	case V_0280A0_ARRAY_2D_TILED_THIN1:
		tmp += G_028080_SLICE_MAX(track->cb_color_view[i]) * tmp;
		break;
	}
	if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) {
		if (array_mode == V_0280A0_ARRAY_LINEAR_GENERAL) {
			/* the initial DDX does bad things with the CB size occasionally */
			/* it rounds up height too far for slice tile max but the BO is smaller */
			/* r600c,g also seem to flush at bad times in some apps resulting in
			 * bogus values here. So for linear just allow anything to avoid breaking
			 * broken userspace.
			 */
		} else {
			dev_warn(p->dev, "%s offset[%d] %d %d %d %lu too big (%d %d) (%d %d %d)\n",
				 __func__, i, array_mode,
				 track->cb_color_bo_offset[i], tmp,
				 radeon_bo_size(track->cb_color_bo[i]),
				 pitch, height, r600_fmt_get_nblocksx(format, pitch),
				 r600_fmt_get_nblocksy(format, height),
				 r600_fmt_get_blocksize(format));
			return -EINVAL;
		}
	}
	/* limit max tile */
	tmp = (height * pitch) >> 6;
	if (tmp < slice_tile_max)
		slice_tile_max = tmp;
	tmp = S_028060_PITCH_TILE_MAX((pitch / 8) - 1) |
		S_028060_SLICE_TILE_MAX(slice_tile_max - 1);
	ib[track->cb_color_size_idx[i]] = tmp;
	return 0;
}

static int r600_cs_track_check(struct radeon_cs_parser *p)
{
	struct r600_cs_track *track = p->track;
	u32 tmp;
	int r, i;
	volatile u32 *ib = p->ib->ptr;

	/* on legacy kernel we don't perform advanced check */
	if (p->rdev == NULL)
		return 0;

	/* check streamout */
	if (track->vgt_strmout_en) {
		for (i = 0; i < 4; i++) {
			if (track->vgt_strmout_buffer_en & (1 << i)) {
				if (track->vgt_strmout_bo[i]) {
					u64 offset = (u64)track->vgt_strmout_bo_offset[i] +
						(u64)track->vgt_strmout_size[i];
					if (offset > radeon_bo_size(track->vgt_strmout_bo[i])) {
						DRM_ERROR("streamout %d bo too small: 0x%llx, 0x%lx\n",
							  i, offset,
							  radeon_bo_size(track->vgt_strmout_bo[i]));
						return -EINVAL;
					}
				} else {
					dev_warn(p->dev, "No buffer for streamout %d\n", i);
					return -EINVAL;
				}
			}
		}
	}

	/* check that we have a cb for each enabled target, we don't check
	 * shader_mask because it seems mesa isn't always setting it :(
	 */
	tmp = track->cb_target_mask;
	for (i = 0; i < 8; i++) {
		if ((tmp >> (i * 4)) & 0xF) {
			/* at least one component is enabled */
			if (track->cb_color_bo[i] == NULL) {
				dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
					__func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
				return -EINVAL;
			}
			/* perform rewrite of CB_COLOR[0-7]_SIZE */
			r = r600_cs_track_validate_cb(p, i);
			if (r)
				return r;
		}
	}
	/* Check depth buffer */
	if (G_028800_STENCIL_ENABLE(track->db_depth_control) ||
		G_028800_Z_ENABLE(track->db_depth_control)) {
		u32 nviews, bpe, ntiles, size, slice_tile_max;
		u32 height, height_align, pitch, pitch_align, depth_align;
		u64 base_offset, base_align;
		struct array_mode_checker array_check;
		int array_mode;

		if (track->db_bo == NULL) {
			dev_warn(p->dev, "z/stencil with no depth buffer\n");
			return -EINVAL;
		}
		if (G_028010_TILE_SURFACE_ENABLE(track->db_depth_info)) {
			dev_warn(p->dev, "this kernel doesn't support z/stencil htile\n");
			return -EINVAL;
		}
		switch (G_028010_FORMAT(track->db_depth_info)) {
		case V_028010_DEPTH_16:
			bpe = 2;
			break;
		case V_028010_DEPTH_X8_24:
		case V_028010_DEPTH_8_24:
		case V_028010_DEPTH_X8_24_FLOAT:
		case V_028010_DEPTH_8_24_FLOAT:
		case V_028010_DEPTH_32_FLOAT:
			bpe = 4;
			break;
		case V_028010_DEPTH_X24_8_32_FLOAT:
			bpe = 8;
			break;
		default:
			dev_warn(p->dev, "z/stencil with invalid format %d\n", G_028010_FORMAT(track->db_depth_info));
			return -EINVAL;
		}
		if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
			if (!track->db_depth_size_idx) {
				dev_warn(p->dev, "z/stencil buffer size not set\n");
				return -EINVAL;
			}
			tmp = radeon_bo_size(track->db_bo) - track->db_offset;
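			/* bytes left in the BO -> depth elements (/bpe) ->
			 * 8x8 pixel tiles (>> 6), the unit SLICE_TILE_MAX
			 * is expressed in
			 */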
			tmp = (tmp / bpe) >> 6;
			if (!tmp) {
				dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %ld)\n",
						track->db_depth_size, bpe, track->db_offset,
						radeon_bo_size(track->db_bo));
				return -EINVAL;
			}
			ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF);
		} else {
			size = radeon_bo_size(track->db_bo);
			/* pitch in pixels */
			pitch = (G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1) * 8;
			slice_tile_max = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
			slice_tile_max *= 64;
			height = slice_tile_max / pitch;
			if (height > 8192)
				height = 8192;
			base_offset = track->db_bo_mc + track->db_offset;
			array_mode = G_028010_ARRAY_MODE(track->db_depth_info);
			array_check.array_mode = array_mode;
			array_check.group_size = track->group_size;
			array_check.nbanks = track->nbanks;
			array_check.npipes = track->npipes;
			array_check.nsamples = track->nsamples;
			array_check.blocksize = bpe;
			if (r600_get_array_mode_alignment(&array_check,
							  &pitch_align, &height_align, &depth_align, &base_align)) {
				dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
					 G_028010_ARRAY_MODE(track->db_depth_info),
					 track->db_depth_info);
				return -EINVAL;
			}
			switch (array_mode) {
			case V_028010_ARRAY_1D_TILED_THIN1:
				/* don't break userspace */
				height &= ~0x7;
				break;
			case V_028010_ARRAY_2D_TILED_THIN1:
				break;
			default:
				dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
					 G_028010_ARRAY_MODE(track->db_depth_info),
					 track->db_depth_info);
				return -EINVAL;
			}

			if (!IS_ALIGNED(pitch, pitch_align)) {
				dev_warn(p->dev, "%s:%d db pitch (%d, 0x%x, %d) invalid\n",
					 __func__, __LINE__, pitch, pitch_align, array_mode);
				return -EINVAL;
			}
			if (!IS_ALIGNED(height, height_align)) {
				dev_warn(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n",
					 __func__, __LINE__, height, height_align, array_mode);
				return -EINVAL;
			}
			if (!IS_ALIGNED(base_offset, base_align)) {
				dev_warn(p->dev, "%s offset[%d] 0x%llx, 0x%llx, %d not aligned\n", __func__, i,
					 base_offset, base_align, array_mode);
				return -EINVAL;
			}

			ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
			nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
			tmp = ntiles * bpe * 64 * nviews;
			if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
				dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n",
					 array_mode,
					 track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
					 radeon_bo_size(track->db_bo));
				return -EINVAL;
			}
		}
	}
	return 0;
}

/**
 * r600_cs_packet_parse() - parse cp packet and point ib index to next packet
 * @parser:	parser structure holding parsing context.
 * @pkt:	where to store packet information
 *
 * Assume that chunk_ib_index is properly set. Will return -EINVAL
 * if the packet is bigger than the remaining ib size or if the packet is unknown.
 **/
int r600_cs_packet_parse(struct radeon_cs_parser *p,
			struct radeon_cs_packet *pkt,
			unsigned idx)
{
	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
	uint32_t header;

	if (idx >= ib_chunk->length_dw) {
		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
			  idx, ib_chunk->length_dw);
		return -EINVAL;
	}
	header = radeon_get_ib_value(p, idx);
	pkt->idx = idx;
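	/* PM4 header layout (assumed here): packet type in bits [31:30],
	 * dword count minus one in bits [29:16]; type-0 carries a register
	 * offset in the low bits, type-3 an opcode in bits [15:8]
	 */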
	pkt->type = CP_PACKET_GET_TYPE(header);
	pkt->count = CP_PACKET_GET_COUNT(header);
	pkt->one_reg_wr = 0;
	switch (pkt->type) {
	case PACKET_TYPE0:
		pkt->reg = CP_PACKET0_GET_REG(header);
		break;
	case PACKET_TYPE3:
		pkt->opcode = CP_PACKET3_GET_OPCODE(header);
		break;
	case PACKET_TYPE2:
		pkt->count = -1;
		break;
	default:
		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
		return -EINVAL;
	}
	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
		return -EINVAL;
	}
	return 0;
}

/**
 * r600_cs_packet_next_reloc_mm() - parse next packet which should be reloc packet3
 * @parser:		parser structure holding parsing context.
 * @data:		pointer to relocation data
 * @offset_start:	starting offset
 * @offset_mask:	offset mask (to align start offset on)
 * @reloc:		reloc information
 *
 * Check next packet is relocation packet3, do bo validation and compute
 * GPU offset using the provided start.
 **/
static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
					struct radeon_cs_reloc **cs_reloc)
{
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_packet p3reloc;
	unsigned idx;
	int r;

	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}
	*cs_reloc = NULL;
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	r = r600_cs_packet_parse(p, &p3reloc, p->idx);
	if (r) {
		return r;
	}
	p->idx += p3reloc.count + 2;
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
			  p3reloc.idx);
		return -EINVAL;
	}
	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		return -EINVAL;
	}
	/* FIXME: we assume reloc size is 4 dwords */
	*cs_reloc = p->relocs_ptr[(idx / 4)];
	return 0;
}

/**
 * r600_cs_packet_next_reloc_nomm() - parse next packet which should be reloc packet3
 * @parser:		parser structure holding parsing context.
 * @data:		pointer to relocation data
 * @offset_start:	starting offset
 * @offset_mask:	offset mask (to align start offset on)
 * @reloc:		reloc information
 *
 * Check next packet is relocation packet3, do bo validation and compute
 * GPU offset using the provided start.
 **/
static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
					struct radeon_cs_reloc **cs_reloc)
{
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_packet p3reloc;
	unsigned idx;
	int r;

	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}
	*cs_reloc = NULL;
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	r = r600_cs_packet_parse(p, &p3reloc, p->idx);
	if (r) {
		return r;
	}
	p->idx += p3reloc.count + 2;
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
			  p3reloc.idx);
		return -EINVAL;
	}
	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		return -EINVAL;
	}
	*cs_reloc = p->relocs;
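	/* without an MM-aware reloc table, userspace passes the GPU address
	 * itself: dword 0 of the 4-dword reloc holds the low 32 bits and
	 * dword 3 the high bits, as unpacked below
	 */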
	(*cs_reloc)->lobj.gpu_offset = (u64)relocs_chunk->kdata[idx + 3] << 32;
	(*cs_reloc)->lobj.gpu_offset |= relocs_chunk->kdata[idx + 0];
	return 0;
}

/**
 * r600_cs_packet_next_is_pkt3_nop() - test if next packet is packet3 nop for reloc
 * @parser:		parser structure holding parsing context.
 *
 * Check next packet is relocation packet3, do bo validation and compute
 * GPU offset using the provided start.
 **/
static int r600_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet p3reloc;
	int r;

	r = r600_cs_packet_parse(p, &p3reloc, p->idx);
	if (r) {
		return 0;
	}
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
		return 0;
	}
	return 1;
}

/**
 * r600_cs_packet_next_vline() - parse userspace VLINE packet
 * @parser:		parser structure holding parsing context.
 *
 * Userspace sends a special sequence for VLINE waits.
 * PACKET0 - VLINE_START_END + value
 * PACKET3 - WAIT_REG_MEM poll vline status reg
 * RELOC (P3) - crtc_id in reloc.
 *
 * This function parses this and relocates the VLINE START END
 * and WAIT_REG_MEM packets to the correct crtc.
 * It also detects a switched off crtc and nulls out the
 * wait in that case.
 */
static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
{
	struct drm_mode_object *obj;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	struct radeon_cs_packet p3reloc, wait_reg_mem;
	int crtc_id;
	int r;
	uint32_t header, h_idx, reg, wait_reg_mem_info;
	volatile uint32_t *ib;

	ib = p->ib->ptr;

	/* parse the WAIT_REG_MEM */
	r = r600_cs_packet_parse(p, &wait_reg_mem, p->idx);
	if (r)
		return r;

	/* check it's a WAIT_REG_MEM */
	if (wait_reg_mem.type != PACKET_TYPE3 ||
	    wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
		DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
		return -EINVAL;
	}

	wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
	/* bit 4 is reg (0) or mem (1) */
	if (wait_reg_mem_info & 0x10) {
		DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n");
		return -EINVAL;
	}
	/* waiting for value to be equal */
	if ((wait_reg_mem_info & 0x7) != 0x3) {
		DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
		return -EINVAL;
	}
	if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != AVIVO_D1MODE_VLINE_STATUS) {
		DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
		return -EINVAL;
	}

	if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != AVIVO_D1MODE_VLINE_STAT) {
		DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
		return -EINVAL;
	}

	/* jump over the NOP */
	r = r600_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
	if (r)
		return r;

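	/* the PACKET0 header and its VLINE_START_END payload sit in the two
	 * dwords just before the WAIT_REG_MEM, so remember that header
	 * index for the possible rewrite below
	 */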
	h_idx = p->idx - 2;
	p->idx += wait_reg_mem.count + 2;
	p->idx += p3reloc.count + 2;

	header = radeon_get_ib_value(p, h_idx);
	crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
	reg = CP_PACKET0_GET_REG(header);

	obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
	if (!obj) {
		DRM_ERROR("cannot find crtc %d\n", crtc_id);
		return -EINVAL;
	}
	crtc = obj_to_crtc(obj);
	radeon_crtc = to_radeon_crtc(crtc);
	crtc_id = radeon_crtc->crtc_id;

	if (!crtc->enabled) {
		/* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
		ib[h_idx + 2] = PACKET2(0);
		ib[h_idx + 3] = PACKET2(0);
		ib[h_idx + 4] = PACKET2(0);
		ib[h_idx + 5] = PACKET2(0);
		ib[h_idx + 6] = PACKET2(0);
		ib[h_idx + 7] = PACKET2(0);
		ib[h_idx + 8] = PACKET2(0);
	} else if (crtc_id == 1) {
		switch (reg) {
		case AVIVO_D1MODE_VLINE_START_END:
			header &= ~R600_CP_PACKET0_REG_MASK;
			header |= AVIVO_D2MODE_VLINE_START_END >> 2;
			break;
		default:
			DRM_ERROR("unknown crtc reloc\n");
			return -EINVAL;
		}
		ib[h_idx] = header;
		ib[h_idx + 4] = AVIVO_D2MODE_VLINE_STATUS >> 2;
	}

	return 0;
}

static int r600_packet0_check(struct radeon_cs_parser *p,
				struct radeon_cs_packet *pkt,
				unsigned idx, unsigned reg)
{
	int r;

	switch (reg) {
	case AVIVO_D1MODE_VLINE_START_END:
		r = r600_cs_packet_parse_vline(p);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
					idx, reg);
			return r;
		}
		break;
	default:
		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
		       reg, idx);
		return -EINVAL;
	}
	return 0;
}

static int r600_cs_parse_packet0(struct radeon_cs_parser *p,
				struct radeon_cs_packet *pkt)
{
	unsigned reg, i;
	unsigned idx;
	int r;

	idx = pkt->idx + 1;
	reg = pkt->reg;
	for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
		r = r600_packet0_check(p, pkt, idx, reg);
		if (r) {
			return r;
		}
	}
	return 0;
}

/**
 * r600_cs_check_reg() - check if register is authorized or not
 * @parser: parser structure holding parsing context
 * @reg: register we are testing
 * @idx: index into the cs buffer
 *
 * This function will test against r600_reg_safe_bm and return 0
 * if the register is safe. If the register is not flagged as safe, this
 * function will test it against a list of registers needing special handling.
 */
static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
{
	struct r600_cs_track *track = (struct r600_cs_track *)p->track;
	struct radeon_cs_reloc *reloc;
	u32 m, i, tmp, *ib;
	int r;

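	/* each word of r600_reg_safe_bm covers 32 dword registers (128 bytes
	 * of register space); a clear bit means the register is always safe,
	 * a set bit sends it through the special handling below
	 */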
	i = (reg >> 7);
	if (i >= ARRAY_SIZE(r600_reg_safe_bm)) {
		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
		return -EINVAL;
	}
	m = 1 << ((reg >> 2) & 31);
	if (!(r600_reg_safe_bm[i] & m))
		return 0;
	ib = p->ib->ptr;
	switch (reg) {
	/* force the following regs to 0 in an attempt to disable the out buffer;
	 * we need to understand how it works better before we can perform a
	 * proper security check on it (Jerome)
	 */
	case R_0288A8_SQ_ESGS_RING_ITEMSIZE:
	case R_008C44_SQ_ESGS_RING_SIZE:
	case R_0288B0_SQ_ESTMP_RING_ITEMSIZE:
	case R_008C54_SQ_ESTMP_RING_SIZE:
	case R_0288C0_SQ_FBUF_RING_ITEMSIZE:
	case R_008C74_SQ_FBUF_RING_SIZE:
	case R_0288B4_SQ_GSTMP_RING_ITEMSIZE:
	case R_008C5C_SQ_GSTMP_RING_SIZE:
	case R_0288AC_SQ_GSVS_RING_ITEMSIZE:
	case R_008C4C_SQ_GSVS_RING_SIZE:
	case R_0288BC_SQ_PSTMP_RING_ITEMSIZE:
	case R_008C6C_SQ_PSTMP_RING_SIZE:
	case R_0288C4_SQ_REDUC_RING_ITEMSIZE:
	case R_008C7C_SQ_REDUC_RING_SIZE:
	case R_0288B8_SQ_VSTMP_RING_ITEMSIZE:
	case R_008C64_SQ_VSTMP_RING_SIZE:
	case R_0288C8_SQ_GS_VERT_ITEMSIZE:
		/* get value to populate the IB don't remove */
		tmp = radeon_get_ib_value(p, idx);
		ib[idx] = 0;
		break;
	case SQ_CONFIG:
		track->sq_config = radeon_get_ib_value(p, idx);
		break;
	case R_028800_DB_DEPTH_CONTROL:
		track->db_depth_control = radeon_get_ib_value(p, idx);
		break;
	case R_028010_DB_DEPTH_INFO:
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
		    r600_cs_packet_next_is_pkt3_nop(p)) {
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				dev_warn(p->dev, "bad SET_CONTEXT_REG "
					 "0x%04X\n", reg);
				return -EINVAL;
			}
			track->db_depth_info = radeon_get_ib_value(p, idx);
			ib[idx] &= C_028010_ARRAY_MODE;
			track->db_depth_info &= C_028010_ARRAY_MODE;
			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
				ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1);
				track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1);
			} else {
				ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1);
				track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1);
			}
		} else
			track->db_depth_info = radeon_get_ib_value(p, idx);
		break;
	case R_028004_DB_DEPTH_VIEW:
		track->db_depth_view = radeon_get_ib_value(p, idx);
		break;
	case R_028000_DB_DEPTH_SIZE:
		track->db_depth_size = radeon_get_ib_value(p, idx);
		track->db_depth_size_idx = idx;
		break;
	case R_028AB0_VGT_STRMOUT_EN:
		track->vgt_strmout_en = radeon_get_ib_value(p, idx);
		break;
	case R_028B20_VGT_STRMOUT_BUFFER_EN:
		track->vgt_strmout_buffer_en = radeon_get_ib_value(p, idx);
		break;
	case VGT_STRMOUT_BUFFER_BASE_0:
	case VGT_STRMOUT_BUFFER_BASE_1:
	case VGT_STRMOUT_BUFFER_BASE_2:
	case VGT_STRMOUT_BUFFER_BASE_3:
		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16;
		track->vgt_strmout_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->vgt_strmout_bo[tmp] = reloc->robj;
		track->vgt_strmout_bo_mc[tmp] = reloc->lobj.gpu_offset;
		break;
	case VGT_STRMOUT_BUFFER_SIZE_0:
	case VGT_STRMOUT_BUFFER_SIZE_1:
	case VGT_STRMOUT_BUFFER_SIZE_2:
	case VGT_STRMOUT_BUFFER_SIZE_3:
		tmp = (reg - VGT_STRMOUT_BUFFER_SIZE_0) / 16;
		/* size in register is DWs, convert to bytes */
		track->vgt_strmout_size[tmp] = radeon_get_ib_value(p, idx) * 4;
		break;
	case CP_COHER_BASE:
		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "missing reloc for CP_COHER_BASE "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		break;
	case R_028238_CB_TARGET_MASK:
		track->cb_target_mask = radeon_get_ib_value(p, idx);
		break;
	case R_02823C_CB_SHADER_MASK:
		track->cb_shader_mask = radeon_get_ib_value(p, idx);
		break;
	case R_028C04_PA_SC_AA_CONFIG:
		tmp = G_028C04_MSAA_NUM_SAMPLES(radeon_get_ib_value(p, idx));
		track->nsamples = 1 << tmp;
		break;
	case R_0280A0_CB_COLOR0_INFO:
	case R_0280A4_CB_COLOR1_INFO:
	case R_0280A8_CB_COLOR2_INFO:
	case R_0280AC_CB_COLOR3_INFO:
	case R_0280B0_CB_COLOR4_INFO:
	case R_0280B4_CB_COLOR5_INFO:
	case R_0280B8_CB_COLOR6_INFO:
	case R_0280BC_CB_COLOR7_INFO:
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
		     r600_cs_packet_next_is_pkt3_nop(p)) {
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
				return -EINVAL;
			}
			tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
			track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
				ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1);
				track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1);
			} else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
				ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1);
				track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1);
			}
		} else {
			tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
			track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
		}
		break;
	case R_028080_CB_COLOR0_VIEW:
	case R_028084_CB_COLOR1_VIEW:
	case R_028088_CB_COLOR2_VIEW:
	case R_02808C_CB_COLOR3_VIEW:
	case R_028090_CB_COLOR4_VIEW:
	case R_028094_CB_COLOR5_VIEW:
	case R_028098_CB_COLOR6_VIEW:
	case R_02809C_CB_COLOR7_VIEW:
		tmp = (reg - R_028080_CB_COLOR0_VIEW) / 4;
		track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
		break;
	case R_028060_CB_COLOR0_SIZE:
	case R_028064_CB_COLOR1_SIZE:
	case R_028068_CB_COLOR2_SIZE:
	case R_02806C_CB_COLOR3_SIZE:
	case R_028070_CB_COLOR4_SIZE:
	case R_028074_CB_COLOR5_SIZE:
	case R_028078_CB_COLOR6_SIZE:
	case R_02807C_CB_COLOR7_SIZE:
		tmp = (reg - R_028060_CB_COLOR0_SIZE) / 4;
		track->cb_color_size[tmp] = radeon_get_ib_value(p, idx);
		track->cb_color_size_idx[tmp] = idx;
		break;
		/* These registers were added late; there is userspace
		 * which does provide relocations for them but sets a
		 * 0 offset. In order to avoid breaking old userspace
		 * we detect this and set the address to point to the last
		 * CB_COLOR0_BASE. Note that if userspace doesn't set
		 * CB_COLOR0_BASE before these registers we will report an
		 * error. Old userspace always sets CB_COLOR0_BASE
		 * before any of these.
		 */
	case R_0280E0_CB_COLOR0_FRAG:
	case R_0280E4_CB_COLOR1_FRAG:
	case R_0280E8_CB_COLOR2_FRAG:
	case R_0280EC_CB_COLOR3_FRAG:
	case R_0280F0_CB_COLOR4_FRAG:
	case R_0280F4_CB_COLOR5_FRAG:
	case R_0280F8_CB_COLOR6_FRAG:
	case R_0280FC_CB_COLOR7_FRAG:
		tmp = (reg - R_0280E0_CB_COLOR0_FRAG) / 4;
		if (!r600_cs_packet_next_is_pkt3_nop(p)) {
			if (!track->cb_color_base_last[tmp]) {
				dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
				return -EINVAL;
			}
			ib[idx] = track->cb_color_base_last[tmp];
			track->cb_color_frag_bo[tmp] = track->cb_color_bo[tmp];
		} else {
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
				return -EINVAL;
			}
			ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
			track->cb_color_frag_bo[tmp] = reloc->robj;
		}
		break;
	case R_0280C0_CB_COLOR0_TILE:
	case R_0280C4_CB_COLOR1_TILE:
	case R_0280C8_CB_COLOR2_TILE:
	case R_0280CC_CB_COLOR3_TILE:
	case R_0280D0_CB_COLOR4_TILE:
	case R_0280D4_CB_COLOR5_TILE:
	case R_0280D8_CB_COLOR6_TILE:
	case R_0280DC_CB_COLOR7_TILE:
		tmp = (reg - R_0280C0_CB_COLOR0_TILE) / 4;
		if (!r600_cs_packet_next_is_pkt3_nop(p)) {
			if (!track->cb_color_base_last[tmp]) {
				dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
				return -EINVAL;
			}
			ib[idx] = track->cb_color_base_last[tmp];
			track->cb_color_tile_bo[tmp] = track->cb_color_bo[tmp];
		} else {
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
				return -EINVAL;
			}
			ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
			track->cb_color_tile_bo[tmp] = reloc->robj;
		}
		break;
	case CB_COLOR0_BASE:
	case CB_COLOR1_BASE:
	case CB_COLOR2_BASE:
	case CB_COLOR3_BASE:
	case CB_COLOR4_BASE:
	case CB_COLOR5_BASE:
	case CB_COLOR6_BASE:
	case CB_COLOR7_BASE:
		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = (reg - CB_COLOR0_BASE) / 4;
		track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->cb_color_base_last[tmp] = ib[idx];
		track->cb_color_bo[tmp] = reloc->robj;
		track->cb_color_bo_mc[tmp] = reloc->lobj.gpu_offset;
		break;
	case DB_DEPTH_BASE:
		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		track->db_offset = radeon_get_ib_value(p, idx) << 8;
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->db_bo = reloc->robj;
		track->db_bo_mc = reloc->lobj.gpu_offset;
		break;
	case DB_HTILE_DATA_BASE:
	case SQ_PGM_START_FS:
	case SQ_PGM_START_ES:
	case SQ_PGM_START_VS:
	case SQ_PGM_START_GS:
	case SQ_PGM_START_PS:
	case SQ_ALU_CONST_CACHE_GS_0:
	case SQ_ALU_CONST_CACHE_GS_1:
	case SQ_ALU_CONST_CACHE_GS_2:
	case SQ_ALU_CONST_CACHE_GS_3:
	case SQ_ALU_CONST_CACHE_GS_4:
	case SQ_ALU_CONST_CACHE_GS_5:
	case SQ_ALU_CONST_CACHE_GS_6:
	case SQ_ALU_CONST_CACHE_GS_7:
	case SQ_ALU_CONST_CACHE_GS_8:
	case SQ_ALU_CONST_CACHE_GS_9:
	case SQ_ALU_CONST_CACHE_GS_10:
	case SQ_ALU_CONST_CACHE_GS_11:
	case SQ_ALU_CONST_CACHE_GS_12:
	case SQ_ALU_CONST_CACHE_GS_13:
	case SQ_ALU_CONST_CACHE_GS_14:
	case SQ_ALU_CONST_CACHE_GS_15:
	case SQ_ALU_CONST_CACHE_PS_0:
	case SQ_ALU_CONST_CACHE_PS_1:
	case SQ_ALU_CONST_CACHE_PS_2:
	case SQ_ALU_CONST_CACHE_PS_3:
	case SQ_ALU_CONST_CACHE_PS_4:
	case SQ_ALU_CONST_CACHE_PS_5:
	case SQ_ALU_CONST_CACHE_PS_6:
	case SQ_ALU_CONST_CACHE_PS_7:
	case SQ_ALU_CONST_CACHE_PS_8:
	case SQ_ALU_CONST_CACHE_PS_9:
	case SQ_ALU_CONST_CACHE_PS_10:
	case SQ_ALU_CONST_CACHE_PS_11:
	case SQ_ALU_CONST_CACHE_PS_12:
	case SQ_ALU_CONST_CACHE_PS_13:
	case SQ_ALU_CONST_CACHE_PS_14:
	case SQ_ALU_CONST_CACHE_PS_15:
	case SQ_ALU_CONST_CACHE_VS_0:
	case SQ_ALU_CONST_CACHE_VS_1:
	case SQ_ALU_CONST_CACHE_VS_2:
	case SQ_ALU_CONST_CACHE_VS_3:
	case SQ_ALU_CONST_CACHE_VS_4:
	case SQ_ALU_CONST_CACHE_VS_5:
	case SQ_ALU_CONST_CACHE_VS_6:
	case SQ_ALU_CONST_CACHE_VS_7:
	case SQ_ALU_CONST_CACHE_VS_8:
	case SQ_ALU_CONST_CACHE_VS_9:
	case SQ_ALU_CONST_CACHE_VS_10:
	case SQ_ALU_CONST_CACHE_VS_11:
	case SQ_ALU_CONST_CACHE_VS_12:
	case SQ_ALU_CONST_CACHE_VS_13:
	case SQ_ALU_CONST_CACHE_VS_14:
	case SQ_ALU_CONST_CACHE_VS_15:
		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		break;
	case SX_MEMORY_EXPORT_BASE:
		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONFIG_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		break;
	default:
		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
		return -EINVAL;
	}
	return 0;
}

unsigned r600_mip_minify(unsigned size, unsigned level)
{
	unsigned val;

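	/* each level halves the dimension (clamped to 1); non-base levels
	 * are padded to a power of two, e.g. size 100 at level 2 gives
	 * max(1, 100 >> 2) = 25, rounded up to 32
	 */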
	val = max(1U, size >> level);
	if (level > 0)
		val = roundup_pow_of_two(val);
	return val;
}

static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel,
			      unsigned w0, unsigned h0, unsigned d0, unsigned format,
			      unsigned block_align, unsigned height_align, unsigned base_align,
			      unsigned *l0_size, unsigned *mipmap_size)
{
	unsigned offset, i, level;
	unsigned width, height, depth, size;
	unsigned blocksize;
	unsigned nbx, nby;
	unsigned nlevels = llevel - blevel + 1;

	*l0_size = -1;
	blocksize = r600_fmt_get_blocksize(format);

	w0 = r600_mip_minify(w0, 0);
	h0 = r600_mip_minify(h0, 0);
	d0 = r600_mip_minify(d0, 0);
	for (i = 0, offset = 0, level = blevel; i < nlevels; i++, level++) {
		width = r600_mip_minify(w0, i);
		nbx = r600_fmt_get_nblocksx(format, width);

		nbx = round_up(nbx, block_align);

		height = r600_mip_minify(h0, i);
		nby = r600_fmt_get_nblocksy(format, height);
		nby = round_up(nby, height_align);

		depth = r600_mip_minify(d0, i);

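		/* footprint of this level: blocks_x * blocks_y * bytes per
		 * block, replicated per cube face or per depth slice
		 */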
		size = nbx * nby * blocksize;
		if (nfaces)
			size *= nfaces;
		else
			size *= depth;

		if (i == 0)
			*l0_size = size;

		if (i == 0 || i == 1)
			offset = round_up(offset, base_align);

		offset += size;
	}
	*mipmap_size = offset;
	if (llevel == 0)
		*mipmap_size = *l0_size;
	if (!blevel)
		*mipmap_size -= *l0_size;
}

/**
 * r600_check_texture_resource() - check if texture resource is valid
 * @p: parser structure holding parsing context
 * @idx: index into the cs buffer
 * @texture: texture's bo structure
 * @mipmap: mipmap's bo structure
 *
 * This function will check that the resource has valid fields and that
 * the texture and mipmap bo object are big enough to cover this resource.
 */
static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
					      struct radeon_bo *texture,
					      struct radeon_bo *mipmap,
					      u64 base_offset,
					      u64 mip_offset,
					      u32 tiling_flags)
{
	struct r600_cs_track *track = p->track;
	u32 nfaces, llevel, blevel, w0, h0, d0;
	u32 word0, word1, l0_size, mipmap_size, word2, word3;
	u32 height_align, pitch, pitch_align, depth_align;
	u32 array, barray, larray;
	u64 base_align;
	struct array_mode_checker array_check;
	u32 format;

	/* on legacy kernel we don't perform advanced check */
	if (p->rdev == NULL)
		return 0;

	/* convert to bytes */
	base_offset <<= 8;
	mip_offset <<= 8;

	word0 = radeon_get_ib_value(p, idx + 0);
	if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
		if (tiling_flags & RADEON_TILING_MACRO)
			word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
		else if (tiling_flags & RADEON_TILING_MICRO)
			word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
	}
	word1 = radeon_get_ib_value(p, idx + 1);
	w0 = G_038000_TEX_WIDTH(word0) + 1;
	h0 = G_038004_TEX_HEIGHT(word1) + 1;
	d0 = G_038004_TEX_DEPTH(word1);
	nfaces = 1;
	array = 0;
	switch (G_038000_DIM(word0)) {
	case V_038000_SQ_TEX_DIM_1D:
	case V_038000_SQ_TEX_DIM_2D:
	case V_038000_SQ_TEX_DIM_3D:
		break;
	case V_038000_SQ_TEX_DIM_CUBEMAP:
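		/* cube maps are sized per face; RV770 and newer appear to pad
		 * the allocation to 8 faces rather than 6
		 */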
		if (p->family >= CHIP_RV770)
			nfaces = 8;
		else
			nfaces = 6;
		break;
	case V_038000_SQ_TEX_DIM_1D_ARRAY:
	case V_038000_SQ_TEX_DIM_2D_ARRAY:
		array = 1;
		break;
	case V_038000_SQ_TEX_DIM_2D_MSAA:
	case V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA:
	default:
		dev_warn(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0));
		return -EINVAL;
	}
	format = G_038004_DATA_FORMAT(word1);
	if (!r600_fmt_is_valid_texture(format, p->family)) {
		dev_warn(p->dev, "%s:%d texture invalid format %d\n",
			 __func__, __LINE__, format);
		return -EINVAL;
	}

	/* pitch in texels */
	pitch = (G_038000_PITCH(word0) + 1) * 8;
	array_check.array_mode = G_038000_TILE_MODE(word0);
	array_check.group_size = track->group_size;
	array_check.nbanks = track->nbanks;
	array_check.npipes = track->npipes;
	array_check.nsamples = 1;
	array_check.blocksize = r600_fmt_get_blocksize(format);
	if (r600_get_array_mode_alignment(&array_check,
					  &pitch_align, &height_align, &depth_align, &base_align)) {
		dev_warn(p->dev, "%s:%d tex array mode (%d) invalid\n",
			 __func__, __LINE__, G_038000_TILE_MODE(word0));
		return -EINVAL;
	}

	/* XXX check height as well... */

	if (!IS_ALIGNED(pitch, pitch_align)) {
		dev_warn(p->dev, "%s:%d tex pitch (%d, 0x%x, %d) invalid\n",
			 __func__, __LINE__, pitch, pitch_align, G_038000_TILE_MODE(word0));
		return -EINVAL;
	}
	if (!IS_ALIGNED(base_offset, base_align)) {
		dev_warn(p->dev, "%s:%d tex base offset (0x%llx, 0x%llx, %d) invalid\n",
			 __func__, __LINE__, base_offset, base_align, G_038000_TILE_MODE(word0));
		return -EINVAL;
	}
	if (!IS_ALIGNED(mip_offset, base_align)) {
		dev_warn(p->dev, "%s:%d tex mip offset (0x%llx, 0x%llx, %d) invalid\n",
			 __func__, __LINE__, mip_offset, base_align, G_038000_TILE_MODE(word0));
		return -EINVAL;
	}

	word2 = radeon_get_ib_value(p, idx + 2) << 8;
	word3 = radeon_get_ib_value(p, idx + 3) << 8;

	word0 = radeon_get_ib_value(p, idx + 4);
	word1 = radeon_get_ib_value(p, idx + 5);
	blevel = G_038010_BASE_LEVEL(word0);
	llevel = G_038014_LAST_LEVEL(word1);
	if (blevel > llevel) {
		dev_warn(p->dev, "texture blevel %d > llevel %d\n",
			 blevel, llevel);
	}
	if (array == 1) {
		barray = G_038014_BASE_ARRAY(word1);
		larray = G_038014_LAST_ARRAY(word1);

		nfaces = larray - barray + 1;
	}
	r600_texture_size(nfaces, blevel, llevel, w0, h0, d0, format,
			  pitch_align, height_align, base_align,
			  &l0_size, &mipmap_size);
	/* using get ib will give us the offset into the texture bo */
	if ((l0_size + word2) > radeon_bo_size(texture)) {
		dev_warn(p->dev, "texture bo too small ((%d %d) (%d %d) %d %d %d -> %d have %ld)\n",
			 w0, h0, pitch_align, height_align,
			 array_check.array_mode, format, word2,
			 l0_size, radeon_bo_size(texture));
		dev_warn(p->dev, "alignments %d %d %d %lld\n", pitch, pitch_align, height_align, base_align);
		return -EINVAL;
	}
	/* using get ib will give us the offset into the mipmap bo */
	word3 = radeon_get_ib_value(p, idx + 3) << 8;
	if ((mipmap_size + word3) > radeon_bo_size(mipmap)) {
		/*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n",
		  w0, h0, format, blevel, nlevels, word3, mipmap_size, radeon_bo_size(texture));*/
	}
	return 0;
}

static bool r600_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
{
	u32 m, i;

	i = (reg >> 7);
	if (i >= ARRAY_SIZE(r600_reg_safe_bm)) {
		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
		return false;
	}
	m = 1 << ((reg >> 2) & 31);
	if (!(r600_reg_safe_bm[i] & m))
		return true;
	dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
	return false;
}

static int r600_packet3_check(struct radeon_cs_parser *p,
				struct radeon_cs_packet *pkt)
{
	struct radeon_cs_reloc *reloc;
	struct r600_cs_track *track;
	volatile u32 *ib;
	unsigned idx;
	unsigned i;
	unsigned start_reg, end_reg, reg;
	int r;
	u32 idx_value;

	track = (struct r600_cs_track *)p->track;
	ib = p->ib->ptr;
	idx = pkt->idx + 1;
	idx_value = radeon_get_ib_value(p, idx);

	switch (pkt->opcode) {
	case PACKET3_SET_PREDICATION:
	{
		int pred_op;
		int tmp;
		if (pkt->count != 1) {
			DRM_ERROR("bad SET PREDICATION\n");
			return -EINVAL;
		}

		tmp = radeon_get_ib_value(p, idx + 1);
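		/* the predication operation lives in bits [18:16] of the
		 * second dword; op 0 (clear predicate) takes no reloc
		 */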
		pred_op = (tmp >> 16) & 0x7;

		/* for the clear predicate operation */
		if (pred_op == 0)
			return 0;

		if (pred_op > 2) {
			DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op);
			return -EINVAL;
		}

		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad SET PREDICATION\n");
			return -EINVAL;
		}

		ib[idx + 0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx + 1] = tmp + (upper_32_bits(reloc->lobj.gpu_offset) & 0xff);
	}
	break;

	case PACKET3_START_3D_CMDBUF:
		if (p->family >= CHIP_RV770 || pkt->count) {
			DRM_ERROR("bad START_3D\n");
			return -EINVAL;
		}
		break;
	case PACKET3_CONTEXT_CONTROL:
		if (pkt->count != 1) {
			DRM_ERROR("bad CONTEXT_CONTROL\n");
			return -EINVAL;
		}
		break;
	case PACKET3_INDEX_TYPE:
	case PACKET3_NUM_INSTANCES:
		if (pkt->count) {
			DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES\n");
			return -EINVAL;
		}
		break;
	case PACKET3_DRAW_INDEX:
		if (pkt->count != 3) {
			DRM_ERROR("bad DRAW_INDEX\n");
			return -EINVAL;
		}
		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad DRAW_INDEX\n");
			return -EINVAL;
		}
		ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		r = r600_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_AUTO:
		if (pkt->count != 1) {
			DRM_ERROR("bad DRAW_INDEX_AUTO\n");
			return -EINVAL;
		}
		r = r600_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_IMMD_BE:
	case PACKET3_DRAW_INDEX_IMMD:
		if (pkt->count < 2) {
			DRM_ERROR("bad DRAW_INDEX_IMMD\n");
			return -EINVAL;
		}
		r = r600_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_WAIT_REG_MEM:
		if (pkt->count != 5) {
			DRM_ERROR("bad WAIT_REG_MEM\n");
			return -EINVAL;
		}
		/* bit 4 is reg (0) or mem (1) */
		if (idx_value & 0x10) {
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad WAIT_REG_MEM\n");
				return -EINVAL;
			}
			ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
1634
			ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		}
		break;
	case PACKET3_SURFACE_SYNC:
		if (pkt->count != 3) {
			DRM_ERROR("bad SURFACE_SYNC\n");
			return -EINVAL;
		}
		/* 0xffffffff/0x0 is flush all cache flag */
		if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
		    radeon_get_ib_value(p, idx + 2) != 0) {
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad SURFACE_SYNC\n");
				return -EINVAL;
			}
			ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		}
		break;
	case PACKET3_EVENT_WRITE:
		if (pkt->count != 2 && pkt->count != 0) {
			DRM_ERROR("bad EVENT_WRITE\n");
			return -EINVAL;
		}
		if (pkt->count) {
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad EVENT_WRITE\n");
				return -EINVAL;
			}
			ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
			ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		}
		break;
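	/* EVENT_WRITE_EOP always writes back to memory, so the reloc is mandatory */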
	case PACKET3_EVENT_WRITE_EOP:
		if (pkt->count != 4) {
			DRM_ERROR("bad EVENT_WRITE_EOP\n");
			return -EINVAL;
		}
		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad EVENT_WRITE\n");
			return -EINVAL;
		}
		ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		break;
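	/* the SET_* packets below write pkt->count consecutive dwords into a
	 * register block; the dword offset in the packet is converted to a
	 * register address and the whole run must stay inside the block.
	 */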
	case PACKET3_SET_CONFIG_REG:
		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONFIG_REG_OFFSET) ||
		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			r = r600_cs_check_reg(p, reg, idx+1+i);
			if (r)
				return r;
		}
		break;
	case PACKET3_SET_CONTEXT_REG:
		start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONTEXT_REG_OFFSET) ||
		    (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
		    (end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			r = r600_cs_check_reg(p, reg, idx+1+i);
			if (r)
				return r;
		}
		break;
	case PACKET3_SET_RESOURCE:
		if (pkt->count % 7) {
			DRM_ERROR("bad SET_RESOURCE\n");
			return -EINVAL;
		}
		start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_RESOURCE_OFFSET) ||
		    (start_reg >= PACKET3_SET_RESOURCE_END) ||
		    (end_reg >= PACKET3_SET_RESOURCE_END)) {
			DRM_ERROR("bad SET_RESOURCE\n");
			return -EINVAL;
		}
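		/* each resource descriptor is 7 dwords; its last dword tells
		 * whether it describes a texture or a vertex buffer
		 */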
		for (i = 0; i < (pkt->count / 7); i++) {
			struct radeon_bo *texture, *mipmap;
			u32 size, offset, base_offset, mip_offset;

			switch (G__SQ_VTX_CONSTANT_TYPE(radeon_get_ib_value(p, idx+(i*7)+6+1))) {
			case SQ_TEX_VTX_VALID_TEXTURE:
				/* tex base */
				r = r600_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE\n");
					return -EINVAL;
				}
				base_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
				if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
					if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
						ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
					else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
						ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
				}
				texture = reloc->robj;
				/* tex mip base */
				r = r600_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE\n");
					return -EINVAL;
				}
				mip_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
				mipmap = reloc->robj;
				r = r600_check_texture_resource(p, idx+(i*7)+1,
								texture, mipmap,
								base_offset + radeon_get_ib_value(p, idx+1+(i*7)+2),
								mip_offset + radeon_get_ib_value(p, idx+1+(i*7)+3),
								reloc->lobj.tiling_flags);
				if (r)
					return r;
				ib[idx+1+(i*7)+2] += base_offset;
				ib[idx+1+(i*7)+3] += mip_offset;
				break;
			case SQ_TEX_VTX_VALID_BUFFER:
				/* vtx base */
				r = r600_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE\n");
					return -EINVAL;
				}
				offset = radeon_get_ib_value(p, idx+1+(i*7)+0);
				size = radeon_get_ib_value(p, idx+1+(i*7)+1) + 1;
				if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
					/* force size to size of the buffer */
					dev_warn(p->dev, "vbo resource seems too big (%d) for the bo (%ld)\n",
						 size + offset, radeon_bo_size(reloc->robj));
					ib[idx+1+(i*7)+1] = radeon_bo_size(reloc->robj);
				}
				ib[idx+1+(i*7)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff);
				ib[idx+1+(i*7)+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
				break;
			case SQ_TEX_VTX_INVALID_TEXTURE:
			case SQ_TEX_VTX_INVALID_BUFFER:
			default:
				DRM_ERROR("bad SET_RESOURCE\n");
				return -EINVAL;
			}
		}
		break;
	case PACKET3_SET_ALU_CONST:
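		/* ALU constants are only range-checked when SQ_CONFIG selects
		 * DX9-style constants
		 */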
		if (track->sq_config & DX9_CONSTS) {
			start_reg = (idx_value << 2) + PACKET3_SET_ALU_CONST_OFFSET;
			end_reg = 4 * pkt->count + start_reg - 4;
			if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) ||
			    (start_reg >= PACKET3_SET_ALU_CONST_END) ||
			    (end_reg >= PACKET3_SET_ALU_CONST_END)) {
				DRM_ERROR("bad SET_ALU_CONST\n");
				return -EINVAL;
			}
		}
		break;
	case PACKET3_SET_BOOL_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_BOOL_CONST_OFFSET) ||
		    (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
		    (end_reg >= PACKET3_SET_BOOL_CONST_END)) {
			DRM_ERROR("bad SET_BOOL_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_LOOP_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_LOOP_CONST_OFFSET) ||
		    (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
		    (end_reg >= PACKET3_SET_LOOP_CONST_END)) {
			DRM_ERROR("bad SET_LOOP_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_CTL_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CTL_CONST_OFFSET) ||
		    (start_reg >= PACKET3_SET_CTL_CONST_END) ||
		    (end_reg >= PACKET3_SET_CTL_CONST_END)) {
			DRM_ERROR("bad SET_CTL_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_SAMPLER:
		if (pkt->count % 3) {
			DRM_ERROR("bad SET_SAMPLER\n");
			return -EINVAL;
		}
		start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_SAMPLER_OFFSET) ||
		    (start_reg >= PACKET3_SET_SAMPLER_END) ||
		    (end_reg >= PACKET3_SET_SAMPLER_END)) {
			DRM_ERROR("bad SET_SAMPLER\n");
			return -EINVAL;
		}
		break;
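	/* SURFACE_BASE_UPDATE only exists on rv6xx: reject it on the original
	 * R600 and on RV770 and newer
	 */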
	case PACKET3_SURFACE_BASE_UPDATE:
		if (p->family >= CHIP_RV770 || p->family == CHIP_R600) {
			DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
			return -EINVAL;
		}
		if (pkt->count) {
			DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
			return -EINVAL;
		}
		break;
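	/* STRMOUT_BUFFER_UPDATE may store the buffer offset to memory and/or
	 * load it back; every memory address involved needs a relocation and
	 * a bounds check against its backing BO.
	 */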
	case PACKET3_STRMOUT_BUFFER_UPDATE:
		if (pkt->count != 4) {
			DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (invalid count)\n");
			return -EINVAL;
		}
		/* Updating memory at DST_ADDRESS. */
		if (idx_value & 0x1) {
			u64 offset;
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+1);
			offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%llx, 0x%lx\n",
					  offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
			ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		}
		/* Reading data from SRC_ADDRESS. */
		if (((idx_value >> 1) & 0x3) == 2) {
			u64 offset;
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+3);
			offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%llx, 0x%lx\n",
					  offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			ib[idx+3] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
			ib[idx+4] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		}
		break;
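	/* COPY_DW moves a single dword; bit 0 of the control word selects a
	 * memory source and bit 1 a memory destination, while register
	 * operands must be on the safe-register list.
	 */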
	case PACKET3_COPY_DW:
		if (pkt->count != 4) {
			DRM_ERROR("bad COPY_DW (invalid count)\n");
			return -EINVAL;
		}
		if (idx_value & 0x1) {
			u64 offset;
			/* SRC is memory. */
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad COPY_DW (missing src reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+1);
			offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad COPY_DW src bo too small: 0x%llx, 0x%lx\n",
					  offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
			ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		} else {
			/* SRC is a reg. */
			reg = radeon_get_ib_value(p, idx+1) << 2;
			if (!r600_is_safe_reg(p, reg, idx+1))
				return -EINVAL;
		}
		if (idx_value & 0x2) {
			u64 offset;
			/* DST is memory. */
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad COPY_DW (missing dst reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+3);
			offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad COPY_DW dst bo too small: 0x%llx, 0x%lx\n",
					  offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			ib[idx+3] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
			ib[idx+4] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		} else {
			/* DST is a reg. */
			reg = radeon_get_ib_value(p, idx+3) << 2;
			if (!r600_is_safe_reg(p, reg, idx+3))
				return -EINVAL;
		}
		break;
	case PACKET3_NOP:
		break;
	default:
		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}

int r600_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;
	struct r600_cs_track *track;
	int r;

	if (p->track == NULL) {
		/* initialize tracker, we are in kms */
		track = kzalloc(sizeof(*track), GFP_KERNEL);
		if (track == NULL)
			return -ENOMEM;
		r600_cs_track_init(track);
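		/* mirror the ASIC's tiling configuration so the size checks
		 * match the hardware layout
		 */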
		if (p->rdev->family < CHIP_RV770) {
			track->npipes = p->rdev->config.r600.tiling_npipes;
			track->nbanks = p->rdev->config.r600.tiling_nbanks;
			track->group_size = p->rdev->config.r600.tiling_group_size;
		} else if (p->rdev->family <= CHIP_RV740) {
			track->npipes = p->rdev->config.rv770.tiling_npipes;
			track->nbanks = p->rdev->config.rv770.tiling_nbanks;
			track->group_size = p->rdev->config.rv770.tiling_group_size;
		}
		p->track = track;
	}
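	/* walk the IB packet by packet until the chunk is exhausted */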
	do {
		r = r600_cs_packet_parse(p, &pkt, p->idx);
		if (r) {
			kfree(p->track);
			p->track = NULL;
			return r;
		}
		p->idx += pkt.count + 2;
		switch (pkt.type) {
		case PACKET_TYPE0:
			r = r600_cs_parse_packet0(p, &pkt);
			break;
		case PACKET_TYPE2:
			break;
		case PACKET_TYPE3:
			r = r600_packet3_check(p, &pkt);
			break;
		default:
			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
			kfree(p->track);
			p->track = NULL;
			return -EINVAL;
		}
		if (r) {
			kfree(p->track);
			p->track = NULL;
			return r;
		}
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
#if 0
	for (r = 0; r < p->ib->length_dw; r++) {
		printk(KERN_INFO "%05d  0x%08X\n", r, p->ib->ptr[r]);
		mdelay(1);
	}
#endif
	kfree(p->track);
	p->track = NULL;
	return 0;
}

static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p)
{
	if (p->chunk_relocs_idx == -1) {
		return 0;
	}
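	/* a single scratch reloc entry is enough here: the legacy (nomm) reloc
	 * handler rewrites it in place for each relocation it encounters
	 */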
	p->relocs = kzalloc(sizeof(struct radeon_cs_reloc), GFP_KERNEL);
	if (p->relocs == NULL) {
		return -ENOMEM;
	}
	return 0;
}

/**
 * r600_cs_parser_fini() - clean parser states
 * @parser:	parser structure holding parsing context.
 * @error:	error number
 *
 * If error is set, then unvalidate the buffer; otherwise just free the
 * memory used by the parsing context.
 **/
static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
{
	unsigned i;

	kfree(parser->relocs);
	for (i = 0; i < parser->nchunks; i++) {
		kfree(parser->chunks[i].kdata);
		kfree(parser->chunks[i].kpage[0]);
		kfree(parser->chunks[i].kpage[1]);
	}
	kfree(parser->chunks);
	kfree(parser->chunks_array);
}

int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
			unsigned family, u32 *ib, int *l)
{
	struct radeon_cs_parser parser;
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_ib fake_ib;
	struct r600_cs_track *track;
	int r;

	/* initialize tracker */
	track = kzalloc(sizeof(*track), GFP_KERNEL);
	if (track == NULL)
		return -ENOMEM;
	r600_cs_track_init(track);
	r600_cs_legacy_get_tiling_conf(dev, &track->npipes, &track->nbanks, &track->group_size);
	/* initialize parser */
	memset(&parser, 0, sizeof(struct radeon_cs_parser));
	parser.filp = filp;
	parser.dev = &dev->pdev->dev;
	parser.rdev = NULL;
	parser.family = family;
	parser.ib = &fake_ib;
	parser.track = track;
	fake_ib.ptr = ib;
	r = radeon_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	r = r600_cs_parser_relocs_legacy(&parser);
	if (r) {
		DRM_ERROR("Failed to parse relocation !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	/* Copy the packet into the IB, the parser will read from the
	 * input memory (cached) and write to the IB (which can be
	 * uncached). */
	ib_chunk = &parser.chunks[parser.chunk_ib_idx];
	parser.ib->length_dw = ib_chunk->length_dw;
	*l = parser.ib->length_dw;
	r = r600_cs_parse(&parser);
	if (r) {
		DRM_ERROR("Invalid command stream !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	r = radeon_cs_finish_pages(&parser);
	if (r) {
		DRM_ERROR("Invalid command stream !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	r600_cs_parser_fini(&parser, r);
	return r;
}

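/* switch the parser over to the reloc handler that works without the kernel
 * memory manager, for legacy (UMS) userspace
 */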
void r600_cs_legacy_init(void)
{
	r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_nomm;
}