/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include "radeon.h"
#include <drm/radeon_drm.h>
#include "radeon_asic.h"

#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>

#if defined(CONFIG_VGA_SWITCHEROO)
bool radeon_is_px(void);
#else
static inline bool radeon_is_px(void) { return false; }
#endif
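
/*
 * radeon_is_px() reports whether this is a PX (PowerXpress/hybrid graphics)
 * platform; it gates the runtime PM setup in radeon_driver_load_kms() below.
 * The policy itself comes from the "runpm" module parameter handled elsewhere
 * in the driver (radeon_drv.c): -1 = default, PX only; 0 = disable;
 * 1 = force enable.  Illustrative usage:
 *
 *	modprobe radeon runpm=1
 */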

/**
 * radeon_driver_unload_kms - Main unload function for KMS.
 *
 * @dev: drm dev pointer
 *
 * This is the main unload function for KMS (all asics).
 * It calls radeon_modeset_fini() to tear down the
 * displays, and radeon_device_fini() to tear down
 * the rest of the device (CP, writeback, etc.).
 * Returns 0 on success.
 */
int radeon_driver_unload_kms(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;

	if (rdev == NULL)
		return 0;

	if (rdev->rmmio == NULL)
		goto done_free;

	pm_runtime_get_sync(dev->dev);

	radeon_acpi_fini(rdev);

	radeon_modeset_fini(rdev);
	radeon_device_fini(rdev);

done_free:
	kfree(rdev);
	dev->dev_private = NULL;
	return 0;
}

/**
 * radeon_driver_load_kms - Main load function for KMS.
 *
 * @dev: drm dev pointer
 * @flags: device flags
 *
 * This is the main load function for KMS (all asics).
 * It calls radeon_device_init() to set up the non-display
 * parts of the chip (asic init, CP, writeback, etc.), and
 * radeon_modeset_init() to set up the display parts
 * (crtcs, encoders, hotplug detect, etc.).
 * Returns 0 on success, error on failure.
 */
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
{
	struct radeon_device *rdev;
	int r, acpi_status;

	rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL);
	if (rdev == NULL) {
		return -ENOMEM;
	}
	dev->dev_private = (void *)rdev;

	/* update BUS flag */
	if (drm_pci_device_is_agp(dev)) {
		flags |= RADEON_IS_AGP;
	} else if (pci_is_pcie(dev->pdev)) {
		flags |= RADEON_IS_PCIE;
	} else {
		flags |= RADEON_IS_PCI;
	}

	/* radeon_device_init should report only fatal errors
	 * (e.g. memory allocation, iomapping, or memory manager
	 * initialization failure); it must otherwise properly
	 * initialize the GPU MC controller and permit VRAM
	 * allocation.
	 */
	r = radeon_device_init(rdev, dev, dev->pdev, flags);
	if (r) {
		dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
		goto out;
	}

	/* Again, modeset_init should fail only on a fatal error;
	 * otherwise it should provide enough functionality for
	 * shadowfb to run.
	 */
	r = radeon_modeset_init(rdev);
	if (r)
		dev_err(&dev->pdev->dev, "Fatal error during modeset init\n");

	/* Call ACPI methods: requires modeset init,
	 * but failure is not fatal.
	 */
	if (!r) {
		acpi_status = radeon_acpi_init(rdev);
		if (acpi_status)
			dev_dbg(&dev->pdev->dev,
				"Error during ACPI methods call\n");
	}

	if ((radeon_runtime_pm == 1) ||
	    ((radeon_runtime_pm == -1) && radeon_is_px())) {
		pm_runtime_use_autosuspend(dev->dev);
		pm_runtime_set_autosuspend_delay(dev->dev, 5000);
		pm_runtime_set_active(dev->dev);
		pm_runtime_allow(dev->dev);
		pm_runtime_mark_last_busy(dev->dev);
		pm_runtime_put_autosuspend(dev->dev);
	}

out:
	if (r)
		radeon_driver_unload_kms(dev);


	return r;
}

/**
 * radeon_set_filp_rights - Set filp rights.
 *
 * @dev: drm dev pointer
 * @owner: pointer to the drm file that currently owns the rights
 * @applier: drm file requesting or revoking the rights
 * @value: 1 to request rights, 0 to revoke; updated to reflect ownership
 *
 * Sets the filp rights for the device (all asics).
 */
static void radeon_set_filp_rights(struct drm_device *dev,
				   struct drm_file **owner,
				   struct drm_file *applier,
				   uint32_t *value)
{
	mutex_lock(&dev->struct_mutex);
	if (*value == 1) {
		/* wants rights */
		if (!*owner)
			*owner = applier;
	} else if (*value == 0) {
		/* revokes rights */
		if (*owner == applier)
			*owner = NULL;
	}
	*value = *owner == applier ? 1 : 0;
	mutex_unlock(&dev->struct_mutex);
}
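
/*
 * Illustrative userspace view of the handshake above (not part of this
 * driver): a client asks for Hyper-Z or CMASK ownership through
 * RADEON_INFO_WANT_HYPERZ / RADEON_INFO_WANT_CMASK, writing 1 to request
 * (0 to drop) the rights, and must check the value written back, which is
 * 1 only while its file descriptor owns the rights.  A minimal sketch,
 * assuming libdrm's drmCommandWriteRead() and the radeon_drm.h UAPI:
 *
 *	uint32_t want = 1;
 *	struct drm_radeon_info info = {
 *		.request = RADEON_INFO_WANT_HYPERZ,
 *		.value = (uintptr_t)&want,
 *	};
 *
 *	if (drmCommandWriteRead(fd, DRM_RADEON_INFO, &info, sizeof(info)) == 0
 *	    && want == 1)
 *		enable_hyperz_state();	// hypothetical client helper
 */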

/*
 * Userspace get information ioctl
 */
/**
 * radeon_info_ioctl - answer a device specific request.
 *
 * @dev: drm device pointer
 * @data: request object
 * @filp: drm filp
 *
 * This function is used to pass device specific parameters to the userspace
 * drivers.  Examples include: pci device id, pipeline parms, tiling params,
 * etc. (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_info *info = data;
	struct radeon_mode_info *minfo = &rdev->mode_info;
	uint32_t *value, value_tmp, *value_ptr, value_size;
	uint64_t value64;
	struct drm_crtc *crtc;
	int i, found;

	value_ptr = (uint32_t *)((unsigned long)info->value);
	value = &value_tmp;
	value_size = sizeof(uint32_t);

	switch (info->request) {
	case RADEON_INFO_DEVICE_ID:
		*value = dev->pdev->device;
		break;
	case RADEON_INFO_NUM_GB_PIPES:
		*value = rdev->num_gb_pipes;
		break;
	case RADEON_INFO_NUM_Z_PIPES:
		*value = rdev->num_z_pipes;
		break;
	case RADEON_INFO_ACCEL_WORKING:
		/* xf86-video-ati 6.13.0 relies on this being false for evergreen */
		if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK))
			*value = false;
		else
			*value = rdev->accel_working;
		break;
	case RADEON_INFO_CRTC_FROM_ID:
		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
			return -EFAULT;
		}
		for (i = 0, found = 0; i < rdev->num_crtc; i++) {
			crtc = (struct drm_crtc *)minfo->crtcs[i];
			if (crtc && crtc->base.id == *value) {
				struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
				*value = radeon_crtc->crtc_id;
				found = 1;
				break;
			}
		}
		if (!found) {
			DRM_DEBUG_KMS("unknown crtc id %d\n", *value);
			return -EINVAL;
		}
		break;
	case RADEON_INFO_ACCEL_WORKING2:
		*value = rdev->accel_working;
		break;
	case RADEON_INFO_TILING_CONFIG:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.tile_config;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.tile_config;
		else if (rdev->family >= CHIP_CAYMAN)
			*value = rdev->config.cayman.tile_config;
		else if (rdev->family >= CHIP_CEDAR)
			*value = rdev->config.evergreen.tile_config;
		else if (rdev->family >= CHIP_RV770)
			*value = rdev->config.rv770.tile_config;
		else if (rdev->family >= CHIP_R600)
			*value = rdev->config.r600.tile_config;
		else {
			DRM_DEBUG_KMS("tiling config is r6xx+ only!\n");
			return -EINVAL;
		}
		break;
	case RADEON_INFO_WANT_HYPERZ:
		/* The "value" here is both an input and output parameter.
		 * If the input value is 1, filp requests hyper-z access.
		 * If the input value is 0, filp revokes its hyper-z access.
		 *
		 * When returning, the value is 1 if filp owns hyper-z access,
		 * 0 otherwise. */
		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
			return -EFAULT;
		}
		if (*value >= 2) {
			DRM_DEBUG_KMS("WANT_HYPERZ: invalid value %d\n", *value);
			return -EINVAL;
		}
		radeon_set_filp_rights(dev, &rdev->hyperz_filp, filp, value);
		break;
	case RADEON_INFO_WANT_CMASK:
		/* The same logic as Hyper-Z. */
		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
			return -EFAULT;
		}
		if (*value >= 2) {
			DRM_DEBUG_KMS("WANT_CMASK: invalid value %d\n", *value);
			return -EINVAL;
		}
		radeon_set_filp_rights(dev, &rdev->cmask_filp, filp, value);
		break;
	case RADEON_INFO_CLOCK_CRYSTAL_FREQ:
		/* return clock value in KHz */
		if (rdev->asic->get_xclk)
			*value = radeon_get_xclk(rdev) * 10;
		else
			*value = rdev->clock.spll.reference_freq * 10;
		break;
	case RADEON_INFO_NUM_BACKENDS:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.max_backends_per_se *
				rdev->config.cik.max_shader_engines;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.max_backends_per_se *
				rdev->config.si.max_shader_engines;
		else if (rdev->family >= CHIP_CAYMAN)
			*value = rdev->config.cayman.max_backends_per_se *
				rdev->config.cayman.max_shader_engines;
		else if (rdev->family >= CHIP_CEDAR)
			*value = rdev->config.evergreen.max_backends;
		else if (rdev->family >= CHIP_RV770)
			*value = rdev->config.rv770.max_backends;
		else if (rdev->family >= CHIP_R600)
			*value = rdev->config.r600.max_backends;
		else {
			return -EINVAL;
		}
		break;
	case RADEON_INFO_NUM_TILE_PIPES:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.max_tile_pipes;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.max_tile_pipes;
		else if (rdev->family >= CHIP_CAYMAN)
			*value = rdev->config.cayman.max_tile_pipes;
		else if (rdev->family >= CHIP_CEDAR)
			*value = rdev->config.evergreen.max_tile_pipes;
		else if (rdev->family >= CHIP_RV770)
			*value = rdev->config.rv770.max_tile_pipes;
		else if (rdev->family >= CHIP_R600)
			*value = rdev->config.r600.max_tile_pipes;
		else {
			return -EINVAL;
		}
		break;
	case RADEON_INFO_FUSION_GART_WORKING:
		*value = 1;
		break;
	case RADEON_INFO_BACKEND_MAP:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.backend_map;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.backend_map;
		else if (rdev->family >= CHIP_CAYMAN)
			*value = rdev->config.cayman.backend_map;
		else if (rdev->family >= CHIP_CEDAR)
			*value = rdev->config.evergreen.backend_map;
		else if (rdev->family >= CHIP_RV770)
			*value = rdev->config.rv770.backend_map;
		else if (rdev->family >= CHIP_R600)
			*value = rdev->config.r600.backend_map;
		else {
			return -EINVAL;
		}
		break;
	case RADEON_INFO_VA_START:
		/* this is where we report if vm is supported or not */
		if (rdev->family < CHIP_CAYMAN)
			return -EINVAL;
		*value = RADEON_VA_RESERVED_SIZE;
		break;
	case RADEON_INFO_IB_VM_MAX_SIZE:
		/* this is where we report if vm is supported or not */
		if (rdev->family < CHIP_CAYMAN)
			return -EINVAL;
		*value = RADEON_IB_VM_MAX_SIZE;
		break;
	case RADEON_INFO_MAX_PIPES:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.max_cu_per_sh;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.max_cu_per_sh;
		else if (rdev->family >= CHIP_CAYMAN)
			*value = rdev->config.cayman.max_pipes_per_simd;
		else if (rdev->family >= CHIP_CEDAR)
			*value = rdev->config.evergreen.max_pipes;
		else if (rdev->family >= CHIP_RV770)
			*value = rdev->config.rv770.max_pipes;
		else if (rdev->family >= CHIP_R600)
			*value = rdev->config.r600.max_pipes;
		else {
			return -EINVAL;
		}
		break;
	case RADEON_INFO_TIMESTAMP:
		if (rdev->family < CHIP_R600) {
			DRM_DEBUG_KMS("timestamp is r6xx+ only!\n");
			return -EINVAL;
		}
		value = (uint32_t*)&value64;
		value_size = sizeof(uint64_t);
		value64 = radeon_get_gpu_clock_counter(rdev);
		break;
	case RADEON_INFO_MAX_SE:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.max_shader_engines;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.max_shader_engines;
		else if (rdev->family >= CHIP_CAYMAN)
			*value = rdev->config.cayman.max_shader_engines;
		else if (rdev->family >= CHIP_CEDAR)
			*value = rdev->config.evergreen.num_ses;
		else
			*value = 1;
		break;
	case RADEON_INFO_MAX_SH_PER_SE:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.max_sh_per_se;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.max_sh_per_se;
		else
			return -EINVAL;
		break;
	case RADEON_INFO_FASTFB_WORKING:
		*value = rdev->fastfb_working;
		break;
	case RADEON_INFO_RING_WORKING:
		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
			return -EFAULT;
		}
		switch (*value) {
		case RADEON_CS_RING_GFX:
		case RADEON_CS_RING_COMPUTE:
			*value = rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready;
			break;
		case RADEON_CS_RING_DMA:
			*value = rdev->ring[R600_RING_TYPE_DMA_INDEX].ready;
			*value |= rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready;
			break;
		case RADEON_CS_RING_UVD:
			*value = rdev->ring[R600_RING_TYPE_UVD_INDEX].ready;
			break;
		case RADEON_CS_RING_VCE:
			*value = rdev->ring[TN_RING_TYPE_VCE1_INDEX].ready;
			break;
		default:
			return -EINVAL;
		}
		break;
	case RADEON_INFO_SI_TILE_MODE_ARRAY:
		if (rdev->family >= CHIP_BONAIRE) {
			value = rdev->config.cik.tile_mode_array;
			value_size = sizeof(uint32_t)*32;
		} else if (rdev->family >= CHIP_TAHITI) {
			value = rdev->config.si.tile_mode_array;
			value_size = sizeof(uint32_t)*32;
		} else {
			DRM_DEBUG_KMS("tile mode array is si+ only!\n");
			return -EINVAL;
		}
		break;
	case RADEON_INFO_CIK_MACROTILE_MODE_ARRAY:
		if (rdev->family >= CHIP_BONAIRE) {
			value = rdev->config.cik.macrotile_mode_array;
			value_size = sizeof(uint32_t)*16;
		} else {
			DRM_DEBUG_KMS("macrotile mode array is cik+ only!\n");
			return -EINVAL;
		}
		break;
	case RADEON_INFO_SI_CP_DMA_COMPUTE:
		*value = 1;
		break;
	case RADEON_INFO_SI_BACKEND_ENABLED_MASK:
		if (rdev->family >= CHIP_BONAIRE) {
			*value = rdev->config.cik.backend_enable_mask;
		} else if (rdev->family >= CHIP_TAHITI) {
			*value = rdev->config.si.backend_enable_mask;
		} else {
			DRM_DEBUG_KMS("BACKEND_ENABLED_MASK is si+ only!\n");
			return -EINVAL;
		}
		break;
	case RADEON_INFO_MAX_SCLK:
		if ((rdev->pm.pm_method == PM_METHOD_DPM) &&
		    rdev->pm.dpm_enabled)
			*value = rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk * 10;
		else
			*value = rdev->pm.default_sclk * 10;
		break;
	case RADEON_INFO_VCE_FW_VERSION:
		*value = rdev->vce.fw_version;
		break;
	case RADEON_INFO_VCE_FB_VERSION:
		*value = rdev->vce.fb_version;
		break;
	case RADEON_INFO_NUM_BYTES_MOVED:
		value = (uint32_t*)&value64;
		value_size = sizeof(uint64_t);
		value64 = atomic64_read(&rdev->num_bytes_moved);
		break;
	case RADEON_INFO_VRAM_USAGE:
		value = (uint32_t*)&value64;
		value_size = sizeof(uint64_t);
		value64 = atomic64_read(&rdev->vram_usage);
		break;
	case RADEON_INFO_GTT_USAGE:
		value = (uint32_t*)&value64;
		value_size = sizeof(uint64_t);
		value64 = atomic64_read(&rdev->gtt_usage);
		break;
	default:
		DRM_DEBUG_KMS("Invalid request %d\n", info->request);
		return -EINVAL;
	}
	if (copy_to_user(value_ptr, (char*)value, value_size)) {
		DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__);
		return -EFAULT;
	}
	return 0;
}
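
/*
 * Illustrative userspace usage (not part of this driver): info->value carries
 * a user pointer and the kernel copies value_size bytes back to it, e.g. the
 * 32 uint32_t entries of RADEON_INFO_SI_TILE_MODE_ARRAY.  A minimal sketch of
 * a hypothetical helper, assuming libdrm's drmCommandWriteRead() and the
 * radeon_drm.h UAPI:
 *
 *	#include <stdint.h>
 *	#include <xf86drm.h>
 *	#include <radeon_drm.h>
 *
 *	static int radeon_query_tile_modes(int fd, uint32_t modes[32])
 *	{
 *		struct drm_radeon_info info = {
 *			.request = RADEON_INFO_SI_TILE_MODE_ARRAY,
 *			.value = (uintptr_t)modes,
 *		};
 *
 *		return drmCommandWriteRead(fd, DRM_RADEON_INFO,
 *					   &info, sizeof(info));
 *	}
 */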


/*
 * Outdated mess for old drm with Xorg being in charge (void function now).
 */
/**
 * radeon_driver_lastclose_kms - drm callback for last close
 *
 * @dev: drm dev pointer
 *
 * Switch vga switcheroo state after last close (all asics).
 */
void radeon_driver_lastclose_kms(struct drm_device *dev)
{
	vga_switcheroo_process_delayed_switch();
}

/**
 * radeon_driver_open_kms - drm callback for open
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device open, init vm on cayman+ (all asics).
 * Returns 0 on success, error on failure.
 */
int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
	struct radeon_device *rdev = dev->dev_private;
	int r;

	file_priv->driver_priv = NULL;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0)
		return r;

	/* newer GPUs have virtual address space support */
	if (rdev->family >= CHIP_CAYMAN) {
		struct radeon_fpriv *fpriv;
		struct radeon_bo_va *bo_va;

		fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
		if (unlikely(!fpriv)) {
			return -ENOMEM;
		}

		r = radeon_vm_init(rdev, &fpriv->vm);
		if (r) {
			kfree(fpriv);
			return r;
		}

		r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
		if (r) {
			radeon_vm_fini(rdev, &fpriv->vm);
			kfree(fpriv);
			return r;
		}

		/* map the ib pool buffer read only into
		 * virtual address space */
		bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
					 rdev->ring_tmp_bo.bo);
		r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
					  RADEON_VM_PAGE_READABLE |
					  RADEON_VM_PAGE_SNOOPED);

		radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
		if (r) {
			radeon_vm_fini(rdev, &fpriv->vm);
			kfree(fpriv);
			return r;
		}

		file_priv->driver_priv = fpriv;
	}

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
	return 0;
}

/**
 * radeon_driver_postclose_kms - drm callback for post close
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device post close, tear down vm on cayman+ (all asics).
 */
void radeon_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv)
{
	struct radeon_device *rdev = dev->dev_private;

	/* newer GPUs have virtual address space support */
	if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) {
		struct radeon_fpriv *fpriv = file_priv->driver_priv;
		struct radeon_bo_va *bo_va;
		int r;

		r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
		if (!r) {
			bo_va = radeon_vm_bo_find(&fpriv->vm,
						  rdev->ring_tmp_bo.bo);
			if (bo_va)
				radeon_vm_bo_rmv(rdev, bo_va);
			radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
		}

		radeon_vm_fini(rdev, &fpriv->vm);
		kfree(fpriv);
		file_priv->driver_priv = NULL;
	}
}

/**
 * radeon_driver_preclose_kms - drm callback for pre close
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device pre close, tear down hyperz and cmask filps on r1xx-r5xx
 * (all asics).
 */
void radeon_driver_preclose_kms(struct drm_device *dev,
				struct drm_file *file_priv)
{
	struct radeon_device *rdev = dev->dev_private;
	if (rdev->hyperz_filp == file_priv)
		rdev->hyperz_filp = NULL;
	if (rdev->cmask_filp == file_priv)
		rdev->cmask_filp = NULL;
	radeon_uvd_free_handles(rdev, file_priv);
	radeon_vce_free_handles(rdev, file_priv);
}

/*
 * VBlank related functions.
 */
/**
 * radeon_get_vblank_counter_kms - get frame count
 *
 * @dev: drm dev pointer
 * @crtc: crtc to get the frame count from
 *
 * Gets the frame count on the requested crtc (all asics).
 * Returns frame count on success, -EINVAL on failure.
 */
u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc)
{
	struct radeon_device *rdev = dev->dev_private;

	if (crtc < 0 || crtc >= rdev->num_crtc) {
		DRM_ERROR("Invalid crtc %d\n", crtc);
		return -EINVAL;
	}

	return radeon_get_vblank_counter(rdev, crtc);
}

/**
 * radeon_enable_vblank_kms - enable vblank interrupt
 *
 * @dev: drm dev pointer
 * @crtc: crtc to enable vblank interrupt for
 *
 * Enable the interrupt on the requested crtc (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
int radeon_enable_vblank_kms(struct drm_device *dev, int crtc)
{
	struct radeon_device *rdev = dev->dev_private;
	unsigned long irqflags;
	int r;

	if (crtc < 0 || crtc >= rdev->num_crtc) {
		DRM_ERROR("Invalid crtc %d\n", crtc);
		return -EINVAL;
	}

	spin_lock_irqsave(&rdev->irq.lock, irqflags);
	rdev->irq.crtc_vblank_int[crtc] = true;
	r = radeon_irq_set(rdev);
	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
	return r;
}

/**
 * radeon_disable_vblank_kms - disable vblank interrupt
 *
 * @dev: drm dev pointer
 * @crtc: crtc to disable vblank interrupt for
 *
 * Disable the interrupt on the requested crtc (all asics).
 */
void radeon_disable_vblank_kms(struct drm_device *dev, int crtc)
{
	struct radeon_device *rdev = dev->dev_private;
	unsigned long irqflags;

	if (crtc < 0 || crtc >= rdev->num_crtc) {
		DRM_ERROR("Invalid crtc %d\n", crtc);
		return;
	}

	spin_lock_irqsave(&rdev->irq.lock, irqflags);
	rdev->irq.crtc_vblank_int[crtc] = false;
	radeon_irq_set(rdev);
	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}

/**
 * radeon_get_vblank_timestamp_kms - get vblank timestamp
 *
 * @dev: drm dev pointer
 * @crtc: crtc to get the timestamp for
 * @max_error: max error
 * @vblank_time: time value
 * @flags: flags passed to the driver
 *
 * Gets the timestamp on the requested crtc based on the
 * scanout position.  (all asics).
 * Returns positive status flags on success, negative error on failure.
 */
int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
				    int *max_error,
				    struct timeval *vblank_time,
				    unsigned flags)
{
	struct drm_crtc *drmcrtc;
	struct radeon_device *rdev = dev->dev_private;

	if (crtc < 0 || crtc >= dev->num_crtcs) {
		DRM_ERROR("Invalid crtc %d\n", crtc);
		return -EINVAL;
	}

	/* Get associated drm_crtc: */
	drmcrtc = &rdev->mode_info.crtcs[crtc]->base;

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
						     vblank_time, flags,
						     drmcrtc, &drmcrtc->hwmode);
}

#define KMS_INVALID_IOCTL(name)						\
static int name(struct drm_device *dev, void *data, struct drm_file	\
		*file_priv)						\
{									\
	DRM_ERROR("invalid ioctl with kms %s\n", __func__);		\
	return -EINVAL;							\
}
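
/*
 * For reference, KMS_INVALID_IOCTL(radeon_cp_init_kms) expands to a stub
 * that simply logs and rejects the legacy UMS ioctl:
 *
 *	static int radeon_cp_init_kms(struct drm_device *dev, void *data,
 *				      struct drm_file *file_priv)
 *	{
 *		DRM_ERROR("invalid ioctl with kms %s\n", __func__);
 *		return -EINVAL;
 *	}
 */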

/*
 * All these ioctls are invalid in kms world.
 */
KMS_INVALID_IOCTL(radeon_cp_init_kms)
KMS_INVALID_IOCTL(radeon_cp_start_kms)
KMS_INVALID_IOCTL(radeon_cp_stop_kms)
KMS_INVALID_IOCTL(radeon_cp_reset_kms)
KMS_INVALID_IOCTL(radeon_cp_idle_kms)
KMS_INVALID_IOCTL(radeon_cp_resume_kms)
KMS_INVALID_IOCTL(radeon_engine_reset_kms)
KMS_INVALID_IOCTL(radeon_fullscreen_kms)
KMS_INVALID_IOCTL(radeon_cp_swap_kms)
KMS_INVALID_IOCTL(radeon_cp_clear_kms)
KMS_INVALID_IOCTL(radeon_cp_vertex_kms)
KMS_INVALID_IOCTL(radeon_cp_indices_kms)
KMS_INVALID_IOCTL(radeon_cp_texture_kms)
KMS_INVALID_IOCTL(radeon_cp_stipple_kms)
KMS_INVALID_IOCTL(radeon_cp_indirect_kms)
KMS_INVALID_IOCTL(radeon_cp_vertex2_kms)
KMS_INVALID_IOCTL(radeon_cp_cmdbuf_kms)
KMS_INVALID_IOCTL(radeon_cp_getparam_kms)
KMS_INVALID_IOCTL(radeon_cp_flip_kms)
KMS_INVALID_IOCTL(radeon_mem_alloc_kms)
KMS_INVALID_IOCTL(radeon_mem_free_kms)
KMS_INVALID_IOCTL(radeon_mem_init_heap_kms)
KMS_INVALID_IOCTL(radeon_irq_emit_kms)
KMS_INVALID_IOCTL(radeon_irq_wait_kms)
KMS_INVALID_IOCTL(radeon_cp_setparam_kms)
KMS_INVALID_IOCTL(radeon_surface_alloc_kms)
KMS_INVALID_IOCTL(radeon_surface_free_kms)


const struct drm_ioctl_desc radeon_ioctls_kms[] = {
	DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, radeon_cp_init_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_START, radeon_cp_start_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, radeon_cp_stop_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_RESET, radeon_cp_reset_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_IDLE, radeon_cp_idle_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_CP_RESUME, radeon_cp_resume_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_RESET, radeon_engine_reset_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_FULLSCREEN, radeon_fullscreen_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_SWAP, radeon_cp_swap_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_CLEAR, radeon_cp_clear_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_VERTEX, radeon_cp_vertex_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_INDICES, radeon_cp_indices_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_TEXTURE, radeon_cp_texture_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, radeon_cp_stipple_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, radeon_cp_indirect_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, radeon_cp_vertex2_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, radeon_cp_cmdbuf_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, radeon_cp_getparam_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_FLIP, radeon_cp_flip_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_ALLOC, radeon_mem_alloc_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_FREE, radeon_mem_free_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_INIT_HEAP, radeon_mem_init_heap_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_IRQ_EMIT, radeon_irq_emit_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_IRQ_WAIT, radeon_irq_wait_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, radeon_cp_setparam_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, radeon_surface_alloc_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, radeon_surface_free_kms, DRM_AUTH),
	/* KMS */
	DRM_IOCTL_DEF_DRV(RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_OP, radeon_gem_op_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
};
int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms);
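
/*
 * These two symbols are consumed by the driver setup in radeon_drv.c,
 * roughly as follows (illustrative sketch, not the literal code there):
 *
 *	static struct drm_driver kms_driver = {
 *		...
 *		.ioctls = radeon_ioctls_kms,
 *	};
 *
 *	// at module init time:
 *	kms_driver.num_ioctls = radeon_max_kms_ioctl;
 */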