/*
 * Copyright (C) 2012 Avionic Design GmbH
 * Copyright (C) 2012-2016 NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

10
#include <linux/bitops.h>
11
#include <linux/host1x.h>
12
#include <linux/idr.h>
T
Thierry Reding 已提交
13
#include <linux/iommu.h>
14

15
#include <drm/drm_atomic.h>
16 17
#include <drm/drm_atomic_helper.h>

T
Thierry Reding 已提交
18
#include "drm.h"
19
#include "gem.h"
T
Thierry Reding 已提交
20 21 22 23 24 25 26 27

#define DRIVER_NAME "tegra"
#define DRIVER_DESC "NVIDIA Tegra graphics"
#define DRIVER_DATE "20120330"
#define DRIVER_MAJOR 0
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0

28
#define CARVEOUT_SZ SZ_64M
29
#define CDMA_GATHER_FETCHES_MAX_NB 16383
30

31
/* Per-open-file driver state: the channel contexts opened through this fd. */
struct tegra_drm_file {
	struct idr contexts;	/* maps context IDs to struct tegra_drm_context */
	struct mutex lock;	/* protects the contexts IDR */
};

36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63
/*
 * Hand a swapped atomic state over to the nonblocking commit worker.
 * Called with tegra->commit.lock held (see tegra_atomic_commit()).
 */
static void tegra_atomic_schedule(struct tegra_drm *tegra,
				  struct drm_atomic_state *state)
{
	tegra->commit.state = state;
	schedule_work(&tegra->commit.work);
}

/*
 * Apply an already-swapped atomic state to the hardware: modeset
 * disables/enables, plane updates, then vblank wait and plane cleanup.
 * Drops the state reference taken by tegra_atomic_commit().
 */
static void tegra_atomic_complete(struct tegra_drm *tegra,
				  struct drm_atomic_state *state)
{
	struct drm_device *drm = tegra->drm;

	/*
	 * Everything below can be run asynchronously without the need to grab
	 * any modeset locks at all under one condition: It must be guaranteed
	 * that the asynchronous work has either been cancelled (if the driver
	 * supports it, which at least requires that the framebuffers get
	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
	 * before the new state gets committed on the software side with
	 * drm_atomic_helper_swap_state().
	 *
	 * This scheme allows new atomic state updates to be prepared and
	 * checked in parallel to the asynchronous completion of the previous
	 * update. Which is important since compositors need to figure out the
	 * composition of the next frame right after having submitted the
	 * current layout.
	 */

	drm_atomic_helper_commit_modeset_disables(drm, state);
	drm_atomic_helper_commit_modeset_enables(drm, state);
	drm_atomic_helper_commit_planes(drm, state,
					DRM_PLANE_COMMIT_ACTIVE_ONLY);

	drm_atomic_helper_wait_for_vblanks(drm, state);

	drm_atomic_helper_cleanup_planes(drm, state);
	drm_atomic_state_put(state);
}

/* Worker callback for nonblocking commits; completes the queued state. */
static void tegra_atomic_work(struct work_struct *work)
{
	struct tegra_drm *tegra = container_of(work, struct tegra_drm,
					       commit.work);

	tegra_atomic_complete(tegra, tegra->commit.state);
}

/*
 * .atomic_commit implementation: prepare planes, swap in the new state
 * and either complete it synchronously or defer it to the commit worker.
 * Returns 0 on success or a negative error code from plane preparation.
 */
static int tegra_atomic_commit(struct drm_device *drm,
			       struct drm_atomic_state *state, bool nonblock)
{
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	err = drm_atomic_helper_prepare_planes(drm, state);
	if (err)
		return err;

	/* serialize outstanding nonblocking commits */
	mutex_lock(&tegra->commit.lock);
	flush_work(&tegra->commit.work);

	/*
	 * This is the point of no return - everything below never fails except
	 * when the hw goes bonghits. Which means we can commit the new state on
	 * the software side now.
	 */

	drm_atomic_helper_swap_state(state, true);

	/* reference dropped by tegra_atomic_complete() when done */
	drm_atomic_state_get(state);
	if (nonblock)
		tegra_atomic_schedule(tegra, state);
	else
		tegra_atomic_complete(tegra, state);

	mutex_unlock(&tegra->commit.lock);
	return 0;
}

115 116
/* Mode configuration callbacks: atomic check/commit plus FB creation. */
static const struct drm_mode_config_funcs tegra_drm_mode_funcs = {
	.fb_create = tegra_fb_create,
#ifdef CONFIG_DRM_FBDEV_EMULATION
	.output_poll_changed = tegra_fb_output_poll_changed,
#endif
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = tegra_atomic_commit,
};

124
/*
 * Driver .load callback: allocate the per-device tegra_drm structure,
 * optionally set up an IOMMU domain (GEM aperture plus a carveout IOVA
 * range at the top), initialize mode config, fbdev, vblank handling and
 * the host1x logical device. Error paths unwind in reverse order.
 */
static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
{
	struct host1x_device *device = to_host1x_device(drm->dev);
	struct tegra_drm *tegra;
	int err;

	tegra = kzalloc(sizeof(*tegra), GFP_KERNEL);
	if (!tegra)
		return -ENOMEM;

	if (iommu_present(&platform_bus_type)) {
		u64 carveout_start, carveout_end, gem_start, gem_end;
		struct iommu_domain_geometry *geometry;
		unsigned long order;

		tegra->domain = iommu_domain_alloc(&platform_bus_type);
		if (!tegra->domain) {
			err = -ENOMEM;
			goto free;
		}

		/*
		 * Split the IOMMU aperture: the top CARVEOUT_SZ bytes are
		 * reserved for the carveout IOVA allocator, the rest is
		 * managed by the GEM drm_mm range allocator.
		 */
		geometry = &tegra->domain->geometry;
		gem_start = geometry->aperture_start;
		gem_end = geometry->aperture_end - CARVEOUT_SZ;
		carveout_start = gem_end + 1;
		carveout_end = geometry->aperture_end;

		/* granule = smallest page size the IOMMU supports */
		order = __ffs(tegra->domain->pgsize_bitmap);
		init_iova_domain(&tegra->carveout.domain, 1UL << order,
				 carveout_start >> order,
				 carveout_end >> order);

		tegra->carveout.shift = iova_shift(&tegra->carveout.domain);
		tegra->carveout.limit = carveout_end >> tegra->carveout.shift;

		drm_mm_init(&tegra->mm, gem_start, gem_end - gem_start + 1);
		mutex_init(&tegra->mm_lock);

		DRM_DEBUG("IOMMU apertures:\n");
		DRM_DEBUG("  GEM: %#llx-%#llx\n", gem_start, gem_end);
		DRM_DEBUG("  Carveout: %#llx-%#llx\n", carveout_start,
			  carveout_end);
	}

	mutex_init(&tegra->clients_lock);
	INIT_LIST_HEAD(&tegra->clients);

	mutex_init(&tegra->commit.lock);
	INIT_WORK(&tegra->commit.work, tegra_atomic_work);

	drm->dev_private = tegra;
	tegra->drm = drm;

	drm_mode_config_init(drm);

	drm->mode_config.min_width = 0;
	drm->mode_config.min_height = 0;

	drm->mode_config.max_width = 4096;
	drm->mode_config.max_height = 4096;

	drm->mode_config.allow_fb_modifiers = true;

	drm->mode_config.funcs = &tegra_drm_mode_funcs;

	err = tegra_drm_fb_prepare(drm);
	if (err < 0)
		goto config;

	drm_kms_helper_poll_init(drm);

	err = host1x_device_init(device);
	if (err < 0)
		goto fbdev;

	/*
	 * We don't use the drm_irq_install() helpers provided by the DRM
	 * core, so we need to set this manually in order to allow the
	 * DRM_IOCTL_WAIT_VBLANK to operate correctly.
	 */
	drm->irq_enabled = true;

	/* syncpoints are used for full 32-bit hardware VBLANK counters */
	drm->max_vblank_count = 0xffffffff;

	err = drm_vblank_init(drm, drm->mode_config.num_crtc);
	if (err < 0)
		goto device;

	drm_mode_config_reset(drm);

	err = tegra_drm_fb_init(drm);
	if (err < 0)
		goto vblank;

	return 0;

vblank:
	drm_vblank_cleanup(drm);
device:
	host1x_device_exit(device);
fbdev:
	drm_kms_helper_poll_fini(drm);
	tegra_drm_fb_free(drm);
config:
	drm_mode_config_cleanup(drm);

	if (tegra->domain) {
		iommu_domain_free(tegra->domain);
		drm_mm_takedown(&tegra->mm);
		mutex_destroy(&tegra->mm_lock);
		put_iova_domain(&tegra->carveout.domain);
	}
free:
	kfree(tegra);
	return err;
}

242
/*
 * Driver .unload callback: tear down KMS, fbdev and vblank state, exit
 * the host1x logical device and release the IOMMU/allocator resources
 * set up by tegra_drm_load().
 */
static void tegra_drm_unload(struct drm_device *drm)
{
	struct host1x_device *device = to_host1x_device(drm->dev);
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	drm_kms_helper_poll_fini(drm);
	tegra_drm_fb_exit(drm);
	drm_mode_config_cleanup(drm);
	drm_vblank_cleanup(drm);

	/* NOTE: tegra is leaked if host1x_device_exit() fails */
	err = host1x_device_exit(device);
	if (err < 0)
		return;

	if (tegra->domain) {
		iommu_domain_free(tegra->domain);
		drm_mm_takedown(&tegra->mm);
		mutex_destroy(&tegra->mm_lock);
		put_iova_domain(&tegra->carveout.domain);
	}

	kfree(tegra);
}

static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
{
269
	struct tegra_drm_file *fpriv;
T
Terje Bergstrom 已提交
270 271 272 273 274

	fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
	if (!fpriv)
		return -ENOMEM;

275 276
	idr_init(&fpriv->contexts);
	mutex_init(&fpriv->lock);
T
Terje Bergstrom 已提交
277 278
	filp->driver_priv = fpriv;

T
Thierry Reding 已提交
279 280 281
	return 0;
}

282
/* Close the context's hardware channel and free the context itself. */
static void tegra_drm_context_free(struct tegra_drm_context *context)
{
	context->client->ops->close_channel(context);
	kfree(context);
}

T
Thierry Reding 已提交
288 289
/*
 * Driver .lastclose callback: restore the fbdev mode when the last
 * userspace client goes away (no-op without fbdev emulation).
 */
static void tegra_drm_lastclose(struct drm_device *drm)
{
#ifdef CONFIG_DRM_FBDEV_EMULATION
	struct tegra_drm *tegra = drm->dev_private;

	tegra_fbdev_restore_mode(tegra->fbdev);
#endif
}

297
static struct host1x_bo *
298
host1x_bo_lookup(struct drm_file *file, u32 handle)
299 300 301 302
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

303
	gem = drm_gem_object_lookup(file, handle);
304 305 306
	if (!gem)
		return NULL;

307
	drm_gem_object_unreference_unlocked(gem);
308 309 310 311 312

	bo = to_tegra_bo(gem);
	return &bo->base;
}

313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332
/*
 * Copy one userspace relocation descriptor into @dest and resolve the
 * command-buffer and target GEM handles to host1x buffer objects.
 * Returns 0 on success, -EFAULT on a faulting user access or -ENOENT
 * when a handle does not resolve.
 */
static int host1x_reloc_copy_from_user(struct host1x_reloc *dest,
				       struct drm_tegra_reloc __user *src,
				       struct drm_device *drm,
				       struct drm_file *file)
{
	u32 cmdbuf_handle, target_handle;
	int err;

	err = get_user(cmdbuf_handle, &src->cmdbuf.handle);
	if (err < 0)
		return err;

	err = get_user(dest->cmdbuf.offset, &src->cmdbuf.offset);
	if (err < 0)
		return err;

	err = get_user(target_handle, &src->target.handle);
	if (err < 0)
		return err;

	err = get_user(dest->target.offset, &src->target.offset);
	if (err < 0)
		return err;

	err = get_user(dest->shift, &src->shift);
	if (err < 0)
		return err;

	dest->cmdbuf.bo = host1x_bo_lookup(file, cmdbuf_handle);
	if (!dest->cmdbuf.bo)
		return -ENOENT;

	dest->target.bo = host1x_bo_lookup(file, target_handle);
	if (!dest->target.bo)
		return -ENOENT;

	return 0;
}

352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381
/*
 * Copy one userspace wait-check descriptor into @dest and resolve its
 * GEM handle to a host1x buffer object. Returns 0, -EFAULT or -ENOENT.
 */
static int host1x_waitchk_copy_from_user(struct host1x_waitchk *dest,
					 struct drm_tegra_waitchk __user *src,
					 struct drm_file *file)
{
	u32 handle;
	int err;

	err = get_user(handle, &src->handle);
	if (err < 0)
		return err;

	err = get_user(dest->offset, &src->offset);
	if (err < 0)
		return err;

	err = get_user(dest->syncpt_id, &src->syncpt);
	if (err < 0)
		return err;

	err = get_user(dest->thresh, &src->thresh);
	if (err < 0)
		return err;

	dest->bo = host1x_bo_lookup(file, handle);
	if (!dest->bo)
		return -ENOENT;

	return 0;
}

382 383 384 385 386 387 388 389
/*
 * Build and submit a host1x job from a userspace submit descriptor:
 * copy in command buffers, relocations and wait checks (validating
 * offsets and sizes against the backing GEM objects), resolve the
 * syncpoint, then pin and submit the job. On success args->fence is
 * set to the syncpoint threshold the job will reach on completion.
 */
int tegra_drm_submit(struct tegra_drm_context *context,
		     struct drm_tegra_submit *args, struct drm_device *drm,
		     struct drm_file *file)
{
	unsigned int num_cmdbufs = args->num_cmdbufs;
	unsigned int num_relocs = args->num_relocs;
	unsigned int num_waitchks = args->num_waitchks;
	struct drm_tegra_cmdbuf __user *cmdbufs =
		(void __user *)(uintptr_t)args->cmdbufs;
	struct drm_tegra_reloc __user *relocs =
		(void __user *)(uintptr_t)args->relocs;
	struct drm_tegra_waitchk __user *waitchks =
		(void __user *)(uintptr_t)args->waitchks;
	struct drm_tegra_syncpt syncpt;
	struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
	struct host1x_syncpt *sp;
	struct host1x_job *job;
	int err;

	/* We don't yet support other than one syncpt_incr struct per submit */
	if (args->num_syncpts != 1)
		return -EINVAL;

	/* We don't yet support waitchks */
	if (args->num_waitchks != 0)
		return -EINVAL;

	job = host1x_job_alloc(context->channel, args->num_cmdbufs,
			       args->num_relocs, args->num_waitchks);
	if (!job)
		return -ENOMEM;

	job->num_relocs = args->num_relocs;
	job->num_waitchk = args->num_waitchks;
	job->client = (u32)args->context;
	job->class = context->client->base.class;
	job->serialize = true;

	while (num_cmdbufs) {
		struct drm_tegra_cmdbuf cmdbuf;
		struct host1x_bo *bo;
		struct tegra_bo *obj;
		u64 offset;

		if (copy_from_user(&cmdbuf, cmdbufs, sizeof(cmdbuf))) {
			err = -EFAULT;
			goto fail;
		}

		/*
		 * The maximum number of CDMA gather fetches is 16383, a higher
		 * value means the words count is malformed.
		 */
		if (cmdbuf.words > CDMA_GATHER_FETCHES_MAX_NB) {
			err = -EINVAL;
			goto fail;
		}

		bo = host1x_bo_lookup(file, cmdbuf.handle);
		if (!bo) {
			err = -ENOENT;
			goto fail;
		}

		/* 64-bit sum to avoid overflow in the bounds check below */
		offset = (u64)cmdbuf.offset + (u64)cmdbuf.words * sizeof(u32);
		obj = host1x_to_tegra_bo(bo);

		/*
		 * Gather buffer base address must be 4-bytes aligned,
		 * unaligned offset is malformed and cause commands stream
		 * corruption on the buffer address relocation.
		 */
		if (offset & 3 || offset >= obj->gem.size) {
			err = -EINVAL;
			goto fail;
		}

		host1x_job_add_gather(job, bo, cmdbuf.words, cmdbuf.offset);
		num_cmdbufs--;
		cmdbufs++;
	}

	/* copy and resolve relocations from submit */
	while (num_relocs--) {
		struct host1x_reloc *reloc;
		struct tegra_bo *obj;

		err = host1x_reloc_copy_from_user(&job->relocarray[num_relocs],
						  &relocs[num_relocs], drm,
						  file);
		if (err < 0)
			goto fail;

		reloc = &job->relocarray[num_relocs];
		obj = host1x_to_tegra_bo(reloc->cmdbuf.bo);

		/*
		 * The unaligned cmdbuf offset will cause an unaligned write
		 * during of the relocations patching, corrupting the commands
		 * stream.
		 */
		if (reloc->cmdbuf.offset & 3 ||
		    reloc->cmdbuf.offset >= obj->gem.size) {
			err = -EINVAL;
			goto fail;
		}

		obj = host1x_to_tegra_bo(reloc->target.bo);

		if (reloc->target.offset >= obj->gem.size) {
			err = -EINVAL;
			goto fail;
		}
	}

	/* copy and resolve waitchks from submit */
	while (num_waitchks--) {
		struct host1x_waitchk *wait = &job->waitchk[num_waitchks];
		struct tegra_bo *obj;

		err = host1x_waitchk_copy_from_user(wait,
						    &waitchks[num_waitchks],
						    file);
		if (err < 0)
			goto fail;

		obj = host1x_to_tegra_bo(wait->bo);

		/*
		 * The unaligned offset will cause an unaligned write during
		 * of the waitchks patching, corrupting the commands stream.
		 */
		if (wait->offset & 3 ||
		    wait->offset >= obj->gem.size) {
			err = -EINVAL;
			goto fail;
		}
	}

	if (copy_from_user(&syncpt, (void __user *)(uintptr_t)args->syncpts,
			   sizeof(syncpt))) {
		err = -EFAULT;
		goto fail;
	}

	/* check whether syncpoint ID is valid */
	sp = host1x_syncpt_get(host1x, syncpt.id);
	if (!sp) {
		err = -ENOENT;
		goto fail;
	}

	job->is_addr_reg = context->client->ops->is_addr_reg;
	job->syncpt_incrs = syncpt.incrs;
	job->syncpt_id = syncpt.id;
	job->timeout = 10000;

	/* userspace may shorten, but never lengthen, the 10s default */
	if (args->timeout && args->timeout < 10000)
		job->timeout = args->timeout;

	err = host1x_job_pin(job, context->client->base.dev);
	if (err)
		goto fail;

	err = host1x_job_submit(job);
	if (err)
		goto fail_submit;

	args->fence = job->syncpt_end;

	host1x_job_put(job);
	return 0;

fail_submit:
	host1x_job_unpin(job);
fail:
	host1x_job_put(job);
	return err;
}


T
Terje Bergstrom 已提交
563
#ifdef CONFIG_DRM_TEGRA_STAGING
564 565
/* Look up a context by ID under the file's lock; returns NULL if absent. */
static struct tegra_drm_context *
tegra_drm_file_get_context(struct tegra_drm_file *file, u32 id)
{
	struct tegra_drm_context *ctx;

	mutex_lock(&file->lock);
	ctx = idr_find(&file->contexts, id);
	mutex_unlock(&file->lock);

	return ctx;
}

static int tegra_gem_create(struct drm_device *drm, void *data,
			    struct drm_file *file)
{
	struct drm_tegra_gem_create *args = data;
	struct tegra_bo *bo;

582
	bo = tegra_bo_create_with_handle(file, drm, args->size, args->flags,
T
Terje Bergstrom 已提交
583 584 585 586 587 588 589 590 591 592 593 594 595 596
					 &args->handle);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	return 0;
}

static int tegra_gem_mmap(struct drm_device *drm, void *data,
			  struct drm_file *file)
{
	struct drm_tegra_gem_mmap *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

597
	gem = drm_gem_object_lookup(file, args->handle);
T
Terje Bergstrom 已提交
598 599 600 601 602
	if (!gem)
		return -EINVAL;

	bo = to_tegra_bo(gem);

603
	args->offset = drm_vma_node_offset_addr(&bo->gem.vma_node);
T
Terje Bergstrom 已提交
604

605
	drm_gem_object_unreference_unlocked(gem);
T
Terje Bergstrom 已提交
606 607 608 609 610 611 612

	return 0;
}

static int tegra_syncpt_read(struct drm_device *drm, void *data,
			     struct drm_file *file)
{
613
	struct host1x *host = dev_get_drvdata(drm->dev->parent);
T
Terje Bergstrom 已提交
614
	struct drm_tegra_syncpt_read *args = data;
615
	struct host1x_syncpt *sp;
T
Terje Bergstrom 已提交
616

617
	sp = host1x_syncpt_get(host, args->id);
T
Terje Bergstrom 已提交
618 619 620 621 622 623 624 625 626 627
	if (!sp)
		return -EINVAL;

	args->value = host1x_syncpt_read_min(sp);
	return 0;
}

static int tegra_syncpt_incr(struct drm_device *drm, void *data,
			     struct drm_file *file)
{
628
	struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
T
Terje Bergstrom 已提交
629
	struct drm_tegra_syncpt_incr *args = data;
630
	struct host1x_syncpt *sp;
T
Terje Bergstrom 已提交
631

632
	sp = host1x_syncpt_get(host1x, args->id);
T
Terje Bergstrom 已提交
633 634 635
	if (!sp)
		return -EINVAL;

636
	return host1x_syncpt_incr(sp);
T
Terje Bergstrom 已提交
637 638 639 640 641
}

static int tegra_syncpt_wait(struct drm_device *drm, void *data,
			     struct drm_file *file)
{
642
	struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
T
Terje Bergstrom 已提交
643
	struct drm_tegra_syncpt_wait *args = data;
644
	struct host1x_syncpt *sp;
T
Terje Bergstrom 已提交
645

646
	sp = host1x_syncpt_get(host1x, args->id);
T
Terje Bergstrom 已提交
647 648 649 650 651 652 653
	if (!sp)
		return -EINVAL;

	return host1x_syncpt_wait(sp, args->thresh, args->timeout,
				  &args->value);
}

654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675
/*
 * Open a channel on @client and register @context in the file's IDR.
 * On success context->client and context->id are filled in; on IDR
 * failure the freshly opened channel is closed again.
 */
static int tegra_client_open(struct tegra_drm_file *fpriv,
			     struct tegra_drm_client *client,
			     struct tegra_drm_context *context)
{
	int id;
	int err;

	err = client->ops->open_channel(client, context);
	if (err < 0)
		return err;

	id = idr_alloc(&fpriv->contexts, context, 0, 0, GFP_KERNEL);
	if (id < 0) {
		client->ops->close_channel(context);
		return id;
	}

	context->client = client;
	context->id = id;

	return 0;
}

T
Terje Bergstrom 已提交
676 677 678
/*
 * IOCTL: open a channel context on the client whose class matches
 * args->client. Returns -ENODEV when no such client is registered;
 * the context is freed again on any failure.
 */
static int tegra_open_channel(struct drm_device *drm, void *data,
			      struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct tegra_drm *tegra = drm->dev_private;
	struct drm_tegra_open_channel *args = data;
	struct tegra_drm_context *context;
	struct tegra_drm_client *client;
	int err = -ENODEV;

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	mutex_lock(&fpriv->lock);

	list_for_each_entry(client, &tegra->clients, list)
		if (client->base.class == args->client) {
			err = tegra_client_open(fpriv, client, context);
			if (err < 0)
				break;

			args->context = context->id;
			break;
		}

	if (err < 0)
		kfree(context);

	mutex_unlock(&fpriv->lock);
	return err;
}

/*
 * IOCTL: close a previously opened channel context and remove it from
 * the file's context IDR. Returns -EINVAL for an unknown context ID.
 */
static int tegra_close_channel(struct drm_device *drm, void *data,
			       struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_close_channel *args = data;
	struct tegra_drm_context *context;
	int err = 0;

	mutex_lock(&fpriv->lock);

	context = tegra_drm_file_get_context(fpriv, args->context);
	if (!context) {
		err = -EINVAL;
		goto unlock;
	}

	idr_remove(&fpriv->contexts, context->id);
	tegra_drm_context_free(context);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}

/*
 * IOCTL: look up the syncpoint ID at args->index for the client owning
 * the given context. Returns -ENODEV for an unknown context and
 * -EINVAL for an out-of-range index.
 */
static int tegra_get_syncpt(struct drm_device *drm, void *data,
			    struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_get_syncpt *args = data;
	struct tegra_drm_context *context;
	struct host1x_syncpt *syncpt;
	int err = 0;

	mutex_lock(&fpriv->lock);

	context = tegra_drm_file_get_context(fpriv, args->context);
	if (!context) {
		err = -ENODEV;
		goto unlock;
	}

	if (args->index >= context->client->base.num_syncpts) {
		err = -EINVAL;
		goto unlock;
	}

	syncpt = context->client->base.syncpts[args->index];
	args->id = host1x_syncpt_id(syncpt);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}

/*
 * IOCTL: forward a job submission to the client that owns the context.
 * The file lock is held across the submit to keep the context alive.
 */
static int tegra_submit(struct drm_device *drm, void *data,
			struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_submit *args = data;
	struct tegra_drm_context *context;
	int err;

	mutex_lock(&fpriv->lock);

	context = tegra_drm_file_get_context(fpriv, args->context);
	if (!context) {
		err = -ENODEV;
		goto unlock;
	}

	err = context->client->ops->submit(context, args, drm, file);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}
785 786 787 788 789 790 791 792 793

/*
 * IOCTL: look up the wait base associated with a context's syncpoint.
 * Returns -ENODEV for an unknown context, -EINVAL for a bad syncpoint
 * index and -ENXIO when the syncpoint has no base.
 */
static int tegra_get_syncpt_base(struct drm_device *drm, void *data,
				 struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_get_syncpt_base *args = data;
	struct tegra_drm_context *context;
	struct host1x_syncpt_base *base;
	struct host1x_syncpt *syncpt;
	int err = 0;

	mutex_lock(&fpriv->lock);

	context = tegra_drm_file_get_context(fpriv, args->context);
	if (!context) {
		err = -ENODEV;
		goto unlock;
	}

	if (args->syncpt >= context->client->base.num_syncpts) {
		err = -EINVAL;
		goto unlock;
	}

	syncpt = context->client->base.syncpts[args->syncpt];

	base = host1x_syncpt_get_base(syncpt);
	if (!base) {
		err = -ENXIO;
		goto unlock;
	}

	args->id = host1x_syncpt_base_id(base);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}
823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862

static int tegra_gem_set_tiling(struct drm_device *drm, void *data,
				struct drm_file *file)
{
	struct drm_tegra_gem_set_tiling *args = data;
	enum tegra_bo_tiling_mode mode;
	struct drm_gem_object *gem;
	unsigned long value = 0;
	struct tegra_bo *bo;

	switch (args->mode) {
	case DRM_TEGRA_GEM_TILING_MODE_PITCH:
		mode = TEGRA_BO_TILING_MODE_PITCH;

		if (args->value != 0)
			return -EINVAL;

		break;

	case DRM_TEGRA_GEM_TILING_MODE_TILED:
		mode = TEGRA_BO_TILING_MODE_TILED;

		if (args->value != 0)
			return -EINVAL;

		break;

	case DRM_TEGRA_GEM_TILING_MODE_BLOCK:
		mode = TEGRA_BO_TILING_MODE_BLOCK;

		if (args->value > 5)
			return -EINVAL;

		value = args->value;
		break;

	default:
		return -EINVAL;
	}

863
	gem = drm_gem_object_lookup(file, args->handle);
864 865 866 867 868 869 870 871
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);

	bo->tiling.mode = mode;
	bo->tiling.value = value;

872
	drm_gem_object_unreference_unlocked(gem);
873 874 875 876 877 878 879 880 881 882 883 884

	return 0;
}

static int tegra_gem_get_tiling(struct drm_device *drm, void *data,
				struct drm_file *file)
{
	struct drm_tegra_gem_get_tiling *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;
	int err = 0;

885
	gem = drm_gem_object_lookup(file, args->handle);
886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);

	switch (bo->tiling.mode) {
	case TEGRA_BO_TILING_MODE_PITCH:
		args->mode = DRM_TEGRA_GEM_TILING_MODE_PITCH;
		args->value = 0;
		break;

	case TEGRA_BO_TILING_MODE_TILED:
		args->mode = DRM_TEGRA_GEM_TILING_MODE_TILED;
		args->value = 0;
		break;

	case TEGRA_BO_TILING_MODE_BLOCK:
		args->mode = DRM_TEGRA_GEM_TILING_MODE_BLOCK;
		args->value = bo->tiling.value;
		break;

	default:
		err = -EINVAL;
		break;
	}

912
	drm_gem_object_unreference_unlocked(gem);
913 914 915

	return err;
}
916 917 918 919 920 921 922 923 924 925 926

static int tegra_gem_set_flags(struct drm_device *drm, void *data,
			       struct drm_file *file)
{
	struct drm_tegra_gem_set_flags *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	if (args->flags & ~DRM_TEGRA_GEM_FLAGS)
		return -EINVAL;

927
	gem = drm_gem_object_lookup(file, args->handle);
928 929 930 931 932 933 934 935 936
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);
	bo->flags = 0;

	if (args->flags & DRM_TEGRA_GEM_BOTTOM_UP)
		bo->flags |= TEGRA_BO_BOTTOM_UP;

937
	drm_gem_object_unreference_unlocked(gem);
938 939 940 941 942 943 944 945 946 947 948

	return 0;
}

static int tegra_gem_get_flags(struct drm_device *drm, void *data,
			       struct drm_file *file)
{
	struct drm_tegra_gem_get_flags *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

949
	gem = drm_gem_object_lookup(file, args->handle);
950 951 952 953 954 955 956 957 958
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);
	args->flags = 0;

	if (bo->flags & TEGRA_BO_BOTTOM_UP)
		args->flags |= DRM_TEGRA_GEM_BOTTOM_UP;

959
	drm_gem_object_unreference_unlocked(gem);
960 961 962

	return 0;
}
T
Terje Bergstrom 已提交
963 964
#endif

R
Rob Clark 已提交
965
/* Driver-private IOCTL table; only exposed with the staging UAPI enabled. */
static const struct drm_ioctl_desc tegra_drm_ioctls[] = {
#ifdef CONFIG_DRM_TEGRA_STAGING
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create, 0),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap, 0),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read, 0),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr, 0),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_WAIT, tegra_syncpt_wait, 0),
	DRM_IOCTL_DEF_DRV(TEGRA_OPEN_CHANNEL, tegra_open_channel, 0),
	DRM_IOCTL_DEF_DRV(TEGRA_CLOSE_CHANNEL, tegra_close_channel, 0),
	DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt, 0),
	DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit, 0),
	DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT_BASE, tegra_get_syncpt_base, 0),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_TILING, tegra_gem_set_tiling, 0),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_TILING, tegra_gem_get_tiling, 0),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_FLAGS, tegra_gem_set_flags, 0),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_FLAGS, tegra_gem_get_flags, 0),
#endif
};

/* File operations for the DRM device node; mmap is driver-specific. */
static const struct file_operations tegra_drm_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = tegra_drm_mmap,
	.poll = drm_poll,
	.read = drm_read,
	.compat_ioctl = drm_compat_ioctl,
	.llseek = noop_llseek,
};

996 997 998 999 1000 1001 1002 1003 1004
/* idr_for_each() callback: free one leftover context on file close. */
static int tegra_drm_context_cleanup(int id, void *p, void *data)
{
	struct tegra_drm_context *context = p;

	tegra_drm_context_free(context);

	return 0;
}

1005 1006
/*
 * Driver .preclose callback: free all contexts still registered in the
 * file's IDR, then tear down the per-file state itself.
 */
static void tegra_drm_preclose(struct drm_device *drm, struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;

	mutex_lock(&fpriv->lock);
	idr_for_each(&fpriv->contexts, tegra_drm_context_cleanup, NULL);
	mutex_unlock(&fpriv->lock);

	idr_destroy(&fpriv->contexts);
	mutex_destroy(&fpriv->lock);
	kfree(fpriv);
}

1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028
#ifdef CONFIG_DEBUG_FS
/* debugfs: list every framebuffer currently registered on the device. */
static int tegra_debugfs_framebuffers(struct seq_file *s, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)s->private;
	struct drm_device *drm = node->minor->dev;
	struct drm_framebuffer *fb;

	mutex_lock(&drm->mode_config.fb_lock);

	list_for_each_entry(fb, &drm->mode_config.fb_list, head) {
		seq_printf(s, "%3d: user size: %d x %d, depth %d, %d bpp, refcount %d\n",
			   fb->base.id, fb->width, fb->height,
			   fb->format->depth,
			   fb->format->cpp[0] * 8,
			   drm_framebuffer_read_refcount(fb));
	}

	mutex_unlock(&drm->mode_config.fb_lock);

	return 0;
}

1040 1041 1042 1043 1044
/* debugfs: dump the GEM IOVA allocator (drm_mm) state. */
static int tegra_debugfs_iova(struct seq_file *s, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)s->private;
	struct drm_device *drm = node->minor->dev;
	struct tegra_drm *tegra = drm->dev_private;
	struct drm_printer p = drm_seq_file_printer(s);

	mutex_lock(&tegra->mm_lock);
	drm_mm_print(&tegra->mm, &p);
	mutex_unlock(&tegra->mm_lock);

	return 0;
}

1054 1055
/* Table of debugfs entries registered by tegra_debugfs_init(). */
static struct drm_info_list tegra_debugfs_list[] = {
	{ "framebuffers", tegra_debugfs_framebuffers, 0 },
	{ "iova", tegra_debugfs_iova, 0 },
};

/* Driver .debugfs_init callback: register the entries above. */
static int tegra_debugfs_init(struct drm_minor *minor)
{
	return drm_debugfs_create_files(tegra_debugfs_list,
					ARRAY_SIZE(tegra_debugfs_list),
					minor->debugfs_root, minor);
}
#endif

1067
/* Main DRM driver description: modesetting, GEM, PRIME and atomic. */
static struct drm_driver tegra_drm_driver = {
	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
			   DRIVER_ATOMIC,
	.load = tegra_drm_load,
	.unload = tegra_drm_unload,
	.open = tegra_drm_open,
	.preclose = tegra_drm_preclose,
	.lastclose = tegra_drm_lastclose,

#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = tegra_debugfs_init,
#endif

	.gem_free_object_unlocked = tegra_bo_free_object,
	.gem_vm_ops = &tegra_bo_vm_ops,

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = tegra_gem_prime_export,
	.gem_prime_import = tegra_gem_prime_import,

	.dumb_create = tegra_bo_dumb_create,
	.dumb_map_offset = tegra_bo_dumb_map_offset,
	.dumb_destroy = drm_gem_dumb_destroy,

	.ioctls = tegra_drm_ioctls,
	.num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
	.fops = &tegra_drm_fops,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};
1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123

/*
 * tegra_drm_register_client - add a client to the Tegra DRM instance
 * @tegra: Tegra DRM instance
 * @client: client to register
 *
 * Appends @client to the instance's client list under clients_lock.
 *
 * Returns: always 0.
 */
int tegra_drm_register_client(struct tegra_drm *tegra,
			      struct tegra_drm_client *client)
{
	mutex_lock(&tegra->clients_lock);
	list_add_tail(&client->list, &tegra->clients);
	mutex_unlock(&tegra->clients_lock);

	return 0;
}

/*
 * tegra_drm_unregister_client - remove a client from the Tegra DRM instance
 * @tegra: Tegra DRM instance
 * @client: client to unregister
 *
 * Unlinks @client from the instance's client list under clients_lock;
 * list_del_init() leaves the client's list head in a valid empty state.
 *
 * Returns: always 0.
 */
int tegra_drm_unregister_client(struct tegra_drm *tegra,
				struct tegra_drm_client *client)
{
	mutex_lock(&tegra->clients_lock);
	list_del_init(&client->list);
	mutex_unlock(&tegra->clients_lock);

	return 0;
}

1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201
void *tegra_drm_alloc(struct tegra_drm *tegra, size_t size,
			      dma_addr_t *dma)
{
	struct iova *alloc;
	void *virt;
	gfp_t gfp;
	int err;

	if (tegra->domain)
		size = iova_align(&tegra->carveout.domain, size);
	else
		size = PAGE_ALIGN(size);

	gfp = GFP_KERNEL | __GFP_ZERO;
	if (!tegra->domain) {
		/*
		 * Many units only support 32-bit addresses, even on 64-bit
		 * SoCs. If there is no IOMMU to translate into a 32-bit IO
		 * virtual address space, force allocations to be in the
		 * lower 32-bit range.
		 */
		gfp |= GFP_DMA;
	}

	virt = (void *)__get_free_pages(gfp, get_order(size));
	if (!virt)
		return ERR_PTR(-ENOMEM);

	if (!tegra->domain) {
		/*
		 * If IOMMU is disabled, devices address physical memory
		 * directly.
		 */
		*dma = virt_to_phys(virt);
		return virt;
	}

	alloc = alloc_iova(&tegra->carveout.domain,
			   size >> tegra->carveout.shift,
			   tegra->carveout.limit, true);
	if (!alloc) {
		err = -EBUSY;
		goto free_pages;
	}

	*dma = iova_dma_addr(&tegra->carveout.domain, alloc);
	err = iommu_map(tegra->domain, *dma, virt_to_phys(virt),
			size, IOMMU_READ | IOMMU_WRITE);
	if (err < 0)
		goto free_iova;

	return virt;

free_iova:
	__free_iova(&tegra->carveout.domain, alloc);
free_pages:
	free_pages((unsigned long)virt, get_order(size));

	return ERR_PTR(err);
}

/*
 * tegra_drm_free - release memory obtained from tegra_drm_alloc()
 * @tegra: Tegra DRM instance
 * @size: size originally requested from tegra_drm_alloc()
 * @virt: kernel virtual address of the buffer
 * @dma: device-visible address of the buffer
 *
 * Recomputes the same aligned size used at allocation time, unmaps the
 * buffer from the IOMMU domain and returns its IOVA range to the carveout
 * allocator (if a domain is attached), then frees the backing pages.
 */
void tegra_drm_free(struct tegra_drm *tegra, size_t size, void *virt,
		    dma_addr_t dma)
{
	/* must mirror the alignment applied in tegra_drm_alloc() */
	if (tegra->domain)
		size = iova_align(&tegra->carveout.domain, size);
	else
		size = PAGE_ALIGN(size);

	if (tegra->domain) {
		iommu_unmap(tegra->domain, dma, size);
		free_iova(&tegra->carveout.domain,
			  iova_pfn(&tegra->carveout.domain, dma));
	}

	free_pages((unsigned long)virt, get_order(size));
}

1202
static int host1x_drm_probe(struct host1x_device *dev)
1203
{
1204 1205 1206 1207 1208
	struct drm_driver *driver = &tegra_drm_driver;
	struct drm_device *drm;
	int err;

	drm = drm_dev_alloc(driver, &dev->dev);
1209 1210
	if (IS_ERR(drm))
		return PTR_ERR(drm);
1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222

	dev_set_drvdata(&dev->dev, drm);

	err = drm_dev_register(drm, 0);
	if (err < 0)
		goto unref;

	return 0;

unref:
	drm_dev_unref(drm);
	return err;
1223 1224
}

1225
static int host1x_drm_remove(struct host1x_device *dev)
1226
{
1227 1228 1229 1230
	struct drm_device *drm = dev_get_drvdata(&dev->dev);

	drm_dev_unregister(drm);
	drm_dev_unref(drm);
1231 1232 1233 1234

	return 0;
}

#ifdef CONFIG_PM_SLEEP
/*
 * host1x_drm_suspend - system sleep callback
 * @dev: host1x logical device's struct device
 *
 * Disables output polling, suspends the fbdev emulation and saves the
 * current atomic state in tegra->state for restoration on resume. On
 * failure the fbdev and polling are re-enabled so the device stays usable.
 *
 * Returns: 0 on success, negative errno on failure.
 */
static int host1x_drm_suspend(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct tegra_drm *tegra = drm->dev_private;

	drm_kms_helper_poll_disable(drm);
	tegra_drm_fb_suspend(drm);

	tegra->state = drm_atomic_helper_suspend(drm);
	if (IS_ERR(tegra->state)) {
		/* undo the steps above so the device remains operational */
		tegra_drm_fb_resume(drm);
		drm_kms_helper_poll_enable(drm);
		return PTR_ERR(tegra->state);
	}

	return 0;
}

static int host1x_drm_resume(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
1257
	struct tegra_drm *tegra = drm->dev_private;
1258

1259 1260
	drm_atomic_helper_resume(drm, tegra->state);
	tegra_drm_fb_resume(drm);
1261 1262 1263 1264 1265 1266
	drm_kms_helper_poll_enable(drm);

	return 0;
}
#endif

/* system sleep PM ops; no-ops unless CONFIG_PM_SLEEP is enabled */
static SIMPLE_DEV_PM_OPS(host1x_drm_pm_ops, host1x_drm_suspend,
			 host1x_drm_resume);

1270 1271 1272 1273
static const struct of_device_id host1x_drm_subdevs[] = {
	{ .compatible = "nvidia,tegra20-dc", },
	{ .compatible = "nvidia,tegra20-hdmi", },
	{ .compatible = "nvidia,tegra20-gr2d", },
T
Thierry Reding 已提交
1274
	{ .compatible = "nvidia,tegra20-gr3d", },
1275 1276 1277
	{ .compatible = "nvidia,tegra30-dc", },
	{ .compatible = "nvidia,tegra30-hdmi", },
	{ .compatible = "nvidia,tegra30-gr2d", },
T
Thierry Reding 已提交
1278
	{ .compatible = "nvidia,tegra30-gr3d", },
T
Thierry Reding 已提交
1279
	{ .compatible = "nvidia,tegra114-dsi", },
1280
	{ .compatible = "nvidia,tegra114-hdmi", },
T
Thierry Reding 已提交
1281
	{ .compatible = "nvidia,tegra114-gr3d", },
1282
	{ .compatible = "nvidia,tegra124-dc", },
T
Thierry Reding 已提交
1283
	{ .compatible = "nvidia,tegra124-sor", },
1284
	{ .compatible = "nvidia,tegra124-hdmi", },
1285
	{ .compatible = "nvidia,tegra124-dsi", },
A
Arto Merilainen 已提交
1286
	{ .compatible = "nvidia,tegra124-vic", },
1287
	{ .compatible = "nvidia,tegra132-dsi", },
1288
	{ .compatible = "nvidia,tegra210-dc", },
1289
	{ .compatible = "nvidia,tegra210-dsi", },
1290
	{ .compatible = "nvidia,tegra210-sor", },
1291
	{ .compatible = "nvidia,tegra210-sor1", },
A
Arto Merilainen 已提交
1292
	{ .compatible = "nvidia,tegra210-vic", },
1293 1294 1295 1296
	{ /* sentinel */ }
};

static struct host1x_driver host1x_drm_driver = {
1297 1298
	.driver = {
		.name = "drm",
1299
		.pm = &host1x_drm_pm_ops,
1300
	},
1301 1302 1303 1304 1305
	.probe = host1x_drm_probe,
	.remove = host1x_drm_remove,
	.subdevs = host1x_drm_subdevs,
};

1306 1307 1308 1309 1310 1311 1312 1313
static struct platform_driver * const drivers[] = {
	&tegra_dc_driver,
	&tegra_hdmi_driver,
	&tegra_dsi_driver,
	&tegra_dpaux_driver,
	&tegra_sor_driver,
	&tegra_gr2d_driver,
	&tegra_gr3d_driver,
A
Arto Merilainen 已提交
1314
	&tegra_vic_driver,
1315 1316
};

1317 1318 1319 1320 1321 1322 1323 1324
static int __init host1x_drm_init(void)
{
	int err;

	err = host1x_driver_register(&host1x_drm_driver);
	if (err < 0)
		return err;

1325
	err = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338
	if (err < 0)
		goto unregister_host1x;

	return 0;

unregister_host1x:
	host1x_driver_unregister(&host1x_drm_driver);
	return err;
}
module_init(host1x_drm_init);

/* Module exit point: unregister in the reverse order of host1x_drm_init(). */
static void __exit host1x_drm_exit(void)
{
	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
	host1x_driver_unregister(&host1x_drm_driver);
}
module_exit(host1x_drm_exit);

MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
MODULE_DESCRIPTION("NVIDIA Tegra DRM driver");
MODULE_LICENSE("GPL v2");