/* drm_fb_cma_helper.c */
/*
 * drm kms/fb cma (contiguous memory allocator) helper functions
 *
 * Copyright (C) 2012 Analog Device Inc.
 *   Author: Lars-Peter Clausen <lars@metafoo.de>
 *
 * Based on udl_fbdev.c
 *  Copyright (C) 2012 Red Hat
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/reservation.h>

32 33
#define DEFAULT_FBDEFIO_DELAY_MS 50

34 35 36 37 38 39 40 41
struct drm_fb_cma {
	struct drm_framebuffer		fb;
	struct drm_gem_cma_object	*obj[4];
};

struct drm_fbdev_cma {
	struct drm_fb_helper	fb_helper;
	struct drm_fb_cma	*fb;
42
	const struct drm_framebuffer_funcs *fb_funcs;
43 44
};

/**
 * DOC: framebuffer cma helper functions
 *
 * Provides helper functions for creating a cma (contiguous memory allocator)
 * backed framebuffer.
 *
 * drm_fb_cma_create() is used in the &drm_mode_config_funcs.fb_create
 * callback function to create a cma backed framebuffer.
 *
 * An fbdev framebuffer backed by cma is also available by calling
 * drm_fbdev_cma_init(). drm_fbdev_cma_fini() tears it down.
 * If the &drm_framebuffer_funcs.dirty callback is set, fb_deferred_io will be
 * set up automatically. &drm_framebuffer_funcs.dirty is called by
 * drm_fb_helper_deferred_io() in process context (&struct delayed_work).
 *
 * Example fbdev deferred io code::
 *
 *     static int driver_fb_dirty(struct drm_framebuffer *fb,
 *                                struct drm_file *file_priv,
 *                                unsigned flags, unsigned color,
 *                                struct drm_clip_rect *clips,
 *                                unsigned num_clips)
 *     {
 *         struct drm_gem_cma_object *cma = drm_fb_cma_get_gem_obj(fb, 0);
 *         ... push changes ...
 *         return 0;
 *     }
 *
 *     static struct drm_framebuffer_funcs driver_fb_funcs = {
 *         .destroy       = drm_fb_cma_destroy,
 *         .create_handle = drm_fb_cma_create_handle,
 *         .dirty         = driver_fb_dirty,
 *     };
 *
 * Initialize::
 *
 *     fbdev = drm_fbdev_cma_init_with_funcs(dev, 16,
 *                                           dev->mode_config.num_crtc,
 *                                           dev->mode_config.num_connector,
 *                                           &driver_fb_funcs);
 *
 */

88 89 90 91 92 93 94 95 96 97
static inline struct drm_fbdev_cma *to_fbdev_cma(struct drm_fb_helper *helper)
{
	return container_of(helper, struct drm_fbdev_cma, fb_helper);
}

/* Map an embedded &drm_framebuffer back to its containing drm_fb_cma. */
static inline struct drm_fb_cma *to_fb_cma(struct drm_framebuffer *fb)
{
	return container_of(fb, struct drm_fb_cma, fb);
}

98
void drm_fb_cma_destroy(struct drm_framebuffer *fb)
99 100 101 102 103 104 105 106 107 108 109 110
{
	struct drm_fb_cma *fb_cma = to_fb_cma(fb);
	int i;

	for (i = 0; i < 4; i++) {
		if (fb_cma->obj[i])
			drm_gem_object_unreference_unlocked(&fb_cma->obj[i]->base);
	}

	drm_framebuffer_cleanup(fb);
	kfree(fb_cma);
}
111
EXPORT_SYMBOL(drm_fb_cma_destroy);
112

113
int drm_fb_cma_create_handle(struct drm_framebuffer *fb,
114 115 116 117 118 119 120
	struct drm_file *file_priv, unsigned int *handle)
{
	struct drm_fb_cma *fb_cma = to_fb_cma(fb);

	return drm_gem_handle_create(file_priv,
			&fb_cma->obj[0]->base, handle);
}
121
EXPORT_SYMBOL(drm_fb_cma_create_handle);
122 123 124 125 126 127 128

/* Default framebuffer funcs used when the driver does not supply its own. */
static struct drm_framebuffer_funcs drm_fb_cma_funcs = {
	.destroy	= drm_fb_cma_destroy,
	.create_handle	= drm_fb_cma_create_handle,
};

static struct drm_fb_cma *drm_fb_cma_alloc(struct drm_device *dev,
129 130
	const struct drm_mode_fb_cmd2 *mode_cmd,
	struct drm_gem_cma_object **obj,
131
	unsigned int num_planes, const struct drm_framebuffer_funcs *funcs)
132 133 134 135 136 137 138 139 140
{
	struct drm_fb_cma *fb_cma;
	int ret;
	int i;

	fb_cma = kzalloc(sizeof(*fb_cma), GFP_KERNEL);
	if (!fb_cma)
		return ERR_PTR(-ENOMEM);

141
	drm_helper_mode_fill_fb_struct(dev, &fb_cma->fb, mode_cmd);
142 143 144 145

	for (i = 0; i < num_planes; i++)
		fb_cma->obj[i] = obj[i];

146
	ret = drm_framebuffer_init(dev, &fb_cma->fb, funcs);
147
	if (ret) {
M
Masanari Iida 已提交
148
		dev_err(dev->dev, "Failed to initialize framebuffer: %d\n", ret);
149 150 151 152 153 154 155 156
		kfree(fb_cma);
		return ERR_PTR(ret);
	}

	return fb_cma;
}

/**
157
 * drm_fb_cma_create_with_funcs() - helper function for the
158 159
 *                                  &drm_mode_config_funcs.fb_create
 *                                  callback
160 161 162 163
 * @dev: DRM device
 * @file_priv: drm file for the ioctl call
 * @mode_cmd: metadata from the userspace fb creation request
 * @funcs: vtable to be used for the new framebuffer object
164
 *
165
 * This can be used to set &drm_framebuffer_funcs for drivers that need the
166 167
 * &drm_framebuffer_funcs.dirty callback. Use drm_fb_cma_create() if you don't
 * need to change &drm_framebuffer_funcs.
168
 */
169 170 171
struct drm_framebuffer *drm_fb_cma_create_with_funcs(struct drm_device *dev,
	struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd,
	const struct drm_framebuffer_funcs *funcs)
172
{
173
	const struct drm_format_info *info;
174 175 176 177 178 179
	struct drm_fb_cma *fb_cma;
	struct drm_gem_cma_object *objs[4];
	struct drm_gem_object *obj;
	int ret;
	int i;

180 181 182
	info = drm_format_info(mode_cmd->pixel_format);
	if (!info)
		return ERR_PTR(-EINVAL);
183

184 185 186
	for (i = 0; i < info->num_planes; i++) {
		unsigned int width = mode_cmd->width / (i ? info->hsub : 1);
		unsigned int height = mode_cmd->height / (i ? info->vsub : 1);
187 188
		unsigned int min_size;

189
		obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]);
190 191 192 193 194 195 196
		if (!obj) {
			dev_err(dev->dev, "Failed to lookup GEM object\n");
			ret = -ENXIO;
			goto err_gem_object_unreference;
		}

		min_size = (height - 1) * mode_cmd->pitches[i]
197
			 + width * info->cpp[i]
198 199 200 201 202 203 204 205 206 207
			 + mode_cmd->offsets[i];

		if (obj->size < min_size) {
			drm_gem_object_unreference_unlocked(obj);
			ret = -EINVAL;
			goto err_gem_object_unreference;
		}
		objs[i] = to_drm_gem_cma_obj(obj);
	}

208
	fb_cma = drm_fb_cma_alloc(dev, mode_cmd, objs, i, funcs);
209 210 211 212 213 214 215 216 217 218 219 220
	if (IS_ERR(fb_cma)) {
		ret = PTR_ERR(fb_cma);
		goto err_gem_object_unreference;
	}

	return &fb_cma->fb;

err_gem_object_unreference:
	for (i--; i >= 0; i--)
		drm_gem_object_unreference_unlocked(&objs[i]->base);
	return ERR_PTR(ret);
}
221 222 223
EXPORT_SYMBOL_GPL(drm_fb_cma_create_with_funcs);

/**
 * drm_fb_cma_create() - &drm_mode_config_funcs.fb_create callback function
 * @dev: DRM device
 * @file_priv: drm file for the ioctl call
 * @mode_cmd: metadata from the userspace fb creation request
 *
 * If your hardware has special alignment or pitch requirements these should be
 * checked before calling this function. Use drm_fb_cma_create_with_funcs() if
 * you need to set &drm_framebuffer_funcs.dirty.
 */
struct drm_framebuffer *drm_fb_cma_create(struct drm_device *dev,
	struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd)
{
	return drm_fb_cma_create_with_funcs(dev, file_priv, mode_cmd,
					    &drm_fb_cma_funcs);
}
EXPORT_SYMBOL_GPL(drm_fb_cma_create);

/**
 * drm_fb_cma_get_gem_obj() - Get CMA GEM object for framebuffer
 * @fb: The framebuffer
 * @plane: Which plane
 *
 * Return the CMA GEM object for given framebuffer.
 *
 * This function will usually be called from the CRTC callback functions.
 */
struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
251
						  unsigned int plane)
252 253 254 255 256 257 258 259 260 261
{
	struct drm_fb_cma *fb_cma = to_fb_cma(fb);

	if (plane >= 4)
		return NULL;

	return fb_cma->obj[plane];
}
EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_obj);

262 263 264 265 266
/**
 * drm_fb_cma_prepare_fb() - Prepare CMA framebuffer
 * @plane: Which plane
 * @state: Plane state attach fence to
 *
267
 * This should be set as the &struct drm_plane_helper_funcs.prepare_fb hook.
268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293
 *
 * This function checks if the plane FB has an dma-buf attached, extracts
 * the exclusive fence and attaches it to plane state for the atomic helper
 * to wait on.
 *
 * There is no need for cleanup_fb for CMA based framebuffer drivers.
 */
int drm_fb_cma_prepare_fb(struct drm_plane *plane,
			  struct drm_plane_state *state)
{
	struct dma_buf *dma_buf;
	struct dma_fence *fence;

	if ((plane->state->fb == state->fb) || !state->fb)
		return 0;

	dma_buf = drm_fb_cma_get_gem_obj(state->fb, 0)->base.dma_buf;
	if (dma_buf) {
		fence = reservation_object_get_excl_rcu(dma_buf->resv);
		drm_atomic_set_fence_for_plane(state, fence);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(drm_fb_cma_prepare_fb);

#ifdef CONFIG_DEBUG_FS
/* Dump one framebuffer's geometry and per-plane GEM objects to @m. */
static void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m)
{
	struct drm_fb_cma *fb_cma = to_fb_cma(fb);
	int i;

	seq_printf(m, "fb: %dx%d@%4.4s\n", fb->width, fb->height,
			(char *)&fb->format->format);

	for (i = 0; i < fb->format->num_planes; i++) {
		seq_printf(m, "   %d: offset=%d pitch=%d, obj: ",
				i, fb->offsets[i], fb->pitches[i]);
		drm_gem_cma_describe(fb_cma->obj[i], m);
	}
}

/**
 * drm_fb_cma_debugfs_show() - Helper to list CMA framebuffer objects
 *			       in debugfs.
 * @m: output file
 * @arg: private data for the callback
 */
int drm_fb_cma_debugfs_show(struct seq_file *m, void *arg)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_framebuffer *fb;

	/* fb_lock protects the device's framebuffer list during iteration */
	mutex_lock(&dev->mode_config.fb_lock);
	drm_for_each_fb(fb, dev)
		drm_fb_cma_describe(fb, m);
	mutex_unlock(&dev->mode_config.fb_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(drm_fb_cma_debugfs_show);
#endif

332 333 334 335 336 337
static int drm_fb_cma_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	return dma_mmap_writecombine(info->device, vma, info->screen_base,
				     info->fix.smem_start, info->fix.smem_len);
}

338 339
static struct fb_ops drm_fbdev_cma_ops = {
	.owner		= THIS_MODULE,
340
	DRM_FB_HELPER_DEFAULT_OPS,
341 342 343
	.fb_fillrect	= drm_fb_helper_sys_fillrect,
	.fb_copyarea	= drm_fb_helper_sys_copyarea,
	.fb_imageblit	= drm_fb_helper_sys_imageblit,
344
	.fb_mmap	= drm_fb_cma_mmap,
345 346
};

347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370
static int drm_fbdev_cma_deferred_io_mmap(struct fb_info *info,
					  struct vm_area_struct *vma)
{
	fb_deferred_io_mmap(info, vma);
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	return 0;
}

static int drm_fbdev_cma_defio_init(struct fb_info *fbi,
				    struct drm_gem_cma_object *cma_obj)
{
	struct fb_deferred_io *fbdefio;
	struct fb_ops *fbops;

	/*
	 * Per device structures are needed because:
	 * fbops: fb_deferred_io_cleanup() clears fbops.fb_mmap
	 * fbdefio: individual delays
	 */
	fbdefio = kzalloc(sizeof(*fbdefio), GFP_KERNEL);
	fbops = kzalloc(sizeof(*fbops), GFP_KERNEL);
	if (!fbdefio || !fbops) {
		kfree(fbdefio);
S
Sudip Mukherjee 已提交
371
		kfree(fbops);
372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401
		return -ENOMEM;
	}

	/* can't be offset from vaddr since dirty() uses cma_obj */
	fbi->screen_buffer = cma_obj->vaddr;
	/* fb_deferred_io_fault() needs a physical address */
	fbi->fix.smem_start = page_to_phys(virt_to_page(fbi->screen_buffer));

	*fbops = *fbi->fbops;
	fbi->fbops = fbops;

	fbdefio->delay = msecs_to_jiffies(DEFAULT_FBDEFIO_DELAY_MS);
	fbdefio->deferred_io = drm_fb_helper_deferred_io;
	fbi->fbdefio = fbdefio;
	fb_deferred_io_init(fbi);
	fbi->fbops->fb_mmap = drm_fbdev_cma_deferred_io_mmap;

	return 0;
}

/* Undo drm_fbdev_cma_defio_init(); no-op if deferred io was never set up. */
static void drm_fbdev_cma_defio_fini(struct fb_info *fbi)
{
	if (!fbi->fbdefio)
		return;

	fb_deferred_io_cleanup(fbi);
	/* free the per-device copies allocated in drm_fbdev_cma_defio_init() */
	kfree(fbi->fbdefio);
	kfree(fbi->fbops);
}

402 403 404
static int
drm_fbdev_cma_create(struct drm_fb_helper *helper,
	struct drm_fb_helper_surface_size *sizes)
405 406 407 408 409 410 411 412 413 414 415 416
{
	struct drm_fbdev_cma *fbdev_cma = to_fbdev_cma(helper);
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
	struct drm_device *dev = helper->dev;
	struct drm_gem_cma_object *obj;
	struct drm_framebuffer *fb;
	unsigned int bytes_per_pixel;
	unsigned long offset;
	struct fb_info *fbi;
	size_t size;
	int ret;

417
	DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d)\n",
418 419 420 421 422 423 424 425 426 427 428 429 430
			sizes->surface_width, sizes->surface_height,
			sizes->surface_bpp);

	bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8);

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;
	mode_cmd.pitches[0] = sizes->surface_width * bytes_per_pixel;
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
		sizes->surface_depth);

	size = mode_cmd.pitches[0] * mode_cmd.height;
	obj = drm_gem_cma_create(dev, size);
431
	if (IS_ERR(obj))
432 433
		return -ENOMEM;

434 435 436
	fbi = drm_fb_helper_alloc_fbi(helper);
	if (IS_ERR(fbi)) {
		ret = PTR_ERR(fbi);
437
		goto err_gem_free_object;
438 439
	}

440 441
	fbdev_cma->fb = drm_fb_cma_alloc(dev, &mode_cmd, &obj, 1,
					 fbdev_cma->fb_funcs);
442 443 444
	if (IS_ERR(fbdev_cma->fb)) {
		dev_err(dev->dev, "Failed to allocate DRM framebuffer.\n");
		ret = PTR_ERR(fbdev_cma->fb);
445
		goto err_fb_info_destroy;
446 447 448 449 450 451 452 453 454
	}

	fb = &fbdev_cma->fb->fb;
	helper->fb = fb;

	fbi->par = helper;
	fbi->flags = FBINFO_FLAG_DEFAULT;
	fbi->fbops = &drm_fbdev_cma_ops;

V
Ville Syrjälä 已提交
455
	drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth);
R
Rob Clark 已提交
456
	drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
457 458 459 460 461 462 463 464 465 466

	offset = fbi->var.xoffset * bytes_per_pixel;
	offset += fbi->var.yoffset * fb->pitches[0];

	dev->mode_config.fb_base = (resource_size_t)obj->paddr;
	fbi->screen_base = obj->vaddr + offset;
	fbi->fix.smem_start = (unsigned long)(obj->paddr + offset);
	fbi->screen_size = size;
	fbi->fix.smem_len = size;

467
	if (fbdev_cma->fb_funcs->dirty) {
468 469 470 471 472
		ret = drm_fbdev_cma_defio_init(fbi, obj);
		if (ret)
			goto err_cma_destroy;
	}

473 474
	return 0;

475
err_cma_destroy:
476
	drm_framebuffer_remove(&fbdev_cma->fb->fb);
477 478
err_fb_info_destroy:
	drm_fb_helper_release_fbi(helper);
479
err_gem_free_object:
480
	drm_gem_object_unreference_unlocked(&obj->base);
481 482 483
	return ret;
}

484
static const struct drm_fb_helper_funcs drm_fb_cma_helper_funcs = {
485
	.fb_probe = drm_fbdev_cma_create,
486 487 488
};

/**
489
 * drm_fbdev_cma_init_with_funcs() - Allocate and initializes a drm_fbdev_cma struct
490 491 492 493
 * @dev: DRM device
 * @preferred_bpp: Preferred bits per pixel for the device
 * @num_crtc: Number of CRTCs
 * @max_conn_count: Maximum number of connectors
494
 * @funcs: fb helper functions, in particular a custom dirty() callback
495 496 497
 *
 * Returns a newly allocated drm_fbdev_cma struct or a ERR_PTR.
 */
498
struct drm_fbdev_cma *drm_fbdev_cma_init_with_funcs(struct drm_device *dev,
499
	unsigned int preferred_bpp, unsigned int num_crtc,
500
	unsigned int max_conn_count, const struct drm_framebuffer_funcs *funcs)
501 502 503 504 505 506 507 508 509 510
{
	struct drm_fbdev_cma *fbdev_cma;
	struct drm_fb_helper *helper;
	int ret;

	fbdev_cma = kzalloc(sizeof(*fbdev_cma), GFP_KERNEL);
	if (!fbdev_cma) {
		dev_err(dev->dev, "Failed to allocate drm fbdev.\n");
		return ERR_PTR(-ENOMEM);
	}
511
	fbdev_cma->fb_funcs = funcs;
512 513 514

	helper = &fbdev_cma->fb_helper;

515
	drm_fb_helper_prepare(dev, helper, &drm_fb_cma_helper_funcs);
516

517 518 519 520 521 522 523 524 525 526 527 528 529 530 531
	ret = drm_fb_helper_init(dev, helper, num_crtc, max_conn_count);
	if (ret < 0) {
		dev_err(dev->dev, "Failed to initialize drm fb helper.\n");
		goto err_free;
	}

	ret = drm_fb_helper_single_add_all_connectors(helper);
	if (ret < 0) {
		dev_err(dev->dev, "Failed to add connectors.\n");
		goto err_drm_fb_helper_fini;

	}

	ret = drm_fb_helper_initial_config(helper, preferred_bpp);
	if (ret < 0) {
M
Masanari Iida 已提交
532
		dev_err(dev->dev, "Failed to set initial hw configuration.\n");
533 534 535 536 537 538 539 540 541 542 543 544
		goto err_drm_fb_helper_fini;
	}

	return fbdev_cma;

err_drm_fb_helper_fini:
	drm_fb_helper_fini(helper);
err_free:
	kfree(fbdev_cma);

	return ERR_PTR(ret);
}
545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560
EXPORT_SYMBOL_GPL(drm_fbdev_cma_init_with_funcs);

/**
 * drm_fbdev_cma_init() - Allocate and initializes a drm_fbdev_cma struct
 * @dev: DRM device
 * @preferred_bpp: Preferred bits per pixel for the device
 * @num_crtc: Number of CRTCs
 * @max_conn_count: Maximum number of connectors
 *
 * Returns a newly allocated drm_fbdev_cma struct or a ERR_PTR.
 */
struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
	unsigned int preferred_bpp, unsigned int num_crtc,
	unsigned int max_conn_count)
{
	return drm_fbdev_cma_init_with_funcs(dev, preferred_bpp, num_crtc,
561
				max_conn_count, &drm_fb_cma_funcs);
562
}
563 564 565 566 567 568 569 570
EXPORT_SYMBOL_GPL(drm_fbdev_cma_init);

/**
 * drm_fbdev_cma_fini() - Free drm_fbdev_cma struct
 * @fbdev_cma: The drm_fbdev_cma struct
 */
void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma)
{
571
	drm_fb_helper_unregister_fbi(&fbdev_cma->fb_helper);
572 573
	if (fbdev_cma->fb_helper.fbdev)
		drm_fbdev_cma_defio_fini(fbdev_cma->fb_helper.fbdev);
574
	drm_fb_helper_release_fbi(&fbdev_cma->fb_helper);
575

576 577
	if (fbdev_cma->fb)
		drm_framebuffer_remove(&fbdev_cma->fb->fb);
578 579 580 581 582 583 584 585 586 587

	drm_fb_helper_fini(&fbdev_cma->fb_helper);
	kfree(fbdev_cma);
}
EXPORT_SYMBOL_GPL(drm_fbdev_cma_fini);

/**
 * drm_fbdev_cma_restore_mode() - Restores initial framebuffer mode
 * @fbdev_cma: The drm_fbdev_cma struct, may be NULL
 *
588
 * This function is usually called from the &drm_driver.lastclose callback.
589 590 591
 */
void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma)
{
592 593
	if (fbdev_cma)
		drm_fb_helper_restore_fbdev_mode_unlocked(&fbdev_cma->fb_helper);
594 595 596 597 598 599 600
}
EXPORT_SYMBOL_GPL(drm_fbdev_cma_restore_mode);

/**
 * drm_fbdev_cma_hotplug_event() - Poll for hotpulug events
 * @fbdev_cma: The drm_fbdev_cma struct, may be NULL
 *
 * This function is usually called from the &drm_mode_config.output_poll_changed
 * callback.
 */
void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma)
{
	if (fbdev_cma)
		drm_fb_helper_hotplug_event(&fbdev_cma->fb_helper);
}
EXPORT_SYMBOL_GPL(drm_fbdev_cma_hotplug_event);
610 611 612 613 614 615 616 617 618 619 620 621 622 623 624

/**
 * drm_fbdev_cma_set_suspend - wrapper around drm_fb_helper_set_suspend
 * @fbdev_cma: The drm_fbdev_cma struct, may be NULL
 * @state: desired state, zero to resume, non-zero to suspend
 *
 * Calls drm_fb_helper_set_suspend, which is a wrapper around
 * fb_set_suspend implemented by fbdev core.
 */
void drm_fbdev_cma_set_suspend(struct drm_fbdev_cma *fbdev_cma, int state)
{
	if (fbdev_cma)
		drm_fb_helper_set_suspend(&fbdev_cma->fb_helper, state);
}
EXPORT_SYMBOL(drm_fbdev_cma_set_suspend);