drm_fb_cma_helper.c 18.7 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
/*
 * drm kms/fb cma (contiguous memory allocator) helper functions
 *
 * Copyright (C) 2012 Analog Device Inc.
 *   Author: Lars-Peter Clausen <lars@metafoo.de>
 *
 * Based on udl_fbdev.c
 *  Copyright (C) 2012 Red Hat
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <drm/drmP.h>
21
#include <drm/drm_atomic.h>
22 23 24 25 26
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_fb_cma_helper.h>
27
#include <linux/dma-buf.h>
28
#include <linux/dma-mapping.h>
29
#include <linux/module.h>
30
#include <linux/reservation.h>
31

32 33
#define DEFAULT_FBDEFIO_DELAY_MS 50

34 35 36 37 38 39 40 41
/*
 * CMA-backed framebuffer: wraps a drm_framebuffer together with the
 * (up to four) CMA GEM objects backing its planes.
 */
struct drm_fb_cma {
	struct drm_framebuffer		fb;
	struct drm_gem_cma_object	*obj[4];	/* one backing object per plane */
};

/*
 * fbdev emulation state: the generic fb helper, the CMA framebuffer it
 * scans out, and the framebuffer vtable used when that fb was created.
 */
struct drm_fbdev_cma {
	struct drm_fb_helper	fb_helper;
	struct drm_fb_cma	*fb;
	/* vtable passed to drm_fb_cma_alloc(); its dirty() hook, if set,
	 * triggers deferred-io setup in drm_fbdev_cma_create() */
	const struct drm_framebuffer_funcs *fb_funcs;
};

45 46 47 48 49 50
/**
 * DOC: framebuffer cma helper functions
 *
 * Provides helper functions for creating a cma (contiguous memory allocator)
 * backed framebuffer.
 *
 * drm_fb_cma_create() is used in the &drm_mode_config_funcs.fb_create
 * callback function to create a cma backed framebuffer.
 *
 * An fbdev framebuffer backed by cma is also available by calling
 * drm_fbdev_cma_init(). drm_fbdev_cma_fini() tears it down.
 * If the &drm_framebuffer_funcs.dirty callback is set, fb_deferred_io will be
 * set up automatically. &drm_framebuffer_funcs.dirty is called by
 * drm_fb_helper_deferred_io() in process context (&struct delayed_work).
 *
 * Example fbdev deferred io code::
 *
 *     static int driver_fb_dirty(struct drm_framebuffer *fb,
 *                                struct drm_file *file_priv,
 *                                unsigned flags, unsigned color,
 *                                struct drm_clip_rect *clips,
 *                                unsigned num_clips)
 *     {
 *         struct drm_gem_cma_object *cma = drm_fb_cma_get_gem_obj(fb, 0);
 *         ... push changes ...
 *         return 0;
 *     }
 *
 *     static struct drm_framebuffer_funcs driver_fb_funcs = {
 *         .destroy       = drm_fb_cma_destroy,
 *         .create_handle = drm_fb_cma_create_handle,
 *         .dirty         = driver_fb_dirty,
 *     };
 *
 * Initialize::
 *
 *     fbdev = drm_fbdev_cma_init_with_funcs(dev, 16,
 *                                           dev->mode_config.num_crtc,
 *                                           dev->mode_config.num_connector,
 *                                           &driver_fb_funcs);
 *
 */

88 89 90 91 92 93 94 95 96 97
/* Map an embedded drm_fb_helper back to its enclosing drm_fbdev_cma. */
static inline struct drm_fbdev_cma *to_fbdev_cma(struct drm_fb_helper *helper)
{
	return container_of(helper, struct drm_fbdev_cma, fb_helper);
}

/* Map an embedded drm_framebuffer back to its enclosing drm_fb_cma. */
static inline struct drm_fb_cma *to_fb_cma(struct drm_framebuffer *fb)
{
	return container_of(fb, struct drm_fb_cma, fb);
}

98
void drm_fb_cma_destroy(struct drm_framebuffer *fb)
99 100 101 102 103 104
{
	struct drm_fb_cma *fb_cma = to_fb_cma(fb);
	int i;

	for (i = 0; i < 4; i++) {
		if (fb_cma->obj[i])
105
			drm_gem_object_put_unlocked(&fb_cma->obj[i]->base);
106 107 108 109 110
	}

	drm_framebuffer_cleanup(fb);
	kfree(fb_cma);
}
111
EXPORT_SYMBOL(drm_fb_cma_destroy);
112

113
int drm_fb_cma_create_handle(struct drm_framebuffer *fb,
114 115 116 117 118 119 120
	struct drm_file *file_priv, unsigned int *handle)
{
	struct drm_fb_cma *fb_cma = to_fb_cma(fb);

	return drm_gem_handle_create(file_priv,
			&fb_cma->obj[0]->base, handle);
}
121
EXPORT_SYMBOL(drm_fb_cma_create_handle);
122 123 124 125 126 127 128

/* Default framebuffer vtable used when the driver needs no dirty() hook. */
static struct drm_framebuffer_funcs drm_fb_cma_funcs = {
	.destroy	= drm_fb_cma_destroy,
	.create_handle	= drm_fb_cma_create_handle,
};

static struct drm_fb_cma *drm_fb_cma_alloc(struct drm_device *dev,
129 130
	const struct drm_mode_fb_cmd2 *mode_cmd,
	struct drm_gem_cma_object **obj,
131
	unsigned int num_planes, const struct drm_framebuffer_funcs *funcs)
132 133 134 135 136 137 138 139 140
{
	struct drm_fb_cma *fb_cma;
	int ret;
	int i;

	fb_cma = kzalloc(sizeof(*fb_cma), GFP_KERNEL);
	if (!fb_cma)
		return ERR_PTR(-ENOMEM);

141
	drm_helper_mode_fill_fb_struct(dev, &fb_cma->fb, mode_cmd);
142 143 144 145

	for (i = 0; i < num_planes; i++)
		fb_cma->obj[i] = obj[i];

146
	ret = drm_framebuffer_init(dev, &fb_cma->fb, funcs);
147
	if (ret) {
M
Masanari Iida 已提交
148
		dev_err(dev->dev, "Failed to initialize framebuffer: %d\n", ret);
149 150 151 152 153 154 155 156
		kfree(fb_cma);
		return ERR_PTR(ret);
	}

	return fb_cma;
}

/**
 * drm_fb_cma_create_with_funcs() - helper function for the
 *                                  &drm_mode_config_funcs.fb_create
 *                                  callback
 * @dev: DRM device
 * @file_priv: drm file for the ioctl call
 * @mode_cmd: metadata from the userspace fb creation request
 * @funcs: vtable to be used for the new framebuffer object
 *
 * This can be used to set &drm_framebuffer_funcs for drivers that need the
 * &drm_framebuffer_funcs.dirty callback. Use drm_fb_cma_create() if you don't
 * need to change &drm_framebuffer_funcs.
 *
 * Looks up and validates one GEM object per plane, then wraps them in a new
 * framebuffer. Returns the framebuffer or an ERR_PTR; on failure all GEM
 * references taken so far are dropped.
 */
struct drm_framebuffer *drm_fb_cma_create_with_funcs(struct drm_device *dev,
	struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd,
	const struct drm_framebuffer_funcs *funcs)
{
	const struct drm_format_info *info;
	struct drm_fb_cma *fb_cma;
	struct drm_gem_cma_object *objs[4];
	struct drm_gem_object *obj;
	int ret;
	int i;

	info = drm_get_format_info(dev, mode_cmd);
	if (!info)
		return ERR_PTR(-EINVAL);

	for (i = 0; i < info->num_planes; i++) {
		/* hsub/vsub subsampling only applies to chroma planes (i > 0) */
		unsigned int width = mode_cmd->width / (i ? info->hsub : 1);
		unsigned int height = mode_cmd->height / (i ? info->vsub : 1);
		unsigned int min_size;

		obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]);
		if (!obj) {
			dev_err(dev->dev, "Failed to lookup GEM object\n");
			ret = -ENOENT;
			goto err_gem_object_put;
		}

		/* smallest buffer that can hold this plane at the requested
		 * pitch/offset: full pitch for all rows but the last, which
		 * only needs the visible width */
		min_size = (height - 1) * mode_cmd->pitches[i]
			 + width * info->cpp[i]
			 + mode_cmd->offsets[i];

		if (obj->size < min_size) {
			drm_gem_object_put_unlocked(obj);
			ret = -EINVAL;
			goto err_gem_object_put;
		}
		objs[i] = to_drm_gem_cma_obj(obj);
	}

	fb_cma = drm_fb_cma_alloc(dev, mode_cmd, objs, i, funcs);
	if (IS_ERR(fb_cma)) {
		ret = PTR_ERR(fb_cma);
		goto err_gem_object_put;
	}

	return &fb_cma->fb;

err_gem_object_put:
	/* drop references on the objects looked up so far (indices 0..i-1) */
	for (i--; i >= 0; i--)
		drm_gem_object_put_unlocked(&objs[i]->base);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_fb_cma_create_with_funcs);

/**
 * drm_fb_cma_create() - &drm_mode_config_funcs.fb_create callback function
 * @dev: DRM device
 * @file_priv: drm file for the ioctl call
 * @mode_cmd: metadata from the userspace fb creation request
 *
 * If your hardware has special alignment or pitch requirements these should be
 * checked before calling this function. Use drm_fb_cma_create_with_funcs() if
 * you need to set &drm_framebuffer_funcs.dirty.
 *
 * Thin wrapper that creates the framebuffer with the default CMA vtable.
 */
struct drm_framebuffer *drm_fb_cma_create(struct drm_device *dev,
	struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd)
{
	const struct drm_framebuffer_funcs *funcs = &drm_fb_cma_funcs;

	return drm_fb_cma_create_with_funcs(dev, file_priv, mode_cmd, funcs);
}
EXPORT_SYMBOL_GPL(drm_fb_cma_create);

/**
 * drm_fb_cma_get_gem_obj() - Get CMA GEM object for framebuffer
 * @fb: The framebuffer
 * @plane: Which plane
 *
 * Return the CMA GEM object for given framebuffer.
 *
 * This function will usually be called from the CRTC callback functions.
 */
struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
251
						  unsigned int plane)
252 253 254 255 256 257 258 259 260 261
{
	struct drm_fb_cma *fb_cma = to_fb_cma(fb);

	if (plane >= 4)
		return NULL;

	return fb_cma->obj[plane];
}
EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_obj);

262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288
/**
 * drm_fb_cma_get_gem_addr() - Get physical address for framebuffer
 * @fb: The framebuffer
 * @state: Which state of drm plane
 * @plane: Which plane
 *
 * Return the CMA GEM address for given framebuffer, offset to the
 * source coordinates in @state, or 0 if @plane is out of range.
 *
 * This function will usually be called from the PLANE callback functions.
 */
dma_addr_t drm_fb_cma_get_gem_addr(struct drm_framebuffer *fb,
				   struct drm_plane_state *state,
				   unsigned int plane)
{
	struct drm_fb_cma *fb_cma = to_fb_cma(fb);
	unsigned int src_x, src_y;

	if (plane >= 4)
		return 0;

	/* src_x/src_y are 16.16 fixed point; use the integer part only */
	src_x = state->src_x >> 16;
	src_y = state->src_y >> 16;

	return fb_cma->obj[plane]->paddr + fb->offsets[plane]
	     + fb->pitches[plane] * src_y
	     + fb->format->cpp[plane] * src_x;
}
EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_addr);

289 290 291 292 293
/**
 * drm_fb_cma_prepare_fb() - Prepare CMA framebuffer
 * @plane: Which plane
 * @state: Plane state attach fence to
 *
 * This should be set as the &struct drm_plane_helper_funcs.prepare_fb hook.
 *
 * This function checks if the plane FB has an dma-buf attached, extracts
 * the exclusive fence and attaches it to plane state for the atomic helper
 * to wait on.
 *
 * There is no need for cleanup_fb for CMA based framebuffer drivers.
 */
int drm_fb_cma_prepare_fb(struct drm_plane *plane,
			  struct drm_plane_state *state)
{
	struct dma_buf *dma_buf;

	/* nothing to do without an fb, or when the fb is unchanged */
	if (!state->fb || plane->state->fb == state->fb)
		return 0;

	dma_buf = drm_fb_cma_get_gem_obj(state->fb, 0)->base.dma_buf;
	if (dma_buf) {
		struct dma_fence *fence =
			reservation_object_get_excl_rcu(dma_buf->resv);

		drm_atomic_set_fence_for_plane(state, fence);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(drm_fb_cma_prepare_fb);

R
Rob Clark 已提交
321
#ifdef CONFIG_DEBUG_FS
322
static void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m)
R
Rob Clark 已提交
323 324
{
	struct drm_fb_cma *fb_cma = to_fb_cma(fb);
325
	int i;
R
Rob Clark 已提交
326 327

	seq_printf(m, "fb: %dx%d@%4.4s\n", fb->width, fb->height,
V
Ville Syrjälä 已提交
328
			(char *)&fb->format->format);
R
Rob Clark 已提交
329

330
	for (i = 0; i < fb->format->num_planes; i++) {
R
Rob Clark 已提交
331 332 333 334 335 336 337 338
		seq_printf(m, "   %d: offset=%d pitch=%d, obj: ",
				i, fb->offsets[i], fb->pitches[i]);
		drm_gem_cma_describe(fb_cma->obj[i], m);
	}
}

/**
 * drm_fb_cma_debugfs_show() - Helper to list CMA framebuffer objects
339 340 341
 *			       in debugfs.
 * @m: output file
 * @arg: private data for the callback
R
Rob Clark 已提交
342 343 344 345 346 347 348
 */
int drm_fb_cma_debugfs_show(struct seq_file *m, void *arg)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_framebuffer *fb;

349
	mutex_lock(&dev->mode_config.fb_lock);
350
	drm_for_each_fb(fb, dev)
R
Rob Clark 已提交
351
		drm_fb_cma_describe(fb, m);
352
	mutex_unlock(&dev->mode_config.fb_lock);
R
Rob Clark 已提交
353 354 355 356 357 358

	return 0;
}
EXPORT_SYMBOL_GPL(drm_fb_cma_debugfs_show);
#endif

359 360 361 362 363 364
/* fbdev .fb_mmap: map the whole CMA buffer write-combined into userspace. */
static int drm_fb_cma_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	return dma_mmap_writecombine(info->device, vma, info->screen_base,
				     info->fix.smem_start, info->fix.smem_len);
}

365 366
/*
 * Default fbdev ops. When deferred io is enabled,
 * drm_fbdev_cma_defio_init() installs a per-device copy of this table
 * with fb_mmap replaced by the deferred-io variant.
 */
static struct fb_ops drm_fbdev_cma_ops = {
	.owner		= THIS_MODULE,
	DRM_FB_HELPER_DEFAULT_OPS,
	.fb_fillrect	= drm_fb_helper_sys_fillrect,
	.fb_copyarea	= drm_fb_helper_sys_copyarea,
	.fb_imageblit	= drm_fb_helper_sys_imageblit,
	.fb_mmap	= drm_fb_cma_mmap,
};

374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397
/*
 * fb_mmap variant for deferred io: let fb_deferred_io_mmap() set up the
 * mapping, then make it write-combined, as drm_fb_cma_mmap() does for the
 * non-deferred path.
 */
static int drm_fbdev_cma_deferred_io_mmap(struct fb_info *info,
					  struct vm_area_struct *vma)
{
	fb_deferred_io_mmap(info, vma);
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	return 0;
}

/*
 * Enable fbdev deferred io on @fbi, backed by @cma_obj's kernel mapping.
 * Allocates per-device copies of the fb_ops and fb_deferred_io structures,
 * points the screen buffer at the CMA vaddr and installs the deferred-io
 * fb_mmap. Returns 0 on success or -ENOMEM.
 */
static int drm_fbdev_cma_defio_init(struct fb_info *fbi,
				    struct drm_gem_cma_object *cma_obj)
{
	struct fb_deferred_io *fbdefio;
	struct fb_ops *fbops;

	/*
	 * Per device structures are needed because:
	 * fbops: fb_deferred_io_cleanup() clears fbops.fb_mmap
	 * fbdefio: individual delays
	 */
	fbdefio = kzalloc(sizeof(*fbdefio), GFP_KERNEL);
	fbops = kzalloc(sizeof(*fbops), GFP_KERNEL);
	if (!fbdefio || !fbops) {
		/* kfree(NULL) is a no-op, so one failed alloc is fine */
		kfree(fbdefio);
		kfree(fbops);
		return -ENOMEM;
	}

	/* can't be offset from vaddr since dirty() uses cma_obj */
	fbi->screen_buffer = cma_obj->vaddr;
	/* fb_deferred_io_fault() needs a physical address */
	fbi->fix.smem_start = page_to_phys(virt_to_page(fbi->screen_buffer));

	/* replace the shared ops table with this device's private copy */
	*fbops = *fbi->fbops;
	fbi->fbops = fbops;

	fbdefio->delay = msecs_to_jiffies(DEFAULT_FBDEFIO_DELAY_MS);
	fbdefio->deferred_io = drm_fb_helper_deferred_io;
	fbi->fbdefio = fbdefio;
	fb_deferred_io_init(fbi);
	fbi->fbops->fb_mmap = drm_fbdev_cma_deferred_io_mmap;

	return 0;
}

/* Undo drm_fbdev_cma_defio_init(); no-op if deferred io was never set up. */
static void drm_fbdev_cma_defio_fini(struct fb_info *fbi)
{
	if (fbi->fbdefio) {
		fb_deferred_io_cleanup(fbi);
		kfree(fbi->fbdefio);
		kfree(fbi->fbops);
	}
}

429 430 431
static int
drm_fbdev_cma_create(struct drm_fb_helper *helper,
	struct drm_fb_helper_surface_size *sizes)
432 433 434 435 436 437 438 439 440 441 442 443
{
	struct drm_fbdev_cma *fbdev_cma = to_fbdev_cma(helper);
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
	struct drm_device *dev = helper->dev;
	struct drm_gem_cma_object *obj;
	struct drm_framebuffer *fb;
	unsigned int bytes_per_pixel;
	unsigned long offset;
	struct fb_info *fbi;
	size_t size;
	int ret;

444
	DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d)\n",
445 446 447 448 449 450 451 452 453 454 455 456 457
			sizes->surface_width, sizes->surface_height,
			sizes->surface_bpp);

	bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8);

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;
	mode_cmd.pitches[0] = sizes->surface_width * bytes_per_pixel;
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
		sizes->surface_depth);

	size = mode_cmd.pitches[0] * mode_cmd.height;
	obj = drm_gem_cma_create(dev, size);
458
	if (IS_ERR(obj))
459 460
		return -ENOMEM;

461 462 463
	fbi = drm_fb_helper_alloc_fbi(helper);
	if (IS_ERR(fbi)) {
		ret = PTR_ERR(fbi);
464
		goto err_gem_free_object;
465 466
	}

467 468
	fbdev_cma->fb = drm_fb_cma_alloc(dev, &mode_cmd, &obj, 1,
					 fbdev_cma->fb_funcs);
469 470 471
	if (IS_ERR(fbdev_cma->fb)) {
		dev_err(dev->dev, "Failed to allocate DRM framebuffer.\n");
		ret = PTR_ERR(fbdev_cma->fb);
472
		goto err_fb_info_destroy;
473 474 475 476 477 478 479 480 481
	}

	fb = &fbdev_cma->fb->fb;
	helper->fb = fb;

	fbi->par = helper;
	fbi->flags = FBINFO_FLAG_DEFAULT;
	fbi->fbops = &drm_fbdev_cma_ops;

V
Ville Syrjälä 已提交
482
	drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth);
R
Rob Clark 已提交
483
	drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
484 485 486 487 488 489 490 491 492 493

	offset = fbi->var.xoffset * bytes_per_pixel;
	offset += fbi->var.yoffset * fb->pitches[0];

	dev->mode_config.fb_base = (resource_size_t)obj->paddr;
	fbi->screen_base = obj->vaddr + offset;
	fbi->fix.smem_start = (unsigned long)(obj->paddr + offset);
	fbi->screen_size = size;
	fbi->fix.smem_len = size;

494
	if (fbdev_cma->fb_funcs->dirty) {
495 496 497 498 499
		ret = drm_fbdev_cma_defio_init(fbi, obj);
		if (ret)
			goto err_cma_destroy;
	}

500 501
	return 0;

502
err_cma_destroy:
503
	drm_framebuffer_remove(&fbdev_cma->fb->fb);
504
err_fb_info_destroy:
505
	drm_fb_helper_fini(helper);
506
err_gem_free_object:
507
	drm_gem_object_put_unlocked(&obj->base);
508 509 510
	return ret;
}

511
/* fb helper vtable: only the probe callback is needed for CMA fbdev. */
static const struct drm_fb_helper_funcs drm_fb_cma_helper_funcs = {
	.fb_probe = drm_fbdev_cma_create,
};

/**
516
 * drm_fbdev_cma_init_with_funcs() - Allocate and initializes a drm_fbdev_cma struct
517 518 519
 * @dev: DRM device
 * @preferred_bpp: Preferred bits per pixel for the device
 * @max_conn_count: Maximum number of connectors
520
 * @funcs: fb helper functions, in particular a custom dirty() callback
521 522 523
 *
 * Returns a newly allocated drm_fbdev_cma struct or a ERR_PTR.
 */
524
struct drm_fbdev_cma *drm_fbdev_cma_init_with_funcs(struct drm_device *dev,
525 526
	unsigned int preferred_bpp, unsigned int max_conn_count,
	const struct drm_framebuffer_funcs *funcs)
527 528 529 530 531 532 533 534 535 536
{
	struct drm_fbdev_cma *fbdev_cma;
	struct drm_fb_helper *helper;
	int ret;

	fbdev_cma = kzalloc(sizeof(*fbdev_cma), GFP_KERNEL);
	if (!fbdev_cma) {
		dev_err(dev->dev, "Failed to allocate drm fbdev.\n");
		return ERR_PTR(-ENOMEM);
	}
537
	fbdev_cma->fb_funcs = funcs;
538 539 540

	helper = &fbdev_cma->fb_helper;

541
	drm_fb_helper_prepare(dev, helper, &drm_fb_cma_helper_funcs);
542

543
	ret = drm_fb_helper_init(dev, helper, max_conn_count);
544 545 546 547 548 549 550 551 552 553 554 555 556 557
	if (ret < 0) {
		dev_err(dev->dev, "Failed to initialize drm fb helper.\n");
		goto err_free;
	}

	ret = drm_fb_helper_single_add_all_connectors(helper);
	if (ret < 0) {
		dev_err(dev->dev, "Failed to add connectors.\n");
		goto err_drm_fb_helper_fini;

	}

	ret = drm_fb_helper_initial_config(helper, preferred_bpp);
	if (ret < 0) {
M
Masanari Iida 已提交
558
		dev_err(dev->dev, "Failed to set initial hw configuration.\n");
559 560 561 562 563 564 565 566 567 568 569 570
		goto err_drm_fb_helper_fini;
	}

	return fbdev_cma;

err_drm_fb_helper_fini:
	drm_fb_helper_fini(helper);
err_free:
	kfree(fbdev_cma);

	return ERR_PTR(ret);
}
571 572 573 574 575 576 577 578 579 580 581
EXPORT_SYMBOL_GPL(drm_fbdev_cma_init_with_funcs);

/**
 * drm_fbdev_cma_init() - Allocate and initializes a drm_fbdev_cma struct
 * @dev: DRM device
 * @preferred_bpp: Preferred bits per pixel for the device
 * @max_conn_count: Maximum number of connectors
 *
 * Returns a newly allocated drm_fbdev_cma struct or a ERR_PTR.
 */
struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
582
	unsigned int preferred_bpp, unsigned int max_conn_count)
583
{
584 585 586
	return drm_fbdev_cma_init_with_funcs(dev, preferred_bpp,
					     max_conn_count,
					     &drm_fb_cma_funcs);
587
}
588 589 590 591 592 593 594 595
EXPORT_SYMBOL_GPL(drm_fbdev_cma_init);

/**
 * drm_fbdev_cma_fini() - Free drm_fbdev_cma struct
 * @fbdev_cma: The drm_fbdev_cma struct
 */
void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma)
{
596
	drm_fb_helper_unregister_fbi(&fbdev_cma->fb_helper);
597 598
	if (fbdev_cma->fb_helper.fbdev)
		drm_fbdev_cma_defio_fini(fbdev_cma->fb_helper.fbdev);
599

600 601
	if (fbdev_cma->fb)
		drm_framebuffer_remove(&fbdev_cma->fb->fb);
602 603 604 605 606 607 608 609 610 611

	drm_fb_helper_fini(&fbdev_cma->fb_helper);
	kfree(fbdev_cma);
}
EXPORT_SYMBOL_GPL(drm_fbdev_cma_fini);

/**
 * drm_fbdev_cma_restore_mode() - Restores initial framebuffer mode
 * @fbdev_cma: The drm_fbdev_cma struct, may be NULL
 *
612
 * This function is usually called from the &drm_driver.lastclose callback.
613 614 615
 */
void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma)
{
616 617
	if (fbdev_cma)
		drm_fb_helper_restore_fbdev_mode_unlocked(&fbdev_cma->fb_helper);
618 619 620 621 622 623 624
}
EXPORT_SYMBOL_GPL(drm_fbdev_cma_restore_mode);

/**
 * drm_fbdev_cma_hotplug_event() - Poll for hotplug events
 * @fbdev_cma: The drm_fbdev_cma struct, may be NULL
 *
 * This function is usually called from the &drm_mode_config.output_poll_changed
 * callback.
 */
void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma)
{
	if (fbdev_cma)
		drm_fb_helper_hotplug_event(&fbdev_cma->fb_helper);
}
EXPORT_SYMBOL_GPL(drm_fbdev_cma_hotplug_event);
634 635 636 637 638 639 640 641 642

/**
 * drm_fbdev_cma_set_suspend - wrapper around drm_fb_helper_set_suspend
 * @fbdev_cma: The drm_fbdev_cma struct, may be NULL
 * @state: desired state, zero to resume, non-zero to suspend
 *
 * Calls drm_fb_helper_set_suspend, which is a wrapper around
 * fb_set_suspend implemented by fbdev core.
 */
643
void drm_fbdev_cma_set_suspend(struct drm_fbdev_cma *fbdev_cma, bool state)
644 645 646 647 648
{
	if (fbdev_cma)
		drm_fb_helper_set_suspend(&fbdev_cma->fb_helper, state);
}
EXPORT_SYMBOL(drm_fbdev_cma_set_suspend);
649 650 651 652 653 654 655 656 657 658 659

/**
 * drm_fbdev_cma_set_suspend_unlocked - wrapper around
 *                                      drm_fb_helper_set_suspend_unlocked
 * @fbdev_cma: The drm_fbdev_cma struct, may be NULL
 * @state: desired state, zero to resume, non-zero to suspend
 *
 * Calls drm_fb_helper_set_suspend, which is a wrapper around
 * fb_set_suspend implemented by fbdev core.
 */
void drm_fbdev_cma_set_suspend_unlocked(struct drm_fbdev_cma *fbdev_cma,
660
					bool state)
661 662 663 664 665 666
{
	if (fbdev_cma)
		drm_fb_helper_set_suspend_unlocked(&fbdev_cma->fb_helper,
						   state);
}
EXPORT_SYMBOL(drm_fbdev_cma_set_suspend_unlocked);