/* exynos_drm_fbdev.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Authors:
 *	Inki Dae <inki.dae@samsung.com>
 *	Joonyoung Shim <jy0922.shim@samsung.com>
 *	Seung-Woo Kim <sw0312.kim@samsung.com>
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 */

#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_iommu.h"

#define MAX_CONNECTOR		4
#define PREFERRED_BPP		32

#define to_exynos_fbdev(x)	container_of(x, struct exynos_drm_fbdev,\
				drm_fb_helper)

/*
 * Per-device fbdev emulation state: ties the generic DRM fb helper to
 * the single GEM object that backs the emulated framebuffer.
 */
struct exynos_drm_fbdev {
	/* embedded helper; to_exynos_fbdev() recovers this struct from it */
	struct drm_fb_helper		drm_fb_helper;
	/* GEM object providing the framebuffer storage (set in fb_probe) */
	struct exynos_drm_gem_obj	*exynos_gem_obj;
};

/*
 * fb_ops.fb_mmap implementation: map the GEM buffer backing the fbdev
 * framebuffer into the calling process' address space.
 *
 * Returns 0 on success, -EINVAL if the requested mapping is larger than
 * the backing buffer, or the negative error from dma_mmap_attrs().
 */
static int exynos_drm_fb_mmap(struct fb_info *info,
			struct vm_area_struct *vma)
{
	struct drm_fb_helper *helper = info->par;
	struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(helper);
	struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
	struct exynos_drm_gem_buf *buffer = exynos_gem_obj->buffer;
	unsigned long vm_size;
	int ret;

	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;

	vm_size = vma->vm_end - vma->vm_start;

	/* refuse mappings that would extend past the allocated buffer */
	if (vm_size > buffer->size)
		return -EINVAL;

	/* let the DMA layer set up the actual PTEs for the whole buffer */
	ret = dma_mmap_attrs(helper->dev->dev, vma, buffer->pages,
		buffer->dma_addr, buffer->size, &buffer->dma_attrs);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	return 0;
}

/*
 * fbdev emulation ops: mmap is implemented locally (GEM-backed buffer),
 * drawing is the generic cfb_* software path, and mode/var handling is
 * delegated to the DRM fb helper core.
 */
static struct fb_ops exynos_drm_fb_ops = {
	.owner		= THIS_MODULE,
	.fb_mmap        = exynos_drm_fb_mmap,
	.fb_fillrect	= cfb_fillrect,
	.fb_copyarea	= cfb_copyarea,
	.fb_imageblit	= cfb_imageblit,
	.fb_check_var	= drm_fb_helper_check_var,
	.fb_set_par	= drm_fb_helper_set_par,
	.fb_blank	= drm_fb_helper_blank,
	.fb_pan_display	= drm_fb_helper_pan_display,
	.fb_setcmap	= drm_fb_helper_setcmap,
};

/*
 * Finish initializing the fb_info of a freshly created framebuffer:
 * fill the fix/var screen info and make the backing GEM buffer
 * reachable through a kernel virtual address (fbi->screen_base).
 *
 * Returns 0 on success, -EFAULT when the framebuffer has no buffer
 * attached, or -EIO when the buffer could not be mapped into kernel
 * space.
 */
static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
				     struct drm_framebuffer *fb)
{
	struct fb_info *fbi = helper->fbdev;
	struct drm_device *dev = helper->dev;
	struct exynos_drm_gem_buf *buffer;
	unsigned int size = fb->width * fb->height * (fb->bits_per_pixel >> 3);
	unsigned long offset;

	drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
	drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height);

	/* RGB formats use only one buffer */
	buffer = exynos_drm_fb_buffer(fb, 0);
	if (!buffer) {
		DRM_LOG_KMS("buffer is null.\n");
		return -EFAULT;
	}

	/* map pages with kernel virtual space. */
	if (!buffer->kvaddr) {
		if (is_drm_iommu_supported(dev)) {
			/* pages may be physically scattered behind the
			 * IOMMU, so build a contiguous kernel mapping */
			unsigned int nr_pages = buffer->size >> PAGE_SHIFT;

			buffer->kvaddr = vmap(buffer->pages, nr_pages, VM_MAP,
					pgprot_writecombine(PAGE_KERNEL));
		} else {
			/* physically contiguous allocation: reuse the linear
			 * kernel mapping of the DMA address.
			 * NOTE(review): phys_to_virt() assumes the buffer
			 * lives in lowmem — confirm against the allocator. */
			phys_addr_t dma_addr = buffer->dma_addr;
			if (dma_addr)
				buffer->kvaddr = phys_to_virt(dma_addr);
			else
				buffer->kvaddr = (void __iomem *)NULL;
		}
		if (!buffer->kvaddr) {
			DRM_ERROR("failed to map pages to kernel space.\n");
			return -EIO;
		}
	}

	/* buffer count to framebuffer always is 1 at booting time. */
	exynos_drm_fb_set_buf_cnt(fb, 1);

	/* point screen_base at the current (xoffset, yoffset) pan position */
	offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3);
	offset += fbi->var.yoffset * fb->pitches[0];

	dev->mode_config.fb_base = (resource_size_t)buffer->dma_addr;
	fbi->screen_base = buffer->kvaddr + offset;
	/* smem_start: physical address of the first visible byte */
	if (is_drm_iommu_supported(dev))
		fbi->fix.smem_start = (unsigned long)
			(page_to_phys(sg_page(buffer->sgt->sgl)) + offset);
	else
		fbi->fix.smem_start = (unsigned long)buffer->dma_addr;

	fbi->screen_size = size;
	fbi->fix.smem_len = size;

	return 0;
}

static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
				    struct drm_fb_helper_surface_size *sizes)
{
	struct exynos_drm_fbdev *exynos_fbdev = to_exynos_fbdev(helper);
139
	struct exynos_drm_gem_obj *exynos_gem_obj;
140 141
	struct drm_device *dev = helper->dev;
	struct fb_info *fbi;
142
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
143
	struct platform_device *pdev = dev->platformdev;
144
	unsigned long size;
145 146 147 148 149 150 151 152
	int ret;

	DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d\n",
			sizes->surface_width, sizes->surface_height,
			sizes->surface_bpp);

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;
153 154 155
	mode_cmd.pitches[0] = sizes->surface_width * (sizes->surface_bpp >> 3);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
							  sizes->surface_depth);
156 157 158 159 160 161 162 163 164 165

	mutex_lock(&dev->struct_mutex);

	fbi = framebuffer_alloc(0, &pdev->dev);
	if (!fbi) {
		DRM_ERROR("failed to allocate fb info.\n");
		ret = -ENOMEM;
		goto out;
	}

166
	size = mode_cmd.pitches[0] * mode_cmd.height;
167 168 169

	/* 0 means to allocate physically continuous memory */
	exynos_gem_obj = exynos_drm_gem_create(dev, 0, size);
170 171
	if (IS_ERR(exynos_gem_obj)) {
		ret = PTR_ERR(exynos_gem_obj);
172
		goto err_release_framebuffer;
173 174 175 176 177 178
	}

	exynos_fbdev->exynos_gem_obj = exynos_gem_obj;

	helper->fb = exynos_drm_framebuffer_init(dev, &mode_cmd,
			&exynos_gem_obj->base);
179
	if (IS_ERR(helper->fb)) {
180
		DRM_ERROR("failed to create drm framebuffer.\n");
181
		ret = PTR_ERR(helper->fb);
182
		goto err_destroy_gem;
183 184 185 186 187 188 189 190 191 192 193
	}

	helper->fbdev = fbi;

	fbi->par = helper;
	fbi->flags = FBINFO_FLAG_DEFAULT;
	fbi->fbops = &exynos_drm_fb_ops;

	ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
	if (ret) {
		DRM_ERROR("failed to allocate cmap.\n");
194
		goto err_destroy_framebuffer;
195 196
	}

197
	ret = exynos_drm_fbdev_update(helper, helper->fb);
198 199 200 201 202 203 204 205 206 207 208 209 210 211
	if (ret < 0)
		goto err_dealloc_cmap;

	mutex_unlock(&dev->struct_mutex);
	return ret;

err_dealloc_cmap:
	fb_dealloc_cmap(&fbi->cmap);
err_destroy_framebuffer:
	drm_framebuffer_cleanup(helper->fb);
err_destroy_gem:
	exynos_drm_gem_destroy(exynos_gem_obj);
err_release_framebuffer:
	framebuffer_release(fbi);
212 213 214 215 216 217 218 219 220 221 222 223

/*
 * if failed, all resources allocated above would be released by
 * drm_mode_config_cleanup() when drm_load() had been called prior
 * to any specific driver such as fimd or hdmi driver.
 */
out:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/* fb helper callbacks: only fb_probe is needed to create the fbdev fb */
static struct drm_fb_helper_funcs exynos_drm_fb_helper_funcs = {
	.fb_probe =	exynos_drm_fbdev_create,
};

int exynos_drm_fbdev_init(struct drm_device *dev)
{
	struct exynos_drm_fbdev *fbdev;
	struct exynos_drm_private *private = dev->dev_private;
	struct drm_fb_helper *helper;
	unsigned int num_crtc;
	int ret;

	if (!dev->mode_config.num_crtc || !dev->mode_config.num_connector)
		return 0;

	fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
	if (!fbdev) {
		DRM_ERROR("failed to allocate drm fbdev.\n");
		return -ENOMEM;
	}

	private->fb_helper = helper = &fbdev->drm_fb_helper;
	helper->funcs = &exynos_drm_fb_helper_funcs;

	num_crtc = dev->mode_config.num_crtc;

	ret = drm_fb_helper_init(dev, helper, num_crtc, MAX_CONNECTOR);
	if (ret < 0) {
		DRM_ERROR("failed to initialize drm fb helper.\n");
		goto err_init;
	}

	ret = drm_fb_helper_single_add_all_connectors(helper);
	if (ret < 0) {
		DRM_ERROR("failed to register drm_fb_helper_connector.\n");
		goto err_setup;

	}

262 263 264
	/* disable all the possible outputs/crtcs before entering KMS mode */
	drm_helper_disable_unused_functions(dev);

265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285
	ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP);
	if (ret < 0) {
		DRM_ERROR("failed to set up hw configuration.\n");
		goto err_setup;
	}

	return 0;

err_setup:
	drm_fb_helper_fini(helper);

err_init:
	private->fb_helper = NULL;
	kfree(fbdev);

	return ret;
}

static void exynos_drm_fbdev_destroy(struct drm_device *dev,
				      struct drm_fb_helper *fb_helper)
{
286 287
	struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(fb_helper);
	struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
288 289
	struct drm_framebuffer *fb;

290
	if (is_drm_iommu_supported(dev) && exynos_gem_obj->buffer->kvaddr)
291 292
		vunmap(exynos_gem_obj->buffer->kvaddr);

293 294 295
	/* release drm framebuffer and real buffer */
	if (fb_helper->fb && fb_helper->fb->funcs) {
		fb = fb_helper->fb;
296 297
		if (fb) {
			drm_framebuffer_unregister_private(fb);
R
Rob Clark 已提交
298
			drm_framebuffer_remove(fb);
299
		}
300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330
	}

	/* release linux framebuffer */
	if (fb_helper->fbdev) {
		struct fb_info *info;
		int ret;

		info = fb_helper->fbdev;
		ret = unregister_framebuffer(info);
		if (ret < 0)
			DRM_DEBUG_KMS("failed unregister_framebuffer()\n");

		if (info->cmap.len)
			fb_dealloc_cmap(&info->cmap);

		framebuffer_release(info);
	}

	drm_fb_helper_fini(fb_helper);
}

/*
 * Tear down the fbdev emulation state created by exynos_drm_fbdev_init().
 * Safe to call when init never ran (returns early if no helper exists).
 */
void exynos_drm_fbdev_fini(struct drm_device *dev)
{
	struct exynos_drm_private *private = dev->dev_private;
	struct exynos_drm_fbdev *fbdev;

	if (!private || !private->fb_helper)
		return;

	fbdev = to_exynos_fbdev(private->fb_helper);

	/*
	 * NOTE(review): the gem object is destroyed here, yet
	 * exynos_drm_fbdev_destroy() below still reads
	 * fbdev->exynos_gem_obj->buffer — looks like a use-after-free;
	 * confirm whether destroy should run before the gem teardown.
	 */
	if (fbdev->exynos_gem_obj)
		exynos_drm_gem_destroy(fbdev->exynos_gem_obj);

	exynos_drm_fbdev_destroy(dev, private->fb_helper);
	kfree(fbdev);
	private->fb_helper = NULL;
}

/*
 * Restore the fbdev display configuration (used e.g. on lastclose so
 * the console comes back after a DRM master exits). No-op when fbdev
 * emulation was never initialized.
 */
void exynos_drm_fbdev_restore_mode(struct drm_device *dev)
{
	struct exynos_drm_private *priv = dev->dev_private;

	if (!priv || !priv->fb_helper)
		return;

	/* all modeset locks must be held across the restore */
	drm_modeset_lock_all(dev);
	drm_fb_helper_restore_fbdev_mode(priv->fb_helper);
	drm_modeset_unlock_all(dev);
}