/* exynos_drm_fbdev.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Authors:
 *	Inki Dae <inki.dae@samsung.com>
 *	Joonyoung Shim <jy0922.shim@samsung.com>
 *	Seung-Woo Kim <sw0312.kim@samsung.com>
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 */

15 16 17 18
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
19
#include <drm/exynos_drm.h>
20 21 22

#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
I
Inki Dae 已提交
23
#include "exynos_drm_gem.h"
24
#include "exynos_drm_iommu.h"
/* Upper bound on connectors handed to drm_fb_helper_init(). */
#define MAX_CONNECTOR		4
/* Default bits-per-pixel requested for the emulated framebuffer. */
#define PREFERRED_BPP		32

/* Recover the exynos fbdev wrapper from its embedded drm_fb_helper. */
#define to_exynos_fbdev(x)	container_of(x, struct exynos_drm_fbdev,\
				drm_fb_helper)

/*
 * Per-device fbdev emulation state: the generic DRM fb helper plus the
 * GEM object that backs the emulated framebuffer's pixel storage.
 */
struct exynos_drm_fbdev {
	struct drm_fb_helper		drm_fb_helper;
	struct exynos_drm_gem_obj	*exynos_gem_obj;
};

37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53
static int exynos_drm_fb_mmap(struct fb_info *info,
			struct vm_area_struct *vma)
{
	struct drm_fb_helper *helper = info->par;
	struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(helper);
	struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
	struct exynos_drm_gem_buf *buffer = exynos_gem_obj->buffer;
	unsigned long vm_size;
	int ret;

	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;

	vm_size = vma->vm_end - vma->vm_start;

	if (vm_size > buffer->size)
		return -EINVAL;

54
	ret = dma_mmap_attrs(helper->dev->dev, vma, buffer->pages,
55 56 57 58 59 60 61 62 63
		buffer->dma_addr, buffer->size, &buffer->dma_attrs);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	return 0;
}

/*
 * fbdev file operations: custom mmap for the GEM-backed buffer, generic
 * cfb_* drawing ops, and the stock drm_fb_helper implementations for
 * mode/var handling.
 */
static struct fb_ops exynos_drm_fb_ops = {
	.owner		= THIS_MODULE,
	.fb_mmap        = exynos_drm_fb_mmap,
	.fb_fillrect	= cfb_fillrect,
	.fb_copyarea	= cfb_copyarea,
	.fb_imageblit	= cfb_imageblit,
	.fb_check_var	= drm_fb_helper_check_var,
	.fb_set_par	= drm_fb_helper_set_par,
	.fb_blank	= drm_fb_helper_blank,
	.fb_pan_display	= drm_fb_helper_pan_display,
	.fb_setcmap	= drm_fb_helper_setcmap,
};

/*
 * Bind an already-created drm_framebuffer to the fb_info: fill the fbdev
 * fix/var screeninfo, ensure the backing GEM buffer has a kernel virtual
 * mapping, and point screen_base/smem_start at the pixel storage.
 *
 * Returns 0 on success, -EFAULT if the fb has no backing buffer, or -EIO
 * if the buffer cannot be mapped into kernel space.
 */
static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
				     struct drm_framebuffer *fb)
{
	struct fb_info *fbi = helper->fbdev;
	struct drm_device *dev = helper->dev;
	struct exynos_drm_gem_buf *buffer;
	unsigned int size = fb->width * fb->height * (fb->bits_per_pixel >> 3);
	unsigned long offset;

	drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
	drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height);

	/* RGB formats use only one buffer */
	buffer = exynos_drm_fb_buffer(fb, 0);
	if (!buffer) {
		DRM_LOG_KMS("buffer is null.\n");
		return -EFAULT;
	}

	/* map pages with kernel virtual space. */
	if (!buffer->kvaddr) {
		if (is_drm_iommu_supported(dev)) {
			/* non-contiguous pages: build a vmap'd linear view */
			unsigned int nr_pages = buffer->size >> PAGE_SHIFT;

			buffer->kvaddr = vmap(buffer->pages, nr_pages, VM_MAP,
					pgprot_writecombine(PAGE_KERNEL));
		} else {
			/*
			 * NOTE(review): treats dma_addr as a CPU physical
			 * address for phys_to_virt() — only valid when no
			 * IOMMU sits between the device and memory; confirm
			 * this holds on every supported platform.
			 */
			phys_addr_t dma_addr = buffer->dma_addr;
			if (dma_addr)
				buffer->kvaddr = phys_to_virt(dma_addr);
			else
				buffer->kvaddr = (void __iomem *)NULL;
		}
		if (!buffer->kvaddr) {
			DRM_ERROR("failed to map pages to kernel space.\n");
			return -EIO;
		}
	}

	/* buffer count to framebuffer always is 1 at booting time. */
	exynos_drm_fb_set_buf_cnt(fb, 1);

	/* byte offset of the (xoffset, yoffset) origin inside the buffer */
	offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3);
	offset += fbi->var.yoffset * fb->pitches[0];

	dev->mode_config.fb_base = (resource_size_t)buffer->dma_addr;
	fbi->screen_base = buffer->kvaddr + offset;
	/*
	 * smem_start wants a physical address; with an IOMMU the dma_addr is
	 * a device virtual address, so derive it from the first sg page.
	 */
	if (is_drm_iommu_supported(dev))
		fbi->fix.smem_start = (unsigned long)
			(page_to_phys(sg_page(buffer->sgt->sgl)) + offset);
	else
		fbi->fix.smem_start = (unsigned long)buffer->dma_addr;

	fbi->screen_size = size;
	fbi->fix.smem_len = size;

	return 0;
}

static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
				    struct drm_fb_helper_surface_size *sizes)
{
	struct exynos_drm_fbdev *exynos_fbdev = to_exynos_fbdev(helper);
140
	struct exynos_drm_gem_obj *exynos_gem_obj;
141 142
	struct drm_device *dev = helper->dev;
	struct fb_info *fbi;
143
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
144
	struct platform_device *pdev = dev->platformdev;
145
	unsigned long size;
146 147 148 149 150 151 152 153
	int ret;

	DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d\n",
			sizes->surface_width, sizes->surface_height,
			sizes->surface_bpp);

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;
154 155 156
	mode_cmd.pitches[0] = sizes->surface_width * (sizes->surface_bpp >> 3);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
							  sizes->surface_depth);
157 158 159 160 161 162 163 164 165 166

	mutex_lock(&dev->struct_mutex);

	fbi = framebuffer_alloc(0, &pdev->dev);
	if (!fbi) {
		DRM_ERROR("failed to allocate fb info.\n");
		ret = -ENOMEM;
		goto out;
	}

167
	size = mode_cmd.pitches[0] * mode_cmd.height;
168

169 170 171 172 173 174 175 176 177 178 179 180
	exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG, size);
	/*
	 * If physically contiguous memory allocation fails and if IOMMU is
	 * supported then try to get buffer from non physically contiguous
	 * memory area.
	 */
	if (IS_ERR(exynos_gem_obj) && is_drm_iommu_supported(dev)) {
		dev_warn(&pdev->dev, "contiguous FB allocation failed, falling back to non-contiguous\n");
		exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_NONCONTIG,
							size);
	}

181 182
	if (IS_ERR(exynos_gem_obj)) {
		ret = PTR_ERR(exynos_gem_obj);
183
		goto err_release_framebuffer;
184 185 186 187 188 189
	}

	exynos_fbdev->exynos_gem_obj = exynos_gem_obj;

	helper->fb = exynos_drm_framebuffer_init(dev, &mode_cmd,
			&exynos_gem_obj->base);
190
	if (IS_ERR(helper->fb)) {
191
		DRM_ERROR("failed to create drm framebuffer.\n");
192
		ret = PTR_ERR(helper->fb);
193
		goto err_destroy_gem;
194 195 196 197 198 199 200 201 202 203 204
	}

	helper->fbdev = fbi;

	fbi->par = helper;
	fbi->flags = FBINFO_FLAG_DEFAULT;
	fbi->fbops = &exynos_drm_fb_ops;

	ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
	if (ret) {
		DRM_ERROR("failed to allocate cmap.\n");
205
		goto err_destroy_framebuffer;
206 207
	}

208
	ret = exynos_drm_fbdev_update(helper, helper->fb);
209 210 211 212 213 214 215 216 217 218 219 220 221 222
	if (ret < 0)
		goto err_dealloc_cmap;

	mutex_unlock(&dev->struct_mutex);
	return ret;

err_dealloc_cmap:
	fb_dealloc_cmap(&fbi->cmap);
err_destroy_framebuffer:
	drm_framebuffer_cleanup(helper->fb);
err_destroy_gem:
	exynos_drm_gem_destroy(exynos_gem_obj);
err_release_framebuffer:
	framebuffer_release(fbi);
223 224 225 226 227 228 229 230 231 232 233 234

/*
 * if failed, all resources allocated above would be released by
 * drm_mode_config_cleanup() when drm_load() had been called prior
 * to any specific driver such as fimd or hdmi driver.
 */
out:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/* Only .fb_probe is needed; the fb helper core handles the rest. */
static struct drm_fb_helper_funcs exynos_drm_fb_helper_funcs = {
	.fb_probe =	exynos_drm_fbdev_create,
};

int exynos_drm_fbdev_init(struct drm_device *dev)
{
	struct exynos_drm_fbdev *fbdev;
	struct exynos_drm_private *private = dev->dev_private;
	struct drm_fb_helper *helper;
	unsigned int num_crtc;
	int ret;

	if (!dev->mode_config.num_crtc || !dev->mode_config.num_connector)
		return 0;

	fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
	if (!fbdev) {
		DRM_ERROR("failed to allocate drm fbdev.\n");
		return -ENOMEM;
	}

	private->fb_helper = helper = &fbdev->drm_fb_helper;
	helper->funcs = &exynos_drm_fb_helper_funcs;

	num_crtc = dev->mode_config.num_crtc;

	ret = drm_fb_helper_init(dev, helper, num_crtc, MAX_CONNECTOR);
	if (ret < 0) {
		DRM_ERROR("failed to initialize drm fb helper.\n");
		goto err_init;
	}

	ret = drm_fb_helper_single_add_all_connectors(helper);
	if (ret < 0) {
		DRM_ERROR("failed to register drm_fb_helper_connector.\n");
		goto err_setup;

	}

273 274 275
	/* disable all the possible outputs/crtcs before entering KMS mode */
	drm_helper_disable_unused_functions(dev);

276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296
	ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP);
	if (ret < 0) {
		DRM_ERROR("failed to set up hw configuration.\n");
		goto err_setup;
	}

	return 0;

err_setup:
	drm_fb_helper_fini(helper);

err_init:
	private->fb_helper = NULL;
	kfree(fbdev);

	return ret;
}

static void exynos_drm_fbdev_destroy(struct drm_device *dev,
				      struct drm_fb_helper *fb_helper)
{
297 298
	struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(fb_helper);
	struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
299 300
	struct drm_framebuffer *fb;

301
	if (is_drm_iommu_supported(dev) && exynos_gem_obj->buffer->kvaddr)
302 303
		vunmap(exynos_gem_obj->buffer->kvaddr);

304 305 306
	/* release drm framebuffer and real buffer */
	if (fb_helper->fb && fb_helper->fb->funcs) {
		fb = fb_helper->fb;
307 308
		if (fb) {
			drm_framebuffer_unregister_private(fb);
R
Rob Clark 已提交
309
			drm_framebuffer_remove(fb);
310
		}
311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341
	}

	/* release linux framebuffer */
	if (fb_helper->fbdev) {
		struct fb_info *info;
		int ret;

		info = fb_helper->fbdev;
		ret = unregister_framebuffer(info);
		if (ret < 0)
			DRM_DEBUG_KMS("failed unregister_framebuffer()\n");

		if (info->cmap.len)
			fb_dealloc_cmap(&info->cmap);

		framebuffer_release(info);
	}

	drm_fb_helper_fini(fb_helper);
}

void exynos_drm_fbdev_fini(struct drm_device *dev)
{
	struct exynos_drm_private *private = dev->dev_private;
	struct exynos_drm_fbdev *fbdev;

	if (!private || !private->fb_helper)
		return;

	fbdev = to_exynos_fbdev(private->fb_helper);

342 343 344
	if (fbdev->exynos_gem_obj)
		exynos_drm_gem_destroy(fbdev->exynos_gem_obj);

345 346 347 348 349 350 351 352 353 354 355 356
	exynos_drm_fbdev_destroy(dev, private->fb_helper);
	kfree(fbdev);
	private->fb_helper = NULL;
}

/*
 * Restore the fbdev mode configuration, e.g. on lastclose, under the
 * global modeset locks.  A no-op if fbdev emulation is not set up.
 */
void exynos_drm_fbdev_restore_mode(struct drm_device *dev)
{
	struct exynos_drm_private *priv = dev->dev_private;

	if (priv && priv->fb_helper) {
		drm_modeset_lock_all(dev);
		drm_fb_helper_restore_fbdev_mode(priv->fb_helper);
		drm_modeset_unlock_all(dev);
	}
}