/* exynos_drm_fbdev.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Authors:
 *	Inki Dae <inki.dae@samsung.com>
 *	Joonyoung Shim <jy0922.shim@samsung.com>
 *	Seung-Woo Kim <sw0312.kim@samsung.com>
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 */

#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>

#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_fbdev.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_iommu.h"

#define MAX_CONNECTOR		4
#define PREFERRED_BPP		32

#define to_exynos_fbdev(x)	container_of(x, struct exynos_drm_fbdev,\
				drm_fb_helper)

struct exynos_drm_fbdev {
34 35
	struct drm_fb_helper		drm_fb_helper;
	struct exynos_drm_gem_obj	*exynos_gem_obj;
36 37
};

static int exynos_drm_fb_mmap(struct fb_info *info,
			struct vm_area_struct *vma)
{
	struct drm_fb_helper *helper = info->par;
	struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(helper);
	struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
	struct exynos_drm_gem_buf *buffer = exynos_gem_obj->buffer;
	unsigned long vm_size;
	int ret;

	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;

	vm_size = vma->vm_end - vma->vm_start;

	if (vm_size > buffer->size)
		return -EINVAL;

55
	ret = dma_mmap_attrs(helper->dev->dev, vma, buffer->pages,
56 57 58 59 60 61 62 63 64
		buffer->dma_addr, buffer->size, &buffer->dma_attrs);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	return 0;
}

static struct fb_ops exynos_drm_fb_ops = {
	.owner		= THIS_MODULE,
67
	.fb_mmap        = exynos_drm_fb_mmap,
68 69 70 71
	.fb_fillrect	= cfb_fillrect,
	.fb_copyarea	= cfb_copyarea,
	.fb_imageblit	= cfb_imageblit,
	.fb_check_var	= drm_fb_helper_check_var,
72
	.fb_set_par	= drm_fb_helper_set_par,
73 74 75 76 77
	.fb_blank	= drm_fb_helper_blank,
	.fb_pan_display	= drm_fb_helper_pan_display,
	.fb_setcmap	= drm_fb_helper_setcmap,
};

static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
79
				     struct drm_framebuffer *fb)
80 81 82
{
	struct fb_info *fbi = helper->fbdev;
	struct drm_device *dev = helper->dev;
I
Inki Dae 已提交
83
	struct exynos_drm_gem_buf *buffer;
84
	unsigned int size = fb->width * fb->height * (fb->bits_per_pixel >> 3);
85
	unsigned long offset;
86

87
	drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
88
	drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height);
89

S
Seung-Woo Kim 已提交
90 91
	/* RGB formats use only one buffer */
	buffer = exynos_drm_fb_buffer(fb, 0);
I
Inki Dae 已提交
92
	if (!buffer) {
93
		DRM_DEBUG_KMS("buffer is null.\n");
94 95
		return -EFAULT;
	}
96

97 98
	/* map pages with kernel virtual space. */
	if (!buffer->kvaddr) {
99 100 101
		if (is_drm_iommu_supported(dev)) {
			unsigned int nr_pages = buffer->size >> PAGE_SHIFT;

102 103
			buffer->kvaddr = (void __iomem *) vmap(buffer->pages,
					nr_pages, VM_MAP,
104
					pgprot_writecombine(PAGE_KERNEL));
105 106 107
		} else {
			phys_addr_t dma_addr = buffer->dma_addr;
			if (dma_addr)
108
				buffer->kvaddr = (void __iomem *)phys_to_virt(dma_addr);
109 110 111
			else
				buffer->kvaddr = (void __iomem *)NULL;
		}
112 113 114 115 116 117
		if (!buffer->kvaddr) {
			DRM_ERROR("failed to map pages to kernel space.\n");
			return -EIO;
		}
	}

118 119 120
	/* buffer count to framebuffer always is 1 at booting time. */
	exynos_drm_fb_set_buf_cnt(fb, 1);

121
	offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3);
122
	offset += fbi->var.yoffset * fb->pitches[0];
123

I
Inki Dae 已提交
124 125
	dev->mode_config.fb_base = (resource_size_t)buffer->dma_addr;
	fbi->screen_base = buffer->kvaddr + offset;
126 127
	if (is_drm_iommu_supported(dev))
		fbi->fix.smem_start = (unsigned long)
128
			(page_to_phys(sg_page(buffer->sgt->sgl)) + offset);
129 130 131
	else
		fbi->fix.smem_start = (unsigned long)buffer->dma_addr;

132 133
	fbi->screen_size = size;
	fbi->fix.smem_len = size;
134 135

	return 0;
136 137 138 139 140 141
}

static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
				    struct drm_fb_helper_surface_size *sizes)
{
	struct exynos_drm_fbdev *exynos_fbdev = to_exynos_fbdev(helper);
142
	struct exynos_drm_gem_obj *exynos_gem_obj;
143 144
	struct drm_device *dev = helper->dev;
	struct fb_info *fbi;
145
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
146
	struct platform_device *pdev = dev->platformdev;
147
	unsigned long size;
148 149 150 151 152 153 154 155
	int ret;

	DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d\n",
			sizes->surface_width, sizes->surface_height,
			sizes->surface_bpp);

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;
156 157 158
	mode_cmd.pitches[0] = sizes->surface_width * (sizes->surface_bpp >> 3);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
							  sizes->surface_depth);
159 160 161 162 163 164 165 166 167 168

	mutex_lock(&dev->struct_mutex);

	fbi = framebuffer_alloc(0, &pdev->dev);
	if (!fbi) {
		DRM_ERROR("failed to allocate fb info.\n");
		ret = -ENOMEM;
		goto out;
	}

169
	size = mode_cmd.pitches[0] * mode_cmd.height;
170

171 172 173 174 175 176 177 178 179 180 181 182
	exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG, size);
	/*
	 * If physically contiguous memory allocation fails and if IOMMU is
	 * supported then try to get buffer from non physically contiguous
	 * memory area.
	 */
	if (IS_ERR(exynos_gem_obj) && is_drm_iommu_supported(dev)) {
		dev_warn(&pdev->dev, "contiguous FB allocation failed, falling back to non-contiguous\n");
		exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_NONCONTIG,
							size);
	}

183 184
	if (IS_ERR(exynos_gem_obj)) {
		ret = PTR_ERR(exynos_gem_obj);
185
		goto err_release_framebuffer;
186 187 188 189 190 191
	}

	exynos_fbdev->exynos_gem_obj = exynos_gem_obj;

	helper->fb = exynos_drm_framebuffer_init(dev, &mode_cmd,
			&exynos_gem_obj->base);
192
	if (IS_ERR(helper->fb)) {
193
		DRM_ERROR("failed to create drm framebuffer.\n");
194
		ret = PTR_ERR(helper->fb);
195
		goto err_destroy_gem;
196 197 198 199 200 201 202 203 204 205 206
	}

	helper->fbdev = fbi;

	fbi->par = helper;
	fbi->flags = FBINFO_FLAG_DEFAULT;
	fbi->fbops = &exynos_drm_fb_ops;

	ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
	if (ret) {
		DRM_ERROR("failed to allocate cmap.\n");
207
		goto err_destroy_framebuffer;
208 209
	}

210
	ret = exynos_drm_fbdev_update(helper, helper->fb);
211 212 213 214 215 216 217 218 219 220 221 222 223 224
	if (ret < 0)
		goto err_dealloc_cmap;

	mutex_unlock(&dev->struct_mutex);
	return ret;

err_dealloc_cmap:
	fb_dealloc_cmap(&fbi->cmap);
err_destroy_framebuffer:
	drm_framebuffer_cleanup(helper->fb);
err_destroy_gem:
	exynos_drm_gem_destroy(exynos_gem_obj);
err_release_framebuffer:
	framebuffer_release(fbi);
225 226 227 228 229 230 231 232 233 234 235 236

/*
 * if failed, all resources allocated above would be released by
 * drm_mode_config_cleanup() when drm_load() had been called prior
 * to any specific driver such as fimd or hdmi driver.
 */
out:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

static struct drm_fb_helper_funcs exynos_drm_fb_helper_funcs = {
237
	.fb_probe =	exynos_drm_fbdev_create,
238 239 240 241 242 243 244 245 246 247 248 249 250 251
};

int exynos_drm_fbdev_init(struct drm_device *dev)
{
	struct exynos_drm_fbdev *fbdev;
	struct exynos_drm_private *private = dev->dev_private;
	struct drm_fb_helper *helper;
	unsigned int num_crtc;
	int ret;

	if (!dev->mode_config.num_crtc || !dev->mode_config.num_connector)
		return 0;

	fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
252
	if (!fbdev)
253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272
		return -ENOMEM;

	private->fb_helper = helper = &fbdev->drm_fb_helper;
	helper->funcs = &exynos_drm_fb_helper_funcs;

	num_crtc = dev->mode_config.num_crtc;

	ret = drm_fb_helper_init(dev, helper, num_crtc, MAX_CONNECTOR);
	if (ret < 0) {
		DRM_ERROR("failed to initialize drm fb helper.\n");
		goto err_init;
	}

	ret = drm_fb_helper_single_add_all_connectors(helper);
	if (ret < 0) {
		DRM_ERROR("failed to register drm_fb_helper_connector.\n");
		goto err_setup;

	}

273 274 275
	/* disable all the possible outputs/crtcs before entering KMS mode */
	drm_helper_disable_unused_functions(dev);

276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296
	ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP);
	if (ret < 0) {
		DRM_ERROR("failed to set up hw configuration.\n");
		goto err_setup;
	}

	return 0;

err_setup:
	drm_fb_helper_fini(helper);

err_init:
	private->fb_helper = NULL;
	kfree(fbdev);

	return ret;
}

static void exynos_drm_fbdev_destroy(struct drm_device *dev,
				      struct drm_fb_helper *fb_helper)
{
297 298
	struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(fb_helper);
	struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
299 300
	struct drm_framebuffer *fb;

301
	if (is_drm_iommu_supported(dev) && exynos_gem_obj->buffer->kvaddr)
302 303
		vunmap(exynos_gem_obj->buffer->kvaddr);

304 305 306
	/* release drm framebuffer and real buffer */
	if (fb_helper->fb && fb_helper->fb->funcs) {
		fb = fb_helper->fb;
307 308
		if (fb) {
			drm_framebuffer_unregister_private(fb);
R
Rob Clark 已提交
309
			drm_framebuffer_remove(fb);
310
		}
311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341
	}

	/* release linux framebuffer */
	if (fb_helper->fbdev) {
		struct fb_info *info;
		int ret;

		info = fb_helper->fbdev;
		ret = unregister_framebuffer(info);
		if (ret < 0)
			DRM_DEBUG_KMS("failed unregister_framebuffer()\n");

		if (info->cmap.len)
			fb_dealloc_cmap(&info->cmap);

		framebuffer_release(info);
	}

	drm_fb_helper_fini(fb_helper);
}

void exynos_drm_fbdev_fini(struct drm_device *dev)
{
	struct exynos_drm_private *private = dev->dev_private;
	struct exynos_drm_fbdev *fbdev;

	if (!private || !private->fb_helper)
		return;

	fbdev = to_exynos_fbdev(private->fb_helper);

342 343 344
	if (fbdev->exynos_gem_obj)
		exynos_drm_gem_destroy(fbdev->exynos_gem_obj);

345 346 347 348 349 350 351 352 353 354 355 356
	exynos_drm_fbdev_destroy(dev, private->fb_helper);
	kfree(fbdev);
	private->fb_helper = NULL;
}

void exynos_drm_fbdev_restore_mode(struct drm_device *dev)
{
	struct exynos_drm_private *private = dev->dev_private;

	if (!private || !private->fb_helper)
		return;

357
	drm_modeset_lock_all(dev);
358
	drm_fb_helper_restore_fbdev_mode(private->fb_helper);
359
	drm_modeset_unlock_all(dev);
360
}