/* exynos_drm_fbdev.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Authors:
 *	Inki Dae <inki.dae@samsung.com>
 *	Joonyoung Shim <jy0922.shim@samsung.com>
 *	Seung-Woo Kim <sw0312.kim@samsung.com>
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 */

#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_fbdev.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_iommu.h"

#define MAX_CONNECTOR		4
#define PREFERRED_BPP		32

#define to_exynos_fbdev(x)	container_of(x, struct exynos_drm_fbdev,\
				drm_fb_helper)

struct exynos_drm_fbdev {
	struct drm_fb_helper		drm_fb_helper;
	struct exynos_drm_gem_obj	*exynos_gem_obj;
};

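/*
 * fb_mmap handler: map the fbdev GEM buffer into the calling process's
 * address space with dma_mmap_attrs(), after rejecting requests larger
 * than the backing buffer.
 */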
static int exynos_drm_fb_mmap(struct fb_info *info,
			struct vm_area_struct *vma)
{
	struct drm_fb_helper *helper = info->par;
	struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(helper);
	struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
	struct exynos_drm_gem_buf *buffer = exynos_gem_obj->buffer;
	unsigned long vm_size;
	int ret;

	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;

	vm_size = vma->vm_end - vma->vm_start;

	if (vm_size > buffer->size)
		return -EINVAL;

	ret = dma_mmap_attrs(helper->dev->dev, vma, buffer->pages,
		buffer->dma_addr, buffer->size, &buffer->dma_attrs);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	return 0;
}

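/* fbdev ops: only mmap is driver specific, the rest use cfb/drm_fb_helper helpers. */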
static struct fb_ops exynos_drm_fb_ops = {
	.owner		= THIS_MODULE,
	.fb_mmap        = exynos_drm_fb_mmap,
	.fb_fillrect	= cfb_fillrect,
	.fb_copyarea	= cfb_copyarea,
	.fb_imageblit	= cfb_imageblit,
	.fb_check_var	= drm_fb_helper_check_var,
	.fb_set_par	= drm_fb_helper_set_par,
	.fb_blank	= drm_fb_helper_blank,
	.fb_pan_display	= drm_fb_helper_pan_display,
	.fb_setcmap	= drm_fb_helper_setcmap,
};

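/*
 * Fill the fb_info for the new framebuffer and make sure the backing GEM
 * buffer has a kernel virtual mapping (vmap() when an IOMMU is used,
 * phys_to_virt() for contiguous memory) so screen_base can point at it.
 */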
static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
				     struct drm_framebuffer *fb)
{
	struct fb_info *fbi = helper->fbdev;
	struct drm_device *dev = helper->dev;
	struct exynos_drm_gem_buf *buffer;
	unsigned int size = fb->width * fb->height * (fb->bits_per_pixel >> 3);
	unsigned long offset;

	drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
	drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height);

	/* RGB formats use only one buffer */
	buffer = exynos_drm_fb_buffer(fb, 0);
	if (!buffer) {
		DRM_DEBUG_KMS("buffer is null.\n");
		return -EFAULT;
	}

	/* map the pages into kernel virtual address space. */
	if (!buffer->kvaddr) {
		if (is_drm_iommu_supported(dev)) {
			unsigned int nr_pages = buffer->size >> PAGE_SHIFT;

			buffer->kvaddr = (void __iomem *) vmap(buffer->pages,
					nr_pages, VM_MAP,
					pgprot_writecombine(PAGE_KERNEL));
		} else {
			phys_addr_t dma_addr = buffer->dma_addr;
			if (dma_addr)
				buffer->kvaddr = (void __iomem *)phys_to_virt(dma_addr);
			else
				buffer->kvaddr = (void __iomem *)NULL;
		}
		if (!buffer->kvaddr) {
			DRM_ERROR("failed to map pages to kernel space.\n");
			return -EIO;
		}
	}

	/* the buffer count of the framebuffer is always 1 at boot time. */
	exynos_drm_fb_set_buf_cnt(fb, 1);

	offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3);
	offset += fbi->var.yoffset * fb->pitches[0];

	fbi->screen_base = buffer->kvaddr + offset;
	fbi->screen_size = size;

	return 0;
}

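/*
 * fb_probe callback: allocate a GEM buffer (contiguous if possible, falling
 * back to non-contiguous memory when an IOMMU is available), wrap it in a
 * DRM framebuffer and set up the fbdev fb_info on top of it.
 */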
static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
				    struct drm_fb_helper_surface_size *sizes)
{
	struct exynos_drm_fbdev *exynos_fbdev = to_exynos_fbdev(helper);
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_device *dev = helper->dev;
	struct fb_info *fbi;
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
	struct platform_device *pdev = dev->platformdev;
	unsigned long size;
	int ret;

	DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d)\n",
			sizes->surface_width, sizes->surface_height,
			sizes->surface_bpp);

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;
	mode_cmd.pitches[0] = sizes->surface_width * (sizes->surface_bpp >> 3);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
							  sizes->surface_depth);

	mutex_lock(&dev->struct_mutex);

	fbi = framebuffer_alloc(0, &pdev->dev);
	if (!fbi) {
		DRM_ERROR("failed to allocate fb info.\n");
		ret = -ENOMEM;
		goto out;
	}

	size = mode_cmd.pitches[0] * mode_cmd.height;

	exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG, size);
	/*
	 * If physically contiguous memory allocation fails and if IOMMU is
	 * supported then try to get buffer from non physically contiguous
	 * memory area.
	 */
	if (IS_ERR(exynos_gem_obj) && is_drm_iommu_supported(dev)) {
		dev_warn(&pdev->dev, "contiguous FB allocation failed, falling back to non-contiguous\n");
		exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_NONCONTIG,
							size);
	}

	if (IS_ERR(exynos_gem_obj)) {
		ret = PTR_ERR(exynos_gem_obj);
		goto err_release_framebuffer;
	}

	exynos_fbdev->exynos_gem_obj = exynos_gem_obj;

	helper->fb = exynos_drm_framebuffer_init(dev, &mode_cmd,
			&exynos_gem_obj->base);
	if (IS_ERR(helper->fb)) {
		DRM_ERROR("failed to create drm framebuffer.\n");
		ret = PTR_ERR(helper->fb);
		goto err_destroy_gem;
	}

	helper->fbdev = fbi;

	fbi->par = helper;
	fbi->flags = FBINFO_FLAG_DEFAULT;
	fbi->fbops = &exynos_drm_fb_ops;

	ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
	if (ret) {
		DRM_ERROR("failed to allocate cmap.\n");
		goto err_destroy_framebuffer;
	}

	ret = exynos_drm_fbdev_update(helper, helper->fb);
	if (ret < 0)
		goto err_dealloc_cmap;

	mutex_unlock(&dev->struct_mutex);
	return ret;

err_dealloc_cmap:
	fb_dealloc_cmap(&fbi->cmap);
err_destroy_framebuffer:
	drm_framebuffer_cleanup(helper->fb);
err_destroy_gem:
	exynos_drm_gem_destroy(exynos_gem_obj);
err_release_framebuffer:
	framebuffer_release(fbi);

/*
 * If this fails, all resources allocated above are released by
 * drm_mode_config_cleanup(), since drm_load() has been called prior
 * to any specific driver such as the fimd or hdmi driver.
 */
out:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

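/* fb helper callbacks: .fb_probe creates the initial fbdev framebuffer. */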
static struct drm_fb_helper_funcs exynos_drm_fb_helper_funcs = {
	.fb_probe =	exynos_drm_fbdev_create,
};

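/* Return true if at least one connector reports a connected status. */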
bool exynos_drm_fbdev_is_anything_connected(struct drm_device *dev)
{
	struct drm_connector *connector;
	bool ret = false;

	mutex_lock(&dev->mode_config.mutex);
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		if (connector->status != connector_status_connected)
			continue;

		ret = true;
		break;
	}
	mutex_unlock(&dev->mode_config.mutex);

	return ret;
}

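/*
 * Set up fbdev emulation: allocate the fb helper, add all connectors,
 * disable unused outputs and let the helper pick an initial configuration.
 */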
int exynos_drm_fbdev_init(struct drm_device *dev)
{
	struct exynos_drm_fbdev *fbdev;
	struct exynos_drm_private *private = dev->dev_private;
	struct drm_fb_helper *helper;
	unsigned int num_crtc;
	int ret;

	if (!dev->mode_config.num_crtc || !dev->mode_config.num_connector)
		return 0;

	if (!exynos_drm_fbdev_is_anything_connected(dev))
		return 0;

	fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
	if (!fbdev)
		return -ENOMEM;

	private->fb_helper = helper = &fbdev->drm_fb_helper;
	helper->funcs = &exynos_drm_fb_helper_funcs;

	num_crtc = dev->mode_config.num_crtc;

	ret = drm_fb_helper_init(dev, helper, num_crtc, MAX_CONNECTOR);
	if (ret < 0) {
		DRM_ERROR("failed to initialize drm fb helper.\n");
		goto err_init;
	}

	ret = drm_fb_helper_single_add_all_connectors(helper);
	if (ret < 0) {
		DRM_ERROR("failed to register drm_fb_helper_connector.\n");
		goto err_setup;

	}

	/* disable all the possible outputs/crtcs before entering KMS mode */
	drm_helper_disable_unused_functions(dev);

	ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP);
	if (ret < 0) {
		DRM_ERROR("failed to set up hw configuration.\n");
		goto err_setup;
	}

	return 0;

err_setup:
	drm_fb_helper_fini(helper);

err_init:
	private->fb_helper = NULL;
	kfree(fbdev);

	return ret;
}

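/*
 * Release the fbdev resources: the kernel mapping of the GEM buffer, the
 * DRM framebuffer and the registered fb_info.
 */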
static void exynos_drm_fbdev_destroy(struct drm_device *dev,
				      struct drm_fb_helper *fb_helper)
{
	struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(fb_helper);
	struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
	struct drm_framebuffer *fb;

	if (is_drm_iommu_supported(dev) && exynos_gem_obj->buffer->kvaddr)
		vunmap(exynos_gem_obj->buffer->kvaddr);

	/* release drm framebuffer and real buffer */
	if (fb_helper->fb && fb_helper->fb->funcs) {
		fb = fb_helper->fb;
		if (fb) {
			drm_framebuffer_unregister_private(fb);
			drm_framebuffer_remove(fb);
		}
	}

	/* release linux framebuffer */
	if (fb_helper->fbdev) {
		struct fb_info *info;
		int ret;

		info = fb_helper->fbdev;
		ret = unregister_framebuffer(info);
		if (ret < 0)
			DRM_DEBUG_KMS("failed unregister_framebuffer()\n");

		if (info->cmap.len)
			fb_dealloc_cmap(&info->cmap);

		framebuffer_release(info);
	}

	drm_fb_helper_fini(fb_helper);
}

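/* Tear down the fbdev emulation and destroy the GEM object backing it. */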
void exynos_drm_fbdev_fini(struct drm_device *dev)
{
	struct exynos_drm_private *private = dev->dev_private;
	struct exynos_drm_fbdev *fbdev;

	if (!private || !private->fb_helper)
		return;

	fbdev = to_exynos_fbdev(private->fb_helper);

	if (fbdev->exynos_gem_obj)
		exynos_drm_gem_destroy(fbdev->exynos_gem_obj);

	exynos_drm_fbdev_destroy(dev, private->fb_helper);
	kfree(fbdev);
	private->fb_helper = NULL;
}

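/* Restore the fbdev display configuration while holding the modeset locks. */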
void exynos_drm_fbdev_restore_mode(struct drm_device *dev)
{
	struct exynos_drm_private *private = dev->dev_private;

	if (!private || !private->fb_helper)
		return;

	drm_modeset_lock_all(dev);
	drm_fb_helper_restore_fbdev_mode(private->fb_helper);
	drm_modeset_unlock_all(dev);
}