/* exynos_drm_fbdev.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Authors:
 *	Inki Dae <inki.dae@samsung.com>
 *	Joonyoung Shim <jy0922.shim@samsung.com>
 *	Seung-Woo Kim <sw0312.kim@samsung.com>
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 */

#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>

#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_fbdev.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_iommu.h"

/* Maximum number of connectors handed to the fb helper at init time. */
#define MAX_CONNECTOR		4
/* Bits per pixel requested for the initial fbdev configuration. */
#define PREFERRED_BPP		32

/* Recover the driver's fbdev state from the embedded drm_fb_helper. */
#define to_exynos_fbdev(x)	container_of(x, struct exynos_drm_fbdev,\
				drm_fb_helper)

/*
 * Driver-private fbdev emulation state: the generic helper plus the
 * GEM object backing the fbdev framebuffer.
 */
struct exynos_drm_fbdev {
	struct drm_fb_helper		drm_fb_helper;	/* generic fb helper state */
	struct exynos_drm_gem_obj	*exynos_gem_obj;	/* backing scanout buffer */
};

38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54
static int exynos_drm_fb_mmap(struct fb_info *info,
			struct vm_area_struct *vma)
{
	struct drm_fb_helper *helper = info->par;
	struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(helper);
	struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
	struct exynos_drm_gem_buf *buffer = exynos_gem_obj->buffer;
	unsigned long vm_size;
	int ret;

	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;

	vm_size = vma->vm_end - vma->vm_start;

	if (vm_size > buffer->size)
		return -EINVAL;

55
	ret = dma_mmap_attrs(helper->dev->dev, vma, buffer->pages,
56 57 58 59 60 61 62 63 64
		buffer->dma_addr, buffer->size, &buffer->dma_attrs);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	return 0;
}

/* fbdev file operations; everything but mmap is the generic DRM helper. */
static struct fb_ops exynos_drm_fb_ops = {
	.owner		= THIS_MODULE,
	/* custom mmap exposing the GEM-backed scanout buffer */
	.fb_mmap        = exynos_drm_fb_mmap,
	/* software drawing on the CPU mapping of the framebuffer */
	.fb_fillrect	= drm_fb_helper_cfb_fillrect,
	.fb_copyarea	= drm_fb_helper_cfb_copyarea,
	.fb_imageblit	= drm_fb_helper_cfb_imageblit,
	.fb_check_var	= drm_fb_helper_check_var,
	.fb_set_par	= drm_fb_helper_set_par,
	.fb_blank	= drm_fb_helper_blank,
	.fb_pan_display	= drm_fb_helper_pan_display,
	.fb_setcmap	= drm_fb_helper_setcmap,
};

/*
 * Finish fb_info setup for a freshly created framebuffer: fill the
 * fix/var screen info, map the backing buffer pages into the kernel
 * and point the fbdev screen base at them.
 *
 * Returns 0 on success, -EFAULT if the framebuffer has no backing
 * buffer, or -EIO if vmap() fails.
 */
static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
				     struct drm_fb_helper_surface_size *sizes,
				     struct drm_framebuffer *fb)
{
	struct fb_info *fbi = helper->fbdev;
	struct exynos_drm_gem_buf *buffer;
	unsigned int size = fb->width * fb->height * (fb->bits_per_pixel >> 3);
	unsigned int nr_pages;
	unsigned long offset;

	drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
	drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);

	/* RGB formats use only one buffer */
	buffer = exynos_drm_fb_buffer(fb, 0);
	if (!buffer) {
		DRM_DEBUG_KMS("buffer is null.\n");
		return -EFAULT;
	}

	nr_pages = buffer->size >> PAGE_SHIFT;

	/* Map the buffer pages into the kernel with write-combining. */
	buffer->kvaddr = (void __iomem *) vmap(buffer->pages,
			nr_pages, VM_MAP,
			pgprot_writecombine(PAGE_KERNEL));
	if (!buffer->kvaddr) {
		DRM_ERROR("failed to map pages to kernel space.\n");
		return -EIO;
	}

	/* buffer count to framebuffer always is 1 at booting time. */
	exynos_drm_fb_set_buf_cnt(fb, 1);

	/* Honour any pan offsets already present in the var screeninfo. */
	offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3);
	offset += fbi->var.yoffset * fb->pitches[0];

	fbi->screen_base = buffer->kvaddr + offset;
	fbi->screen_size = size;
	fbi->fix.smem_len = size;

	return 0;
}

static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
				    struct drm_fb_helper_surface_size *sizes)
{
	struct exynos_drm_fbdev *exynos_fbdev = to_exynos_fbdev(helper);
125
	struct exynos_drm_gem_obj *exynos_gem_obj;
126 127
	struct drm_device *dev = helper->dev;
	struct fb_info *fbi;
128
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
129
	struct platform_device *pdev = dev->platformdev;
130
	unsigned long size;
131 132 133 134 135 136 137 138
	int ret;

	DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d\n",
			sizes->surface_width, sizes->surface_height,
			sizes->surface_bpp);

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;
139 140 141
	mode_cmd.pitches[0] = sizes->surface_width * (sizes->surface_bpp >> 3);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
							  sizes->surface_depth);
142 143 144

	mutex_lock(&dev->struct_mutex);

145 146
	fbi = drm_fb_helper_alloc_fbi(helper);
	if (IS_ERR(fbi)) {
147
		DRM_ERROR("failed to allocate fb info.\n");
148
		ret = PTR_ERR(fbi);
149 150 151
		goto out;
	}

152
	size = mode_cmd.pitches[0] * mode_cmd.height;
153

154 155 156 157 158 159 160 161 162 163 164 165
	exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG, size);
	/*
	 * If physically contiguous memory allocation fails and if IOMMU is
	 * supported then try to get buffer from non physically contiguous
	 * memory area.
	 */
	if (IS_ERR(exynos_gem_obj) && is_drm_iommu_supported(dev)) {
		dev_warn(&pdev->dev, "contiguous FB allocation failed, falling back to non-contiguous\n");
		exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_NONCONTIG,
							size);
	}

166 167
	if (IS_ERR(exynos_gem_obj)) {
		ret = PTR_ERR(exynos_gem_obj);
168
		goto err_release_fbi;
169 170 171 172 173 174
	}

	exynos_fbdev->exynos_gem_obj = exynos_gem_obj;

	helper->fb = exynos_drm_framebuffer_init(dev, &mode_cmd,
			&exynos_gem_obj->base);
175
	if (IS_ERR(helper->fb)) {
176
		DRM_ERROR("failed to create drm framebuffer.\n");
177
		ret = PTR_ERR(helper->fb);
178
		goto err_destroy_gem;
179 180 181 182 183 184
	}

	fbi->par = helper;
	fbi->flags = FBINFO_FLAG_DEFAULT;
	fbi->fbops = &exynos_drm_fb_ops;

185
	ret = exynos_drm_fbdev_update(helper, sizes, helper->fb);
186
	if (ret < 0)
187
		goto err_destroy_framebuffer;
188 189 190 191 192 193 194 195

	mutex_unlock(&dev->struct_mutex);
	return ret;

err_destroy_framebuffer:
	drm_framebuffer_cleanup(helper->fb);
err_destroy_gem:
	exynos_drm_gem_destroy(exynos_gem_obj);
196 197
err_release_fbi:
	drm_fb_helper_release_fbi(helper);
198 199 200 201 202 203 204 205 206 207 208

/*
 * if failed, all resources allocated above would be released by
 * drm_mode_config_cleanup() when drm_load() had been called prior
 * to any specific driver such as fimd or hdmi driver.
 */
out:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/* fb helper hooks; only framebuffer probing is driver specific. */
static const struct drm_fb_helper_funcs exynos_drm_fb_helper_funcs = {
	.fb_probe =	exynos_drm_fbdev_create,
};

213
static bool exynos_drm_fbdev_is_anything_connected(struct drm_device *dev)
214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230
{
	struct drm_connector *connector;
	bool ret = false;

	mutex_lock(&dev->mode_config.mutex);
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		if (connector->status != connector_status_connected)
			continue;

		ret = true;
		break;
	}
	mutex_unlock(&dev->mode_config.mutex);

	return ret;
}

231 232 233 234 235 236 237 238 239 240 241
int exynos_drm_fbdev_init(struct drm_device *dev)
{
	struct exynos_drm_fbdev *fbdev;
	struct exynos_drm_private *private = dev->dev_private;
	struct drm_fb_helper *helper;
	unsigned int num_crtc;
	int ret;

	if (!dev->mode_config.num_crtc || !dev->mode_config.num_connector)
		return 0;

242 243 244
	if (!exynos_drm_fbdev_is_anything_connected(dev))
		return 0;

245
	fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
246
	if (!fbdev)
247 248 249
		return -ENOMEM;

	private->fb_helper = helper = &fbdev->drm_fb_helper;
250 251

	drm_fb_helper_prepare(dev, helper, &exynos_drm_fb_helper_funcs);
252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288

	num_crtc = dev->mode_config.num_crtc;

	ret = drm_fb_helper_init(dev, helper, num_crtc, MAX_CONNECTOR);
	if (ret < 0) {
		DRM_ERROR("failed to initialize drm fb helper.\n");
		goto err_init;
	}

	ret = drm_fb_helper_single_add_all_connectors(helper);
	if (ret < 0) {
		DRM_ERROR("failed to register drm_fb_helper_connector.\n");
		goto err_setup;

	}

	ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP);
	if (ret < 0) {
		DRM_ERROR("failed to set up hw configuration.\n");
		goto err_setup;
	}

	return 0;

err_setup:
	drm_fb_helper_fini(helper);

err_init:
	private->fb_helper = NULL;
	kfree(fbdev);

	return ret;
}

static void exynos_drm_fbdev_destroy(struct drm_device *dev,
				      struct drm_fb_helper *fb_helper)
{
289 290
	struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(fb_helper);
	struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
291 292
	struct drm_framebuffer *fb;

293
	if (exynos_gem_obj->buffer->kvaddr)
294 295
		vunmap(exynos_gem_obj->buffer->kvaddr);

296 297 298
	/* release drm framebuffer and real buffer */
	if (fb_helper->fb && fb_helper->fb->funcs) {
		fb = fb_helper->fb;
299 300
		if (fb) {
			drm_framebuffer_unregister_private(fb);
R
Rob Clark 已提交
301
			drm_framebuffer_remove(fb);
302
		}
303 304
	}

305 306
	drm_fb_helper_unregister_fbi(fb_helper);
	drm_fb_helper_release_fbi(fb_helper);
307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332

	drm_fb_helper_fini(fb_helper);
}

/* Tear down fbdev emulation for @dev and free the helper state. */
void exynos_drm_fbdev_fini(struct drm_device *dev)
{
	struct exynos_drm_private *private = dev->dev_private;
	struct drm_fb_helper *helper;

	if (!private || !private->fb_helper)
		return;

	helper = private->fb_helper;

	exynos_drm_fbdev_destroy(dev, helper);
	kfree(to_exynos_fbdev(helper));
	private->fb_helper = NULL;
}

/* Restore the fbdev display configuration (e.g. on lastclose). */
void exynos_drm_fbdev_restore_mode(struct drm_device *dev)
{
	struct exynos_drm_private *private = dev->dev_private;

	if (private && private->fb_helper)
		drm_fb_helper_restore_fbdev_mode_unlocked(private->fb_helper);
}