/*
 * Copyright (C) STMicroelectronics SA 2014
 * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
 * License terms:  GNU General Public License (GPL), version 2
 */

#include <drm/drmP.h>

#include <linux/component.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_platform.h>

15 16
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
17 18 19 20
#include <drm/drm_crtc_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_fb_cma_helper.h>

21 22
#include "sti_crtc.h"
#include "sti_drv.h"
23 24 25 26 27 28 29 30 31 32

#define DRIVER_NAME	"sti"
#define DRIVER_DESC	"STMicroelectronics SoC DRM"
#define DRIVER_DATE	"20140601"
#define DRIVER_MAJOR	1
#define DRIVER_MINOR	0

#define STI_MAX_FB_HEIGHT	4096
#define STI_MAX_FB_WIDTH	4096

33 34
static void sti_atomic_schedule(struct sti_private *private,
				struct drm_atomic_state *state)
35 36 37 38 39
{
	private->commit.state = state;
	schedule_work(&private->commit.work);
}

40 41
static void sti_atomic_complete(struct sti_private *private,
				struct drm_atomic_state *state)
42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61
{
	struct drm_device *drm = private->drm_dev;

	/*
	 * Everything below can be run asynchronously without the need to grab
	 * any modeset locks at all under one condition: It must be guaranteed
	 * that the asynchronous work has either been cancelled (if the driver
	 * supports it, which at least requires that the framebuffers get
	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
	 * before the new state gets committed on the software side with
	 * drm_atomic_helper_swap_state().
	 *
	 * This scheme allows new atomic state updates to be prepared and
	 * checked in parallel to the asynchronous completion of the previous
	 * update. Which is important since compositors need to figure out the
	 * composition of the next frame right after having submitted the
	 * current layout.
	 */

	drm_atomic_helper_commit_modeset_disables(drm, state);
62
	drm_atomic_helper_commit_planes(drm, state, false);
63 64 65 66 67 68 69 70
	drm_atomic_helper_commit_modeset_enables(drm, state);

	drm_atomic_helper_wait_for_vblanks(drm, state);

	drm_atomic_helper_cleanup_planes(drm, state);
	drm_atomic_state_free(state);
}

71
static void sti_atomic_work(struct work_struct *work)
72
{
73 74
	struct sti_private *private = container_of(work,
			struct sti_private, commit.work);
75

76
	sti_atomic_complete(private, private->commit.state);
77 78
}

79 80
static int sti_atomic_commit(struct drm_device *drm,
			     struct drm_atomic_state *state, bool async)
81
{
82
	struct sti_private *private = drm->dev_private;
83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101
	int err;

	err = drm_atomic_helper_prepare_planes(drm, state);
	if (err)
		return err;

	/* serialize outstanding asynchronous commits */
	mutex_lock(&private->commit.lock);
	flush_work(&private->commit.work);

	/*
	 * This is the point of no return - everything below never fails except
	 * when the hw goes bonghits. Which means we can commit the new state on
	 * the software side now.
	 */

	drm_atomic_helper_swap_state(drm, state);

	if (async)
102
		sti_atomic_schedule(private, state);
103
	else
104
		sti_atomic_complete(private, state);
105 106 107 108 109

	mutex_unlock(&private->commit.lock);
	return 0;
}

110
static const struct drm_mode_config_funcs sti_mode_config_funcs = {
111
	.fb_create = drm_fb_cma_create,
112
	.atomic_check = drm_atomic_helper_check,
113
	.atomic_commit = sti_atomic_commit,
114 115
};

116
static void sti_mode_config_init(struct drm_device *dev)
117 118 119 120 121 122 123 124 125
{
	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	/*
	 * set max width and height as default value.
	 * this value would be used to check framebuffer size limitation
	 * at drm_mode_addfb().
	 */
126 127
	dev->mode_config.max_width = STI_MAX_FB_WIDTH;
	dev->mode_config.max_height = STI_MAX_FB_HEIGHT;
128

129
	dev->mode_config.funcs = &sti_mode_config_funcs;
130 131
}

132
static int sti_load(struct drm_device *dev, unsigned long flags)
133
{
134
	struct sti_private *private;
135 136
	int ret;

137
	private = kzalloc(sizeof(*private), GFP_KERNEL);
138 139 140 141 142 143 144
	if (!private) {
		DRM_ERROR("Failed to allocate private\n");
		return -ENOMEM;
	}
	dev->dev_private = (void *)private;
	private->drm_dev = dev;

145
	mutex_init(&private->commit.lock);
146
	INIT_WORK(&private->commit.work, sti_atomic_work);
147

148 149 150
	drm_mode_config_init(dev);
	drm_kms_helper_poll_init(dev);

151
	sti_mode_config_init(dev);
152 153

	ret = component_bind_all(dev->dev, dev);
154 155 156 157
	if (ret) {
		drm_kms_helper_poll_fini(dev);
		drm_mode_config_cleanup(dev);
		kfree(private);
158
		return ret;
159
	}
160

161
	drm_mode_config_reset(dev);
162 163

	drm_fbdev_cma_init(dev, 32,
164 165
			   dev->mode_config.num_crtc,
			   dev->mode_config.num_connector);
166

167 168 169
	return 0;
}

170
static const struct file_operations sti_driver_fops = {
171 172 173 174 175 176 177 178 179 180 181 182
	.owner = THIS_MODULE,
	.open = drm_open,
	.mmap = drm_gem_cma_mmap,
	.poll = drm_poll,
	.read = drm_read,
	.unlocked_ioctl = drm_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = drm_compat_ioctl,
#endif
	.release = drm_release,
};

183 184 185
static struct dma_buf *sti_gem_prime_export(struct drm_device *dev,
					    struct drm_gem_object *obj,
					    int flags)
186 187 188 189 190 191
{
	/* we want to be able to write in mmapped buffer */
	flags |= O_RDWR;
	return drm_gem_prime_export(dev, obj, flags);
}

192
static struct drm_driver sti_driver = {
193 194
	.driver_features = DRIVER_HAVE_IRQ | DRIVER_MODESET |
	    DRIVER_GEM | DRIVER_PRIME,
195
	.load = sti_load,
196 197 198 199 200
	.gem_free_object = drm_gem_cma_free_object,
	.gem_vm_ops = &drm_gem_cma_vm_ops,
	.dumb_create = drm_gem_cma_dumb_create,
	.dumb_map_offset = drm_gem_cma_dumb_map_offset,
	.dumb_destroy = drm_gem_dumb_destroy,
201
	.fops = &sti_driver_fops,
202

203
	.get_vblank_counter = drm_vblank_no_hw_counter,
204 205
	.enable_vblank = sti_crtc_enable_vblank,
	.disable_vblank = sti_crtc_disable_vblank,
206 207 208

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
209
	.gem_prime_export = sti_gem_prime_export,
210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228
	.gem_prime_import = drm_gem_prime_import,
	.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
	.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
	.gem_prime_vmap = drm_gem_cma_prime_vmap,
	.gem_prime_vunmap = drm_gem_cma_prime_vunmap,
	.gem_prime_mmap = drm_gem_cma_prime_mmap,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
};

/* Component match callback: true when @dev's OF node equals @data. */
static int compare_of(struct device *dev, void *data)
{
	return dev->of_node == data;
}

229
static int sti_bind(struct device *dev)
230
{
231
	return drm_platform_init(&sti_driver, to_platform_device(dev));
232 233
}

/* Component master unbind: tear down the DRM device. */
static void sti_unbind(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);

	drm_put_dev(ddev);
}

239 240 241
static const struct component_master_ops sti_ops = {
	.bind = sti_bind,
	.unbind = sti_unbind,
242 243
};

244
static int sti_platform_probe(struct platform_device *pdev)
245 246
{
	struct device *dev = &pdev->dev;
247
	struct device_node *node = dev->of_node;
248 249 250 251 252
	struct device_node *child_np;
	struct component_match *match = NULL;

	dma_set_coherent_mask(dev, DMA_BIT_MASK(32));

253 254
	of_platform_populate(node, NULL, NULL, dev);

255 256 257 258 259 260 261 262
	child_np = of_get_next_available_child(node, NULL);

	while (child_np) {
		component_match_add(dev, &match, compare_of, child_np);
		of_node_put(child_np);
		child_np = of_get_next_available_child(node, child_np);
	}

263
	return component_master_add_with_match(dev, &sti_ops, match);
264 265
}

266
static int sti_platform_remove(struct platform_device *pdev)
267
{
268
	component_master_del(&pdev->dev, &sti_ops);
269
	of_platform_depopulate(&pdev->dev);
270

271 272 273
	return 0;
}

274
static const struct of_device_id sti_dt_ids[] = {
275 276 277
	{ .compatible = "st,sti-display-subsystem", },
	{ /* end node */ },
};
278
MODULE_DEVICE_TABLE(of, sti_dt_ids);
279

280 281 282
static struct platform_driver sti_platform_driver = {
	.probe = sti_platform_probe,
	.remove = sti_platform_remove,
283 284
	.driver = {
		.name = DRIVER_NAME,
285
		.of_match_table = sti_dt_ids,
286 287 288
	},
};

/*
 * All sub-device drivers registered together with the master driver;
 * the master (sti_platform_driver) is intentionally registered last so
 * its probe runs after the component drivers are available.
 */
static struct platform_driver * const drivers[] = {
	&sti_tvout_driver,
	&sti_vtac_driver,
	&sti_hqvdp_driver,
	&sti_hdmi_driver,
	&sti_hda_driver,
	&sti_dvo_driver,
	&sti_vtg_driver,
	&sti_compositor_driver,
	&sti_platform_driver,
};

/* Module init: register every platform driver in the table above. */
static int sti_drm_init(void)
{
	return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
}
module_init(sti_drm_init);

/* Module exit: unregister the platform drivers registered at init. */
static void sti_drm_exit(void)
{
	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
}
module_exit(sti_drm_exit);

/* Module metadata. */
MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver");
MODULE_LICENSE("GPL");