/*
 * linux/drivers/video/omap2/dss/dsi.c
 *
 * Copyright (C) 2009 Nokia Corporation
 * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#define DSS_SUBSYS_NAME "DSI"

#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/semaphore.h>
#include <linux/seq_file.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/of_platform.h>
#include <linux/component.h>
#include <linux/sys_soc.h>

#include <video/mipi_display.h>

#include "omapdss.h"
#include "dss.h"
#include "dss_features.h"

#define DSI_CATCH_MISSING_TE

struct dsi_reg { u16 module; u16 idx; };

#define DSI_REG(mod, idx)		((const struct dsi_reg) { mod, idx })
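/*
 * DSI registers are addressed as a (module, offset) pair: the module index
 * selects one of the ioremapped register spaces (protocol engine, PHY or
 * PLL) and dsi_read_reg()/dsi_write_reg() resolve the offset against the
 * matching base address.
 */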

/* DSI Protocol Engine */

#define DSI_PROTO			0
#define DSI_PROTO_SZ			0x200

#define DSI_REVISION			DSI_REG(DSI_PROTO, 0x0000)
#define DSI_SYSCONFIG			DSI_REG(DSI_PROTO, 0x0010)
#define DSI_SYSSTATUS			DSI_REG(DSI_PROTO, 0x0014)
#define DSI_IRQSTATUS			DSI_REG(DSI_PROTO, 0x0018)
#define DSI_IRQENABLE			DSI_REG(DSI_PROTO, 0x001C)
#define DSI_CTRL			DSI_REG(DSI_PROTO, 0x0040)
#define DSI_GNQ				DSI_REG(DSI_PROTO, 0x0044)
#define DSI_COMPLEXIO_CFG1		DSI_REG(DSI_PROTO, 0x0048)
#define DSI_COMPLEXIO_IRQ_STATUS	DSI_REG(DSI_PROTO, 0x004C)
#define DSI_COMPLEXIO_IRQ_ENABLE	DSI_REG(DSI_PROTO, 0x0050)
#define DSI_CLK_CTRL			DSI_REG(DSI_PROTO, 0x0054)
#define DSI_TIMING1			DSI_REG(DSI_PROTO, 0x0058)
#define DSI_TIMING2			DSI_REG(DSI_PROTO, 0x005C)
#define DSI_VM_TIMING1			DSI_REG(DSI_PROTO, 0x0060)
#define DSI_VM_TIMING2			DSI_REG(DSI_PROTO, 0x0064)
#define DSI_VM_TIMING3			DSI_REG(DSI_PROTO, 0x0068)
#define DSI_CLK_TIMING			DSI_REG(DSI_PROTO, 0x006C)
#define DSI_TX_FIFO_VC_SIZE		DSI_REG(DSI_PROTO, 0x0070)
#define DSI_RX_FIFO_VC_SIZE		DSI_REG(DSI_PROTO, 0x0074)
#define DSI_COMPLEXIO_CFG2		DSI_REG(DSI_PROTO, 0x0078)
#define DSI_RX_FIFO_VC_FULLNESS		DSI_REG(DSI_PROTO, 0x007C)
#define DSI_VM_TIMING4			DSI_REG(DSI_PROTO, 0x0080)
#define DSI_TX_FIFO_VC_EMPTINESS	DSI_REG(DSI_PROTO, 0x0084)
#define DSI_VM_TIMING5			DSI_REG(DSI_PROTO, 0x0088)
#define DSI_VM_TIMING6			DSI_REG(DSI_PROTO, 0x008C)
#define DSI_VM_TIMING7			DSI_REG(DSI_PROTO, 0x0090)
#define DSI_STOPCLK_TIMING		DSI_REG(DSI_PROTO, 0x0094)
#define DSI_VC_CTRL(n)			DSI_REG(DSI_PROTO, 0x0100 + (n * 0x20))
#define DSI_VC_TE(n)			DSI_REG(DSI_PROTO, 0x0104 + (n * 0x20))
#define DSI_VC_LONG_PACKET_HEADER(n)	DSI_REG(DSI_PROTO, 0x0108 + (n * 0x20))
#define DSI_VC_LONG_PACKET_PAYLOAD(n)	DSI_REG(DSI_PROTO, 0x010C + (n * 0x20))
#define DSI_VC_SHORT_PACKET_HEADER(n)	DSI_REG(DSI_PROTO, 0x0110 + (n * 0x20))
#define DSI_VC_IRQSTATUS(n)		DSI_REG(DSI_PROTO, 0x0118 + (n * 0x20))
#define DSI_VC_IRQENABLE(n)		DSI_REG(DSI_PROTO, 0x011C + (n * 0x20))

/* DSIPHY_SCP */

#define DSI_PHY				1
#define DSI_PHY_OFFSET			0x200
#define DSI_PHY_SZ			0x40

#define DSI_DSIPHY_CFG0			DSI_REG(DSI_PHY, 0x0000)
#define DSI_DSIPHY_CFG1			DSI_REG(DSI_PHY, 0x0004)
#define DSI_DSIPHY_CFG2			DSI_REG(DSI_PHY, 0x0008)
#define DSI_DSIPHY_CFG5			DSI_REG(DSI_PHY, 0x0014)
#define DSI_DSIPHY_CFG10		DSI_REG(DSI_PHY, 0x0028)

/* DSI_PLL_CTRL_SCP */

#define DSI_PLL				2
#define DSI_PLL_OFFSET			0x300
#define DSI_PLL_SZ			0x20

#define DSI_PLL_CONTROL			DSI_REG(DSI_PLL, 0x0000)
#define DSI_PLL_STATUS			DSI_REG(DSI_PLL, 0x0004)
#define DSI_PLL_GO			DSI_REG(DSI_PLL, 0x0008)
#define DSI_PLL_CONFIGURATION1		DSI_REG(DSI_PLL, 0x000C)
#define DSI_PLL_CONFIGURATION2		DSI_REG(DSI_PLL, 0x0010)

#define REG_GET(dsidev, idx, start, end) \
	FLD_GET(dsi_read_reg(dsidev, idx), start, end)

#define REG_FLD_MOD(dsidev, idx, val, start, end) \
	dsi_write_reg(dsidev, idx, FLD_MOD(dsi_read_reg(dsidev, idx), val, start, end))
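/*
 * REG_GET() and REG_FLD_MOD() are bitfield accessors built on the driver's
 * FLD_GET()/FLD_MOD() helpers; e.g. REG_FLD_MOD(dsidev, DSI_CTRL, 1, 0, 0)
 * performs a read-modify-write that sets only bit 0 (IF_EN) of DSI_CTRL.
 */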

/* Global interrupts */
#define DSI_IRQ_VC0		(1 << 0)
#define DSI_IRQ_VC1		(1 << 1)
#define DSI_IRQ_VC2		(1 << 2)
#define DSI_IRQ_VC3		(1 << 3)
#define DSI_IRQ_WAKEUP		(1 << 4)
#define DSI_IRQ_RESYNC		(1 << 5)
#define DSI_IRQ_PLL_LOCK	(1 << 7)
#define DSI_IRQ_PLL_UNLOCK	(1 << 8)
#define DSI_IRQ_PLL_RECALL	(1 << 9)
#define DSI_IRQ_COMPLEXIO_ERR	(1 << 10)
#define DSI_IRQ_HS_TX_TIMEOUT	(1 << 14)
#define DSI_IRQ_LP_RX_TIMEOUT	(1 << 15)
#define DSI_IRQ_TE_TRIGGER	(1 << 16)
#define DSI_IRQ_ACK_TRIGGER	(1 << 17)
#define DSI_IRQ_SYNC_LOST	(1 << 18)
#define DSI_IRQ_LDO_POWER_GOOD	(1 << 19)
#define DSI_IRQ_TA_TIMEOUT	(1 << 20)
#define DSI_IRQ_ERROR_MASK \
	(DSI_IRQ_HS_TX_TIMEOUT | DSI_IRQ_LP_RX_TIMEOUT | DSI_IRQ_SYNC_LOST | \
	DSI_IRQ_TA_TIMEOUT)
#define DSI_IRQ_CHANNEL_MASK	0xf

/* Virtual channel interrupts */
#define DSI_VC_IRQ_CS		(1 << 0)
#define DSI_VC_IRQ_ECC_CORR	(1 << 1)
#define DSI_VC_IRQ_PACKET_SENT	(1 << 2)
#define DSI_VC_IRQ_FIFO_TX_OVF	(1 << 3)
#define DSI_VC_IRQ_FIFO_RX_OVF	(1 << 4)
#define DSI_VC_IRQ_BTA		(1 << 5)
#define DSI_VC_IRQ_ECC_NO_CORR	(1 << 6)
#define DSI_VC_IRQ_FIFO_TX_UDF	(1 << 7)
#define DSI_VC_IRQ_PP_BUSY_CHANGE (1 << 8)
#define DSI_VC_IRQ_ERROR_MASK \
	(DSI_VC_IRQ_CS | DSI_VC_IRQ_ECC_CORR | DSI_VC_IRQ_FIFO_TX_OVF | \
	DSI_VC_IRQ_FIFO_RX_OVF | DSI_VC_IRQ_ECC_NO_CORR | \
	DSI_VC_IRQ_FIFO_TX_UDF)

/* ComplexIO interrupts */
#define DSI_CIO_IRQ_ERRSYNCESC1		(1 << 0)
#define DSI_CIO_IRQ_ERRSYNCESC2		(1 << 1)
#define DSI_CIO_IRQ_ERRSYNCESC3		(1 << 2)
#define DSI_CIO_IRQ_ERRSYNCESC4		(1 << 3)
#define DSI_CIO_IRQ_ERRSYNCESC5		(1 << 4)
#define DSI_CIO_IRQ_ERRESC1		(1 << 5)
#define DSI_CIO_IRQ_ERRESC2		(1 << 6)
#define DSI_CIO_IRQ_ERRESC3		(1 << 7)
#define DSI_CIO_IRQ_ERRESC4		(1 << 8)
#define DSI_CIO_IRQ_ERRESC5		(1 << 9)
#define DSI_CIO_IRQ_ERRCONTROL1		(1 << 10)
#define DSI_CIO_IRQ_ERRCONTROL2		(1 << 11)
#define DSI_CIO_IRQ_ERRCONTROL3		(1 << 12)
#define DSI_CIO_IRQ_ERRCONTROL4		(1 << 13)
#define DSI_CIO_IRQ_ERRCONTROL5		(1 << 14)
#define DSI_CIO_IRQ_STATEULPS1		(1 << 15)
#define DSI_CIO_IRQ_STATEULPS2		(1 << 16)
#define DSI_CIO_IRQ_STATEULPS3		(1 << 17)
#define DSI_CIO_IRQ_STATEULPS4		(1 << 18)
#define DSI_CIO_IRQ_STATEULPS5		(1 << 19)
#define DSI_CIO_IRQ_ERRCONTENTIONLP0_1	(1 << 20)
#define DSI_CIO_IRQ_ERRCONTENTIONLP1_1	(1 << 21)
#define DSI_CIO_IRQ_ERRCONTENTIONLP0_2	(1 << 22)
#define DSI_CIO_IRQ_ERRCONTENTIONLP1_2	(1 << 23)
#define DSI_CIO_IRQ_ERRCONTENTIONLP0_3	(1 << 24)
#define DSI_CIO_IRQ_ERRCONTENTIONLP1_3	(1 << 25)
#define DSI_CIO_IRQ_ERRCONTENTIONLP0_4	(1 << 26)
#define DSI_CIO_IRQ_ERRCONTENTIONLP1_4	(1 << 27)
#define DSI_CIO_IRQ_ERRCONTENTIONLP0_5	(1 << 28)
#define DSI_CIO_IRQ_ERRCONTENTIONLP1_5	(1 << 29)
#define DSI_CIO_IRQ_ULPSACTIVENOT_ALL0	(1 << 30)
#define DSI_CIO_IRQ_ULPSACTIVENOT_ALL1	(1 << 31)
#define DSI_CIO_IRQ_ERROR_MASK \
	(DSI_CIO_IRQ_ERRSYNCESC1 | DSI_CIO_IRQ_ERRSYNCESC2 | \
	 DSI_CIO_IRQ_ERRSYNCESC3 | DSI_CIO_IRQ_ERRSYNCESC4 | \
	 DSI_CIO_IRQ_ERRSYNCESC5 | \
	 DSI_CIO_IRQ_ERRESC1 | DSI_CIO_IRQ_ERRESC2 | \
	 DSI_CIO_IRQ_ERRESC3 | DSI_CIO_IRQ_ERRESC4 | \
	 DSI_CIO_IRQ_ERRESC5 | \
	 DSI_CIO_IRQ_ERRCONTROL1 | DSI_CIO_IRQ_ERRCONTROL2 | \
	 DSI_CIO_IRQ_ERRCONTROL3 | DSI_CIO_IRQ_ERRCONTROL4 | \
	 DSI_CIO_IRQ_ERRCONTROL5 | \
	 DSI_CIO_IRQ_ERRCONTENTIONLP0_1 | DSI_CIO_IRQ_ERRCONTENTIONLP1_1 | \
	 DSI_CIO_IRQ_ERRCONTENTIONLP0_2 | DSI_CIO_IRQ_ERRCONTENTIONLP1_2 | \
	 DSI_CIO_IRQ_ERRCONTENTIONLP0_3 | DSI_CIO_IRQ_ERRCONTENTIONLP1_3 | \
	 DSI_CIO_IRQ_ERRCONTENTIONLP0_4 | DSI_CIO_IRQ_ERRCONTENTIONLP1_4 | \
	 DSI_CIO_IRQ_ERRCONTENTIONLP0_5 | DSI_CIO_IRQ_ERRCONTENTIONLP1_5)

typedef void (*omap_dsi_isr_t) (void *arg, u32 mask);

static int dsi_display_init_dispc(struct platform_device *dsidev,
	enum omap_channel channel);
static void dsi_display_uninit_dispc(struct platform_device *dsidev,
	enum omap_channel channel);

static int dsi_vc_send_null(struct omap_dss_device *dssdev, int channel);

/* DSI PLL HSDIV indices */
#define HSDIV_DISPC	0
#define HSDIV_DSI	1
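/*
 * These indices select the two HSDIV outputs of the DSI PLL when looking up
 * dividers and rates in struct dss_pll_clock_info (cinfo->mX[] and
 * cinfo->clkout[]).
 */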

#define DSI_MAX_NR_ISRS                2
#define DSI_MAX_NR_LANES	5

enum dsi_model {
	DSI_MODEL_OMAP3,
	DSI_MODEL_OMAP4,
	DSI_MODEL_OMAP5,
};

enum dsi_lane_function {
	DSI_LANE_UNUSED	= 0,
	DSI_LANE_CLK,
	DSI_LANE_DATA1,
	DSI_LANE_DATA2,
	DSI_LANE_DATA3,
	DSI_LANE_DATA4,
};

struct dsi_lane_config {
	enum dsi_lane_function function;
	u8 polarity;
};

struct dsi_isr_data {
	omap_dsi_isr_t	isr;
	void		*arg;
	u32		mask;
};

enum fifo_size {
	DSI_FIFO_SIZE_0		= 0,
	DSI_FIFO_SIZE_32	= 1,
	DSI_FIFO_SIZE_64	= 2,
	DSI_FIFO_SIZE_96	= 3,
	DSI_FIFO_SIZE_128	= 4,
};

enum dsi_vc_source {
	DSI_VC_SOURCE_L4 = 0,
	DSI_VC_SOURCE_VP,
};

struct dsi_irq_stats {
	unsigned long last_reset;
	unsigned irq_count;
	unsigned dsi_irqs[32];
	unsigned vc_irqs[4][32];
	unsigned cio_irqs[32];
};

struct dsi_isr_tables {
	struct dsi_isr_data isr_table[DSI_MAX_NR_ISRS];
	struct dsi_isr_data isr_table_vc[4][DSI_MAX_NR_ISRS];
	struct dsi_isr_data isr_table_cio[DSI_MAX_NR_ISRS];
};

struct dsi_clk_calc_ctx {
	struct platform_device *dsidev;
	struct dss_pll *pll;

	/* inputs */

	const struct omap_dss_dsi_config *config;

	unsigned long req_pck_min, req_pck_nom, req_pck_max;

	/* outputs */

	struct dss_pll_clock_info dsi_cinfo;
	struct dispc_clock_info dispc_cinfo;

	struct videomode vm;
	struct omap_dss_dsi_videomode_timings dsi_vm;
};

struct dsi_lp_clock_info {
	unsigned long lp_clk;
	u16 lp_clk_div;
};

struct dsi_module_id_data {
	u32 address;
	int id;
};

enum dsi_quirks {
	DSI_QUIRK_PLL_PWR_BUG = (1 << 0),	/* DSI-PLL power command 0x3 is not working */
	DSI_QUIRK_DCS_CMD_CONFIG_VC = (1 << 1),
	DSI_QUIRK_VC_OCP_WIDTH = (1 << 2),
	DSI_QUIRK_REVERSE_TXCLKESC = (1 << 3),
	DSI_QUIRK_GNQ = (1 << 4),
	DSI_QUIRK_PHY_DCC = (1 << 5),
};
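/*
 * The quirk flags are set per SoC variant through struct dsi_of_data and
 * checked at runtime; for example the DSI_GNQ register is only read when
 * DSI_QUIRK_GNQ is set (see dsi_get_line_buf_size()).
 */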

struct dsi_of_data {
	enum dsi_model model;
	const struct dss_pll_hw *pll_hw;
	const struct dsi_module_id_data *modules;
	enum dsi_quirks quirks;
};

struct dsi_data {
	struct platform_device *pdev;
	void __iomem *proto_base;
	void __iomem *phy_base;
	void __iomem *pll_base;

	const struct dsi_of_data *data;
	int module_id;

	int irq;

	bool is_enabled;

	struct clk *dss_clk;
	struct regmap *syscon;

	struct dispc_clock_info user_dispc_cinfo;
	struct dss_pll_clock_info user_dsi_cinfo;

	struct dsi_lp_clock_info user_lp_cinfo;
	struct dsi_lp_clock_info current_lp_cinfo;

	struct dss_pll pll;

	bool vdds_dsi_enabled;
	struct regulator *vdds_dsi_reg;

	struct {
		enum dsi_vc_source source;
		struct omap_dss_device *dssdev;
		enum fifo_size tx_fifo_size;
		enum fifo_size rx_fifo_size;
		int vc_id;
	} vc[4];

	struct mutex lock;
	struct semaphore bus_lock;

	spinlock_t irq_lock;
	struct dsi_isr_tables isr_tables;
	/* space for a copy used by the interrupt handler */
	struct dsi_isr_tables isr_tables_copy;

	int update_channel;
#ifdef DSI_PERF_MEASURE
	unsigned update_bytes;
#endif

	bool te_enabled;
	bool ulps_enabled;

	void (*framedone_callback)(int, void *);
	void *framedone_data;

	struct delayed_work framedone_timeout_work;

#ifdef DSI_CATCH_MISSING_TE
	struct timer_list te_timer;
#endif

	unsigned long cache_req_pck;
	unsigned long cache_clk_freq;
	struct dss_pll_clock_info cache_cinfo;

	u32		errors;
	spinlock_t	errors_lock;
#ifdef DSI_PERF_MEASURE
	ktime_t perf_setup_time;
	ktime_t perf_start_time;
#endif
	int debug_read;
	int debug_write;

#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
	spinlock_t irq_stats_lock;
	struct dsi_irq_stats irq_stats;
#endif

	unsigned num_lanes_supported;
	unsigned line_buffer_size;

	struct dsi_lane_config lanes[DSI_MAX_NR_LANES];
	unsigned num_lanes_used;

	unsigned scp_clk_refcount;

	struct dss_lcd_mgr_config mgr_config;
	struct videomode vm;
	enum omap_dss_dsi_pixel_format pix_fmt;
	enum omap_dss_dsi_mode mode;
	struct omap_dss_dsi_videomode_timings vm_timings;

	struct omap_dss_device output;
};

struct dsi_packet_sent_handler_data {
	struct platform_device *dsidev;
	struct completion *completion;
};

#ifdef DSI_PERF_MEASURE
static bool dsi_perf;
module_param(dsi_perf, bool, 0644);
#endif

static inline struct dsi_data *dsi_get_dsidrv_data(struct platform_device *dsidev)
{
	return dev_get_drvdata(&dsidev->dev);
}

static inline struct platform_device *dsi_get_dsidev_from_dssdev(struct omap_dss_device *dssdev)
{
	return to_platform_device(dssdev->dev);
}

static struct platform_device *dsi_get_dsidev_from_id(int module)
{
	struct omap_dss_device *out;
	enum omap_dss_output_id	id;

	switch (module) {
	case 0:
		id = OMAP_DSS_OUTPUT_DSI1;
		break;
	case 1:
		id = OMAP_DSS_OUTPUT_DSI2;
		break;
	default:
		return NULL;
	}

	out = omap_dss_get_output(id);

	return out ? to_platform_device(out->dev) : NULL;
}

static inline void dsi_write_reg(struct platform_device *dsidev,
		const struct dsi_reg idx, u32 val)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	void __iomem *base;

	switch(idx.module) {
		case DSI_PROTO: base = dsi->proto_base; break;
		case DSI_PHY: base = dsi->phy_base; break;
		case DSI_PLL: base = dsi->pll_base; break;
		default: return;
	}

	__raw_writel(val, base + idx.idx);
}

static inline u32 dsi_read_reg(struct platform_device *dsidev,
		const struct dsi_reg idx)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	void __iomem *base;

	switch(idx.module) {
		case DSI_PROTO: base = dsi->proto_base; break;
		case DSI_PHY: base = dsi->phy_base; break;
		case DSI_PLL: base = dsi->pll_base; break;
		default: return 0;
	}

	return __raw_readl(base + idx.idx);
}

static void dsi_bus_lock(struct omap_dss_device *dssdev)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	down(&dsi->bus_lock);
}

static void dsi_bus_unlock(struct omap_dss_device *dssdev)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	up(&dsi->bus_lock);
}

static bool dsi_bus_is_locked(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	return dsi->bus_lock.count == 0;
}

static void dsi_completion_handler(void *data, u32 mask)
{
	complete((struct completion *)data);
}

static inline int wait_for_bit_change(struct platform_device *dsidev,
		const struct dsi_reg idx, int bitnum, int value)
{
	unsigned long timeout;
	ktime_t wait;
	int t;

	/* first busyloop to see if the bit changes right away */
	t = 100;
	while (t-- > 0) {
		if (REG_GET(dsidev, idx, bitnum, bitnum) == value)
			return value;
	}

	/* then loop for 500ms, sleeping for 1ms in between */
	timeout = jiffies + msecs_to_jiffies(500);
	while (time_before(jiffies, timeout)) {
		if (REG_GET(dsidev, idx, bitnum, bitnum) == value)
			return value;

		wait = ns_to_ktime(1000 * 1000);
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_hrtimeout(&wait, HRTIMER_MODE_REL);
	}

	return !value;
}

static u8 dsi_get_pixel_size(enum omap_dss_dsi_pixel_format fmt)
{
	switch (fmt) {
	case OMAP_DSS_DSI_FMT_RGB888:
	case OMAP_DSS_DSI_FMT_RGB666:
		return 24;
	case OMAP_DSS_DSI_FMT_RGB666_PACKED:
		return 18;
	case OMAP_DSS_DSI_FMT_RGB565:
		return 16;
	default:
		BUG();
		return 0;
	}
}

#ifdef DSI_PERF_MEASURE
static void dsi_perf_mark_setup(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	dsi->perf_setup_time = ktime_get();
}

static void dsi_perf_mark_start(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	dsi->perf_start_time = ktime_get();
}

static void dsi_perf_show(struct platform_device *dsidev, const char *name)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	ktime_t t, setup_time, trans_time;
	u32 total_bytes;
	u32 setup_us, trans_us, total_us;

	if (!dsi_perf)
		return;

	t = ktime_get();

	setup_time = ktime_sub(dsi->perf_start_time, dsi->perf_setup_time);
	setup_us = (u32)ktime_to_us(setup_time);
	if (setup_us == 0)
		setup_us = 1;

	trans_time = ktime_sub(t, dsi->perf_start_time);
	trans_us = (u32)ktime_to_us(trans_time);
	if (trans_us == 0)
		trans_us = 1;

	total_us = setup_us + trans_us;

	total_bytes = dsi->update_bytes;

	pr_info("DSI(%s): %u us + %u us = %u us (%uHz), %u bytes, %u kbytes/sec\n",
		name,
		setup_us,
		trans_us,
		total_us,
		1000 * 1000 / total_us,
		total_bytes,
		total_bytes * 1000 / total_us);
}
#else
static inline void dsi_perf_mark_setup(struct platform_device *dsidev)
{
}

static inline void dsi_perf_mark_start(struct platform_device *dsidev)
{
}

static inline void dsi_perf_show(struct platform_device *dsidev,
		const char *name)
{
}
#endif

static int verbose_irq;

static void print_irq_status(u32 status)
{
	if (status == 0)
		return;

	if (!verbose_irq && (status & ~DSI_IRQ_CHANNEL_MASK) == 0)
		return;

#define PIS(x) (status & DSI_IRQ_##x) ? (#x " ") : ""

	pr_debug("DSI IRQ: 0x%x: %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
		status,
		verbose_irq ? PIS(VC0) : "",
		verbose_irq ? PIS(VC1) : "",
		verbose_irq ? PIS(VC2) : "",
		verbose_irq ? PIS(VC3) : "",
		PIS(WAKEUP),
		PIS(RESYNC),
		PIS(PLL_LOCK),
		PIS(PLL_UNLOCK),
		PIS(PLL_RECALL),
		PIS(COMPLEXIO_ERR),
		PIS(HS_TX_TIMEOUT),
		PIS(LP_RX_TIMEOUT),
		PIS(TE_TRIGGER),
		PIS(ACK_TRIGGER),
		PIS(SYNC_LOST),
		PIS(LDO_POWER_GOOD),
		PIS(TA_TIMEOUT));
#undef PIS
}

static void print_irq_status_vc(int channel, u32 status)
{
	if (status == 0)
		return;

	if (!verbose_irq && (status & ~DSI_VC_IRQ_PACKET_SENT) == 0)
		return;

#define PIS(x) (status & DSI_VC_IRQ_##x) ? (#x " ") : ""

	pr_debug("DSI VC(%d) IRQ 0x%x: %s%s%s%s%s%s%s%s%s\n",
		channel,
		status,
		PIS(CS),
		PIS(ECC_CORR),
		PIS(ECC_NO_CORR),
		verbose_irq ? PIS(PACKET_SENT) : "",
		PIS(BTA),
		PIS(FIFO_TX_OVF),
		PIS(FIFO_RX_OVF),
		PIS(FIFO_TX_UDF),
		PIS(PP_BUSY_CHANGE));
#undef PIS
}

static void print_irq_status_cio(u32 status)
{
	if (status == 0)
		return;

#define PIS(x) (status & DSI_CIO_IRQ_##x) ? (#x " ") : ""

	pr_debug("DSI CIO IRQ 0x%x: %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
		status,
		PIS(ERRSYNCESC1),
		PIS(ERRSYNCESC2),
		PIS(ERRSYNCESC3),
		PIS(ERRESC1),
		PIS(ERRESC2),
		PIS(ERRESC3),
		PIS(ERRCONTROL1),
		PIS(ERRCONTROL2),
		PIS(ERRCONTROL3),
		PIS(STATEULPS1),
		PIS(STATEULPS2),
		PIS(STATEULPS3),
		PIS(ERRCONTENTIONLP0_1),
		PIS(ERRCONTENTIONLP1_1),
		PIS(ERRCONTENTIONLP0_2),
		PIS(ERRCONTENTIONLP1_2),
		PIS(ERRCONTENTIONLP0_3),
		PIS(ERRCONTENTIONLP1_3),
		PIS(ULPSACTIVENOT_ALL0),
		PIS(ULPSACTIVENOT_ALL1));
#undef PIS
}

#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
static void dsi_collect_irq_stats(struct platform_device *dsidev, u32 irqstatus,
		u32 *vcstatus, u32 ciostatus)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	int i;

	spin_lock(&dsi->irq_stats_lock);

	dsi->irq_stats.irq_count++;
	dss_collect_irq_stats(irqstatus, dsi->irq_stats.dsi_irqs);

	for (i = 0; i < 4; ++i)
		dss_collect_irq_stats(vcstatus[i], dsi->irq_stats.vc_irqs[i]);

	dss_collect_irq_stats(ciostatus, dsi->irq_stats.cio_irqs);

	spin_unlock(&dsi->irq_stats_lock);
}
#else
#define dsi_collect_irq_stats(dsidev, irqstatus, vcstatus, ciostatus)
#endif

static int debug_irq;

static void dsi_handle_irq_errors(struct platform_device *dsidev, u32 irqstatus,
		u32 *vcstatus, u32 ciostatus)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	int i;

	if (irqstatus & DSI_IRQ_ERROR_MASK) {
		DSSERR("DSI error, irqstatus %x\n", irqstatus);
		print_irq_status(irqstatus);
		spin_lock(&dsi->errors_lock);
		dsi->errors |= irqstatus & DSI_IRQ_ERROR_MASK;
		spin_unlock(&dsi->errors_lock);
	} else if (debug_irq) {
		print_irq_status(irqstatus);
	}

	for (i = 0; i < 4; ++i) {
		if (vcstatus[i] & DSI_VC_IRQ_ERROR_MASK) {
			DSSERR("DSI VC(%d) error, vc irqstatus %x\n",
				       i, vcstatus[i]);
			print_irq_status_vc(i, vcstatus[i]);
		} else if (debug_irq) {
			print_irq_status_vc(i, vcstatus[i]);
		}
	}

	if (ciostatus & DSI_CIO_IRQ_ERROR_MASK) {
		DSSERR("DSI CIO error, cio irqstatus %x\n", ciostatus);
		print_irq_status_cio(ciostatus);
	} else if (debug_irq) {
		print_irq_status_cio(ciostatus);
	}
}

static void dsi_call_isrs(struct dsi_isr_data *isr_array,
		unsigned isr_array_size, u32 irqstatus)
{
	struct dsi_isr_data *isr_data;
	int i;

	for (i = 0; i < isr_array_size; i++) {
		isr_data = &isr_array[i];
		if (isr_data->isr && isr_data->mask & irqstatus)
			isr_data->isr(isr_data->arg, irqstatus);
	}
}

static void dsi_handle_isrs(struct dsi_isr_tables *isr_tables,
		u32 irqstatus, u32 *vcstatus, u32 ciostatus)
{
	int i;

	dsi_call_isrs(isr_tables->isr_table,
			ARRAY_SIZE(isr_tables->isr_table),
			irqstatus);

	for (i = 0; i < 4; ++i) {
		if (vcstatus[i] == 0)
			continue;
		dsi_call_isrs(isr_tables->isr_table_vc[i],
				ARRAY_SIZE(isr_tables->isr_table_vc[i]),
				vcstatus[i]);
	}

	if (ciostatus != 0)
		dsi_call_isrs(isr_tables->isr_table_cio,
				ARRAY_SIZE(isr_tables->isr_table_cio),
				ciostatus);
}

static irqreturn_t omap_dsi_irq_handler(int irq, void *arg)
{
	struct platform_device *dsidev;
	struct dsi_data *dsi;
	u32 irqstatus, vcstatus[4], ciostatus;
	int i;

	dsidev = (struct platform_device *) arg;
	dsi = dsi_get_dsidrv_data(dsidev);

	if (!dsi->is_enabled)
		return IRQ_NONE;

	spin_lock(&dsi->irq_lock);

	irqstatus = dsi_read_reg(dsidev, DSI_IRQSTATUS);

	/* IRQ is not for us */
	if (!irqstatus) {
		spin_unlock(&dsi->irq_lock);
		return IRQ_NONE;
	}

	dsi_write_reg(dsidev, DSI_IRQSTATUS, irqstatus & ~DSI_IRQ_CHANNEL_MASK);
	/* flush posted write */
	dsi_read_reg(dsidev, DSI_IRQSTATUS);
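	/*
	 * The virtual channel bits of DSI_IRQSTATUS are deliberately not
	 * cleared here; the per-VC status registers are read and cleared in
	 * the loop below.
	 */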

	for (i = 0; i < 4; ++i) {
		if ((irqstatus & (1 << i)) == 0) {
			vcstatus[i] = 0;
			continue;
		}

		vcstatus[i] = dsi_read_reg(dsidev, DSI_VC_IRQSTATUS(i));

		dsi_write_reg(dsidev, DSI_VC_IRQSTATUS(i), vcstatus[i]);
		/* flush posted write */
		dsi_read_reg(dsidev, DSI_VC_IRQSTATUS(i));
	}

	if (irqstatus & DSI_IRQ_COMPLEXIO_ERR) {
		ciostatus = dsi_read_reg(dsidev, DSI_COMPLEXIO_IRQ_STATUS);

		dsi_write_reg(dsidev, DSI_COMPLEXIO_IRQ_STATUS, ciostatus);
		/* flush posted write */
		dsi_read_reg(dsidev, DSI_COMPLEXIO_IRQ_STATUS);
	} else {
		ciostatus = 0;
	}

#ifdef DSI_CATCH_MISSING_TE
	if (irqstatus & DSI_IRQ_TE_TRIGGER)
		del_timer(&dsi->te_timer);
#endif

	/* make a copy and unlock, so that isrs can unregister
	 * themselves */
	memcpy(&dsi->isr_tables_copy, &dsi->isr_tables,
		sizeof(dsi->isr_tables));

	spin_unlock(&dsi->irq_lock);

	dsi_handle_isrs(&dsi->isr_tables_copy, irqstatus, vcstatus, ciostatus);

	dsi_handle_irq_errors(dsidev, irqstatus, vcstatus, ciostatus);

	dsi_collect_irq_stats(dsidev, irqstatus, vcstatus, ciostatus);

	return IRQ_HANDLED;
}

/* dsi->irq_lock has to be locked by the caller */
static void _omap_dsi_configure_irqs(struct platform_device *dsidev,
		struct dsi_isr_data *isr_array,
		unsigned isr_array_size, u32 default_mask,
		const struct dsi_reg enable_reg,
		const struct dsi_reg status_reg)
{
	struct dsi_isr_data *isr_data;
	u32 mask;
	u32 old_mask;
	int i;

	mask = default_mask;

	for (i = 0; i < isr_array_size; i++) {
		isr_data = &isr_array[i];

		if (isr_data->isr == NULL)
			continue;

		mask |= isr_data->mask;
	}

	old_mask = dsi_read_reg(dsidev, enable_reg);
	/* clear the irqstatus for newly enabled irqs */
	dsi_write_reg(dsidev, status_reg, (mask ^ old_mask) & mask);
	dsi_write_reg(dsidev, enable_reg, mask);

	/* flush posted writes */
	dsi_read_reg(dsidev, enable_reg);
	dsi_read_reg(dsidev, status_reg);
}

/* dsi->irq_lock has to be locked by the caller */
static void _omap_dsi_set_irqs(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	u32 mask = DSI_IRQ_ERROR_MASK;
#ifdef DSI_CATCH_MISSING_TE
	mask |= DSI_IRQ_TE_TRIGGER;
#endif
	_omap_dsi_configure_irqs(dsidev, dsi->isr_tables.isr_table,
			ARRAY_SIZE(dsi->isr_tables.isr_table), mask,
			DSI_IRQENABLE, DSI_IRQSTATUS);
}

/* dsi->irq_lock has to be locked by the caller */
static void _omap_dsi_set_irqs_vc(struct platform_device *dsidev, int vc)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	_omap_dsi_configure_irqs(dsidev, dsi->isr_tables.isr_table_vc[vc],
			ARRAY_SIZE(dsi->isr_tables.isr_table_vc[vc]),
			DSI_VC_IRQ_ERROR_MASK,
			DSI_VC_IRQENABLE(vc), DSI_VC_IRQSTATUS(vc));
}

/* dsi->irq_lock has to be locked by the caller */
static void _omap_dsi_set_irqs_cio(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	_omap_dsi_configure_irqs(dsidev, dsi->isr_tables.isr_table_cio,
			ARRAY_SIZE(dsi->isr_tables.isr_table_cio),
			DSI_CIO_IRQ_ERROR_MASK,
			DSI_COMPLEXIO_IRQ_ENABLE, DSI_COMPLEXIO_IRQ_STATUS);
}

static void _dsi_initialize_irq(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	unsigned long flags;
	int vc;

	spin_lock_irqsave(&dsi->irq_lock, flags);

	memset(&dsi->isr_tables, 0, sizeof(dsi->isr_tables));

	_omap_dsi_set_irqs(dsidev);
	for (vc = 0; vc < 4; ++vc)
		_omap_dsi_set_irqs_vc(dsidev, vc);
	_omap_dsi_set_irqs_cio(dsidev);

	spin_unlock_irqrestore(&dsi->irq_lock, flags);
}

static int _dsi_register_isr(omap_dsi_isr_t isr, void *arg, u32 mask,
		struct dsi_isr_data *isr_array, unsigned isr_array_size)
{
	struct dsi_isr_data *isr_data;
	int free_idx;
	int i;

	BUG_ON(isr == NULL);

	/* check for duplicate entry and find a free slot */
	free_idx = -1;
	for (i = 0; i < isr_array_size; i++) {
		isr_data = &isr_array[i];

		if (isr_data->isr == isr && isr_data->arg == arg &&
				isr_data->mask == mask) {
			return -EINVAL;
		}

		if (isr_data->isr == NULL && free_idx == -1)
			free_idx = i;
	}

	if (free_idx == -1)
		return -EBUSY;

	isr_data = &isr_array[free_idx];
	isr_data->isr = isr;
	isr_data->arg = arg;
	isr_data->mask = mask;

	return 0;
}

static int _dsi_unregister_isr(omap_dsi_isr_t isr, void *arg, u32 mask,
		struct dsi_isr_data *isr_array, unsigned isr_array_size)
{
	struct dsi_isr_data *isr_data;
	int i;

	for (i = 0; i < isr_array_size; i++) {
		isr_data = &isr_array[i];
		if (isr_data->isr != isr || isr_data->arg != arg ||
				isr_data->mask != mask)
			continue;

		isr_data->isr = NULL;
		isr_data->arg = NULL;
		isr_data->mask = 0;

		return 0;
	}

	return -EINVAL;
}

static int dsi_register_isr(struct platform_device *dsidev, omap_dsi_isr_t isr,
		void *arg, u32 mask)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	unsigned long flags;
	int r;

	spin_lock_irqsave(&dsi->irq_lock, flags);

	r = _dsi_register_isr(isr, arg, mask, dsi->isr_tables.isr_table,
			ARRAY_SIZE(dsi->isr_tables.isr_table));

	if (r == 0)
		_omap_dsi_set_irqs(dsidev);

	spin_unlock_irqrestore(&dsi->irq_lock, flags);

	return r;
}

static int dsi_unregister_isr(struct platform_device *dsidev,
		omap_dsi_isr_t isr, void *arg, u32 mask)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	unsigned long flags;
	int r;

	spin_lock_irqsave(&dsi->irq_lock, flags);

	r = _dsi_unregister_isr(isr, arg, mask, dsi->isr_tables.isr_table,
			ARRAY_SIZE(dsi->isr_tables.isr_table));

	if (r == 0)
		_omap_dsi_set_irqs(dsidev);

	spin_unlock_irqrestore(&dsi->irq_lock, flags);

	return r;
}

static int dsi_register_isr_vc(struct platform_device *dsidev, int channel,
		omap_dsi_isr_t isr, void *arg, u32 mask)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	unsigned long flags;
	int r;

	spin_lock_irqsave(&dsi->irq_lock, flags);

	r = _dsi_register_isr(isr, arg, mask,
			dsi->isr_tables.isr_table_vc[channel],
			ARRAY_SIZE(dsi->isr_tables.isr_table_vc[channel]));

	if (r == 0)
		_omap_dsi_set_irqs_vc(dsidev, channel);

	spin_unlock_irqrestore(&dsi->irq_lock, flags);

	return r;
}

static int dsi_unregister_isr_vc(struct platform_device *dsidev, int channel,
		omap_dsi_isr_t isr, void *arg, u32 mask)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	unsigned long flags;
	int r;

	spin_lock_irqsave(&dsi->irq_lock, flags);

	r = _dsi_unregister_isr(isr, arg, mask,
			dsi->isr_tables.isr_table_vc[channel],
			ARRAY_SIZE(dsi->isr_tables.isr_table_vc[channel]));

	if (r == 0)
		_omap_dsi_set_irqs_vc(dsidev, channel);

	spin_unlock_irqrestore(&dsi->irq_lock, flags);

	return r;
}

static int dsi_register_isr_cio(struct platform_device *dsidev,
		omap_dsi_isr_t isr, void *arg, u32 mask)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	unsigned long flags;
	int r;

	spin_lock_irqsave(&dsi->irq_lock, flags);

	r = _dsi_register_isr(isr, arg, mask, dsi->isr_tables.isr_table_cio,
			ARRAY_SIZE(dsi->isr_tables.isr_table_cio));

	if (r == 0)
		_omap_dsi_set_irqs_cio(dsidev);

	spin_unlock_irqrestore(&dsi->irq_lock, flags);

	return r;
}

static int dsi_unregister_isr_cio(struct platform_device *dsidev,
		omap_dsi_isr_t isr, void *arg, u32 mask)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	unsigned long flags;
	int r;

	spin_lock_irqsave(&dsi->irq_lock, flags);

	r = _dsi_unregister_isr(isr, arg, mask, dsi->isr_tables.isr_table_cio,
			ARRAY_SIZE(dsi->isr_tables.isr_table_cio));

	if (r == 0)
		_omap_dsi_set_irqs_cio(dsidev);

	spin_unlock_irqrestore(&dsi->irq_lock, flags);

	return r;
}

static u32 dsi_get_errors(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	unsigned long flags;
	u32 e;
	spin_lock_irqsave(&dsi->errors_lock, flags);
	e = dsi->errors;
	dsi->errors = 0;
	spin_unlock_irqrestore(&dsi->errors_lock, flags);
	return e;
}

static int dsi_runtime_get(struct platform_device *dsidev)
{
	int r;
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	DSSDBG("dsi_runtime_get\n");

	r = pm_runtime_get_sync(&dsi->pdev->dev);
	WARN_ON(r < 0);
	return r < 0 ? r : 0;
}

static void dsi_runtime_put(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	int r;

	DSSDBG("dsi_runtime_put\n");

	r = pm_runtime_put_sync(&dsi->pdev->dev);
	WARN_ON(r < 0 && r != -ENOSYS);
}

static int dsi_regulator_init(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	struct regulator *vdds_dsi;

	if (dsi->vdds_dsi_reg != NULL)
		return 0;

	vdds_dsi = devm_regulator_get(&dsi->pdev->dev, "vdd");

	if (IS_ERR(vdds_dsi)) {
		if (PTR_ERR(vdds_dsi) != -EPROBE_DEFER)
			DSSERR("can't get DSI VDD regulator\n");
		return PTR_ERR(vdds_dsi);
	}

	dsi->vdds_dsi_reg = vdds_dsi;

	return 0;
}

static void _dsi_print_reset_status(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	u32 l;
	int b0, b1, b2;

	/* A dummy read using the SCP interface to any DSIPHY register is
	 * required after DSIPHY reset to complete the reset of the DSI complex
	 * I/O. */
	l = dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);

	if (dsi->data->quirks & DSI_QUIRK_REVERSE_TXCLKESC) {
		b0 = 28;
		b1 = 27;
		b2 = 26;
	} else {
		b0 = 24;
		b1 = 25;
		b2 = 26;
	}

#define DSI_FLD_GET(fld, start, end)\
	FLD_GET(dsi_read_reg(dsidev, DSI_##fld), start, end)

	pr_debug("DSI resets: PLL (%d) CIO (%d) PHY (%x%x%x, %d, %d, %d)\n",
		DSI_FLD_GET(PLL_STATUS, 0, 0),
		DSI_FLD_GET(COMPLEXIO_CFG1, 29, 29),
		DSI_FLD_GET(DSIPHY_CFG5, b0, b0),
		DSI_FLD_GET(DSIPHY_CFG5, b1, b1),
		DSI_FLD_GET(DSIPHY_CFG5, b2, b2),
		DSI_FLD_GET(DSIPHY_CFG5, 29, 29),
		DSI_FLD_GET(DSIPHY_CFG5, 30, 30),
		DSI_FLD_GET(DSIPHY_CFG5, 31, 31));

#undef DSI_FLD_GET
}

static inline int dsi_if_enable(struct platform_device *dsidev, bool enable)
{
	DSSDBG("dsi_if_enable(%d)\n", enable);

	enable = enable ? 1 : 0;
	REG_FLD_MOD(dsidev, DSI_CTRL, enable, 0, 0); /* IF_EN */

	if (wait_for_bit_change(dsidev, DSI_CTRL, 0, enable) != enable) {
			DSSERR("Failed to set dsi_if_enable to %d\n", enable);
			return -EIO;
	}

	return 0;
}

static unsigned long dsi_get_pll_hsdiv_dispc_rate(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	return dsi->pll.cinfo.clkout[HSDIV_DISPC];
}

static unsigned long dsi_get_pll_hsdiv_dsi_rate(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	return dsi->pll.cinfo.clkout[HSDIV_DSI];
}

static unsigned long dsi_get_txbyteclkhs(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	return dsi->pll.cinfo.clkdco / 16;
}

static unsigned long dsi_fclk_rate(struct platform_device *dsidev)
{
	unsigned long r;
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	if (dss_get_dsi_clk_source(dsi->module_id) == DSS_CLK_SRC_FCK) {
		/* DSI FCLK source is DSS_CLK_FCK */
		r = clk_get_rate(dsi->dss_clk);
	} else {
		/* DSI FCLK source is dsi_pll_hsdiv_dsi_clk */
		r = dsi_get_pll_hsdiv_dsi_rate(dsidev);
	}

	return r;
}

static int dsi_lp_clock_calc(unsigned long dsi_fclk,
		unsigned long lp_clk_min, unsigned long lp_clk_max,
		struct dsi_lp_clock_info *lp_cinfo)
{
	unsigned lp_clk_div;
	unsigned long lp_clk;

	lp_clk_div = DIV_ROUND_UP(dsi_fclk, lp_clk_max * 2);
	lp_clk = dsi_fclk / 2 / lp_clk_div;

	if (lp_clk < lp_clk_min || lp_clk > lp_clk_max)
		return -EINVAL;

	lp_cinfo->lp_clk_div = lp_clk_div;
	lp_cinfo->lp_clk = lp_clk;

	return 0;
}

static int dsi_set_lp_clk_divisor(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	unsigned long dsi_fclk;
	unsigned lp_clk_div;
	unsigned long lp_clk;
	unsigned lpdiv_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_LPDIV);


	lp_clk_div = dsi->user_lp_cinfo.lp_clk_div;

	if (lp_clk_div == 0 || lp_clk_div > lpdiv_max)
		return -EINVAL;

	dsi_fclk = dsi_fclk_rate(dsidev);

	lp_clk = dsi_fclk / 2 / lp_clk_div;

	DSSDBG("LP_CLK_DIV %u, LP_CLK %lu\n", lp_clk_div, lp_clk);
	dsi->current_lp_cinfo.lp_clk = lp_clk;
	dsi->current_lp_cinfo.lp_clk_div = lp_clk_div;

	/* LP_CLK_DIVISOR */
	REG_FLD_MOD(dsidev, DSI_CLK_CTRL, lp_clk_div, 12, 0);

	/* LP_RX_SYNCHRO_ENABLE */
	REG_FLD_MOD(dsidev, DSI_CLK_CTRL, dsi_fclk > 30000000 ? 1 : 0, 21, 21);

	return 0;
}

static void dsi_enable_scp_clk(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	if (dsi->scp_clk_refcount++ == 0)
		REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 1, 14, 14); /* CIO_CLK_ICG */
}

static void dsi_disable_scp_clk(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	WARN_ON(dsi->scp_clk_refcount == 0);
	if (--dsi->scp_clk_refcount == 0)
		REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 0, 14, 14); /* CIO_CLK_ICG */
}

enum dsi_pll_power_state {
	DSI_PLL_POWER_OFF	= 0x0,
	DSI_PLL_POWER_ON_HSCLK	= 0x1,
	DSI_PLL_POWER_ON_ALL	= 0x2,
	DSI_PLL_POWER_ON_DIV	= 0x3,
};
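/*
 * dsi_pll_power() writes one of these values to the PLL_PWR_CMD field of
 * DSI_CLK_CTRL (bits 31:30) and then polls PLL_PWR_STATUS (bits 29:28)
 * until the hardware reports the requested state.
 */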

static int dsi_pll_power(struct platform_device *dsidev,
		enum dsi_pll_power_state state)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	int t = 0;

	/* DSI-PLL power command 0x3 is not working */
	if ((dsi->data->quirks & DSI_QUIRK_PLL_PWR_BUG) &&
	    state == DSI_PLL_POWER_ON_DIV)
		state = DSI_PLL_POWER_ON_ALL;

	/* PLL_PWR_CMD */
	REG_FLD_MOD(dsidev, DSI_CLK_CTRL, state, 31, 30);

	/* PLL_PWR_STATUS */
	while (FLD_GET(dsi_read_reg(dsidev, DSI_CLK_CTRL), 29, 28) != state) {
		if (++t > 1000) {
			DSSERR("Failed to set DSI PLL power mode to %d\n",
					state);
			return -ENODEV;
		}
		udelay(1);
	}

	return 0;
}


static void dsi_pll_calc_dsi_fck(struct dss_pll_clock_info *cinfo)
{
	unsigned long max_dsi_fck;

	max_dsi_fck = dss_feat_get_param_max(FEAT_PARAM_DSI_FCK);

	cinfo->mX[HSDIV_DSI] = DIV_ROUND_UP(cinfo->clkdco, max_dsi_fck);
	cinfo->clkout[HSDIV_DSI] = cinfo->clkdco / cinfo->mX[HSDIV_DSI];
}

static int dsi_pll_enable(struct dss_pll *pll)
{
	struct dsi_data *dsi = container_of(pll, struct dsi_data, pll);
	struct platform_device *dsidev = dsi->pdev;
	int r = 0;

	DSSDBG("PLL init\n");

	r = dsi_regulator_init(dsidev);
	if (r)
		return r;

	r = dsi_runtime_get(dsidev);
	if (r)
		return r;

	/*
	 * Note: SCP CLK is not required on OMAP3, but it is required on OMAP4.
	 */
	dsi_enable_scp_clk(dsidev);

	if (!dsi->vdds_dsi_enabled) {
		r = regulator_enable(dsi->vdds_dsi_reg);
		if (r)
			goto err0;
		dsi->vdds_dsi_enabled = true;
	}

	/* XXX PLL does not come out of reset without this... */
	dispc_pck_free_enable(1);

	if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 0, 1) != 1) {
		DSSERR("PLL not coming out of reset.\n");
		r = -ENODEV;
		dispc_pck_free_enable(0);
		goto err1;
	}

	/* XXX ... but if left on, we get problems when planes do not
	 * fill the whole display. No idea about this */
	dispc_pck_free_enable(0);

	r = dsi_pll_power(dsidev, DSI_PLL_POWER_ON_ALL);

	if (r)
		goto err1;

	DSSDBG("PLL init done\n");

	return 0;
err1:
	if (dsi->vdds_dsi_enabled) {
		regulator_disable(dsi->vdds_dsi_reg);
		dsi->vdds_dsi_enabled = false;
	}
err0:
	dsi_disable_scp_clk(dsidev);
	dsi_runtime_put(dsidev);
	return r;
}

static void dsi_pll_uninit(struct platform_device *dsidev, bool disconnect_lanes)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	dsi_pll_power(dsidev, DSI_PLL_POWER_OFF);
	if (disconnect_lanes) {
		WARN_ON(!dsi->vdds_dsi_enabled);
		regulator_disable(dsi->vdds_dsi_reg);
		dsi->vdds_dsi_enabled = false;
	}

	dsi_disable_scp_clk(dsidev);
	dsi_runtime_put(dsidev);

	DSSDBG("PLL uninit done\n");
}

static void dsi_pll_disable(struct dss_pll *pll)
{
	struct dsi_data *dsi = container_of(pll, struct dsi_data, pll);
	struct platform_device *dsidev = dsi->pdev;

	dsi_pll_uninit(dsidev, true);
}

static void dsi_dump_dsidev_clocks(struct platform_device *dsidev,
		struct seq_file *s)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	struct dss_pll_clock_info *cinfo = &dsi->pll.cinfo;
	enum dss_clk_source dispc_clk_src, dsi_clk_src;
	int dsi_module = dsi->module_id;
	struct dss_pll *pll = &dsi->pll;

	dispc_clk_src = dss_get_dispc_clk_source();
	dsi_clk_src = dss_get_dsi_clk_source(dsi_module);

	if (dsi_runtime_get(dsidev))
		return;

	seq_printf(s,	"- DSI%d PLL -\n", dsi_module + 1);

	seq_printf(s,	"dsi pll clkin\t%lu\n", clk_get_rate(pll->clkin));

	seq_printf(s,	"Fint\t\t%-16lun %u\n", cinfo->fint, cinfo->n);

	seq_printf(s,	"CLKIN4DDR\t%-16lum %u\n",
			cinfo->clkdco, cinfo->m);

	seq_printf(s,	"DSI_PLL_HSDIV_DISPC (%s)\t%-16lum_dispc %u\t(%s)\n",
			dss_get_clk_source_name(dsi_module == 0 ?
				DSS_CLK_SRC_PLL1_1 :
				DSS_CLK_SRC_PLL2_1),
			cinfo->clkout[HSDIV_DISPC],
			cinfo->mX[HSDIV_DISPC],
			dispc_clk_src == DSS_CLK_SRC_FCK ?
			"off" : "on");

	seq_printf(s,	"DSI_PLL_HSDIV_DSI (%s)\t%-16lum_dsi %u\t(%s)\n",
			dss_get_clk_source_name(dsi_module == 0 ?
				DSS_CLK_SRC_PLL1_2 :
				DSS_CLK_SRC_PLL2_2),
			cinfo->clkout[HSDIV_DSI],
			cinfo->mX[HSDIV_DSI],
			dsi_clk_src == DSS_CLK_SRC_FCK ?
			"off" : "on");

	seq_printf(s,	"- DSI%d -\n", dsi_module + 1);

	seq_printf(s,	"dsi fclk source = %s\n",
			dss_get_clk_source_name(dsi_clk_src));

	seq_printf(s,	"DSI_FCLK\t%lu\n", dsi_fclk_rate(dsidev));

	seq_printf(s,	"DDR_CLK\t\t%lu\n",
			cinfo->clkdco / 4);

	seq_printf(s,	"TxByteClkHS\t%lu\n", dsi_get_txbyteclkhs(dsidev));

	seq_printf(s,	"LP_CLK\t\t%lu\n", dsi->current_lp_cinfo.lp_clk);

	dsi_runtime_put(dsidev);
}

void dsi_dump_clocks(struct seq_file *s)
{
	struct platform_device *dsidev;
	int i;

	for  (i = 0; i < MAX_NUM_DSI; i++) {
		dsidev = dsi_get_dsidev_from_id(i);
		if (dsidev)
			dsi_dump_dsidev_clocks(dsidev, s);
	}
}

#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
static void dsi_dump_dsidev_irqs(struct platform_device *dsidev,
		struct seq_file *s)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	unsigned long flags;
	struct dsi_irq_stats stats;

	spin_lock_irqsave(&dsi->irq_stats_lock, flags);

	stats = dsi->irq_stats;
	memset(&dsi->irq_stats, 0, sizeof(dsi->irq_stats));
	dsi->irq_stats.last_reset = jiffies;

	spin_unlock_irqrestore(&dsi->irq_stats_lock, flags);

	seq_printf(s, "period %u ms\n",
			jiffies_to_msecs(jiffies - stats.last_reset));

	seq_printf(s, "irqs %d\n", stats.irq_count);
#define PIS(x) \
	seq_printf(s, "%-20s %10d\n", #x, stats.dsi_irqs[ffs(DSI_IRQ_##x)-1]);

	seq_printf(s, "-- DSI%d interrupts --\n", dsi->module_id + 1);
	PIS(VC0);
	PIS(VC1);
	PIS(VC2);
	PIS(VC3);
	PIS(WAKEUP);
	PIS(RESYNC);
	PIS(PLL_LOCK);
	PIS(PLL_UNLOCK);
	PIS(PLL_RECALL);
	PIS(COMPLEXIO_ERR);
	PIS(HS_TX_TIMEOUT);
	PIS(LP_RX_TIMEOUT);
	PIS(TE_TRIGGER);
	PIS(ACK_TRIGGER);
	PIS(SYNC_LOST);
	PIS(LDO_POWER_GOOD);
	PIS(TA_TIMEOUT);
#undef PIS

#define PIS(x) \
	seq_printf(s, "%-20s %10d %10d %10d %10d\n", #x, \
			stats.vc_irqs[0][ffs(DSI_VC_IRQ_##x)-1], \
			stats.vc_irqs[1][ffs(DSI_VC_IRQ_##x)-1], \
			stats.vc_irqs[2][ffs(DSI_VC_IRQ_##x)-1], \
			stats.vc_irqs[3][ffs(DSI_VC_IRQ_##x)-1]);

	seq_printf(s, "-- VC interrupts --\n");
	PIS(CS);
	PIS(ECC_CORR);
	PIS(PACKET_SENT);
	PIS(FIFO_TX_OVF);
	PIS(FIFO_RX_OVF);
	PIS(BTA);
	PIS(ECC_NO_CORR);
	PIS(FIFO_TX_UDF);
	PIS(PP_BUSY_CHANGE);
#undef PIS

#define PIS(x) \
	seq_printf(s, "%-20s %10d\n", #x, \
			stats.cio_irqs[ffs(DSI_CIO_IRQ_##x)-1]);

	seq_printf(s, "-- CIO interrupts --\n");
	PIS(ERRSYNCESC1);
	PIS(ERRSYNCESC2);
	PIS(ERRSYNCESC3);
	PIS(ERRESC1);
	PIS(ERRESC2);
	PIS(ERRESC3);
	PIS(ERRCONTROL1);
	PIS(ERRCONTROL2);
	PIS(ERRCONTROL3);
	PIS(STATEULPS1);
	PIS(STATEULPS2);
	PIS(STATEULPS3);
	PIS(ERRCONTENTIONLP0_1);
	PIS(ERRCONTENTIONLP1_1);
	PIS(ERRCONTENTIONLP0_2);
	PIS(ERRCONTENTIONLP1_2);
	PIS(ERRCONTENTIONLP0_3);
	PIS(ERRCONTENTIONLP1_3);
	PIS(ULPSACTIVENOT_ALL0);
	PIS(ULPSACTIVENOT_ALL1);
#undef PIS
}

static void dsi1_dump_irqs(struct seq_file *s)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_id(0);

	dsi_dump_dsidev_irqs(dsidev, s);
}

static void dsi2_dump_irqs(struct seq_file *s)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_id(1);

	dsi_dump_dsidev_irqs(dsidev, s);
}
#endif

static void dsi_dump_dsidev_regs(struct platform_device *dsidev,
		struct seq_file *s)
{
#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dsi_read_reg(dsidev, r))

	if (dsi_runtime_get(dsidev))
		return;
	dsi_enable_scp_clk(dsidev);

	DUMPREG(DSI_REVISION);
	DUMPREG(DSI_SYSCONFIG);
	DUMPREG(DSI_SYSSTATUS);
	DUMPREG(DSI_IRQSTATUS);
	DUMPREG(DSI_IRQENABLE);
	DUMPREG(DSI_CTRL);
	DUMPREG(DSI_COMPLEXIO_CFG1);
	DUMPREG(DSI_COMPLEXIO_IRQ_STATUS);
	DUMPREG(DSI_COMPLEXIO_IRQ_ENABLE);
	DUMPREG(DSI_CLK_CTRL);
	DUMPREG(DSI_TIMING1);
	DUMPREG(DSI_TIMING2);
	DUMPREG(DSI_VM_TIMING1);
	DUMPREG(DSI_VM_TIMING2);
	DUMPREG(DSI_VM_TIMING3);
	DUMPREG(DSI_CLK_TIMING);
	DUMPREG(DSI_TX_FIFO_VC_SIZE);
	DUMPREG(DSI_RX_FIFO_VC_SIZE);
	DUMPREG(DSI_COMPLEXIO_CFG2);
	DUMPREG(DSI_RX_FIFO_VC_FULLNESS);
	DUMPREG(DSI_VM_TIMING4);
	DUMPREG(DSI_TX_FIFO_VC_EMPTINESS);
	DUMPREG(DSI_VM_TIMING5);
	DUMPREG(DSI_VM_TIMING6);
	DUMPREG(DSI_VM_TIMING7);
	DUMPREG(DSI_STOPCLK_TIMING);

	DUMPREG(DSI_VC_CTRL(0));
	DUMPREG(DSI_VC_TE(0));
	DUMPREG(DSI_VC_LONG_PACKET_HEADER(0));
	DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(0));
	DUMPREG(DSI_VC_SHORT_PACKET_HEADER(0));
	DUMPREG(DSI_VC_IRQSTATUS(0));
	DUMPREG(DSI_VC_IRQENABLE(0));

	DUMPREG(DSI_VC_CTRL(1));
	DUMPREG(DSI_VC_TE(1));
	DUMPREG(DSI_VC_LONG_PACKET_HEADER(1));
	DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(1));
	DUMPREG(DSI_VC_SHORT_PACKET_HEADER(1));
	DUMPREG(DSI_VC_IRQSTATUS(1));
	DUMPREG(DSI_VC_IRQENABLE(1));

	DUMPREG(DSI_VC_CTRL(2));
	DUMPREG(DSI_VC_TE(2));
	DUMPREG(DSI_VC_LONG_PACKET_HEADER(2));
	DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(2));
	DUMPREG(DSI_VC_SHORT_PACKET_HEADER(2));
	DUMPREG(DSI_VC_IRQSTATUS(2));
	DUMPREG(DSI_VC_IRQENABLE(2));

	DUMPREG(DSI_VC_CTRL(3));
	DUMPREG(DSI_VC_TE(3));
	DUMPREG(DSI_VC_LONG_PACKET_HEADER(3));
	DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(3));
	DUMPREG(DSI_VC_SHORT_PACKET_HEADER(3));
	DUMPREG(DSI_VC_IRQSTATUS(3));
	DUMPREG(DSI_VC_IRQENABLE(3));

	DUMPREG(DSI_DSIPHY_CFG0);
	DUMPREG(DSI_DSIPHY_CFG1);
	DUMPREG(DSI_DSIPHY_CFG2);
	DUMPREG(DSI_DSIPHY_CFG5);

	DUMPREG(DSI_PLL_CONTROL);
	DUMPREG(DSI_PLL_STATUS);
	DUMPREG(DSI_PLL_GO);
	DUMPREG(DSI_PLL_CONFIGURATION1);
	DUMPREG(DSI_PLL_CONFIGURATION2);

	dsi_disable_scp_clk(dsidev);
	dsi_runtime_put(dsidev);
#undef DUMPREG
}

static void dsi1_dump_regs(struct seq_file *s)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_id(0);

	dsi_dump_dsidev_regs(dsidev, s);
}

static void dsi2_dump_regs(struct seq_file *s)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_id(1);

	dsi_dump_dsidev_regs(dsidev, s);
}

enum dsi_cio_power_state {
	DSI_COMPLEXIO_POWER_OFF		= 0x0,
	DSI_COMPLEXIO_POWER_ON		= 0x1,
	DSI_COMPLEXIO_POWER_ULPS	= 0x2,
};
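/*
 * Analogous to the PLL power states: dsi_cio_power() writes the requested
 * state to the PWR_CMD field of DSI_COMPLEXIO_CFG1 (bits 28:27) and polls
 * PWR_STATUS (bits 26:25) until it is reached.
 */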

static int dsi_cio_power(struct platform_device *dsidev,
		enum dsi_cio_power_state state)
{
	int t = 0;

	/* PWR_CMD */
	REG_FLD_MOD(dsidev, DSI_COMPLEXIO_CFG1, state, 28, 27);

	/* PWR_STATUS */
	while (FLD_GET(dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG1),
			26, 25) != state) {
		if (++t > 1000) {
			DSSERR("failed to set complexio power state to "
					"%d\n", state);
			return -ENODEV;
		}
		udelay(1);
	}

	return 0;
}

static unsigned dsi_get_line_buf_size(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	int val;

	/* line buffer on OMAP3 is 1024 x 24bits */
	/* XXX: for some reason using full buffer size causes
	 * considerable TX slowdown with update sizes that fill the
	 * whole buffer */
	if (!(dsi->data->quirks & DSI_QUIRK_GNQ))
		return 1023 * 3;

	val = REG_GET(dsidev, DSI_GNQ, 14, 12); /* VP1_LINE_BUFFER_SIZE */

	switch (val) {
	case 1:
		return 512 * 3;		/* 512x24 bits */
	case 2:
		return 682 * 3;		/* 682x24 bits */
	case 3:
		return 853 * 3;		/* 853x24 bits */
	case 4:
		return 1024 * 3;	/* 1024x24 bits */
	case 5:
		return 1194 * 3;	/* 1194x24 bits */
	case 6:
		return 1365 * 3;	/* 1365x24 bits */
1828 1829
	case 7:
		return 1920 * 3;	/* 1920x24 bits */
1830 1831
	default:
		BUG();
1832
		return 0;
1833 1834 1835
	}
}

1836
static int dsi_set_lane_config(struct platform_device *dsidev)
T
Tomi Valkeinen 已提交
1837
{
1838 1839 1840 1841 1842 1843 1844 1845 1846
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	static const u8 offsets[] = { 0, 4, 8, 12, 16 };
	static const enum dsi_lane_function functions[] = {
		DSI_LANE_CLK,
		DSI_LANE_DATA1,
		DSI_LANE_DATA2,
		DSI_LANE_DATA3,
		DSI_LANE_DATA4,
	};
T
Tomi Valkeinen 已提交
1847
	u32 r;
1848
	int i;
T
Tomi Valkeinen 已提交
1849

1850
	r = dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG1);
1851 1852 1853 1854 1855 1856 1857 1858 1859 1860 1861 1862 1863 1864 1865 1866 1867 1868

	for (i = 0; i < dsi->num_lanes_used; ++i) {
		unsigned offset = offsets[i];
		unsigned polarity, lane_number;
		unsigned t;

		for (t = 0; t < dsi->num_lanes_supported; ++t)
			if (dsi->lanes[t].function == functions[i])
				break;

		if (t == dsi->num_lanes_supported)
			return -EINVAL;

		lane_number = t;
		polarity = dsi->lanes[t].polarity;

		r = FLD_MOD(r, lane_number + 1, offset + 2, offset);
		r = FLD_MOD(r, polarity, offset + 3, offset + 3);
1869 1870
	}

1871 1872 1873 1874 1875 1876
	/* clear the unused lanes */
	for (; i < dsi->num_lanes_supported; ++i) {
		unsigned offset = offsets[i];

		r = FLD_MOD(r, 0, offset + 2, offset);
		r = FLD_MOD(r, 0, offset + 3, offset + 3);
1877
	}
T
Tomi Valkeinen 已提交
1878

1879
	dsi_write_reg(dsidev, DSI_COMPLEXIO_CFG1, r);
T
Tomi Valkeinen 已提交
1880

1881
	return 0;
T
Tomi Valkeinen 已提交
1882 1883
}

1884
static inline unsigned ns2ddr(struct platform_device *dsidev, unsigned ns)
T
Tomi Valkeinen 已提交
1885
{
1886 1887
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

T
Tomi Valkeinen 已提交
1888
	/* convert time in ns to ddr ticks, rounding up */
1889
	unsigned long ddr_clk = dsi->pll.cinfo.clkdco / 4;
T
Tomi Valkeinen 已提交
1890 1891 1892
	return (ns * (ddr_clk / 1000 / 1000) + 999) / 1000;
}

1893
static inline unsigned ddr2ns(struct platform_device *dsidev, unsigned ddr)
T
Tomi Valkeinen 已提交
1894
{
1895 1896
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

1897
	unsigned long ddr_clk = dsi->pll.cinfo.clkdco / 4;
T
Tomi Valkeinen 已提交
1898 1899 1900
	return ddr * 1000 * 1000 / (ddr_clk / 1000);
}

1901
static void dsi_cio_timings(struct platform_device *dsidev)
T
Tomi Valkeinen 已提交
1902
{
1903
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
T
Tomi Valkeinen 已提交
1904 1905 1906 1907 1908 1909 1910 1911 1912 1913
	u32 r;
	u32 ths_prepare, ths_prepare_ths_zero, ths_trail, ths_exit;
	u32 tlpx_half, tclk_trail, tclk_zero;
	u32 tclk_prepare;

	/* calculate timings */

	/* 1 * DDR_CLK = 2 * UI */

	/* min 40ns + 4*UI	max 85ns + 6*UI */
1914
	ths_prepare = ns2ddr(dsidev, 70) + 2;
T
Tomi Valkeinen 已提交
1915 1916

	/* min 145ns + 10*UI */
1917
	ths_prepare_ths_zero = ns2ddr(dsidev, 175) + 2;
T
Tomi Valkeinen 已提交
1918 1919

	/* min max(8*UI, 60ns+4*UI) */
1920
	ths_trail = ns2ddr(dsidev, 60) + 5;
T
Tomi Valkeinen 已提交
1921 1922

	/* min 100ns */
1923
	ths_exit = ns2ddr(dsidev, 145);
T
Tomi Valkeinen 已提交
1924 1925

	/* tlpx min 50n */
1926
	tlpx_half = ns2ddr(dsidev, 25);
T
Tomi Valkeinen 已提交
1927 1928

	/* min 60ns */
1929
	tclk_trail = ns2ddr(dsidev, 60) + 2;
T
Tomi Valkeinen 已提交
1930 1931

	/* min 38ns, max 95ns */
1932
	tclk_prepare = ns2ddr(dsidev, 65);
T
Tomi Valkeinen 已提交
1933 1934

	/* min tclk-prepare + tclk-zero = 300ns */
1935
	tclk_zero = ns2ddr(dsidev, 260);
T
Tomi Valkeinen 已提交
1936 1937

	DSSDBG("ths_prepare %u (%uns), ths_prepare_ths_zero %u (%uns)\n",
1938 1939
		ths_prepare, ddr2ns(dsidev, ths_prepare),
		ths_prepare_ths_zero, ddr2ns(dsidev, ths_prepare_ths_zero));
T
Tomi Valkeinen 已提交
1940
	DSSDBG("ths_trail %u (%uns), ths_exit %u (%uns)\n",
1941 1942
			ths_trail, ddr2ns(dsidev, ths_trail),
			ths_exit, ddr2ns(dsidev, ths_exit));
T
Tomi Valkeinen 已提交
1943 1944 1945

	DSSDBG("tlpx_half %u (%uns), tclk_trail %u (%uns), "
			"tclk_zero %u (%uns)\n",
1946 1947 1948
			tlpx_half, ddr2ns(dsidev, tlpx_half),
			tclk_trail, ddr2ns(dsidev, tclk_trail),
			tclk_zero, ddr2ns(dsidev, tclk_zero));
T
Tomi Valkeinen 已提交
1949
	DSSDBG("tclk_prepare %u (%uns)\n",
1950
			tclk_prepare, ddr2ns(dsidev, tclk_prepare));
T
Tomi Valkeinen 已提交
1951 1952 1953

	/* program timings */

1954
	r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG0);
T
Tomi Valkeinen 已提交
1955 1956 1957 1958
	r = FLD_MOD(r, ths_prepare, 31, 24);
	r = FLD_MOD(r, ths_prepare_ths_zero, 23, 16);
	r = FLD_MOD(r, ths_trail, 15, 8);
	r = FLD_MOD(r, ths_exit, 7, 0);
1959
	dsi_write_reg(dsidev, DSI_DSIPHY_CFG0, r);
T
Tomi Valkeinen 已提交
1960

1961
	r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG1);
1962
	r = FLD_MOD(r, tlpx_half, 20, 16);
T
Tomi Valkeinen 已提交
1963 1964
	r = FLD_MOD(r, tclk_trail, 15, 8);
	r = FLD_MOD(r, tclk_zero, 7, 0);
1965

1966
	if (dsi->data->quirks & DSI_QUIRK_PHY_DCC) {
1967 1968 1969 1970 1971
		r = FLD_MOD(r, 0, 21, 21);	/* DCCEN = disable */
		r = FLD_MOD(r, 1, 22, 22);	/* CLKINP_DIVBY2EN = enable */
		r = FLD_MOD(r, 1, 23, 23);	/* CLKINP_SEL = enable */
	}

1972
	dsi_write_reg(dsidev, DSI_DSIPHY_CFG1, r);
T
Tomi Valkeinen 已提交
1973

1974
	r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG2);
T
Tomi Valkeinen 已提交
1975
	r = FLD_MOD(r, tclk_prepare, 7, 0);
1976
	dsi_write_reg(dsidev, DSI_DSIPHY_CFG2, r);
T
Tomi Valkeinen 已提交
1977 1978
}

1979
/* lane masks have lane 0 at lsb. mask_p for positive lines, n for negative */
1980
static void dsi_cio_enable_lane_override(struct platform_device *dsidev,
1981
		unsigned mask_p, unsigned mask_n)
1982
{
1983
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1984 1985
	int i;
	u32 l;
1986
	u8 lptxscp_start = dsi->num_lanes_supported == 3 ? 22 : 26;
1987

1988 1989 1990 1991 1992 1993 1994 1995 1996 1997 1998 1999
	l = 0;

	for (i = 0; i < dsi->num_lanes_supported; ++i) {
		unsigned p = dsi->lanes[i].polarity;

		if (mask_p & (1 << i))
			l |= 1 << (i * 2 + (p ? 0 : 1));

		if (mask_n & (1 << i))
			l |= 1 << (i * 2 + (p ? 1 : 0));
	}

2000 2001 2002 2003 2004
	/*
	 * Bits in REGLPTXSCPDAT4TO0DXDY:
	 * 17: DY0 18: DX0
	 * 19: DY1 20: DX1
	 * 21: DY2 22: DX2
2005 2006
	 * 23: DY3 24: DX3
	 * 25: DY4 26: DX4
2007 2008 2009
	 */

	/* Set the lane override configuration */
2010 2011

	/* REGLPTXSCPDAT4TO0DXDY */
2012
	REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, l, lptxscp_start, 17);
2013 2014

	/* Enable lane override */
2015 2016 2017

	/* ENLPTXSCPDAT */
	REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, 1, 27, 27);
2018 2019
}

2020
static void dsi_cio_disable_lane_override(struct platform_device *dsidev)
2021 2022
{
	/* Disable lane override */
2023
	REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, 0, 27, 27); /* ENLPTXSCPDAT */
2024
	/* Reset the lane override configuration */
2025 2026
	/* REGLPTXSCPDAT4TO0DXDY */
	REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, 0, 22, 17);
2027
}
T
Tomi Valkeinen 已提交
2028

2029
static int dsi_cio_wait_tx_clk_esc_reset(struct platform_device *dsidev)
2030
{
2031 2032 2033 2034 2035 2036 2037
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	int t, i;
	bool in_use[DSI_MAX_NR_LANES];
	static const u8 offsets_old[] = { 28, 27, 26 };
	static const u8 offsets_new[] = { 24, 25, 26, 27, 28 };
	const u8 *offsets;

2038
	if (dsi->data->quirks & DSI_QUIRK_REVERSE_TXCLKESC)
2039 2040 2041
		offsets = offsets_old;
	else
		offsets = offsets_new;
2042

2043 2044
	for (i = 0; i < dsi->num_lanes_supported; ++i)
		in_use[i] = dsi->lanes[i].function != DSI_LANE_UNUSED;
2045 2046 2047 2048 2049 2050

	t = 100000;
	while (true) {
		u32 l;
		int ok;

2051
		l = dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);
2052 2053

		ok = 0;
2054 2055
		for (i = 0; i < dsi->num_lanes_supported; ++i) {
			if (!in_use[i] || (l & (1 << offsets[i])))
2056 2057 2058
				ok++;
		}

2059
		if (ok == dsi->num_lanes_supported)
2060 2061 2062
			break;

		if (--t == 0) {
2063 2064
			for (i = 0; i < dsi->num_lanes_supported; ++i) {
				if (!in_use[i] || (l & (1 << offsets[i])))
2065 2066 2067 2068 2069 2070 2071 2072 2073 2074 2075 2076
					continue;

				DSSERR("CIO TXCLKESC%d domain not coming " \
						"out of reset\n", i);
			}
			return -EIO;
		}
	}

	return 0;
}

2077
/* return bitmask of enabled lanes, lane0 being the lsb */
2078
static unsigned dsi_get_lane_mask(struct platform_device *dsidev)
2079
{
2080 2081 2082
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	unsigned mask = 0;
	int i;
2083

2084 2085 2086 2087
	for (i = 0; i < dsi->num_lanes_supported; ++i) {
		if (dsi->lanes[i].function != DSI_LANE_UNUSED)
			mask |= 1 << i;
	}
2088

2089
	return mask;
2090 2091
}

2092 2093 2094 2095 2096 2097 2098 2099 2100 2101 2102 2103 2104 2105 2106 2107 2108 2109 2110 2111 2112 2113 2114 2115 2116 2117 2118 2119 2120 2121 2122 2123 2124 2125 2126 2127 2128 2129 2130 2131 2132 2133 2134 2135 2136 2137 2138 2139 2140 2141 2142 2143 2144 2145 2146 2147 2148 2149
/* OMAP4 CONTROL_DSIPHY */
#define OMAP4_DSIPHY_SYSCON_OFFSET			0x78

#define OMAP4_DSI2_LANEENABLE_SHIFT			29
#define OMAP4_DSI2_LANEENABLE_MASK			(0x7 << 29)
#define OMAP4_DSI1_LANEENABLE_SHIFT			24
#define OMAP4_DSI1_LANEENABLE_MASK			(0x1f << 24)
#define OMAP4_DSI1_PIPD_SHIFT				19
#define OMAP4_DSI1_PIPD_MASK				(0x1f << 19)
#define OMAP4_DSI2_PIPD_SHIFT				14
#define OMAP4_DSI2_PIPD_MASK				(0x1f << 14)

static int dsi_omap4_mux_pads(struct dsi_data *dsi, unsigned int lanes)
{
	u32 enable_mask, enable_shift;
	u32 pipd_mask, pipd_shift;
	u32 reg;

	if (!dsi->syscon)
		return 0;

	if (dsi->module_id == 0) {
		enable_mask = OMAP4_DSI1_LANEENABLE_MASK;
		enable_shift = OMAP4_DSI1_LANEENABLE_SHIFT;
		pipd_mask = OMAP4_DSI1_PIPD_MASK;
		pipd_shift = OMAP4_DSI1_PIPD_SHIFT;
	} else if (dsi->module_id == 1) {
		enable_mask = OMAP4_DSI2_LANEENABLE_MASK;
		enable_shift = OMAP4_DSI2_LANEENABLE_SHIFT;
		pipd_mask = OMAP4_DSI2_PIPD_MASK;
		pipd_shift = OMAP4_DSI2_PIPD_SHIFT;
	} else {
		return -ENODEV;
	}

	regmap_read(dsi->syscon, OMAP4_DSIPHY_SYSCON_OFFSET, &reg);

	reg &= ~enable_mask;
	reg &= ~pipd_mask;

	reg |= (lanes << enable_shift) & enable_mask;
	reg |= (lanes << pipd_shift) & pipd_mask;

	regmap_write(dsi->syscon, OMAP4_DSIPHY_SYSCON_OFFSET, reg);

	return 0;
}

static int dsi_enable_pads(struct dsi_data *dsi, unsigned int lane_mask)
{
	return dsi_omap4_mux_pads(dsi, lane_mask);
}

static void dsi_disable_pads(struct dsi_data *dsi)
{
	dsi_omap4_mux_pads(dsi, 0);
}

2150
static int dsi_cio_init(struct platform_device *dsidev)
T
Tomi Valkeinen 已提交
2151
{
2152
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2153
	int r;
2154
	u32 l;
T
Tomi Valkeinen 已提交
2155

2156
	DSSDBG("DSI CIO init starts");
T
Tomi Valkeinen 已提交
2157

2158
	r = dsi_enable_pads(dsi, dsi_get_lane_mask(dsidev));
2159 2160
	if (r)
		return r;
2161

2162
	dsi_enable_scp_clk(dsidev);
2163

T
Tomi Valkeinen 已提交
2164 2165 2166
	/* A dummy read using the SCP interface to any DSIPHY register is
	 * required after DSIPHY reset to complete the reset of the DSI complex
	 * I/O. */
2167
	dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);
T
Tomi Valkeinen 已提交
2168

2169
	if (wait_for_bit_change(dsidev, DSI_DSIPHY_CFG5, 30, 1) != 1) {
2170 2171 2172
		DSSERR("CIO SCP Clock domain not coming out of reset.\n");
		r = -EIO;
		goto err_scp_clk_dom;
T
Tomi Valkeinen 已提交
2173 2174
	}

2175
	r = dsi_set_lane_config(dsidev);
2176 2177
	if (r)
		goto err_scp_clk_dom;
T
Tomi Valkeinen 已提交
2178

2179
	/* set TX STOP MODE timer to maximum for this operation */
2180
	l = dsi_read_reg(dsidev, DSI_TIMING1);
2181 2182 2183 2184
	l = FLD_MOD(l, 1, 15, 15);	/* FORCE_TX_STOP_MODE_IO */
	l = FLD_MOD(l, 1, 14, 14);	/* STOP_STATE_X16_IO */
	l = FLD_MOD(l, 1, 13, 13);	/* STOP_STATE_X4_IO */
	l = FLD_MOD(l, 0x1fff, 12, 0);	/* STOP_STATE_COUNTER_IO */
2185
	dsi_write_reg(dsidev, DSI_TIMING1, l);
2186

2187
	if (dsi->ulps_enabled) {
2188 2189
		unsigned mask_p;
		int i;
2190

2191 2192
		DSSDBG("manual ulps exit\n");

2193 2194 2195 2196 2197
		/* ULPS is exited by Mark-1 state for 1ms, followed by
		 * stop state. DSS HW cannot do this via the normal
		 * ULPS exit sequence, as after reset the DSS HW thinks
		 * that we are not in ULPS mode, and refuses to send the
		 * sequence. So we need to send the ULPS exit sequence
2198 2199
		 * manually by setting positive lines high and negative lines
		 * low for 1ms.
2200 2201
		 */

2202
		mask_p = 0;
2203

2204 2205 2206 2207 2208
		for (i = 0; i < dsi->num_lanes_supported; ++i) {
			if (dsi->lanes[i].function == DSI_LANE_UNUSED)
				continue;
			mask_p |= 1 << i;
		}
2209

2210
		dsi_cio_enable_lane_override(dsidev, mask_p, 0);
2211
	}
T
Tomi Valkeinen 已提交
2212

2213
	r = dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_ON);
T
Tomi Valkeinen 已提交
2214
	if (r)
2215 2216
		goto err_cio_pwr;

2217
	if (wait_for_bit_change(dsidev, DSI_COMPLEXIO_CFG1, 29, 1) != 1) {
2218 2219 2220 2221 2222
		DSSERR("CIO PWR clock domain not coming out of reset.\n");
		r = -ENODEV;
		goto err_cio_pwr_dom;
	}

2223 2224 2225
	dsi_if_enable(dsidev, true);
	dsi_if_enable(dsidev, false);
	REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 1, 20, 20); /* LP_CLK_ENABLE */
T
Tomi Valkeinen 已提交
2226

2227
	r = dsi_cio_wait_tx_clk_esc_reset(dsidev);
2228 2229 2230
	if (r)
		goto err_tx_clk_esc_rst;

2231
	if (dsi->ulps_enabled) {
2232 2233 2234 2235 2236 2237 2238
		/* Keep Mark-1 state for 1ms (as per DSI spec) */
		ktime_t wait = ns_to_ktime(1000 * 1000);
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_hrtimeout(&wait, HRTIMER_MODE_REL);

		/* Disable the override. The lanes should be set to Mark-11
		 * state by the HW */
2239
		dsi_cio_disable_lane_override(dsidev);
2240 2241 2242
	}

	/* FORCE_TX_STOP_MODE_IO */
2243
	REG_FLD_MOD(dsidev, DSI_TIMING1, 0, 15, 15);
2244

2245
	dsi_cio_timings(dsidev);
T
Tomi Valkeinen 已提交
2246

2247
	if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
2248 2249
		/* DDR_CLK_ALWAYS_ON */
		REG_FLD_MOD(dsidev, DSI_CLK_CTRL,
2250
			dsi->vm_timings.ddr_clk_always_on, 13, 13);
2251 2252
	}

2253
	dsi->ulps_enabled = false;
T
Tomi Valkeinen 已提交
2254 2255

	DSSDBG("CIO init done\n");
2256 2257 2258

	return 0;

2259
err_tx_clk_esc_rst:
2260
	REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 0, 20, 20); /* LP_CLK_ENABLE */
2261
err_cio_pwr_dom:
2262
	dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_OFF);
2263
err_cio_pwr:
2264
	if (dsi->ulps_enabled)
2265
		dsi_cio_disable_lane_override(dsidev);
2266
err_scp_clk_dom:
2267
	dsi_disable_scp_clk(dsidev);
2268
	dsi_disable_pads(dsi);
T
Tomi Valkeinen 已提交
2269 2270 2271
	return r;
}

2272
static void dsi_cio_uninit(struct platform_device *dsidev)
T
Tomi Valkeinen 已提交
2273
{
2274
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2275

2276 2277 2278
	/* DDR_CLK_ALWAYS_ON */
	REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 0, 13, 13);

2279 2280
	dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_OFF);
	dsi_disable_scp_clk(dsidev);
2281
	dsi_disable_pads(dsi);
T
Tomi Valkeinen 已提交
2282 2283
}

2284 2285
static void dsi_config_tx_fifo(struct platform_device *dsidev,
		enum fifo_size size1, enum fifo_size size2,
T
Tomi Valkeinen 已提交
2286 2287
		enum fifo_size size3, enum fifo_size size4)
{
2288
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
T
Tomi Valkeinen 已提交
2289 2290 2291 2292
	u32 r = 0;
	int add = 0;
	int i;

T
Tomi Valkeinen 已提交
2293 2294 2295 2296
	dsi->vc[0].tx_fifo_size = size1;
	dsi->vc[1].tx_fifo_size = size2;
	dsi->vc[2].tx_fifo_size = size3;
	dsi->vc[3].tx_fifo_size = size4;
T
Tomi Valkeinen 已提交
2297 2298 2299

	for (i = 0; i < 4; i++) {
		u8 v;
T
Tomi Valkeinen 已提交
2300
		int size = dsi->vc[i].tx_fifo_size;
T
Tomi Valkeinen 已提交
2301 2302 2303 2304

		if (add + size > 4) {
			DSSERR("Illegal FIFO configuration\n");
			BUG();
2305
			return;
T
Tomi Valkeinen 已提交
2306 2307 2308 2309 2310 2311 2312 2313
		}

		v = FLD_VAL(add, 2, 0) | FLD_VAL(size, 7, 4);
		r |= v << (8 * i);
		/*DSSDBG("TX FIFO vc %d: size %d, add %d\n", i, size, add); */
		add += size;
	}

2314
	dsi_write_reg(dsidev, DSI_TX_FIFO_VC_SIZE, r);
T
Tomi Valkeinen 已提交
2315 2316
}

2317 2318
static void dsi_config_rx_fifo(struct platform_device *dsidev,
		enum fifo_size size1, enum fifo_size size2,
T
Tomi Valkeinen 已提交
2319 2320
		enum fifo_size size3, enum fifo_size size4)
{
2321
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
T
Tomi Valkeinen 已提交
2322 2323 2324 2325
	u32 r = 0;
	int add = 0;
	int i;

T
Tomi Valkeinen 已提交
2326 2327 2328 2329
	dsi->vc[0].rx_fifo_size = size1;
	dsi->vc[1].rx_fifo_size = size2;
	dsi->vc[2].rx_fifo_size = size3;
	dsi->vc[3].rx_fifo_size = size4;
T
Tomi Valkeinen 已提交
2330 2331 2332

	for (i = 0; i < 4; i++) {
		u8 v;
T
Tomi Valkeinen 已提交
2333
		int size = dsi->vc[i].rx_fifo_size;
T
Tomi Valkeinen 已提交
2334 2335 2336 2337

		if (add + size > 4) {
			DSSERR("Illegal FIFO configuration\n");
			BUG();
2338
			return;
T
Tomi Valkeinen 已提交
2339 2340 2341 2342 2343 2344 2345 2346
		}

		v = FLD_VAL(add, 2, 0) | FLD_VAL(size, 7, 4);
		r |= v << (8 * i);
		/*DSSDBG("RX FIFO vc %d: size %d, add %d\n", i, size, add); */
		add += size;
	}

2347
	dsi_write_reg(dsidev, DSI_RX_FIFO_VC_SIZE, r);
T
Tomi Valkeinen 已提交
2348 2349
}

2350
static int dsi_force_tx_stop_mode_io(struct platform_device *dsidev)
T
Tomi Valkeinen 已提交
2351 2352 2353
{
	u32 r;

2354
	r = dsi_read_reg(dsidev, DSI_TIMING1);
T
Tomi Valkeinen 已提交
2355
	r = FLD_MOD(r, 1, 15, 15);	/* FORCE_TX_STOP_MODE_IO */
2356
	dsi_write_reg(dsidev, DSI_TIMING1, r);
T
Tomi Valkeinen 已提交
2357

2358
	if (wait_for_bit_change(dsidev, DSI_TIMING1, 15, 0) != 0) {
T
Tomi Valkeinen 已提交
2359 2360 2361 2362 2363 2364 2365
		DSSERR("TX_STOP bit not going down\n");
		return -EIO;
	}

	return 0;
}

2366
static bool dsi_vc_is_enabled(struct platform_device *dsidev, int channel)
2367
{
2368
	return REG_GET(dsidev, DSI_VC_CTRL(channel), 0, 0);
2369 2370 2371 2372
}

static void dsi_packet_sent_handler_vp(void *data, u32 mask)
{
2373 2374 2375
	struct dsi_packet_sent_handler_data *vp_data =
		(struct dsi_packet_sent_handler_data *) data;
	struct dsi_data *dsi = dsi_get_dsidrv_data(vp_data->dsidev);
2376 2377
	const int channel = dsi->update_channel;
	u8 bit = dsi->te_enabled ? 30 : 31;
2378

2379 2380
	if (REG_GET(vp_data->dsidev, DSI_VC_TE(channel), bit, bit) == 0)
		complete(vp_data->completion);
2381 2382
}

2383
static int dsi_sync_vc_vp(struct platform_device *dsidev, int channel)
2384
{
2385
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2386
	DECLARE_COMPLETION_ONSTACK(completion);
2387 2388 2389 2390
	struct dsi_packet_sent_handler_data vp_data = {
		.dsidev = dsidev,
		.completion = &completion
	};
2391 2392 2393
	int r = 0;
	u8 bit;

2394
	bit = dsi->te_enabled ? 30 : 31;
2395

2396
	r = dsi_register_isr_vc(dsidev, channel, dsi_packet_sent_handler_vp,
2397
		&vp_data, DSI_VC_IRQ_PACKET_SENT);
2398 2399 2400 2401
	if (r)
		goto err0;

	/* Wait for completion only if TE_EN/TE_START is still set */
2402
	if (REG_GET(dsidev, DSI_VC_TE(channel), bit, bit)) {
2403 2404 2405 2406 2407 2408 2409 2410
		if (wait_for_completion_timeout(&completion,
				msecs_to_jiffies(10)) == 0) {
			DSSERR("Failed to complete previous frame transfer\n");
			r = -EIO;
			goto err1;
		}
	}

2411
	dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_vp,
2412
		&vp_data, DSI_VC_IRQ_PACKET_SENT);
2413 2414 2415

	return 0;
err1:
2416
	dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_vp,
2417
		&vp_data, DSI_VC_IRQ_PACKET_SENT);
2418 2419 2420 2421 2422 2423
err0:
	return r;
}

static void dsi_packet_sent_handler_l4(void *data, u32 mask)
{
2424 2425 2426
	struct dsi_packet_sent_handler_data *l4_data =
		(struct dsi_packet_sent_handler_data *) data;
	struct dsi_data *dsi = dsi_get_dsidrv_data(l4_data->dsidev);
2427
	const int channel = dsi->update_channel;
2428

2429 2430
	if (REG_GET(l4_data->dsidev, DSI_VC_CTRL(channel), 5, 5) == 0)
		complete(l4_data->completion);
2431 2432
}

2433
static int dsi_sync_vc_l4(struct platform_device *dsidev, int channel)
2434 2435
{
	DECLARE_COMPLETION_ONSTACK(completion);
2436 2437 2438 2439
	struct dsi_packet_sent_handler_data l4_data = {
		.dsidev = dsidev,
		.completion = &completion
	};
2440
	int r = 0;
2441

2442
	r = dsi_register_isr_vc(dsidev, channel, dsi_packet_sent_handler_l4,
2443
		&l4_data, DSI_VC_IRQ_PACKET_SENT);
2444 2445 2446 2447
	if (r)
		goto err0;

	/* Wait for completion only if TX_FIFO_NOT_EMPTY is still set */
2448
	if (REG_GET(dsidev, DSI_VC_CTRL(channel), 5, 5)) {
2449 2450 2451 2452 2453 2454 2455 2456
		if (wait_for_completion_timeout(&completion,
				msecs_to_jiffies(10)) == 0) {
			DSSERR("Failed to complete previous l4 transfer\n");
			r = -EIO;
			goto err1;
		}
	}

2457
	dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_l4,
2458
		&l4_data, DSI_VC_IRQ_PACKET_SENT);
2459 2460 2461

	return 0;
err1:
2462
	dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_l4,
2463
		&l4_data, DSI_VC_IRQ_PACKET_SENT);
2464 2465 2466 2467
err0:
	return r;
}

2468
static int dsi_sync_vc(struct platform_device *dsidev, int channel)
2469
{
2470 2471
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

2472
	WARN_ON(!dsi_bus_is_locked(dsidev));
2473 2474 2475

	WARN_ON(in_interrupt());

2476
	if (!dsi_vc_is_enabled(dsidev, channel))
2477 2478
		return 0;

2479 2480
	switch (dsi->vc[channel].source) {
	case DSI_VC_SOURCE_VP:
2481
		return dsi_sync_vc_vp(dsidev, channel);
2482
	case DSI_VC_SOURCE_L4:
2483
		return dsi_sync_vc_l4(dsidev, channel);
2484 2485
	default:
		BUG();
2486
		return -EINVAL;
2487 2488 2489
	}
}

2490 2491
static int dsi_vc_enable(struct platform_device *dsidev, int channel,
		bool enable)
T
Tomi Valkeinen 已提交
2492
{
2493 2494
	DSSDBG("dsi_vc_enable channel %d, enable %d\n",
			channel, enable);
T
Tomi Valkeinen 已提交
2495 2496 2497

	enable = enable ? 1 : 0;

2498
	REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), enable, 0, 0);
T
Tomi Valkeinen 已提交
2499

2500 2501
	if (wait_for_bit_change(dsidev, DSI_VC_CTRL(channel),
		0, enable) != enable) {
T
Tomi Valkeinen 已提交
2502 2503 2504 2505 2506 2507 2508
			DSSERR("Failed to set dsi_vc_enable to %d\n", enable);
			return -EIO;
	}

	return 0;
}

2509
static void dsi_vc_initial_config(struct platform_device *dsidev, int channel)
T
Tomi Valkeinen 已提交
2510
{
2511
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
T
Tomi Valkeinen 已提交
2512 2513
	u32 r;

2514
	DSSDBG("Initial config of virtual channel %d", channel);
T
Tomi Valkeinen 已提交
2515

2516
	r = dsi_read_reg(dsidev, DSI_VC_CTRL(channel));
T
Tomi Valkeinen 已提交
2517 2518 2519 2520 2521 2522 2523 2524 2525 2526 2527 2528

	if (FLD_GET(r, 15, 15)) /* VC_BUSY */
		DSSERR("VC(%d) busy when trying to configure it!\n",
				channel);

	r = FLD_MOD(r, 0, 1, 1); /* SOURCE, 0 = L4 */
	r = FLD_MOD(r, 0, 2, 2); /* BTA_SHORT_EN  */
	r = FLD_MOD(r, 0, 3, 3); /* BTA_LONG_EN */
	r = FLD_MOD(r, 0, 4, 4); /* MODE, 0 = command */
	r = FLD_MOD(r, 1, 7, 7); /* CS_TX_EN */
	r = FLD_MOD(r, 1, 8, 8); /* ECC_TX_EN */
	r = FLD_MOD(r, 0, 9, 9); /* MODE_SPEED, high speed on/off */
2529
	if (dsi->data->quirks & DSI_QUIRK_VC_OCP_WIDTH)
2530
		r = FLD_MOD(r, 3, 11, 10);	/* OCP_WIDTH = 32 bit */
T
Tomi Valkeinen 已提交
2531 2532 2533 2534

	r = FLD_MOD(r, 4, 29, 27); /* DMA_RX_REQ_NB = no dma */
	r = FLD_MOD(r, 4, 23, 21); /* DMA_TX_REQ_NB = no dma */

2535
	dsi_write_reg(dsidev, DSI_VC_CTRL(channel), r);
2536 2537

	dsi->vc[channel].source = DSI_VC_SOURCE_L4;
T
Tomi Valkeinen 已提交
2538 2539
}

2540 2541
static int dsi_vc_config_source(struct platform_device *dsidev, int channel,
		enum dsi_vc_source source)
T
Tomi Valkeinen 已提交
2542
{
2543 2544
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

2545
	if (dsi->vc[channel].source == source)
2546
		return 0;
T
Tomi Valkeinen 已提交
2547

2548
	DSSDBG("Source config of virtual channel %d", channel);
T
Tomi Valkeinen 已提交
2549

2550
	dsi_sync_vc(dsidev, channel);
2551

2552
	dsi_vc_enable(dsidev, channel, 0);
T
Tomi Valkeinen 已提交
2553

2554
	/* VC_BUSY */
2555
	if (wait_for_bit_change(dsidev, DSI_VC_CTRL(channel), 15, 0) != 0) {
T
Tomi Valkeinen 已提交
2556
		DSSERR("vc(%d) busy when trying to config for VP\n", channel);
2557 2558
		return -EIO;
	}
T
Tomi Valkeinen 已提交
2559

2560 2561
	/* SOURCE, 0 = L4, 1 = video port */
	REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), source, 1, 1);
T
Tomi Valkeinen 已提交
2562

2563
	/* DCS_CMD_ENABLE */
2564
	if (dsi->data->quirks & DSI_QUIRK_DCS_CMD_CONFIG_VC) {
2565 2566 2567
		bool enable = source == DSI_VC_SOURCE_VP;
		REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), enable, 30, 30);
	}
2568

2569
	dsi_vc_enable(dsidev, channel, 1);
T
Tomi Valkeinen 已提交
2570

2571
	dsi->vc[channel].source = source;
2572 2573

	return 0;
T
Tomi Valkeinen 已提交
2574 2575
}

2576
static void dsi_vc_enable_hs(struct omap_dss_device *dssdev, int channel,
2577
		bool enable)
T
Tomi Valkeinen 已提交
2578
{
2579
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2580
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2581

T
Tomi Valkeinen 已提交
2582 2583
	DSSDBG("dsi_vc_enable_hs(%d, %d)\n", channel, enable);

2584
	WARN_ON(!dsi_bus_is_locked(dsidev));
2585

2586 2587
	dsi_vc_enable(dsidev, channel, 0);
	dsi_if_enable(dsidev, 0);
T
Tomi Valkeinen 已提交
2588

2589
	REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), enable, 9, 9);
T
Tomi Valkeinen 已提交
2590

2591 2592
	dsi_vc_enable(dsidev, channel, 1);
	dsi_if_enable(dsidev, 1);
T
Tomi Valkeinen 已提交
2593

2594
	dsi_force_tx_stop_mode_io(dsidev);
2595 2596

	/* start the DDR clock by sending a NULL packet */
2597
	if (dsi->vm_timings.ddr_clk_always_on && enable)
2598
		dsi_vc_send_null(dssdev, channel);
T
Tomi Valkeinen 已提交
2599 2600
}

2601
static void dsi_vc_flush_long_data(struct platform_device *dsidev, int channel)
T
Tomi Valkeinen 已提交
2602
{
2603
	while (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) {
T
Tomi Valkeinen 已提交
2604
		u32 val;
2605
		val = dsi_read_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel));
T
Tomi Valkeinen 已提交
2606 2607 2608 2609 2610 2611 2612 2613 2614 2615 2616 2617 2618 2619 2620 2621 2622 2623 2624 2625 2626 2627 2628 2629 2630 2631 2632 2633 2634 2635 2636 2637 2638 2639 2640 2641 2642 2643 2644 2645 2646 2647 2648 2649 2650
		DSSDBG("\t\tb1 %#02x b2 %#02x b3 %#02x b4 %#02x\n",
				(val >> 0) & 0xff,
				(val >> 8) & 0xff,
				(val >> 16) & 0xff,
				(val >> 24) & 0xff);
	}
}

static void dsi_show_rx_ack_with_err(u16 err)
{
	DSSERR("\tACK with ERROR (%#x):\n", err);
	if (err & (1 << 0))
		DSSERR("\t\tSoT Error\n");
	if (err & (1 << 1))
		DSSERR("\t\tSoT Sync Error\n");
	if (err & (1 << 2))
		DSSERR("\t\tEoT Sync Error\n");
	if (err & (1 << 3))
		DSSERR("\t\tEscape Mode Entry Command Error\n");
	if (err & (1 << 4))
		DSSERR("\t\tLP Transmit Sync Error\n");
	if (err & (1 << 5))
		DSSERR("\t\tHS Receive Timeout Error\n");
	if (err & (1 << 6))
		DSSERR("\t\tFalse Control Error\n");
	if (err & (1 << 7))
		DSSERR("\t\t(reserved7)\n");
	if (err & (1 << 8))
		DSSERR("\t\tECC Error, single-bit (corrected)\n");
	if (err & (1 << 9))
		DSSERR("\t\tECC Error, multi-bit (not corrected)\n");
	if (err & (1 << 10))
		DSSERR("\t\tChecksum Error\n");
	if (err & (1 << 11))
		DSSERR("\t\tData type not recognized\n");
	if (err & (1 << 12))
		DSSERR("\t\tInvalid VC ID\n");
	if (err & (1 << 13))
		DSSERR("\t\tInvalid Transmission Length\n");
	if (err & (1 << 14))
		DSSERR("\t\t(reserved14)\n");
	if (err & (1 << 15))
		DSSERR("\t\tDSI Protocol Violation\n");
}

2651 2652
static u16 dsi_vc_flush_receive_data(struct platform_device *dsidev,
		int channel)
T
Tomi Valkeinen 已提交
2653 2654
{
	/* RX_FIFO_NOT_EMPTY */
2655
	while (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) {
T
Tomi Valkeinen 已提交
2656 2657
		u32 val;
		u8 dt;
2658
		val = dsi_read_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel));
2659
		DSSERR("\trawval %#08x\n", val);
T
Tomi Valkeinen 已提交
2660
		dt = FLD_GET(val, 5, 0);
2661
		if (dt == MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT) {
T
Tomi Valkeinen 已提交
2662 2663
			u16 err = FLD_GET(val, 23, 8);
			dsi_show_rx_ack_with_err(err);
2664
		} else if (dt == MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE) {
2665
			DSSERR("\tDCS short response, 1 byte: %#x\n",
T
Tomi Valkeinen 已提交
2666
					FLD_GET(val, 23, 8));
2667
		} else if (dt == MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE) {
2668
			DSSERR("\tDCS short response, 2 byte: %#x\n",
T
Tomi Valkeinen 已提交
2669
					FLD_GET(val, 23, 8));
2670
		} else if (dt == MIPI_DSI_RX_DCS_LONG_READ_RESPONSE) {
2671
			DSSERR("\tDCS long response, len %d\n",
T
Tomi Valkeinen 已提交
2672
					FLD_GET(val, 23, 8));
2673
			dsi_vc_flush_long_data(dsidev, channel);
T
Tomi Valkeinen 已提交
2674 2675 2676 2677 2678 2679 2680
		} else {
			DSSERR("\tunknown datatype 0x%02x\n", dt);
		}
	}
	return 0;
}

2681
static int dsi_vc_send_bta(struct platform_device *dsidev, int channel)
T
Tomi Valkeinen 已提交
2682
{
2683 2684 2685
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	if (dsi->debug_write || dsi->debug_read)
T
Tomi Valkeinen 已提交
2686 2687
		DSSDBG("dsi_vc_send_bta %d\n", channel);

2688
	WARN_ON(!dsi_bus_is_locked(dsidev));
T
Tomi Valkeinen 已提交
2689

2690 2691
	/* RX_FIFO_NOT_EMPTY */
	if (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) {
T
Tomi Valkeinen 已提交
2692
		DSSERR("rx fifo not empty when sending BTA, dumping data:\n");
2693
		dsi_vc_flush_receive_data(dsidev, channel);
T
Tomi Valkeinen 已提交
2694 2695
	}

2696
	REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 1, 6, 6); /* BTA_EN */
T
Tomi Valkeinen 已提交
2697

2698 2699 2700
	/* flush posted write */
	dsi_read_reg(dsidev, DSI_VC_CTRL(channel));

T
Tomi Valkeinen 已提交
2701 2702 2703
	return 0;
}

2704
static int dsi_vc_send_bta_sync(struct omap_dss_device *dssdev, int channel)
T
Tomi Valkeinen 已提交
2705
{
2706
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2707
	DECLARE_COMPLETION_ONSTACK(completion);
T
Tomi Valkeinen 已提交
2708 2709 2710
	int r = 0;
	u32 err;

2711
	r = dsi_register_isr_vc(dsidev, channel, dsi_completion_handler,
2712 2713 2714
			&completion, DSI_VC_IRQ_BTA);
	if (r)
		goto err0;
T
Tomi Valkeinen 已提交
2715

2716
	r = dsi_register_isr(dsidev, dsi_completion_handler, &completion,
2717
			DSI_IRQ_ERROR_MASK);
T
Tomi Valkeinen 已提交
2718
	if (r)
2719
		goto err1;
T
Tomi Valkeinen 已提交
2720

2721
	r = dsi_vc_send_bta(dsidev, channel);
2722 2723 2724
	if (r)
		goto err2;

2725
	if (wait_for_completion_timeout(&completion,
T
Tomi Valkeinen 已提交
2726 2727 2728
				msecs_to_jiffies(500)) == 0) {
		DSSERR("Failed to receive BTA\n");
		r = -EIO;
2729
		goto err2;
T
Tomi Valkeinen 已提交
2730 2731
	}

2732
	err = dsi_get_errors(dsidev);
T
Tomi Valkeinen 已提交
2733 2734 2735
	if (err) {
		DSSERR("Error while sending BTA: %x\n", err);
		r = -EIO;
2736
		goto err2;
T
Tomi Valkeinen 已提交
2737
	}
2738
err2:
2739
	dsi_unregister_isr(dsidev, dsi_completion_handler, &completion,
2740
			DSI_IRQ_ERROR_MASK);
2741
err1:
2742
	dsi_unregister_isr_vc(dsidev, channel, dsi_completion_handler,
2743 2744
			&completion, DSI_VC_IRQ_BTA);
err0:
T
Tomi Valkeinen 已提交
2745 2746 2747
	return r;
}

2748 2749
static inline void dsi_vc_write_long_header(struct platform_device *dsidev,
		int channel, u8 data_type, u16 len, u8 ecc)
T
Tomi Valkeinen 已提交
2750
{
2751
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
T
Tomi Valkeinen 已提交
2752 2753 2754
	u32 val;
	u8 data_id;

2755
	WARN_ON(!dsi_bus_is_locked(dsidev));
T
Tomi Valkeinen 已提交
2756

2757
	data_id = data_type | dsi->vc[channel].vc_id << 6;
T
Tomi Valkeinen 已提交
2758 2759 2760 2761

	val = FLD_VAL(data_id, 7, 0) | FLD_VAL(len, 23, 8) |
		FLD_VAL(ecc, 31, 24);

2762
	dsi_write_reg(dsidev, DSI_VC_LONG_PACKET_HEADER(channel), val);
T
Tomi Valkeinen 已提交
2763 2764
}

2765 2766
static inline void dsi_vc_write_long_payload(struct platform_device *dsidev,
		int channel, u8 b1, u8 b2, u8 b3, u8 b4)
T
Tomi Valkeinen 已提交
2767 2768 2769 2770 2771 2772 2773 2774
{
	u32 val;

	val = b4 << 24 | b3 << 16 | b2 << 8  | b1 << 0;

/*	DSSDBG("\twriting %02x, %02x, %02x, %02x (%#010x)\n",
			b1, b2, b3, b4, val); */

2775
	dsi_write_reg(dsidev, DSI_VC_LONG_PACKET_PAYLOAD(channel), val);
T
Tomi Valkeinen 已提交
2776 2777
}

2778 2779
static int dsi_vc_send_long(struct platform_device *dsidev, int channel,
		u8 data_type, u8 *data, u16 len, u8 ecc)
T
Tomi Valkeinen 已提交
2780 2781
{
	/*u32 val; */
2782
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
T
Tomi Valkeinen 已提交
2783 2784 2785 2786 2787
	int i;
	u8 *p;
	int r = 0;
	u8 b1, b2, b3, b4;

2788
	if (dsi->debug_write)
T
Tomi Valkeinen 已提交
2789 2790 2791
		DSSDBG("dsi_vc_send_long, %d bytes\n", len);

	/* len + header */
T
Tomi Valkeinen 已提交
2792
	if (dsi->vc[channel].tx_fifo_size * 32 * 4 < len + 4) {
T
Tomi Valkeinen 已提交
2793 2794 2795 2796
		DSSERR("unable to send long packet: packet too long.\n");
		return -EINVAL;
	}

2797
	dsi_vc_config_source(dsidev, channel, DSI_VC_SOURCE_L4);
T
Tomi Valkeinen 已提交
2798

2799
	dsi_vc_write_long_header(dsidev, channel, data_type, len, ecc);
T
Tomi Valkeinen 已提交
2800 2801 2802

	p = data;
	for (i = 0; i < len >> 2; i++) {
2803
		if (dsi->debug_write)
T
Tomi Valkeinen 已提交
2804 2805 2806 2807 2808 2809 2810
			DSSDBG("\tsending full packet %d\n", i);

		b1 = *p++;
		b2 = *p++;
		b3 = *p++;
		b4 = *p++;

2811
		dsi_vc_write_long_payload(dsidev, channel, b1, b2, b3, b4);
T
Tomi Valkeinen 已提交
2812 2813 2814 2815 2816 2817
	}

	i = len % 4;
	if (i) {
		b1 = 0; b2 = 0; b3 = 0;

2818
		if (dsi->debug_write)
T
Tomi Valkeinen 已提交
2819 2820 2821 2822 2823 2824 2825 2826 2827 2828 2829 2830 2831 2832 2833 2834 2835
			DSSDBG("\tsending remainder bytes %d\n", i);

		switch (i) {
		case 3:
			b1 = *p++;
			b2 = *p++;
			b3 = *p++;
			break;
		case 2:
			b1 = *p++;
			b2 = *p++;
			break;
		case 1:
			b1 = *p++;
			break;
		}

2836
		dsi_vc_write_long_payload(dsidev, channel, b1, b2, b3, 0);
T
Tomi Valkeinen 已提交
2837 2838 2839 2840 2841
	}

	return r;
}

2842 2843
static int dsi_vc_send_short(struct platform_device *dsidev, int channel,
		u8 data_type, u16 data, u8 ecc)
T
Tomi Valkeinen 已提交
2844
{
2845
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
T
Tomi Valkeinen 已提交
2846 2847 2848
	u32 r;
	u8 data_id;

2849
	WARN_ON(!dsi_bus_is_locked(dsidev));
T
Tomi Valkeinen 已提交
2850

2851
	if (dsi->debug_write)
T
Tomi Valkeinen 已提交
2852 2853 2854 2855
		DSSDBG("dsi_vc_send_short(ch%d, dt %#x, b1 %#x, b2 %#x)\n",
				channel,
				data_type, data & 0xff, (data >> 8) & 0xff);

2856
	dsi_vc_config_source(dsidev, channel, DSI_VC_SOURCE_L4);
T
Tomi Valkeinen 已提交
2857

2858
	if (FLD_GET(dsi_read_reg(dsidev, DSI_VC_CTRL(channel)), 16, 16)) {
T
Tomi Valkeinen 已提交
2859 2860 2861 2862
		DSSERR("ERROR FIFO FULL, aborting transfer\n");
		return -EINVAL;
	}

2863
	data_id = data_type | dsi->vc[channel].vc_id << 6;
T
Tomi Valkeinen 已提交
2864 2865 2866

	r = (data_id << 0) | (data << 8) | (ecc << 24);

2867
	dsi_write_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel), r);
T
Tomi Valkeinen 已提交
2868 2869 2870 2871

	return 0;
}

2872
static int dsi_vc_send_null(struct omap_dss_device *dssdev, int channel)
T
Tomi Valkeinen 已提交
2873
{
2874 2875
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);

2876 2877
	return dsi_vc_send_long(dsidev, channel, MIPI_DSI_NULL_PACKET, NULL,
		0, 0);
T
Tomi Valkeinen 已提交
2878 2879
}

2880
static int dsi_vc_write_nosync_common(struct platform_device *dsidev,
2881
		int channel, u8 *data, int len, enum dss_dsi_content_type type)
T
Tomi Valkeinen 已提交
2882 2883 2884
{
	int r;

2885 2886
	if (len == 0) {
		BUG_ON(type == DSS_DSI_CONTENT_DCS);
2887
		r = dsi_vc_send_short(dsidev, channel,
2888 2889 2890 2891 2892
				MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM, 0, 0);
	} else if (len == 1) {
		r = dsi_vc_send_short(dsidev, channel,
				type == DSS_DSI_CONTENT_GENERIC ?
				MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM :
2893
				MIPI_DSI_DCS_SHORT_WRITE, data[0], 0);
T
Tomi Valkeinen 已提交
2894
	} else if (len == 2) {
2895
		r = dsi_vc_send_short(dsidev, channel,
2896 2897
				type == DSS_DSI_CONTENT_GENERIC ?
				MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM :
2898
				MIPI_DSI_DCS_SHORT_WRITE_PARAM,
T
Tomi Valkeinen 已提交
2899 2900
				data[0] | (data[1] << 8), 0);
	} else {
2901 2902 2903 2904
		r = dsi_vc_send_long(dsidev, channel,
				type == DSS_DSI_CONTENT_GENERIC ?
				MIPI_DSI_GENERIC_LONG_WRITE :
				MIPI_DSI_DCS_LONG_WRITE, data, len, 0);
T
Tomi Valkeinen 已提交
2905 2906 2907 2908
	}

	return r;
}
2909

2910
static int dsi_vc_dcs_write_nosync(struct omap_dss_device *dssdev, int channel,
2911 2912
		u8 *data, int len)
{
2913 2914 2915
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);

	return dsi_vc_write_nosync_common(dsidev, channel, data, len,
2916 2917
			DSS_DSI_CONTENT_DCS);
}
T
Tomi Valkeinen 已提交
2918

2919
static int dsi_vc_generic_write_nosync(struct omap_dss_device *dssdev, int channel,
2920 2921
		u8 *data, int len)
{
2922 2923 2924
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);

	return dsi_vc_write_nosync_common(dsidev, channel, data, len,
2925 2926 2927 2928 2929
			DSS_DSI_CONTENT_GENERIC);
}

static int dsi_vc_write_common(struct omap_dss_device *dssdev, int channel,
		u8 *data, int len, enum dss_dsi_content_type type)
T
Tomi Valkeinen 已提交
2930
{
2931
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
T
Tomi Valkeinen 已提交
2932 2933
	int r;

2934
	r = dsi_vc_write_nosync_common(dsidev, channel, data, len, type);
T
Tomi Valkeinen 已提交
2935
	if (r)
2936
		goto err;
T
Tomi Valkeinen 已提交
2937

2938
	r = dsi_vc_send_bta_sync(dssdev, channel);
2939 2940
	if (r)
		goto err;
T
Tomi Valkeinen 已提交
2941

2942 2943
	/* RX_FIFO_NOT_EMPTY */
	if (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) {
2944
		DSSERR("rx fifo not empty after write, dumping data:\n");
2945
		dsi_vc_flush_receive_data(dsidev, channel);
2946 2947 2948 2949
		r = -EIO;
		goto err;
	}

2950 2951
	return 0;
err:
2952
	DSSERR("dsi_vc_write_common(ch %d, cmd 0x%02x, len %d) failed\n",
2953
			channel, data[0], len);
T
Tomi Valkeinen 已提交
2954 2955
	return r;
}
2956

2957
static int dsi_vc_dcs_write(struct omap_dss_device *dssdev, int channel, u8 *data,
2958 2959 2960 2961 2962
		int len)
{
	return dsi_vc_write_common(dssdev, channel, data, len,
			DSS_DSI_CONTENT_DCS);
}
T
Tomi Valkeinen 已提交
2963

2964
static int dsi_vc_generic_write(struct omap_dss_device *dssdev, int channel, u8 *data,
2965 2966 2967 2968 2969 2970
		int len)
{
	return dsi_vc_write_common(dssdev, channel, data, len,
			DSS_DSI_CONTENT_GENERIC);
}

2971
static int dsi_vc_dcs_send_read_request(struct platform_device *dsidev,
2972
		int channel, u8 dcs_cmd)
T
Tomi Valkeinen 已提交
2973
{
2974
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
T
Tomi Valkeinen 已提交
2975 2976
	int r;

2977
	if (dsi->debug_read)
2978 2979
		DSSDBG("dsi_vc_dcs_send_read_request(ch%d, dcs_cmd %x)\n",
			channel, dcs_cmd);
T
Tomi Valkeinen 已提交
2980

2981
	r = dsi_vc_send_short(dsidev, channel, MIPI_DSI_DCS_READ, dcs_cmd, 0);
2982 2983 2984 2985 2986
	if (r) {
		DSSERR("dsi_vc_dcs_send_read_request(ch %d, cmd 0x%02x)"
			" failed\n", channel, dcs_cmd);
		return r;
	}
T
Tomi Valkeinen 已提交
2987

2988 2989 2990
	return 0;
}

2991
static int dsi_vc_generic_send_read_request(struct platform_device *dsidev,
2992 2993 2994 2995 2996 2997 2998 2999 3000 3001 3002 3003 3004 3005 3006 3007 3008 3009 3010 3011 3012 3013
		int channel, u8 *reqdata, int reqlen)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	u16 data;
	u8 data_type;
	int r;

	if (dsi->debug_read)
		DSSDBG("dsi_vc_generic_send_read_request(ch %d, reqlen %d)\n",
			channel, reqlen);

	if (reqlen == 0) {
		data_type = MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM;
		data = 0;
	} else if (reqlen == 1) {
		data_type = MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM;
		data = reqdata[0];
	} else if (reqlen == 2) {
		data_type = MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM;
		data = reqdata[0] | (reqdata[1] << 8);
	} else {
		BUG();
3014
		return -EINVAL;
3015 3016 3017 3018 3019 3020 3021 3022 3023 3024 3025 3026 3027 3028
	}

	r = dsi_vc_send_short(dsidev, channel, data_type, data, 0);
	if (r) {
		DSSERR("dsi_vc_generic_send_read_request(ch %d, reqlen %d)"
			" failed\n", channel, reqlen);
		return r;
	}

	return 0;
}

static int dsi_vc_read_rx_fifo(struct platform_device *dsidev, int channel,
		u8 *buf, int buflen, enum dss_dsi_content_type type)
3029 3030 3031 3032 3033
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	u32 val;
	u8 dt;
	int r;
T
Tomi Valkeinen 已提交
3034 3035

	/* RX_FIFO_NOT_EMPTY */
3036
	if (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20) == 0) {
T
Tomi Valkeinen 已提交
3037
		DSSERR("RX fifo empty when trying to read.\n");
3038 3039
		r = -EIO;
		goto err;
T
Tomi Valkeinen 已提交
3040 3041
	}

3042
	val = dsi_read_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel));
3043
	if (dsi->debug_read)
T
Tomi Valkeinen 已提交
3044 3045
		DSSDBG("\theader: %08x\n", val);
	dt = FLD_GET(val, 5, 0);
3046
	if (dt == MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT) {
T
Tomi Valkeinen 已提交
3047 3048
		u16 err = FLD_GET(val, 23, 8);
		dsi_show_rx_ack_with_err(err);
3049 3050
		r = -EIO;
		goto err;
T
Tomi Valkeinen 已提交
3051

3052 3053 3054
	} else if (dt == (type == DSS_DSI_CONTENT_GENERIC ?
			MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE :
			MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE)) {
T
Tomi Valkeinen 已提交
3055
		u8 data = FLD_GET(val, 15, 8);
3056
		if (dsi->debug_read)
3057 3058 3059
			DSSDBG("\t%s short response, 1 byte: %02x\n",
				type == DSS_DSI_CONTENT_GENERIC ? "GENERIC" :
				"DCS", data);
T
Tomi Valkeinen 已提交
3060

3061 3062 3063 3064
		if (buflen < 1) {
			r = -EIO;
			goto err;
		}
T
Tomi Valkeinen 已提交
3065 3066 3067 3068

		buf[0] = data;

		return 1;
3069 3070 3071
	} else if (dt == (type == DSS_DSI_CONTENT_GENERIC ?
			MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE :
			MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE)) {
T
Tomi Valkeinen 已提交
3072
		u16 data = FLD_GET(val, 23, 8);
3073
		if (dsi->debug_read)
3074 3075 3076
			DSSDBG("\t%s short response, 2 byte: %04x\n",
				type == DSS_DSI_CONTENT_GENERIC ? "GENERIC" :
				"DCS", data);
T
Tomi Valkeinen 已提交
3077

3078 3079 3080 3081
		if (buflen < 2) {
			r = -EIO;
			goto err;
		}
T
Tomi Valkeinen 已提交
3082 3083 3084 3085 3086

		buf[0] = data & 0xff;
		buf[1] = (data >> 8) & 0xff;

		return 2;
3087 3088 3089
	} else if (dt == (type == DSS_DSI_CONTENT_GENERIC ?
			MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE :
			MIPI_DSI_RX_DCS_LONG_READ_RESPONSE)) {
T
Tomi Valkeinen 已提交
3090 3091
		int w;
		int len = FLD_GET(val, 23, 8);
3092
		if (dsi->debug_read)
3093 3094 3095
			DSSDBG("\t%s long response, len %d\n",
				type == DSS_DSI_CONTENT_GENERIC ? "GENERIC" :
				"DCS", len);
T
Tomi Valkeinen 已提交
3096

3097 3098 3099 3100
		if (len > buflen) {
			r = -EIO;
			goto err;
		}
T
Tomi Valkeinen 已提交
3101 3102 3103 3104

		/* two byte checksum ends the packet, not included in len */
		for (w = 0; w < len + 2;) {
			int b;
3105 3106
			val = dsi_read_reg(dsidev,
				DSI_VC_SHORT_PACKET_HEADER(channel));
3107
			if (dsi->debug_read)
T
Tomi Valkeinen 已提交
3108 3109 3110 3111 3112 3113 3114 3115 3116 3117 3118 3119 3120 3121 3122 3123 3124
				DSSDBG("\t\t%02x %02x %02x %02x\n",
						(val >> 0) & 0xff,
						(val >> 8) & 0xff,
						(val >> 16) & 0xff,
						(val >> 24) & 0xff);

			for (b = 0; b < 4; ++b) {
				if (w < len)
					buf[w] = (val >> (b * 8)) & 0xff;
				/* we discard the 2 byte checksum */
				++w;
			}
		}

		return len;
	} else {
		DSSERR("\tunknown datatype 0x%02x\n", dt);
3125 3126
		r = -EIO;
		goto err;
T
Tomi Valkeinen 已提交
3127
	}
3128 3129

err:
3130 3131
	DSSERR("dsi_vc_read_rx_fifo(ch %d type %s) failed\n", channel,
		type == DSS_DSI_CONTENT_GENERIC ? "GENERIC" : "DCS");
3132

3133
	return r;
3134 3135
}

3136
static int dsi_vc_dcs_read(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
3137 3138 3139 3140 3141
		u8 *buf, int buflen)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	int r;

3142
	r = dsi_vc_dcs_send_read_request(dsidev, channel, dcs_cmd);
3143 3144
	if (r)
		goto err;
3145

3146 3147 3148 3149
	r = dsi_vc_send_bta_sync(dssdev, channel);
	if (r)
		goto err;

3150 3151
	r = dsi_vc_read_rx_fifo(dsidev, channel, buf, buflen,
		DSS_DSI_CONTENT_DCS);
3152 3153 3154 3155 3156 3157 3158 3159 3160 3161 3162 3163
	if (r < 0)
		goto err;

	if (r != buflen) {
		r = -EIO;
		goto err;
	}

	return 0;
err:
	DSSERR("dsi_vc_dcs_read(ch %d, cmd 0x%02x) failed\n", channel, dcs_cmd);
	return r;
T
Tomi Valkeinen 已提交
3164 3165
}

3166 3167 3168 3169 3170 3171
static int dsi_vc_generic_read(struct omap_dss_device *dssdev, int channel,
		u8 *reqdata, int reqlen, u8 *buf, int buflen)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	int r;

3172
	r = dsi_vc_generic_send_read_request(dsidev, channel, reqdata, reqlen);
3173 3174 3175 3176 3177 3178 3179 3180 3181 3182 3183 3184 3185 3186 3187 3188 3189 3190 3191 3192
	if (r)
		return r;

	r = dsi_vc_send_bta_sync(dssdev, channel);
	if (r)
		return r;

	r = dsi_vc_read_rx_fifo(dsidev, channel, buf, buflen,
		DSS_DSI_CONTENT_GENERIC);
	if (r < 0)
		return r;

	if (r != buflen) {
		r = -EIO;
		return r;
	}

	return 0;
}

3193
static int dsi_vc_set_max_rx_packet_size(struct omap_dss_device *dssdev, int channel,
3194
		u16 len)
T
Tomi Valkeinen 已提交
3195
{
3196 3197
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);

3198 3199
	return dsi_vc_send_short(dsidev, channel,
			MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE, len, 0);
T
Tomi Valkeinen 已提交
3200 3201
}

3202
static int dsi_enter_ulps(struct platform_device *dsidev)
3203
{
3204
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3205
	DECLARE_COMPLETION_ONSTACK(completion);
3206 3207
	int r, i;
	unsigned mask;
3208

3209
	DSSDBG("Entering ULPS");
3210

3211
	WARN_ON(!dsi_bus_is_locked(dsidev));
3212

3213
	WARN_ON(dsi->ulps_enabled);
3214

3215
	if (dsi->ulps_enabled)
3216 3217
		return 0;

3218
	/* DDR_CLK_ALWAYS_ON */
3219
	if (REG_GET(dsidev, DSI_CLK_CTRL, 13, 13)) {
3220 3221 3222
		dsi_if_enable(dsidev, 0);
		REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 0, 13, 13);
		dsi_if_enable(dsidev, 1);
3223 3224
	}

3225 3226 3227 3228
	dsi_sync_vc(dsidev, 0);
	dsi_sync_vc(dsidev, 1);
	dsi_sync_vc(dsidev, 2);
	dsi_sync_vc(dsidev, 3);
3229

3230
	dsi_force_tx_stop_mode_io(dsidev);
3231

3232 3233 3234 3235
	dsi_vc_enable(dsidev, 0, false);
	dsi_vc_enable(dsidev, 1, false);
	dsi_vc_enable(dsidev, 2, false);
	dsi_vc_enable(dsidev, 3, false);
3236

3237
	if (REG_GET(dsidev, DSI_COMPLEXIO_CFG2, 16, 16)) {	/* HS_BUSY */
3238 3239 3240 3241
		DSSERR("HS busy when enabling ULPS\n");
		return -EIO;
	}

3242
	if (REG_GET(dsidev, DSI_COMPLEXIO_CFG2, 17, 17)) {	/* LP_BUSY */
3243 3244 3245 3246
		DSSERR("LP busy when enabling ULPS\n");
		return -EIO;
	}

3247
	r = dsi_register_isr_cio(dsidev, dsi_completion_handler, &completion,
3248 3249 3250 3251
			DSI_CIO_IRQ_ULPSACTIVENOT_ALL0);
	if (r)
		return r;

3252 3253 3254 3255 3256 3257 3258
	mask = 0;

	for (i = 0; i < dsi->num_lanes_supported; ++i) {
		if (dsi->lanes[i].function == DSI_LANE_UNUSED)
			continue;
		mask |= 1 << i;
	}
3259 3260
	/* Assert TxRequestEsc for data lanes and TxUlpsClk for clk lane */
	/* LANEx_ULPS_SIG2 */
3261
	REG_FLD_MOD(dsidev, DSI_COMPLEXIO_CFG2, mask, 9, 5);
3262

3263 3264
	/* flush posted write and wait for SCP interface to finish the write */
	dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG2);
3265 3266 3267 3268 3269 3270 3271 3272

	if (wait_for_completion_timeout(&completion,
				msecs_to_jiffies(1000)) == 0) {
		DSSERR("ULPS enable timeout\n");
		r = -EIO;
		goto err;
	}

3273
	dsi_unregister_isr_cio(dsidev, dsi_completion_handler, &completion,
3274 3275
			DSI_CIO_IRQ_ULPSACTIVENOT_ALL0);

3276
	/* Reset LANEx_ULPS_SIG2 */
3277
	REG_FLD_MOD(dsidev, DSI_COMPLEXIO_CFG2, 0, 9, 5);
3278

3279 3280
	/* flush posted write and wait for SCP interface to finish the write */
	dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG2);
3281

3282
	dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_ULPS);
3283

3284
	dsi_if_enable(dsidev, false);
3285

3286
	dsi->ulps_enabled = true;
3287 3288 3289 3290

	return 0;

err:
3291
	dsi_unregister_isr_cio(dsidev, dsi_completion_handler, &completion,
3292 3293 3294 3295
			DSI_CIO_IRQ_ULPSACTIVENOT_ALL0);
	return r;
}

3296 3297
static void dsi_set_lp_rx_timeout(struct platform_device *dsidev,
		unsigned ticks, bool x4, bool x16)
T
Tomi Valkeinen 已提交
3298 3299
{
	unsigned long fck;
3300 3301
	unsigned long total_ticks;
	u32 r;
T
Tomi Valkeinen 已提交
3302

3303
	BUG_ON(ticks > 0x1fff);
T
Tomi Valkeinen 已提交
3304

3305
	/* ticks in DSI_FCK */
3306
	fck = dsi_fclk_rate(dsidev);
T
Tomi Valkeinen 已提交
3307

3308
	r = dsi_read_reg(dsidev, DSI_TIMING2);
T
Tomi Valkeinen 已提交
3309
	r = FLD_MOD(r, 1, 15, 15);	/* LP_RX_TO */
3310 3311
	r = FLD_MOD(r, x16 ? 1 : 0, 14, 14);	/* LP_RX_TO_X16 */
	r = FLD_MOD(r, x4 ? 1 : 0, 13, 13);	/* LP_RX_TO_X4 */
T
Tomi Valkeinen 已提交
3312
	r = FLD_MOD(r, ticks, 12, 0);	/* LP_RX_COUNTER */
3313
	dsi_write_reg(dsidev, DSI_TIMING2, r);
T
Tomi Valkeinen 已提交
3314

3315 3316 3317 3318 3319 3320
	total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1);

	DSSDBG("LP_RX_TO %lu ticks (%#x%s%s) = %lu ns\n",
			total_ticks,
			ticks, x4 ? " x4" : "", x16 ? " x16" : "",
			(total_ticks * 1000) / (fck / 1000 / 1000));
T
Tomi Valkeinen 已提交
3321 3322
}

3323 3324
static void dsi_set_ta_timeout(struct platform_device *dsidev, unsigned ticks,
		bool x8, bool x16)
T
Tomi Valkeinen 已提交
3325 3326
{
	unsigned long fck;
3327 3328 3329 3330
	unsigned long total_ticks;
	u32 r;

	BUG_ON(ticks > 0x1fff);
T
Tomi Valkeinen 已提交
3331 3332

	/* ticks in DSI_FCK */
3333
	fck = dsi_fclk_rate(dsidev);
T
Tomi Valkeinen 已提交
3334

3335
	r = dsi_read_reg(dsidev, DSI_TIMING1);
T
Tomi Valkeinen 已提交
3336
	r = FLD_MOD(r, 1, 31, 31);	/* TA_TO */
3337 3338
	r = FLD_MOD(r, x16 ? 1 : 0, 30, 30);	/* TA_TO_X16 */
	r = FLD_MOD(r, x8 ? 1 : 0, 29, 29);	/* TA_TO_X8 */
T
Tomi Valkeinen 已提交
3339
	r = FLD_MOD(r, ticks, 28, 16);	/* TA_TO_COUNTER */
3340
	dsi_write_reg(dsidev, DSI_TIMING1, r);
T
Tomi Valkeinen 已提交
3341

3342 3343 3344 3345 3346 3347
	total_ticks = ticks * (x16 ? 16 : 1) * (x8 ? 8 : 1);

	DSSDBG("TA_TO %lu ticks (%#x%s%s) = %lu ns\n",
			total_ticks,
			ticks, x8 ? " x8" : "", x16 ? " x16" : "",
			(total_ticks * 1000) / (fck / 1000 / 1000));
T
Tomi Valkeinen 已提交
3348 3349
}

3350 3351
static void dsi_set_stop_state_counter(struct platform_device *dsidev,
		unsigned ticks, bool x4, bool x16)
T
Tomi Valkeinen 已提交
3352 3353
{
	unsigned long fck;
3354 3355
	unsigned long total_ticks;
	u32 r;
T
Tomi Valkeinen 已提交
3356

3357
	BUG_ON(ticks > 0x1fff);
T
Tomi Valkeinen 已提交
3358

3359
	/* ticks in DSI_FCK */
3360
	fck = dsi_fclk_rate(dsidev);
T
Tomi Valkeinen 已提交
3361

3362
	r = dsi_read_reg(dsidev, DSI_TIMING1);
T
Tomi Valkeinen 已提交
3363
	r = FLD_MOD(r, 1, 15, 15);	/* FORCE_TX_STOP_MODE_IO */
3364 3365
	r = FLD_MOD(r, x16 ? 1 : 0, 14, 14);	/* STOP_STATE_X16_IO */
	r = FLD_MOD(r, x4 ? 1 : 0, 13, 13);	/* STOP_STATE_X4_IO */
T
Tomi Valkeinen 已提交
3366
	r = FLD_MOD(r, ticks, 12, 0);	/* STOP_STATE_COUNTER_IO */
3367
	dsi_write_reg(dsidev, DSI_TIMING1, r);
T
Tomi Valkeinen 已提交
3368

3369 3370 3371 3372 3373 3374
	total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1);

	DSSDBG("STOP_STATE_COUNTER %lu ticks (%#x%s%s) = %lu ns\n",
			total_ticks,
			ticks, x4 ? " x4" : "", x16 ? " x16" : "",
			(total_ticks * 1000) / (fck / 1000 / 1000));
T
Tomi Valkeinen 已提交
3375 3376
}

3377 3378
static void dsi_set_hs_tx_timeout(struct platform_device *dsidev,
		unsigned ticks, bool x4, bool x16)
T
Tomi Valkeinen 已提交
3379 3380
{
	unsigned long fck;
3381 3382
	unsigned long total_ticks;
	u32 r;
T
Tomi Valkeinen 已提交
3383

3384
	BUG_ON(ticks > 0x1fff);
T
Tomi Valkeinen 已提交
3385

3386
	/* ticks in TxByteClkHS */
3387
	fck = dsi_get_txbyteclkhs(dsidev);
T
Tomi Valkeinen 已提交
3388

3389
	r = dsi_read_reg(dsidev, DSI_TIMING2);
T
Tomi Valkeinen 已提交
3390
	r = FLD_MOD(r, 1, 31, 31);	/* HS_TX_TO */
3391 3392
	r = FLD_MOD(r, x16 ? 1 : 0, 30, 30);	/* HS_TX_TO_X16 */
	r = FLD_MOD(r, x4 ? 1 : 0, 29, 29);	/* HS_TX_TO_X8 (4 really) */
T
Tomi Valkeinen 已提交
3393
	r = FLD_MOD(r, ticks, 28, 16);	/* HS_TX_TO_COUNTER */
3394
	dsi_write_reg(dsidev, DSI_TIMING2, r);
T
Tomi Valkeinen 已提交
3395

3396 3397 3398 3399 3400 3401
	total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1);

	DSSDBG("HS_TX_TO %lu ticks (%#x%s%s) = %lu ns\n",
			total_ticks,
			ticks, x4 ? " x4" : "", x16 ? " x16" : "",
			(total_ticks * 1000) / (fck / 1000 / 1000));
T
Tomi Valkeinen 已提交
3402
}
3403

3404
static void dsi_config_vp_num_line_buffers(struct platform_device *dsidev)
3405
{
3406
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3407 3408
	int num_line_buffers;

3409
	if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
3410
		int bpp = dsi_get_pixel_size(dsi->pix_fmt);
3411
		struct videomode *vm = &dsi->vm;
3412 3413 3414 3415
		/*
		 * Don't use line buffers if width is greater than the video
		 * port's line buffer size
		 */
3416
		if (dsi->line_buffer_size <= vm->hactive * bpp / 8)
3417 3418 3419 3420 3421 3422 3423 3424 3425 3426 3427 3428
			num_line_buffers = 0;
		else
			num_line_buffers = 2;
	} else {
		/* Use maximum number of line buffers in command mode */
		num_line_buffers = 2;
	}

	/* LINE_BUFFER */
	REG_FLD_MOD(dsidev, DSI_CTRL, num_line_buffers, 13, 12);
}

3429
static void dsi_config_vp_sync_events(struct platform_device *dsidev)
3430
{
3431
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3432
	bool sync_end;
3433 3434
	u32 r;

3435 3436 3437 3438 3439
	if (dsi->vm_timings.trans_mode == OMAP_DSS_DSI_PULSE_MODE)
		sync_end = true;
	else
		sync_end = false;

3440
	r = dsi_read_reg(dsidev, DSI_CTRL);
3441 3442 3443
	r = FLD_MOD(r, 1, 9, 9);		/* VP_DE_POL */
	r = FLD_MOD(r, 1, 10, 10);		/* VP_HSYNC_POL */
	r = FLD_MOD(r, 1, 11, 11);		/* VP_VSYNC_POL */
3444
	r = FLD_MOD(r, 1, 15, 15);		/* VP_VSYNC_START */
3445
	r = FLD_MOD(r, sync_end, 16, 16);	/* VP_VSYNC_END */
3446
	r = FLD_MOD(r, 1, 17, 17);		/* VP_HSYNC_START */
3447
	r = FLD_MOD(r, sync_end, 18, 18);	/* VP_HSYNC_END */
3448 3449 3450
	dsi_write_reg(dsidev, DSI_CTRL, r);
}

3451
static void dsi_config_blanking_modes(struct platform_device *dsidev)
3452
{
3453 3454 3455 3456 3457
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	int blanking_mode = dsi->vm_timings.blanking_mode;
	int hfp_blanking_mode = dsi->vm_timings.hfp_blanking_mode;
	int hbp_blanking_mode = dsi->vm_timings.hbp_blanking_mode;
	int hsa_blanking_mode = dsi->vm_timings.hsa_blanking_mode;
3458 3459 3460 3461 3462 3463 3464 3465 3466 3467 3468 3469 3470 3471
	u32 r;

	/*
	 * 0 = TX FIFO packets sent or LPS in corresponding blanking periods
	 * 1 = Long blanking packets are sent in corresponding blanking periods
	 */
	r = dsi_read_reg(dsidev, DSI_CTRL);
	r = FLD_MOD(r, blanking_mode, 20, 20);		/* BLANKING_MODE */
	r = FLD_MOD(r, hfp_blanking_mode, 21, 21);	/* HFP_BLANKING */
	r = FLD_MOD(r, hbp_blanking_mode, 22, 22);	/* HBP_BLANKING */
	r = FLD_MOD(r, hsa_blanking_mode, 23, 23);	/* HSA_BLANKING */
	dsi_write_reg(dsidev, DSI_CTRL, r);
}

3472 3473 3474 3475 3476 3477 3478 3479 3480 3481 3482 3483 3484 3485 3486 3487 3488 3489 3490 3491 3492 3493 3494 3495 3496 3497 3498 3499 3500 3501 3502 3503 3504 3505 3506 3507 3508 3509 3510 3511 3512 3513 3514 3515 3516 3517 3518 3519 3520 3521 3522 3523 3524 3525
/*
 * According to section 'HS Command Mode Interleaving' in OMAP TRM, Scenario 3
 * results in maximum transition time for data and clock lanes to enter and
 * exit HS mode. Hence, this is the scenario where the least amount of command
 * mode data can be interleaved. We program the minimum amount of TXBYTECLKHS
 * clock cycles that can be used to interleave command mode data in HS so that
 * all scenarios are satisfied.
 */
static int dsi_compute_interleave_hs(int blank, bool ddr_alwon, int enter_hs,
		int exit_hs, int exiths_clk, int ddr_pre, int ddr_post)
{
	int transition;

	/*
	 * If DDR_CLK_ALWAYS_ON is set, we need to consider HS mode transition
	 * time of data lanes only, if it isn't set, we need to consider HS
	 * transition time of both data and clock lanes. HS transition time
	 * of Scenario 3 is considered.
	 */
	if (ddr_alwon) {
		transition = enter_hs + exit_hs + max(enter_hs, 2) + 1;
	} else {
		int trans1, trans2;
		trans1 = ddr_pre + enter_hs + exit_hs + max(enter_hs, 2) + 1;
		trans2 = ddr_pre + enter_hs + exiths_clk + ddr_post + ddr_pre +
				enter_hs + 1;
		transition = max(trans1, trans2);
	}

	return blank > transition ? blank - transition : 0;
}

/*
 * According to section 'LP Command Mode Interleaving' in OMAP TRM, Scenario 1
 * results in maximum transition time for data lanes to enter and exit LP mode.
 * Hence, this is the scenario where the least amount of command mode data can
 * be interleaved. We program the minimum amount of bytes that can be
 * interleaved in LP so that all scenarios are satisfied.
 */
static int dsi_compute_interleave_lp(int blank, int enter_hs, int exit_hs,
		int lp_clk_div, int tdsi_fclk)
{
	int trans_lp;	/* time required for a LP transition, in TXBYTECLKHS */
	int tlp_avail;	/* time left for interleaving commands, in CLKIN4DDR */
	int ttxclkesc;	/* period of LP transmit escape clock, in CLKIN4DDR */
	int thsbyte_clk = 16;	/* Period of TXBYTECLKHS clock, in CLKIN4DDR */
	int lp_inter;	/* cmd mode data that can be interleaved, in bytes */

	/* maximum LP transition time according to Scenario 1 */
	trans_lp = exit_hs + max(enter_hs, 2) + 1;

	/* CLKIN4DDR = 16 * TXBYTECLKHS */
	tlp_avail = thsbyte_clk * (blank - trans_lp);

	ttxclkesc = tdsi_fclk * lp_clk_div;

	lp_inter = ((tlp_avail - 8 * thsbyte_clk - 5 * tdsi_fclk) / ttxclkesc -
			26) / 16;

	return max(lp_inter, 0);
}
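/*
 * Worked example (hypothetical values, for illustration only): with
 * exit_hs = 7 and enter_hs = 4, trans_lp = 7 + 4 + 1 = 12 TXBYTECLKHS. For a
 * blanking period of 200 cycles, tlp_avail = 16 * 188 = 3008 CLKIN4DDR; with
 * tdsi_fclk = 4 and lp_clk_div = 8, ttxclkesc = 32, giving
 * lp_inter = ((3008 - 128 - 20) / 32 - 26) / 16 = 3 bytes of command data.
 */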

static void dsi_config_cmd_mode_interleaving(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	int blanking_mode;
	int hfp_blanking_mode, hbp_blanking_mode, hsa_blanking_mode;
	int hsa, hfp, hbp, width_bytes, bllp, lp_clk_div;
	int ddr_clk_pre, ddr_clk_post, enter_hs_mode_lat, exit_hs_mode_lat;
	int tclk_trail, ths_exit, exiths_clk;
	bool ddr_alwon;
	struct videomode *vm = &dsi->vm;
	int bpp = dsi_get_pixel_size(dsi->pix_fmt);
	int ndl = dsi->num_lanes_used - 1;
	int dsi_fclk_hsdiv = dsi->user_dsi_cinfo.mX[HSDIV_DSI] + 1;
	int hsa_interleave_hs = 0, hsa_interleave_lp = 0;
	int hfp_interleave_hs = 0, hfp_interleave_lp = 0;
	int hbp_interleave_hs = 0, hbp_interleave_lp = 0;
	int bl_interleave_hs = 0, bl_interleave_lp = 0;
	u32 r;

	r = dsi_read_reg(dsidev, DSI_CTRL);
	blanking_mode = FLD_GET(r, 20, 20);
	hfp_blanking_mode = FLD_GET(r, 21, 21);
	hbp_blanking_mode = FLD_GET(r, 22, 22);
	hsa_blanking_mode = FLD_GET(r, 23, 23);

	r = dsi_read_reg(dsidev, DSI_VM_TIMING1);
	hbp = FLD_GET(r, 11, 0);
	hfp = FLD_GET(r, 23, 12);
	hsa = FLD_GET(r, 31, 24);

	r = dsi_read_reg(dsidev, DSI_CLK_TIMING);
	ddr_clk_post = FLD_GET(r, 7, 0);
	ddr_clk_pre = FLD_GET(r, 15, 8);

	r = dsi_read_reg(dsidev, DSI_VM_TIMING7);
	exit_hs_mode_lat = FLD_GET(r, 15, 0);
	enter_hs_mode_lat = FLD_GET(r, 31, 16);

	r = dsi_read_reg(dsidev, DSI_CLK_CTRL);
	lp_clk_div = FLD_GET(r, 12, 0);
	ddr_alwon = FLD_GET(r, 13, 13);

	r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG0);
	ths_exit = FLD_GET(r, 7, 0);

	r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG1);
	tclk_trail = FLD_GET(r, 15, 8);

	exiths_clk = ths_exit + tclk_trail;

	width_bytes = DIV_ROUND_UP(vm->hactive * bpp, 8);
	bllp = hbp + hfp + hsa + DIV_ROUND_UP(width_bytes + 6, ndl);

	if (!hsa_blanking_mode) {
		hsa_interleave_hs = dsi_compute_interleave_hs(hsa, ddr_alwon,
					enter_hs_mode_lat, exit_hs_mode_lat,
					exiths_clk, ddr_clk_pre, ddr_clk_post);
		hsa_interleave_lp = dsi_compute_interleave_lp(hsa,
					enter_hs_mode_lat, exit_hs_mode_lat,
					lp_clk_div, dsi_fclk_hsdiv);
	}

	if (!hfp_blanking_mode) {
		hfp_interleave_hs = dsi_compute_interleave_hs(hfp, ddr_alwon,
					enter_hs_mode_lat, exit_hs_mode_lat,
					exiths_clk, ddr_clk_pre, ddr_clk_post);
		hfp_interleave_lp = dsi_compute_interleave_lp(hfp,
					enter_hs_mode_lat, exit_hs_mode_lat,
					lp_clk_div, dsi_fclk_hsdiv);
	}

	if (!hbp_blanking_mode) {
		hbp_interleave_hs = dsi_compute_interleave_hs(hbp, ddr_alwon,
					enter_hs_mode_lat, exit_hs_mode_lat,
					exiths_clk, ddr_clk_pre, ddr_clk_post);

		hbp_interleave_lp = dsi_compute_interleave_lp(hbp,
					enter_hs_mode_lat, exit_hs_mode_lat,
					lp_clk_div, dsi_fclk_hsdiv);
	}

	if (!blanking_mode) {
		bl_interleave_hs = dsi_compute_interleave_hs(bllp, ddr_alwon,
					enter_hs_mode_lat, exit_hs_mode_lat,
					exiths_clk, ddr_clk_pre, ddr_clk_post);

		bl_interleave_lp = dsi_compute_interleave_lp(bllp,
					enter_hs_mode_lat, exit_hs_mode_lat,
					lp_clk_div, dsi_fclk_hsdiv);
	}

	DSSDBG("DSI HS interleaving(TXBYTECLKHS) HSA %d, HFP %d, HBP %d, BLLP %d\n",
		hsa_interleave_hs, hfp_interleave_hs, hbp_interleave_hs,
		bl_interleave_hs);

	DSSDBG("DSI LP interleaving(bytes) HSA %d, HFP %d, HBP %d, BLLP %d\n",
		hsa_interleave_lp, hfp_interleave_lp, hbp_interleave_lp,
		bl_interleave_lp);

	r = dsi_read_reg(dsidev, DSI_VM_TIMING4);
	r = FLD_MOD(r, hsa_interleave_hs, 23, 16);
	r = FLD_MOD(r, hfp_interleave_hs, 15, 8);
	r = FLD_MOD(r, hbp_interleave_hs, 7, 0);
	dsi_write_reg(dsidev, DSI_VM_TIMING4, r);

	r = dsi_read_reg(dsidev, DSI_VM_TIMING5);
	r = FLD_MOD(r, hsa_interleave_lp, 23, 16);
	r = FLD_MOD(r, hfp_interleave_lp, 15, 8);
	r = FLD_MOD(r, hbp_interleave_lp, 7, 0);
	dsi_write_reg(dsidev, DSI_VM_TIMING5, r);

	r = dsi_read_reg(dsidev, DSI_VM_TIMING6);
	r = FLD_MOD(r, bl_interleave_hs, 31, 15);
	r = FLD_MOD(r, bl_interleave_lp, 16, 0);
	dsi_write_reg(dsidev, DSI_VM_TIMING6, r);
}

static int dsi_proto_config(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	u32 r;
	int buswidth = 0;

	dsi_config_tx_fifo(dsidev, DSI_FIFO_SIZE_32,
			DSI_FIFO_SIZE_32,
			DSI_FIFO_SIZE_32,
			DSI_FIFO_SIZE_32);

	dsi_config_rx_fifo(dsidev, DSI_FIFO_SIZE_32,
			DSI_FIFO_SIZE_32,
			DSI_FIFO_SIZE_32,
			DSI_FIFO_SIZE_32);

	/* XXX what values for the timeouts? */
	dsi_set_stop_state_counter(dsidev, 0x1000, false, false);
	dsi_set_ta_timeout(dsidev, 0x1fff, true, true);
	dsi_set_lp_rx_timeout(dsidev, 0x1fff, true, true);
	dsi_set_hs_tx_timeout(dsidev, 0x1fff, true, true);

	switch (dsi_get_pixel_size(dsi->pix_fmt)) {
	case 16:
		buswidth = 0;
		break;
	case 18:
		buswidth = 1;
		break;
	case 24:
		buswidth = 2;
		break;
	default:
		BUG();
		return -EINVAL;
	}

	r = dsi_read_reg(dsidev, DSI_CTRL);
	r = FLD_MOD(r, 1, 1, 1);	/* CS_RX_EN */
	r = FLD_MOD(r, 1, 2, 2);	/* ECC_RX_EN */
	r = FLD_MOD(r, 1, 3, 3);	/* TX_FIFO_ARBITRATION */
	r = FLD_MOD(r, 1, 4, 4);	/* VP_CLK_RATIO, always 1, see errata*/
	r = FLD_MOD(r, buswidth, 7, 6); /* VP_DATA_BUS_WIDTH */
	r = FLD_MOD(r, 0, 8, 8);	/* VP_CLK_POL */
	r = FLD_MOD(r, 1, 14, 14);	/* TRIGGER_RESET_MODE */
	r = FLD_MOD(r, 1, 19, 19);	/* EOT_ENABLE */
	if (!(dsi->data->quirks & DSI_QUIRK_DCS_CMD_CONFIG_VC)) {
		r = FLD_MOD(r, 1, 24, 24);	/* DCS_CMD_ENABLE */
		/* DCS_CMD_CODE, 1=start, 0=continue */
		r = FLD_MOD(r, 0, 25, 25);
	}

	dsi_write_reg(dsidev, DSI_CTRL, r);

	dsi_config_vp_num_line_buffers(dsidev);

	if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
		dsi_config_vp_sync_events(dsidev);
		dsi_config_blanking_modes(dsidev);
		dsi_config_cmd_mode_interleaving(dsidev);
	}

	dsi_vc_initial_config(dsidev, 0);
	dsi_vc_initial_config(dsidev, 1);
	dsi_vc_initial_config(dsidev, 2);
	dsi_vc_initial_config(dsidev, 3);

	return 0;
}

static void dsi_proto_timings(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	unsigned tlpx, tclk_zero, tclk_prepare, tclk_trail;
	unsigned tclk_pre, tclk_post;
	unsigned ths_prepare, ths_prepare_ths_zero, ths_zero;
	unsigned ths_trail, ths_exit;
	unsigned ddr_clk_pre, ddr_clk_post;
	unsigned enter_hs_mode_lat, exit_hs_mode_lat;
	unsigned ths_eot;
	int ndl = dsi->num_lanes_used - 1;
	u32 r;

	r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG0);
	ths_prepare = FLD_GET(r, 31, 24);
	ths_prepare_ths_zero = FLD_GET(r, 23, 16);
	ths_zero = ths_prepare_ths_zero - ths_prepare;
	ths_trail = FLD_GET(r, 15, 8);
	ths_exit = FLD_GET(r, 7, 0);

	r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG1);
	tlpx = FLD_GET(r, 20, 16) * 2;
	tclk_trail = FLD_GET(r, 15, 8);
	tclk_zero = FLD_GET(r, 7, 0);

	r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG2);
	tclk_prepare = FLD_GET(r, 7, 0);

	/* min 8*UI */
	tclk_pre = 20;
	/* min 60ns + 52*UI */
	tclk_post = ns2ddr(dsidev, 60) + 26;

	ths_eot = DIV_ROUND_UP(4, ndl);

	ddr_clk_pre = DIV_ROUND_UP(tclk_pre + tlpx + tclk_zero + tclk_prepare,
			4);
	ddr_clk_post = DIV_ROUND_UP(tclk_post + ths_trail, 4) + ths_eot;

	BUG_ON(ddr_clk_pre == 0 || ddr_clk_pre > 255);
	BUG_ON(ddr_clk_post == 0 || ddr_clk_post > 255);
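	/*
	 * Sanity example (assumed PHY values, not from any particular panel):
	 * tclk_pre = 20, tlpx = 12, tclk_zero = 36 and tclk_prepare = 13 give
	 * ddr_clk_pre = DIV_ROUND_UP(81, 4) = 21, while tclk_post = 70,
	 * ths_trail = 24 and ths_eot = 2 (two data lanes) give
	 * ddr_clk_post = DIV_ROUND_UP(94, 4) + 2 = 26; both fit in 8 bits.
	 */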

	r = dsi_read_reg(dsidev, DSI_CLK_TIMING);
	r = FLD_MOD(r, ddr_clk_pre, 15, 8);
	r = FLD_MOD(r, ddr_clk_post, 7, 0);
	dsi_write_reg(dsidev, DSI_CLK_TIMING, r);

	DSSDBG("ddr_clk_pre %u, ddr_clk_post %u\n",
			ddr_clk_pre,
			ddr_clk_post);

	enter_hs_mode_lat = 1 + DIV_ROUND_UP(tlpx, 4) +
		DIV_ROUND_UP(ths_prepare, 4) +
		DIV_ROUND_UP(ths_zero + 3, 4);

	exit_hs_mode_lat = DIV_ROUND_UP(ths_trail + ths_exit, 4) + 1 + ths_eot;

	r = FLD_VAL(enter_hs_mode_lat, 31, 16) |
		FLD_VAL(exit_hs_mode_lat, 15, 0);
	dsi_write_reg(dsidev, DSI_VM_TIMING7, r);

	DSSDBG("enter_hs_mode_lat %u, exit_hs_mode_lat %u\n",
			enter_hs_mode_lat, exit_hs_mode_lat);

	if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
		/* TODO: Implement a video mode check_timings function */
		int hsa = dsi->vm_timings.hsa;
		int hfp = dsi->vm_timings.hfp;
		int hbp = dsi->vm_timings.hbp;
		int vsa = dsi->vm_timings.vsa;
		int vfp = dsi->vm_timings.vfp;
		int vbp = dsi->vm_timings.vbp;
		int window_sync = dsi->vm_timings.window_sync;
		bool hsync_end;
		struct videomode *vm = &dsi->vm;
		int bpp = dsi_get_pixel_size(dsi->pix_fmt);
		int tl, t_he, width_bytes;

		hsync_end = dsi->vm_timings.trans_mode == OMAP_DSS_DSI_PULSE_MODE;
		t_he = hsync_end ?
			((hsa == 0 && ndl == 3) ? 1 : DIV_ROUND_UP(4, ndl)) : 0;

		width_bytes = DIV_ROUND_UP(vm->hactive * bpp, 8);

		/* TL = t_HS + HSA + t_HE + HFP + ceil((WC + 6) / NDL) + HBP */
		tl = DIV_ROUND_UP(4, ndl) + (hsync_end ? hsa : 0) + t_he + hfp +
			DIV_ROUND_UP(width_bytes + 6, ndl) + hbp;
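		/*
		 * Example (hypothetical mode): 864 active pixels at 24 bpp on
		 * 3 data lanes gives width_bytes = 2592 and an active-data
		 * contribution of DIV_ROUND_UP(2592 + 6, 3) = 866 byteclks to
		 * TL; HSA, HFP and HBP from vm_timings are added on top.
		 */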

		DSSDBG("HBP: %d, HFP: %d, HSA: %d, TL: %d TXBYTECLKHS\n", hbp,
			hfp, hsync_end ? hsa : 0, tl);
		DSSDBG("VBP: %d, VFP: %d, VSA: %d, VACT: %d lines\n", vbp, vfp,
			vsa, vm->vactive);

		r = dsi_read_reg(dsidev, DSI_VM_TIMING1);
		r = FLD_MOD(r, hbp, 11, 0);	/* HBP */
		r = FLD_MOD(r, hfp, 23, 12);	/* HFP */
		r = FLD_MOD(r, hsync_end ? hsa : 0, 31, 24);	/* HSA */
		dsi_write_reg(dsidev, DSI_VM_TIMING1, r);

		r = dsi_read_reg(dsidev, DSI_VM_TIMING2);
		r = FLD_MOD(r, vbp, 7, 0);	/* VBP */
		r = FLD_MOD(r, vfp, 15, 8);	/* VFP */
		r = FLD_MOD(r, vsa, 23, 16);	/* VSA */
		r = FLD_MOD(r, window_sync, 27, 24);	/* WINDOW_SYNC */
		dsi_write_reg(dsidev, DSI_VM_TIMING2, r);

		r = dsi_read_reg(dsidev, DSI_VM_TIMING3);
		r = FLD_MOD(r, vm->vactive, 14, 0);	/* VACT */
		r = FLD_MOD(r, tl, 31, 16);		/* TL */
		dsi_write_reg(dsidev, DSI_VM_TIMING3, r);
	}
}

static int dsi_configure_pins(struct omap_dss_device *dssdev,
		const struct omap_dsi_pin_config *pin_cfg)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	int num_pins;
	const int *pins;
	struct dsi_lane_config lanes[DSI_MAX_NR_LANES];
	int num_lanes;
	int i;

	static const enum dsi_lane_function functions[] = {
		DSI_LANE_CLK,
		DSI_LANE_DATA1,
		DSI_LANE_DATA2,
		DSI_LANE_DATA3,
		DSI_LANE_DATA4,
	};

	num_pins = pin_cfg->num_pins;
	pins = pin_cfg->pins;

	if (num_pins < 4 || num_pins > dsi->num_lanes_supported * 2
			|| num_pins % 2 != 0)
		return -EINVAL;

	for (i = 0; i < DSI_MAX_NR_LANES; ++i)
		lanes[i].function = DSI_LANE_UNUSED;

	num_lanes = 0;
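	/*
	 * Pins are taken as (positive, negative) pairs: an even dx means the
	 * lane uses normal polarity and dy must be dx + 1, an odd dx means the
	 * pair is swapped and dy must be dx - 1. For example (hypothetical
	 * lane list), pins = { 0, 1, 3, 2 } maps pins 0/1 to the clock lane
	 * with normal polarity and pins 3/2 to data lane 1 with swapped
	 * polarity.
	 */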

	for (i = 0; i < num_pins; i += 2) {
		u8 lane, pol;
		int dx, dy;

		dx = pins[i];
		dy = pins[i + 1];

		if (dx < 0 || dx >= dsi->num_lanes_supported * 2)
			return -EINVAL;

		if (dy < 0 || dy >= dsi->num_lanes_supported * 2)
			return -EINVAL;

		if (dx & 1) {
			if (dy != dx - 1)
				return -EINVAL;
			pol = 1;
		} else {
			if (dy != dx + 1)
				return -EINVAL;
			pol = 0;
		}

		lane = dx / 2;

		lanes[lane].function = functions[i / 2];
		lanes[lane].polarity = pol;
		num_lanes++;
	}

	memcpy(dsi->lanes, lanes, sizeof(dsi->lanes));
	dsi->num_lanes_used = num_lanes;

	return 0;
}

static int dsi_enable_video_output(struct omap_dss_device *dssdev, int channel)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	enum omap_channel dispc_channel = dssdev->dispc_channel;
	int bpp = dsi_get_pixel_size(dsi->pix_fmt);
	struct omap_dss_device *out = &dsi->output;
	u8 data_type;
	u16 word_count;
	int r;

	if (!out->dispc_channel_connected) {
		DSSERR("failed to enable display: no output/manager\n");
		return -ENODEV;
	}

	r = dsi_display_init_dispc(dsidev, dispc_channel);
	if (r)
		goto err_init_dispc;

	if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
		switch (dsi->pix_fmt) {
		case OMAP_DSS_DSI_FMT_RGB888:
			data_type = MIPI_DSI_PACKED_PIXEL_STREAM_24;
			break;
		case OMAP_DSS_DSI_FMT_RGB666:
			data_type = MIPI_DSI_PIXEL_STREAM_3BYTE_18;
			break;
		case OMAP_DSS_DSI_FMT_RGB666_PACKED:
			data_type = MIPI_DSI_PACKED_PIXEL_STREAM_18;
			break;
		case OMAP_DSS_DSI_FMT_RGB565:
			data_type = MIPI_DSI_PACKED_PIXEL_STREAM_16;
			break;
		default:
			r = -EINVAL;
			goto err_pix_fmt;
		}

		dsi_if_enable(dsidev, false);
		dsi_vc_enable(dsidev, channel, false);

		/* MODE, 1 = video mode */
		REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 1, 4, 4);

		word_count = DIV_ROUND_UP(dsi->vm.hactive * bpp, 8);

		dsi_vc_write_long_header(dsidev, channel, data_type,
				word_count, 0);

		dsi_vc_enable(dsidev, channel, true);
		dsi_if_enable(dsidev, true);
	}

	r = dss_mgr_enable(dispc_channel);
	if (r)
		goto err_mgr_enable;

	return 0;

err_mgr_enable:
	if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
		dsi_if_enable(dsidev, false);
		dsi_vc_enable(dsidev, channel, false);
	}
err_pix_fmt:
	dsi_display_uninit_dispc(dsidev, dispc_channel);
err_init_dispc:
	return r;
}

static void dsi_disable_video_output(struct omap_dss_device *dssdev, int channel)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	enum omap_channel dispc_channel = dssdev->dispc_channel;

	if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
		dsi_if_enable(dsidev, false);
		dsi_vc_enable(dsidev, channel, false);

		/* MODE, 0 = command mode */
		REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 0, 4, 4);

		dsi_vc_enable(dsidev, channel, true);
		dsi_if_enable(dsidev, true);
	}

	dss_mgr_disable(dispc_channel);

	dsi_display_uninit_dispc(dsidev, dispc_channel);
}

static void dsi_update_screen_dispc(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	enum omap_channel dispc_channel = dsi->output.dispc_channel;
	unsigned bytespp;
	unsigned bytespl;
	unsigned bytespf;
	unsigned total_len;
	unsigned packet_payload;
	unsigned packet_len;
	u32 l;
	int r;
	const unsigned channel = dsi->update_channel;
	const unsigned line_buf_size = dsi->line_buffer_size;
	u16 w = dsi->vm.hactive;
	u16 h = dsi->vm.vactive;

	DSSDBG("dsi_update_screen_dispc(%dx%d)\n", w, h);

	dsi_vc_config_source(dsidev, channel, DSI_VC_SOURCE_VP);

	bytespp	= dsi_get_pixel_size(dsi->pix_fmt) / 8;
	bytespl = w * bytespp;
	bytespf = bytespl * h;

	/* NOTE: packet_payload has to be equal to N * bytespl, where N is
	 * number of lines in a packet.  See errata about VP_CLK_RATIO */
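	/*
	 * Example with assumed numbers: a 320x480 update at 24 bpp gives
	 * bytespl = 960 and bytespf = 460800; with a 1024 byte line buffer,
	 * packet_payload = 1024 / 960 * 960 = 960 (one line per packet),
	 * packet_len = 961 and total_len = 480 * 961 with no partial packet.
	 */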

	if (bytespf < line_buf_size)
		packet_payload = bytespf;
	else
		packet_payload = (line_buf_size) / bytespl * bytespl;

	packet_len = packet_payload + 1;	/* 1 byte for DCS cmd */
	total_len = (bytespf / packet_payload) * packet_len;

	if (bytespf % packet_payload)
		total_len += (bytespf % packet_payload) + 1;

	l = FLD_VAL(total_len, 23, 0); /* TE_SIZE */
	dsi_write_reg(dsidev, DSI_VC_TE(channel), l);

	dsi_vc_write_long_header(dsidev, channel, MIPI_DSI_DCS_LONG_WRITE,
		packet_len, 0);

	if (dsi->te_enabled)
		l = FLD_MOD(l, 1, 30, 30); /* TE_EN */
	else
		l = FLD_MOD(l, 1, 31, 31); /* TE_START */
	dsi_write_reg(dsidev, DSI_VC_TE(channel), l);

	/* We put SIDLEMODE to no-idle for the duration of the transfer,
	 * because DSS interrupts are not capable of waking up the CPU and the
	 * framedone interrupt could be delayed for quite a long time. I think
	 * the same goes for any DSS interrupts, but for some reason I have not
	 * seen the problem anywhere else than here.
	 */
	dispc_disable_sidle();

	dsi_perf_mark_start(dsidev);

	r = schedule_delayed_work(&dsi->framedone_timeout_work,
		msecs_to_jiffies(250));
	BUG_ON(r == 0);

	dss_mgr_set_timings(dispc_channel, &dsi->vm);

	dss_mgr_start_update(dispc_channel);

	if (dsi->te_enabled) {
		/* disable LP_RX_TO, so that we can receive TE.  Time to wait
		 * for TE is longer than the timer allows */
		REG_FLD_MOD(dsidev, DSI_TIMING2, 0, 15, 15); /* LP_RX_TO */

		dsi_vc_send_bta(dsidev, channel);

#ifdef DSI_CATCH_MISSING_TE
		mod_timer(&dsi->te_timer, jiffies + msecs_to_jiffies(250));
#endif
	}
}

#ifdef DSI_CATCH_MISSING_TE
static void dsi_te_timeout(unsigned long arg)
{
	DSSERR("TE not received for 250ms!\n");
}
#endif

static void dsi_handle_framedone(struct platform_device *dsidev, int error)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	/* SIDLEMODE back to smart-idle */
	dispc_enable_sidle();

	if (dsi->te_enabled) {
		/* enable LP_RX_TO again after the TE */
		REG_FLD_MOD(dsidev, DSI_TIMING2, 1, 15, 15); /* LP_RX_TO */
	}

	dsi->framedone_callback(error, dsi->framedone_data);

	if (!error)
		dsi_perf_show(dsidev, "DISPC");
}

static void dsi_framedone_timeout_work_callback(struct work_struct *work)
{
	struct dsi_data *dsi = container_of(work, struct dsi_data,
			framedone_timeout_work.work);
	/* XXX While extremely unlikely, we could get FRAMEDONE interrupt after
	 * 250ms which would conflict with this timeout work. What should be
	 * done is first cancel the transfer on the HW, and then cancel the
	 * possibly scheduled framedone work. However, cancelling the transfer
	 * on the HW is buggy, and would probably require resetting the whole
	 * DSI */

	DSSERR("Framedone not received for 250ms!\n");

	dsi_handle_framedone(dsi->pdev, -ETIMEDOUT);
}

static void dsi_framedone_irq_callback(void *data)
{
	struct platform_device *dsidev = (struct platform_device *) data;
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	/* Note: We get FRAMEDONE when DISPC has finished sending pixels and
	 * turns itself off. However, DSI still has the pixels in its buffers,
	 * and is sending the data.
	 */

	cancel_delayed_work(&dsi->framedone_timeout_work);

	dsi_handle_framedone(dsidev, 0);
}

static int dsi_update(struct omap_dss_device *dssdev, int channel,
		void (*callback)(int, void *), void *data)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	u16 dw, dh;

	dsi_perf_mark_setup(dsidev);

	dsi->update_channel = channel;

	dsi->framedone_callback = callback;
	dsi->framedone_data = data;

	dw = dsi->vm.hactive;
	dh = dsi->vm.vactive;

#ifdef DSI_PERF_MEASURE
	dsi->update_bytes = dw * dh *
		dsi_get_pixel_size(dsi->pix_fmt) / 8;
#endif
	dsi_update_screen_dispc(dsidev);

	return 0;
}

/* Display funcs */

static int dsi_configure_dispc_clocks(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	struct dispc_clock_info dispc_cinfo;
	int r;
	unsigned long fck;

	fck = dsi_get_pll_hsdiv_dispc_rate(dsidev);

	dispc_cinfo.lck_div = dsi->user_dispc_cinfo.lck_div;
	dispc_cinfo.pck_div = dsi->user_dispc_cinfo.pck_div;

	r = dispc_calc_clock_rates(fck, &dispc_cinfo);
	if (r) {
		DSSERR("Failed to calc dispc clocks\n");
		return r;
	}

	dsi->mgr_config.clock_info = dispc_cinfo;

	return 0;
}

static int dsi_display_init_dispc(struct platform_device *dsidev,
		enum omap_channel channel)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	int r;

	dss_select_lcd_clk_source(channel, dsi->module_id == 0 ?
			DSS_CLK_SRC_PLL1_1 :
			DSS_CLK_SRC_PLL2_1);

	if (dsi->mode == OMAP_DSS_DSI_CMD_MODE) {
		r = dss_mgr_register_framedone_handler(channel,
				dsi_framedone_irq_callback, dsidev);
		if (r) {
			DSSERR("can't register FRAMEDONE handler\n");
			goto err;
		}

		dsi->mgr_config.stallmode = true;
		dsi->mgr_config.fifohandcheck = true;
	} else {
		dsi->mgr_config.stallmode = false;
		dsi->mgr_config.fifohandcheck = false;
	}

	/*
	 * override interlace, logic level and edge related parameters in
	 * videomode with default values
	 */
	dsi->vm.flags &= ~DISPLAY_FLAGS_INTERLACED;
	dsi->vm.flags &= ~DISPLAY_FLAGS_HSYNC_LOW;
	dsi->vm.flags |= DISPLAY_FLAGS_HSYNC_HIGH;
	dsi->vm.flags &= ~DISPLAY_FLAGS_VSYNC_LOW;
	dsi->vm.flags |= DISPLAY_FLAGS_VSYNC_HIGH;
	dsi->vm.flags &= ~DISPLAY_FLAGS_PIXDATA_NEGEDGE;
	dsi->vm.flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE;
	dsi->vm.flags &= ~DISPLAY_FLAGS_DE_LOW;
	dsi->vm.flags |= DISPLAY_FLAGS_DE_HIGH;
	dsi->vm.flags &= ~DISPLAY_FLAGS_SYNC_POSEDGE;
	dsi->vm.flags |= DISPLAY_FLAGS_SYNC_NEGEDGE;

	dss_mgr_set_timings(channel, &dsi->vm);

	r = dsi_configure_dispc_clocks(dsidev);
	if (r)
		goto err1;

	dsi->mgr_config.io_pad_mode = DSS_IO_PAD_MODE_BYPASS;
	dsi->mgr_config.video_port_width =
			dsi_get_pixel_size(dsi->pix_fmt);
	dsi->mgr_config.lcden_sig_polarity = 0;

	dss_mgr_set_lcd_config(channel, &dsi->mgr_config);

	return 0;
err1:
	if (dsi->mode == OMAP_DSS_DSI_CMD_MODE)
		dss_mgr_unregister_framedone_handler(channel,
				dsi_framedone_irq_callback, dsidev);
err:
	dss_select_lcd_clk_source(channel, DSS_CLK_SRC_FCK);
	return r;
}

static void dsi_display_uninit_dispc(struct platform_device *dsidev,
		enum omap_channel channel)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	if (dsi->mode == OMAP_DSS_DSI_CMD_MODE)
		dss_mgr_unregister_framedone_handler(channel,
				dsi_framedone_irq_callback, dsidev);

	dss_select_lcd_clk_source(channel, DSS_CLK_SRC_FCK);
}

static int dsi_configure_dsi_clocks(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	struct dss_pll_clock_info cinfo;
	int r;

	cinfo = dsi->user_dsi_cinfo;

	r = dss_pll_set_config(&dsi->pll, &cinfo);
	if (r) {
		DSSERR("Failed to set dsi clocks\n");
		return r;
	}

	return 0;
}

static int dsi_display_init_dsi(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	int r;

	r = dss_pll_enable(&dsi->pll);
	if (r)
		goto err0;

	r = dsi_configure_dsi_clocks(dsidev);
	if (r)
		goto err1;

	dss_select_dsi_clk_source(dsi->module_id, dsi->module_id == 0 ?
			DSS_CLK_SRC_PLL1_2 :
			DSS_CLK_SRC_PLL2_2);

	DSSDBG("PLL OK\n");

	r = dsi_cio_init(dsidev);
	if (r)
		goto err2;

	_dsi_print_reset_status(dsidev);

	dsi_proto_timings(dsidev);
	dsi_set_lp_clk_divisor(dsidev);

	if (1)
		_dsi_print_reset_status(dsidev);

	r = dsi_proto_config(dsidev);
	if (r)
		goto err3;

	/* enable interface */
	dsi_vc_enable(dsidev, 0, 1);
	dsi_vc_enable(dsidev, 1, 1);
	dsi_vc_enable(dsidev, 2, 1);
	dsi_vc_enable(dsidev, 3, 1);
	dsi_if_enable(dsidev, 1);
	dsi_force_tx_stop_mode_io(dsidev);

	return 0;
err3:
	dsi_cio_uninit(dsidev);
err2:
	dss_select_dsi_clk_source(dsi->module_id, DSS_CLK_SRC_FCK);
err1:
	dss_pll_disable(&dsi->pll);
err0:
	return r;
}

static void dsi_display_uninit_dsi(struct platform_device *dsidev,
		bool disconnect_lanes, bool enter_ulps)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	if (enter_ulps && !dsi->ulps_enabled)
		dsi_enter_ulps(dsidev);

	/* disable interface */
	dsi_if_enable(dsidev, 0);
	dsi_vc_enable(dsidev, 0, 0);
	dsi_vc_enable(dsidev, 1, 0);
	dsi_vc_enable(dsidev, 2, 0);
	dsi_vc_enable(dsidev, 3, 0);

	dss_select_dsi_clk_source(dsi->module_id, DSS_CLK_SRC_FCK);
	dsi_cio_uninit(dsidev);
	dsi_pll_uninit(dsidev, disconnect_lanes);
}

static int dsi_display_enable(struct omap_dss_device *dssdev)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	int r = 0;

	DSSDBG("dsi_display_enable\n");

	WARN_ON(!dsi_bus_is_locked(dsidev));

	mutex_lock(&dsi->lock);

	r = dsi_runtime_get(dsidev);
	if (r)
		goto err_get_dsi;

	_dsi_initialize_irq(dsidev);

	r = dsi_display_init_dsi(dsidev);
	if (r)
		goto err_init_dsi;

	mutex_unlock(&dsi->lock);

	return 0;

err_init_dsi:
	dsi_runtime_put(dsidev);
err_get_dsi:
	mutex_unlock(&dsi->lock);
	DSSDBG("dsi_display_enable FAILED\n");
	return r;
}

static void dsi_display_disable(struct omap_dss_device *dssdev,
		bool disconnect_lanes, bool enter_ulps)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	DSSDBG("dsi_display_disable\n");

	WARN_ON(!dsi_bus_is_locked(dsidev));

	mutex_lock(&dsi->lock);

	dsi_sync_vc(dsidev, 0);
	dsi_sync_vc(dsidev, 1);
	dsi_sync_vc(dsidev, 2);
	dsi_sync_vc(dsidev, 3);

	dsi_display_uninit_dsi(dsidev, disconnect_lanes, enter_ulps);

	dsi_runtime_put(dsidev);

	mutex_unlock(&dsi->lock);
}

static int dsi_enable_te(struct omap_dss_device *dssdev, bool enable)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	dsi->te_enabled = enable;
	return 0;
}

#ifdef PRINT_VERBOSE_VM_TIMINGS
static void print_dsi_vm(const char *str,
		const struct omap_dss_dsi_videomode_timings *t)
{
	unsigned long byteclk = t->hsclk / 4;
	int bl, wc, pps, tot;

	wc = DIV_ROUND_UP(t->hact * t->bitspp, 8);
	pps = DIV_ROUND_UP(wc + 6, t->ndl); /* pixel packet size */
	bl = t->hss + t->hsa + t->hse + t->hbp + t->hfp;
	tot = bl + pps;

#define TO_DSI_T(x) ((u32)div64_u64((u64)x * 1000000000llu, byteclk))

	pr_debug("%s bck %lu, %u/%u/%u/%u/%u/%u = %u+%u = %u, "
			"%u/%u/%u/%u/%u/%u = %u + %u = %u\n",
			str,
			byteclk,
			t->hss, t->hsa, t->hse, t->hbp, pps, t->hfp,
			bl, pps, tot,
			TO_DSI_T(t->hss),
			TO_DSI_T(t->hsa),
			TO_DSI_T(t->hse),
			TO_DSI_T(t->hbp),
			TO_DSI_T(pps),
			TO_DSI_T(t->hfp),

			TO_DSI_T(bl),
			TO_DSI_T(pps),

			TO_DSI_T(tot));
#undef TO_DSI_T
}

static void print_dispc_vm(const char *str, const struct videomode *vm)
{
	unsigned long pck = vm->pixelclock;
	int hact, bl, tot;

	hact = vm->hactive;
	bl = vm->hsync_len + vm->hback_porch + vm->hfront_porch;
	tot = hact + bl;

#define TO_DISPC_T(x) ((u32)div64_u64((u64)x * 1000000000llu, pck))

	pr_debug("%s pck %lu, %u/%u/%u/%u = %u+%u = %u, "
			"%u/%u/%u/%u = %u + %u = %u\n",
			str,
			pck,
			vm->hsync_len, vm->hback_porch, hact, vm->hfront_porch,
			bl, hact, tot,
			TO_DISPC_T(vm->hsync_len),
			TO_DISPC_T(vm->hback_porch),
			TO_DISPC_T(hact),
			TO_DISPC_T(vm->hfront_porch),
			TO_DISPC_T(bl),
			TO_DISPC_T(hact),
			TO_DISPC_T(tot));
#undef TO_DISPC_T
}

/* note: this is not quite accurate */
static void print_dsi_dispc_vm(const char *str,
		const struct omap_dss_dsi_videomode_timings *t)
{
	struct videomode vm = { 0 };
	unsigned long byteclk = t->hsclk / 4;
	unsigned long pck;
	u64 dsi_tput;
	int dsi_hact, dsi_htot;

	dsi_tput = (u64)byteclk * t->ndl * 8;
	pck = (u32)div64_u64(dsi_tput, t->bitspp);
	dsi_hact = DIV_ROUND_UP(DIV_ROUND_UP(t->hact * t->bitspp, 8) + 6, t->ndl);
	dsi_htot = t->hss + t->hsa + t->hse + t->hbp + dsi_hact + t->hfp;

	vm.pixelclock = pck;
	vm.hsync_len = div64_u64((u64)(t->hsa + t->hse) * pck, byteclk);
	vm.hback_porch = div64_u64((u64)t->hbp * pck, byteclk);
	vm.hfront_porch = div64_u64((u64)t->hfp * pck, byteclk);
	vm.hactive = t->hact;

	print_dispc_vm(str, &vm);
}
#endif /* PRINT_VERBOSE_VM_TIMINGS */

static bool dsi_cm_calc_dispc_cb(int lckd, int pckd, unsigned long lck,
		unsigned long pck, void *data)
{
	struct dsi_clk_calc_ctx *ctx = data;
	struct videomode *vm = &ctx->vm;

	ctx->dispc_cinfo.lck_div = lckd;
	ctx->dispc_cinfo.pck_div = pckd;
	ctx->dispc_cinfo.lck = lck;
	ctx->dispc_cinfo.pck = pck;
	*vm = *ctx->config->vm;
	vm->pixelclock = pck;
	vm->hactive = ctx->config->vm->hactive;
	vm->vactive = ctx->config->vm->vactive;
	vm->hsync_len = vm->hfront_porch = vm->hback_porch = vm->vsync_len = 1;
	vm->vfront_porch = vm->vback_porch = 0;

	return true;
}

static bool dsi_cm_calc_hsdiv_cb(int m_dispc, unsigned long dispc,
		void *data)
{
	struct dsi_clk_calc_ctx *ctx = data;

	ctx->dsi_cinfo.mX[HSDIV_DISPC] = m_dispc;
	ctx->dsi_cinfo.clkout[HSDIV_DISPC] = dispc;

	return dispc_div_calc(dispc, ctx->req_pck_min, ctx->req_pck_max,
			dsi_cm_calc_dispc_cb, ctx);
}

static bool dsi_cm_calc_pll_cb(int n, int m, unsigned long fint,
		unsigned long clkdco, void *data)
{
	struct dsi_clk_calc_ctx *ctx = data;

	ctx->dsi_cinfo.n = n;
	ctx->dsi_cinfo.m = m;
	ctx->dsi_cinfo.fint = fint;
	ctx->dsi_cinfo.clkdco = clkdco;

	return dss_pll_hsdiv_calc_a(ctx->pll, clkdco, ctx->req_pck_min,
			dss_feat_get_param_max(FEAT_PARAM_DSS_FCK),
			dsi_cm_calc_hsdiv_cb, ctx);
}

static bool dsi_cm_calc(struct dsi_data *dsi,
		const struct omap_dss_dsi_config *cfg,
		struct dsi_clk_calc_ctx *ctx)
{
	unsigned long clkin;
	int bitspp, ndl;
	unsigned long pll_min, pll_max;
	unsigned long pck, txbyteclk;

	clkin = clk_get_rate(dsi->pll.clkin);
	bitspp = dsi_get_pixel_size(cfg->pixel_format);
	ndl = dsi->num_lanes_used - 1;

	/*
	 * Here we should calculate minimum txbyteclk to be able to send the
	 * frame in time, and also to handle TE. That's not very simple, though,
	 * especially as we go to LP between each pixel packet due to HW
	 * "feature". So let's just estimate very roughly and multiply by 1.5.
	 */
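	/*
	 * For example (hypothetical panel): a 30 MHz pixel clock at 24 bpp on
	 * two data lanes gives pck = 45 MHz after the 1.5x margin and
	 * txbyteclk = 45 MHz * 24 / 8 / 2 = 67.5 MHz, so the PLL search below
	 * starts from at least 16 * txbyteclk = 1.08 GHz (clkdco).
	 */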
	pck = cfg->vm->pixelclock;
	pck = pck * 3 / 2;
	txbyteclk = pck * bitspp / 8 / ndl;

	memset(ctx, 0, sizeof(*ctx));
	ctx->dsidev = dsi->pdev;
	ctx->pll = &dsi->pll;
	ctx->config = cfg;
	ctx->req_pck_min = pck;
	ctx->req_pck_nom = pck;
	ctx->req_pck_max = pck * 3 / 2;

	pll_min = max(cfg->hs_clk_min * 4, txbyteclk * 4 * 4);
	pll_max = cfg->hs_clk_max * 4;

	return dss_pll_calc_a(ctx->pll, clkin,
			pll_min, pll_max,
			dsi_cm_calc_pll_cb, ctx);
}

static bool dsi_vm_calc_blanking(struct dsi_clk_calc_ctx *ctx)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(ctx->dsidev);
	const struct omap_dss_dsi_config *cfg = ctx->config;
	int bitspp = dsi_get_pixel_size(cfg->pixel_format);
	int ndl = dsi->num_lanes_used - 1;
	unsigned long hsclk = ctx->dsi_cinfo.clkdco / 4;
	unsigned long byteclk = hsclk / 4;

	unsigned long dispc_pck, req_pck_min, req_pck_nom, req_pck_max;
	int xres;
	int panel_htot, panel_hbl; /* pixels */
	int dispc_htot, dispc_hbl; /* pixels */
	int dsi_htot, dsi_hact, dsi_hbl, hss, hse; /* byteclks */
	int hfp, hsa, hbp;
	const struct videomode *req_vm;
	struct videomode *dispc_vm;
	struct omap_dss_dsi_videomode_timings *dsi_vm;
	u64 dsi_tput, dispc_tput;

	dsi_tput = (u64)byteclk * ndl * 8;

	req_vm = cfg->vm;
	req_pck_min = ctx->req_pck_min;
	req_pck_max = ctx->req_pck_max;
	req_pck_nom = ctx->req_pck_nom;

	dispc_pck = ctx->dispc_cinfo.pck;
	dispc_tput = (u64)dispc_pck * bitspp;

	xres = req_vm->hactive;

	panel_hbl = req_vm->hfront_porch + req_vm->hback_porch +
		    req_vm->hsync_len;
	panel_htot = xres + panel_hbl;

	dsi_hact = DIV_ROUND_UP(DIV_ROUND_UP(xres * bitspp, 8) + 6, ndl);

	/*
	 * When there are no line buffers, DISPC and DSI must have the
	 * same tput. Otherwise DISPC tput needs to be higher than DSI's.
	 */
	if (dsi->line_buffer_size < xres * bitspp / 8) {
		if (dispc_tput != dsi_tput)
			return false;
	} else {
		if (dispc_tput < dsi_tput)
			return false;
	}

	/* DSI tput must be over the min requirement */
	if (dsi_tput < (u64)bitspp * req_pck_min)
		return false;

	/* When non-burst mode, DSI tput must be below max requirement. */
	if (cfg->trans_mode != OMAP_DSS_DSI_BURST_MODE) {
		if (dsi_tput > (u64)bitspp * req_pck_max)
			return false;
	}

	hss = DIV_ROUND_UP(4, ndl);

	if (cfg->trans_mode == OMAP_DSS_DSI_PULSE_MODE) {
		if (ndl == 3 && req_vm->hsync_len == 0)
			hse = 1;
		else
			hse = DIV_ROUND_UP(4, ndl);
	} else {
		hse = 0;
	}

	/* DSI htot to match the panel's nominal pck */
	dsi_htot = div64_u64((u64)panel_htot * byteclk, req_pck_nom);

	/* fail if there would be no time for blanking */
	if (dsi_htot < hss + hse + dsi_hact)
		return false;

	/* total DSI blanking needed to achieve panel's TL */
	dsi_hbl = dsi_htot - dsi_hact;

	/* DISPC htot to match the DSI TL */
	dispc_htot = div64_u64((u64)dsi_htot * dispc_pck, byteclk);

	/* verify that the DSI and DISPC TLs are the same */
	if ((u64)dsi_htot * dispc_pck != (u64)dispc_htot * byteclk)
		return false;

	dispc_hbl = dispc_htot - xres;

	/* setup DSI videomode */

	dsi_vm = &ctx->dsi_vm;
	memset(dsi_vm, 0, sizeof(*dsi_vm));

	dsi_vm->hsclk = hsclk;

	dsi_vm->ndl = ndl;
	dsi_vm->bitspp = bitspp;

	if (cfg->trans_mode != OMAP_DSS_DSI_PULSE_MODE) {
		hsa = 0;
	} else if (ndl == 3 && req_vm->hsync_len == 0) {
		hsa = 0;
	} else {
		hsa = div64_u64((u64)req_vm->hsync_len * byteclk, req_pck_nom);
		hsa = max(hsa - hse, 1);
	}

	hbp = div64_u64((u64)req_vm->hback_porch * byteclk, req_pck_nom);
	hbp = max(hbp, 1);

	hfp = dsi_hbl - (hss + hsa + hse + hbp);
	if (hfp < 1) {
		int t;
		/* we need to take cycles from hbp */

		t = 1 - hfp;
		hbp = max(hbp - t, 1);
		hfp = dsi_hbl - (hss + hsa + hse + hbp);

		if (hfp < 1 && hsa > 0) {
			/* we need to take cycles from hsa */
			t = 1 - hfp;
			hsa = max(hsa - t, 1);
			hfp = dsi_hbl - (hss + hsa + hse + hbp);
		}
	}

	if (hfp < 1)
		return false;

	dsi_vm->hss = hss;
	dsi_vm->hsa = hsa;
	dsi_vm->hse = hse;
	dsi_vm->hbp = hbp;
	dsi_vm->hact = xres;
	dsi_vm->hfp = hfp;

	dsi_vm->vsa = req_vm->vsync_len;
	dsi_vm->vbp = req_vm->vback_porch;
	dsi_vm->vact = req_vm->vactive;
	dsi_vm->vfp = req_vm->vfront_porch;

	dsi_vm->trans_mode = cfg->trans_mode;

	dsi_vm->blanking_mode = 0;
	dsi_vm->hsa_blanking_mode = 1;
	dsi_vm->hfp_blanking_mode = 1;
	dsi_vm->hbp_blanking_mode = 1;

	dsi_vm->ddr_clk_always_on = cfg->ddr_clk_always_on;
	dsi_vm->window_sync = 4;

	/* setup DISPC videomode */

	dispc_vm = &ctx->vm;
	*dispc_vm = *req_vm;
	dispc_vm->pixelclock = dispc_pck;

	if (cfg->trans_mode == OMAP_DSS_DSI_PULSE_MODE) {
		hsa = div64_u64((u64)req_vm->hsync_len * dispc_pck,
				req_pck_nom);
		hsa = max(hsa, 1);
	} else {
		hsa = 1;
	}

	hbp = div64_u64((u64)req_vm->hback_porch * dispc_pck, req_pck_nom);
	hbp = max(hbp, 1);

	hfp = dispc_hbl - hsa - hbp;
	if (hfp < 1) {
		int t;
		/* we need to take cycles from hbp */

		t = 1 - hfp;
		hbp = max(hbp - t, 1);
		hfp = dispc_hbl - hsa - hbp;

		if (hfp < 1) {
			/* we need to take cycles from hsa */
			t = 1 - hfp;
			hsa = max(hsa - t, 1);
			hfp = dispc_hbl - hsa - hbp;
		}
	}

	if (hfp < 1)
		return false;

	dispc_vm->hfront_porch = hfp;
	dispc_vm->hsync_len = hsa;
	dispc_vm->hback_porch = hbp;

	return true;
}


static bool dsi_vm_calc_dispc_cb(int lckd, int pckd, unsigned long lck,
		unsigned long pck, void *data)
{
	struct dsi_clk_calc_ctx *ctx = data;

	ctx->dispc_cinfo.lck_div = lckd;
	ctx->dispc_cinfo.pck_div = pckd;
	ctx->dispc_cinfo.lck = lck;
	ctx->dispc_cinfo.pck = pck;

	if (dsi_vm_calc_blanking(ctx) == false)
		return false;

#ifdef PRINT_VERBOSE_VM_TIMINGS
	print_dispc_vm("dispc", &ctx->vm);
	print_dsi_vm("dsi  ", &ctx->dsi_vm);
	print_dispc_vm("req  ", ctx->config->vm);
	print_dsi_dispc_vm("act  ", &ctx->dsi_vm);
#endif

	return true;
}

static bool dsi_vm_calc_hsdiv_cb(int m_dispc, unsigned long dispc,
		void *data)
{
	struct dsi_clk_calc_ctx *ctx = data;
	unsigned long pck_max;

	ctx->dsi_cinfo.mX[HSDIV_DISPC] = m_dispc;
	ctx->dsi_cinfo.clkout[HSDIV_DISPC] = dispc;

	/*
	 * In burst mode we can let the dispc pck be arbitrarily high, but it
	 * limits our scaling abilities. So for now, don't aim too high.
	 */

	if (ctx->config->trans_mode == OMAP_DSS_DSI_BURST_MODE)
		pck_max = ctx->req_pck_max + 10000000;
	else
		pck_max = ctx->req_pck_max;

	return dispc_div_calc(dispc, ctx->req_pck_min, pck_max,
			dsi_vm_calc_dispc_cb, ctx);
}

static bool dsi_vm_calc_pll_cb(int n, int m, unsigned long fint,
		unsigned long clkdco, void *data)
{
	struct dsi_clk_calc_ctx *ctx = data;

	ctx->dsi_cinfo.n = n;
	ctx->dsi_cinfo.m = m;
	ctx->dsi_cinfo.fint = fint;
	ctx->dsi_cinfo.clkdco = clkdco;

	return dss_pll_hsdiv_calc_a(ctx->pll, clkdco, ctx->req_pck_min,
			dss_feat_get_param_max(FEAT_PARAM_DSS_FCK),
			dsi_vm_calc_hsdiv_cb, ctx);
}

static bool dsi_vm_calc(struct dsi_data *dsi,
		const struct omap_dss_dsi_config *cfg,
		struct dsi_clk_calc_ctx *ctx)
{
	const struct videomode *vm = cfg->vm;
	unsigned long clkin;
	unsigned long pll_min;
	unsigned long pll_max;
	int ndl = dsi->num_lanes_used - 1;
	int bitspp = dsi_get_pixel_size(cfg->pixel_format);
	unsigned long byteclk_min;

	clkin = clk_get_rate(dsi->pll.clkin);

	memset(ctx, 0, sizeof(*ctx));
	ctx->dsidev = dsi->pdev;
	ctx->pll = &dsi->pll;
	ctx->config = cfg;

	/* these limits should come from the panel driver */
	ctx->req_pck_min = vm->pixelclock - 1000;
	ctx->req_pck_nom = vm->pixelclock;
	ctx->req_pck_max = vm->pixelclock + 1000;

	byteclk_min = div64_u64((u64)ctx->req_pck_min * bitspp, ndl * 8);
	pll_min = max(cfg->hs_clk_min * 4, byteclk_min * 4 * 4);

	if (cfg->trans_mode == OMAP_DSS_DSI_BURST_MODE) {
		pll_max = cfg->hs_clk_max * 4;
	} else {
		unsigned long byteclk_max;
		byteclk_max = div64_u64((u64)ctx->req_pck_max * bitspp,
				ndl * 8);

		pll_max = byteclk_max * 4 * 4;
	}

	return dss_pll_calc_a(ctx->pll, clkin,
			pll_min, pll_max,
			dsi_vm_calc_pll_cb, ctx);
}

static int dsi_set_config(struct omap_dss_device *dssdev,
		const struct omap_dss_dsi_config *config)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	struct dsi_clk_calc_ctx ctx;
	bool ok;
	int r;

	mutex_lock(&dsi->lock);

	dsi->pix_fmt = config->pixel_format;
	dsi->mode = config->mode;

	if (config->mode == OMAP_DSS_DSI_VIDEO_MODE)
		ok = dsi_vm_calc(dsi, config, &ctx);
	else
		ok = dsi_cm_calc(dsi, config, &ctx);

	if (!ok) {
		DSSERR("failed to find suitable DSI clock settings\n");
		r = -EINVAL;
		goto err;
	}

	dsi_pll_calc_dsi_fck(&ctx.dsi_cinfo);

	r = dsi_lp_clock_calc(ctx.dsi_cinfo.clkout[HSDIV_DSI],
		config->lp_clk_min, config->lp_clk_max, &dsi->user_lp_cinfo);
	if (r) {
		DSSERR("failed to find suitable DSI LP clock settings\n");
		goto err;
	}

	dsi->user_dsi_cinfo = ctx.dsi_cinfo;
	dsi->user_dispc_cinfo = ctx.dispc_cinfo;

	dsi->vm = ctx.vm;
	dsi->vm_timings = ctx.dsi_vm;

	mutex_unlock(&dsi->lock);

	return 0;
err:
	mutex_unlock(&dsi->lock);

	return r;
}

/*
 * Return a hardcoded channel for the DSI output. This should work for
 * current use cases, but this can be later expanded to either resolve
 * the channel in some more dynamic manner, or get the channel as a user
 * parameter.
 */
static enum omap_channel dsi_get_channel(struct dsi_data *dsi)
{
	switch (dsi->data->model) {
	case DSI_MODEL_OMAP3:
		return OMAP_DSS_CHANNEL_LCD;

	case DSI_MODEL_OMAP4:
		switch (dsi->module_id) {
		case 0:
			return OMAP_DSS_CHANNEL_LCD;
		case 1:
			return OMAP_DSS_CHANNEL_LCD2;
		default:
			DSSWARN("unsupported module id\n");
			return OMAP_DSS_CHANNEL_LCD;
		}

	case DSI_MODEL_OMAP5:
		switch (dsi->module_id) {
		case 0:
			return OMAP_DSS_CHANNEL_LCD;
		case 1:
			return OMAP_DSS_CHANNEL_LCD3;
		default:
			DSSWARN("unsupported module id\n");
			return OMAP_DSS_CHANNEL_LCD;
		}

	default:
		DSSWARN("unsupported DSS version\n");
		return OMAP_DSS_CHANNEL_LCD;
	}
}

static int dsi_request_vc(struct omap_dss_device *dssdev, int *channel)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	int i;

	for (i = 0; i < ARRAY_SIZE(dsi->vc); i++) {
		if (!dsi->vc[i].dssdev) {
			dsi->vc[i].dssdev = dssdev;
			*channel = i;
			return 0;
		}
	}

	DSSERR("cannot get VC for display %s", dssdev->name);
	return -ENOSPC;
}

static int dsi_set_vc_id(struct omap_dss_device *dssdev, int channel, int vc_id)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	if (vc_id < 0 || vc_id > 3) {
		DSSERR("VC ID out of range\n");
		return -EINVAL;
	}

	if (channel < 0 || channel > 3) {
		DSSERR("Virtual Channel out of range\n");
		return -EINVAL;
	}

	if (dsi->vc[channel].dssdev != dssdev) {
		DSSERR("Virtual Channel not allocated to display %s\n",
			dssdev->name);
		return -EINVAL;
	}

	dsi->vc[channel].vc_id = vc_id;

	return 0;
}

static void dsi_release_vc(struct omap_dss_device *dssdev, int channel)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	if ((channel >= 0 && channel <= 3) &&
		dsi->vc[channel].dssdev == dssdev) {
		dsi->vc[channel].dssdev = NULL;
		dsi->vc[channel].vc_id = 0;
	}
}


static int dsi_get_clocks(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	struct clk *clk;

	clk = devm_clk_get(&dsidev->dev, "fck");
	if (IS_ERR(clk)) {
		DSSERR("can't get fck\n");
		return PTR_ERR(clk);
	}

	dsi->dss_clk = clk;

	return 0;
}

static int dsi_connect(struct omap_dss_device *dssdev,
		struct omap_dss_device *dst)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	enum omap_channel dispc_channel = dssdev->dispc_channel;
	int r;

	r = dsi_regulator_init(dsidev);
	if (r)
		return r;

	r = dss_mgr_connect(dispc_channel, dssdev);
	if (r)
		return r;

	r = omapdss_output_set_device(dssdev, dst);
	if (r) {
		DSSERR("failed to connect output to new device: %s\n",
				dssdev->name);
		dss_mgr_disconnect(dispc_channel, dssdev);
		return r;
	}

	return 0;
}

static void dsi_disconnect(struct omap_dss_device *dssdev,
		struct omap_dss_device *dst)
{
	enum omap_channel dispc_channel = dssdev->dispc_channel;

	WARN_ON(dst != dssdev->dst);

	if (dst != dssdev->dst)
		return;

	omapdss_output_unset_device(dssdev);

	dss_mgr_disconnect(dispc_channel, dssdev);
}

static const struct omapdss_dsi_ops dsi_ops = {
	.connect = dsi_connect,
	.disconnect = dsi_disconnect,

	.bus_lock = dsi_bus_lock,
	.bus_unlock = dsi_bus_unlock,

	.enable = dsi_display_enable,
	.disable = dsi_display_disable,

	.enable_hs = dsi_vc_enable_hs,

	.configure_pins = dsi_configure_pins,
	.set_config = dsi_set_config,

	.enable_video_output = dsi_enable_video_output,
	.disable_video_output = dsi_disable_video_output,

	.update = dsi_update,

	.enable_te = dsi_enable_te,

	.request_vc = dsi_request_vc,
	.set_vc_id = dsi_set_vc_id,
	.release_vc = dsi_release_vc,

	.dcs_write = dsi_vc_dcs_write,
	.dcs_write_nosync = dsi_vc_dcs_write_nosync,
	.dcs_read = dsi_vc_dcs_read,

	.gen_write = dsi_vc_generic_write,
	.gen_write_nosync = dsi_vc_generic_write_nosync,
	.gen_read = dsi_vc_generic_read,

	.bta_sync = dsi_vc_send_bta_sync,

	.set_max_rx_packet_size = dsi_vc_set_max_rx_packet_size,
};

static void dsi_init_output(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	struct omap_dss_device *out = &dsi->output;

	out->dev = &dsidev->dev;
	out->id = dsi->module_id == 0 ?
			OMAP_DSS_OUTPUT_DSI1 : OMAP_DSS_OUTPUT_DSI2;

	out->output_type = OMAP_DISPLAY_TYPE_DSI;
	out->name = dsi->module_id == 0 ? "dsi.0" : "dsi.1";
	out->dispc_channel = dsi_get_channel(dsi);
	out->ops.dsi = &dsi_ops;
	out->owner = THIS_MODULE;

	omapdss_register_output(out);
}

static void dsi_uninit_output(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	struct omap_dss_device *out = &dsi->output;

	omapdss_unregister_output(out);
}

static int dsi_probe_of(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct dsi_data *dsi = dsi_get_dsidrv_data(pdev);
	struct property *prop;
	u32 lane_arr[10];
	int len, num_pins;
	int r, i;
	struct device_node *ep;
	struct omap_dsi_pin_config pin_cfg;

	ep = of_graph_get_endpoint_by_regs(node, 0, 0);
	if (!ep)
		return 0;

	prop = of_find_property(ep, "lanes", &len);
	if (prop == NULL) {
		dev_err(&pdev->dev, "failed to find lane data\n");
		r = -EINVAL;
		goto err;
	}

	num_pins = len / sizeof(u32);

	if (num_pins < 4 || num_pins % 2 != 0 ||
		num_pins > dsi->num_lanes_supported * 2) {
		dev_err(&pdev->dev, "bad number of lanes\n");
		r = -EINVAL;
		goto err;
	}

	r = of_property_read_u32_array(ep, "lanes", lane_arr, num_pins);
	if (r) {
		dev_err(&pdev->dev, "failed to read lane data\n");
		goto err;
	}

	pin_cfg.num_pins = num_pins;
	for (i = 0; i < num_pins; ++i)
		pin_cfg.pins[i] = (int)lane_arr[i];

	r = dsi_configure_pins(&dsi->output, &pin_cfg);
	if (r) {
		dev_err(&pdev->dev, "failed to configure pins");
		goto err;
	}

	of_node_put(ep);

	return 0;

err:
	of_node_put(ep);
	return r;
}

static const struct dss_pll_ops dsi_pll_ops = {
	.enable = dsi_pll_enable,
	.disable = dsi_pll_disable,
	.set_config = dss_pll_write_config_type_a,
};

static const struct dss_pll_hw dss_omap3_dsi_pll_hw = {
	.type = DSS_PLL_TYPE_A,

	.n_max = (1 << 7) - 1,
	.m_max = (1 << 11) - 1,
	.mX_max = (1 << 4) - 1,
	.fint_min = 750000,
	.fint_max = 2100000,
	.clkdco_low = 1000000000,
	.clkdco_max = 1800000000,

	.n_msb = 7,
	.n_lsb = 1,
	.m_msb = 18,
	.m_lsb = 8,

	.mX_msb[0] = 22,
	.mX_lsb[0] = 19,
	.mX_msb[1] = 26,
	.mX_lsb[1] = 23,

	.has_stopmode = true,
	.has_freqsel = true,
	.has_selfreqdco = false,
	.has_refsel = false,
};

static const struct dss_pll_hw dss_omap4_dsi_pll_hw = {
	.type = DSS_PLL_TYPE_A,

	.n_max = (1 << 8) - 1,
	.m_max = (1 << 12) - 1,
	.mX_max = (1 << 5) - 1,
	.fint_min = 500000,
	.fint_max = 2500000,
	.clkdco_low = 1000000000,
	.clkdco_max = 1800000000,

	.n_msb = 8,
	.n_lsb = 1,
	.m_msb = 20,
	.m_lsb = 9,

	.mX_msb[0] = 25,
	.mX_lsb[0] = 21,
	.mX_msb[1] = 30,
	.mX_lsb[1] = 26,

	.has_stopmode = true,
	.has_freqsel = false,
	.has_selfreqdco = false,
	.has_refsel = false,
};

static const struct dss_pll_hw dss_omap5_dsi_pll_hw = {
	.type = DSS_PLL_TYPE_A,

	.n_max = (1 << 8) - 1,
	.m_max = (1 << 12) - 1,
	.mX_max = (1 << 5) - 1,
	.fint_min = 150000,
	.fint_max = 52000000,
	.clkdco_low = 1000000000,
	.clkdco_max = 1800000000,

	.n_msb = 8,
	.n_lsb = 1,
	.m_msb = 20,
	.m_lsb = 9,

	.mX_msb[0] = 25,
	.mX_lsb[0] = 21,
	.mX_msb[1] = 30,
	.mX_lsb[1] = 26,

	.has_stopmode = true,
	.has_freqsel = false,
	.has_selfreqdco = true,
	.has_refsel = true,
};

static int dsi_init_pll_data(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	struct dss_pll *pll = &dsi->pll;
	struct clk *clk;
	int r;

	clk = devm_clk_get(&dsidev->dev, "sys_clk");
	if (IS_ERR(clk)) {
		DSSERR("can't get sys_clk\n");
		return PTR_ERR(clk);
	}

	pll->name = dsi->module_id == 0 ? "dsi0" : "dsi1";
	pll->id = dsi->module_id == 0 ? DSS_PLL_DSI1 : DSS_PLL_DSI2;
	pll->clkin = clk;
	pll->base = dsi->pll_base;
	pll->hw = dsi->data->pll_hw;
	pll->ops = &dsi_pll_ops;

	r = dss_pll_register(pll);
	if (r)
		return r;

	return 0;
}

/* DSI1 HW IP initialisation */
static const struct dsi_of_data dsi_of_data_omap34xx = {
	.model = DSI_MODEL_OMAP3,
	.pll_hw = &dss_omap3_dsi_pll_hw,
	.modules = (const struct dsi_module_id_data[]) {
		{ .address = 0x4804fc00, .id = 0, },
		{ },
	},
	.quirks = DSI_QUIRK_REVERSE_TXCLKESC,
};

static const struct dsi_of_data dsi_of_data_omap36xx = {
	.model = DSI_MODEL_OMAP3,
	.pll_hw = &dss_omap3_dsi_pll_hw,
	.modules = (const struct dsi_module_id_data[]) {
		{ .address = 0x4804fc00, .id = 0, },
		{ },
	},
	.quirks = DSI_QUIRK_PLL_PWR_BUG,
};

static const struct dsi_of_data dsi_of_data_omap4 = {
	.model = DSI_MODEL_OMAP4,
	.pll_hw = &dss_omap4_dsi_pll_hw,
	.modules = (const struct dsi_module_id_data[]) {
		{ .address = 0x58004000, .id = 0, },
		{ .address = 0x58005000, .id = 1, },
		{ },
	},
	.quirks = DSI_QUIRK_DCS_CMD_CONFIG_VC | DSI_QUIRK_VC_OCP_WIDTH
		| DSI_QUIRK_GNQ,
};

static const struct dsi_of_data dsi_of_data_omap5 = {
	.model = DSI_MODEL_OMAP5,
	.pll_hw = &dss_omap5_dsi_pll_hw,
	.modules = (const struct dsi_module_id_data[]) {
		{ .address = 0x58004000, .id = 0, },
		{ .address = 0x58009000, .id = 1, },
		{ },
	},
	.quirks = DSI_QUIRK_DCS_CMD_CONFIG_VC | DSI_QUIRK_VC_OCP_WIDTH
		| DSI_QUIRK_GNQ | DSI_QUIRK_PHY_DCC,
};

static const struct of_device_id dsi_of_match[] = {
	{ .compatible = "ti,omap3-dsi", .data = &dsi_of_data_omap36xx, },
	{ .compatible = "ti,omap4-dsi", .data = &dsi_of_data_omap4, },
	{ .compatible = "ti,omap5-dsi", .data = &dsi_of_data_omap5, },
	{},
};

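/*
 * OMAP34xx/AM35xx and OMAP36xx all use the "ti,omap3-dsi" compatible but
 * need different quirk data, so the former are picked out by SoC match.
 */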
static const struct soc_device_attribute dsi_soc_devices[] = {
	{ .machine = "OMAP3[45]*",	.data = &dsi_of_data_omap34xx },
	{ .machine = "AM35*",		.data = &dsi_of_data_omap34xx },
	{ /* sentinel */ }
};
static int dsi_bind(struct device *dev, struct device *master, void *data)
{
	struct platform_device *dsidev = to_platform_device(dev);
	const struct soc_device_attribute *soc;
	const struct dsi_module_id_data *d;
	u32 rev;
	int r, i;
	struct dsi_data *dsi;
	struct resource *dsi_mem;
	struct resource *res;

	dsi = devm_kzalloc(&dsidev->dev, sizeof(*dsi), GFP_KERNEL);
	if (!dsi)
		return -ENOMEM;

	dsi->pdev = dsidev;
	dev_set_drvdata(&dsidev->dev, dsi);

	spin_lock_init(&dsi->irq_lock);
	spin_lock_init(&dsi->errors_lock);
	dsi->errors = 0;

#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
	spin_lock_init(&dsi->irq_stats_lock);
	dsi->irq_stats.last_reset = jiffies;
#endif

	mutex_init(&dsi->lock);
	sema_init(&dsi->bus_lock, 1);

	INIT_DEFERRABLE_WORK(&dsi->framedone_timeout_work,
			     dsi_framedone_timeout_work_callback);

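	/* timer used to catch a TE trigger that never arrives */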
#ifdef DSI_CATCH_MISSING_TE
	init_timer(&dsi->te_timer);
	dsi->te_timer.function = dsi_te_timeout;
	dsi->te_timer.data = 0;
#endif

	dsi_mem = platform_get_resource_byname(dsidev, IORESOURCE_MEM, "proto");
	dsi->proto_base = devm_ioremap_resource(&dsidev->dev, dsi_mem);
	if (IS_ERR(dsi->proto_base))
		return PTR_ERR(dsi->proto_base);

	res = platform_get_resource_byname(dsidev, IORESOURCE_MEM, "phy");
	dsi->phy_base = devm_ioremap_resource(&dsidev->dev, res);
	if (IS_ERR(dsi->phy_base))
		return PTR_ERR(dsi->phy_base);

	res = platform_get_resource_byname(dsidev, IORESOURCE_MEM, "pll");
	dsi->pll_base = devm_ioremap_resource(&dsidev->dev, res);
	if (IS_ERR(dsi->pll_base))
		return PTR_ERR(dsi->pll_base);

	dsi->irq = platform_get_irq(dsi->pdev, 0);
	if (dsi->irq < 0) {
		DSSERR("platform_get_irq failed\n");
		return -ENODEV;
	}

	r = devm_request_irq(&dsidev->dev, dsi->irq, omap_dsi_irq_handler,
			     IRQF_SHARED, dev_name(&dsidev->dev), dsi->pdev);
	if (r < 0) {
		DSSERR("request_irq failed\n");
		return r;
	}

	soc = soc_device_match(dsi_soc_devices);
	if (soc)
		dsi->data = soc->data;
	else
		dsi->data = of_match_node(dsi_of_match, dev->of_node)->data;

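	/* find the module data whose base address matches this instance */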
	d = dsi->data->modules;
	while (d->address != 0 && d->address != dsi_mem->start)
		d++;

	if (d->address == 0) {
		DSSERR("unsupported DSI module\n");
		return -ENODEV;
	}

	dsi->module_id = d->id;

	if (dsi->data->model == DSI_MODEL_OMAP4) {
		struct device_node *np;

		/*
		 * The OMAP4 display DT bindings don't reference the padconf
		 * syscon. Our only option to retrieve it is to find it by name.
		 */
		np = of_find_node_by_name(NULL, "omap4_padconf_global");
		if (!np)
			return -ENODEV;

		dsi->syscon = syscon_node_to_regmap(np);
		of_node_put(np);
	}

	/* DSI VCs initialization */
	for (i = 0; i < ARRAY_SIZE(dsi->vc); i++) {
		dsi->vc[i].source = DSI_VC_SOURCE_L4;
		dsi->vc[i].dssdev = NULL;
		dsi->vc[i].vc_id = 0;
	}

	r = dsi_get_clocks(dsidev);
	if (r)
		return r;

	r = dsi_init_pll_data(dsidev);
	if (r) {
		DSSERR("PLL init failed\n");
		return r;
	}

	pm_runtime_enable(&dsidev->dev);

	r = dsi_runtime_get(dsidev);
	if (r)
		goto err_runtime_get;

	rev = dsi_read_reg(dsidev, DSI_REVISION);
	dev_dbg(&dsidev->dev, "OMAP DSI rev %d.%d\n",
	       FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));

	/* DSI on OMAP3 doesn't have register DSI_GNQ, set number
	 * of data lanes to 3 by default */
	if (dsi->data->quirks & DSI_QUIRK_GNQ)
		/* NB_DATA_LANES */
		dsi->num_lanes_supported = 1 + REG_GET(dsidev, DSI_GNQ, 11, 9);
	else
		dsi->num_lanes_supported = 3;

	dsi->line_buffer_size = dsi_get_line_buf_size(dsidev);

	dsi_init_output(dsidev);

	r = dsi_probe_of(dsidev);
	if (r) {
		DSSERR("Invalid DSI DT data\n");
		goto err_probe_of;
	}

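	/* create devices for the child nodes (e.g. DSI peripherals) in DT */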
	r = of_platform_populate(dsidev->dev.of_node, NULL, NULL, &dsidev->dev);
	if (r)
		DSSERR("Failed to populate DSI child devices: %d\n", r);

	dsi_runtime_put(dsidev);

	if (dsi->module_id == 0)
		dss_debugfs_create_file("dsi1_regs", dsi1_dump_regs);
	else if (dsi->module_id == 1)
		dss_debugfs_create_file("dsi2_regs", dsi2_dump_regs);

#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
	if (dsi->module_id == 0)
		dss_debugfs_create_file("dsi1_irqs", dsi1_dump_irqs);
	else if (dsi->module_id == 1)
		dss_debugfs_create_file("dsi2_irqs", dsi2_dump_irqs);
#endif

	return 0;

err_probe_of:
	dsi_uninit_output(dsidev);
	dsi_runtime_put(dsidev);

err_runtime_get:
	pm_runtime_disable(&dsidev->dev);
	return r;
}

static void dsi_unbind(struct device *dev, struct device *master, void *data)
{
	struct platform_device *dsidev = to_platform_device(dev);
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	of_platform_depopulate(&dsidev->dev);

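	/* all SCP clock users should have released it by now */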
	WARN_ON(dsi->scp_clk_refcount > 0);

	dss_pll_unregister(&dsi->pll);

	dsi_uninit_output(dsidev);

	pm_runtime_disable(&dsidev->dev);

	if (dsi->vdds_dsi_reg != NULL && dsi->vdds_dsi_enabled) {
		regulator_disable(dsi->vdds_dsi_reg);
		dsi->vdds_dsi_enabled = false;
	}
}

static const struct component_ops dsi_component_ops = {
	.bind	= dsi_bind,
	.unbind	= dsi_unbind,
};

static int dsi_probe(struct platform_device *pdev)
{
	return component_add(&pdev->dev, &dsi_component_ops);
}

static int dsi_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &dsi_component_ops);
	return 0;
}

static int dsi_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct dsi_data *dsi = dsi_get_dsidrv_data(pdev);

	dsi->is_enabled = false;
	/* ensure the irq handler sees the is_enabled value */
	smp_wmb();
	/* wait for current handler to finish before turning the DSI off */
	synchronize_irq(dsi->irq);

	dispc_runtime_put();

	return 0;
}

static int dsi_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct dsi_data *dsi = dsi_get_dsidrv_data(pdev);
	int r;

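	/* DSI needs DISPC up, so take a DISPC runtime reference first */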
	r = dispc_runtime_get();
	if (r)
		return r;

	dsi->is_enabled = true;
	/* ensure the irq handler sees the is_enabled value */
	smp_wmb();

	return 0;
}

static const struct dev_pm_ops dsi_pm_ops = {
	.runtime_suspend = dsi_runtime_suspend,
	.runtime_resume = dsi_runtime_resume,
};

static struct platform_driver omap_dsihw_driver = {
	.probe		= dsi_probe,
	.remove		= dsi_remove,
	.driver         = {
		.name   = "omapdss_dsi",
		.pm	= &dsi_pm_ops,
		.of_match_table = dsi_of_match,
		.suppress_bind_attrs = true,
	},
};

int __init dsi_init_platform_driver(void)
{
	return platform_driver_register(&omap_dsihw_driver);
}

void dsi_uninit_platform_driver(void)
{
	platform_driver_unregister(&omap_dsihw_driver);
}