Commit 7093d6cd authored by Laurent Pinchart, committed by Tomi Valkeinen

drm: omapdrm: dsi: Pass the dsi_data pointer to internal functions

Internal dsi functions take a pointer to the DSI platform_device and
then cast it to a dsi_data pointer. That's pointless as the caller
already has the dsi_data pointer. Pass it directly instead of the
platform_device pointer.
Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Reviewed-by: Sebastian Reichel <sebastian.reichel@collabora.co.uk>
Parent b40d0ed6
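
The snippet below is a minimal standalone sketch (not part of the patch) of the pattern this commit applies throughout the driver: instead of passing the platform_device handle and re-deriving the private data in every helper via dsi_get_dsidrv_data(), the caller resolves the dsi_data pointer once and passes it down directly. The struct fields and the bus_lock_count member are simplified stand-ins, not the real driver layout.

/*
 * Standalone illustration of the refactoring (simplified, not kernel code).
 * "old" helpers take a device handle and look up the private data on every
 * call; "new" helpers take the private data directly.
 */
#include <stdio.h>

struct dsi_data {
	int bus_lock_count;		/* stand-in for the real bus_lock semaphore */
};

struct platform_device {
	void *drvdata;			/* stand-in for dev_get_drvdata() storage */
};

static struct dsi_data *dsi_get_dsidrv_data(struct platform_device *pdev)
{
	return pdev->drvdata;
}

/* Before: every internal helper converts platform_device back to dsi_data. */
static int dsi_bus_is_locked_old(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	return dsi->bus_lock_count == 0;
}

/* After: the caller already holds dsi_data, so pass it directly. */
static int dsi_bus_is_locked_new(struct dsi_data *dsi)
{
	return dsi->bus_lock_count == 0;
}

int main(void)
{
	struct dsi_data dsi = { .bus_lock_count = 0 };
	struct platform_device pdev = { .drvdata = &dsi };

	printf("old: %d\n", dsi_bus_is_locked_old(&pdev));
	printf("new: %d\n", dsi_bus_is_locked_new(&dsi));
	return 0;
}

The same transformation repeats in the diff below for register accessors, IRQ setup, PLL/CIO power control and the VC helpers: each function signature changes from struct platform_device *dsidev to struct dsi_data *dsi, and the local dsi_get_dsidrv_data() lookup inside the function body is dropped.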
......@@ -119,11 +119,11 @@ struct dsi_reg { u16 module; u16 idx; };
#define DSI_PLL_CONFIGURATION1 DSI_REG(DSI_PLL, 0x000C)
#define DSI_PLL_CONFIGURATION2 DSI_REG(DSI_PLL, 0x0010)
#define REG_GET(dsidev, idx, start, end) \
FLD_GET(dsi_read_reg(dsidev, idx), start, end)
#define REG_GET(dsi, idx, start, end) \
FLD_GET(dsi_read_reg(dsi, idx), start, end)
#define REG_FLD_MOD(dsidev, idx, val, start, end) \
dsi_write_reg(dsidev, idx, FLD_MOD(dsi_read_reg(dsidev, idx), val, start, end))
#define REG_FLD_MOD(dsi, idx, val, start, end) \
dsi_write_reg(dsi, idx, FLD_MOD(dsi_read_reg(dsi, idx), val, start, end))
/* Global interrupts */
#define DSI_IRQ_VC0 (1 << 0)
......@@ -213,13 +213,14 @@ struct dsi_reg { u16 module; u16 idx; };
DSI_CIO_IRQ_ERRCONTENTIONLP0_5 | DSI_CIO_IRQ_ERRCONTENTIONLP1_5)
typedef void (*omap_dsi_isr_t) (void *arg, u32 mask);
struct dsi_data;
static int dsi_display_init_dispc(struct platform_device *dsidev,
static int dsi_display_init_dispc(struct dsi_data *dsi,
enum omap_channel channel);
static void dsi_display_uninit_dispc(struct platform_device *dsidev,
static void dsi_display_uninit_dispc(struct dsi_data *dsi,
enum omap_channel channel);
static int dsi_vc_send_null(struct omap_dss_device *dssdev, int channel);
static int dsi_vc_send_null(struct dsi_data *dsi, int channel);
/* DSI PLL HSDIV indices */
#define HSDIV_DISPC 0
......@@ -282,7 +283,7 @@ struct dsi_isr_tables {
};
struct dsi_clk_calc_ctx {
struct platform_device *dsidev;
struct dsi_data *dsi;
struct dss_pll *pll;
/* inputs */
......@@ -429,7 +430,7 @@ struct dsi_data {
};
struct dsi_packet_sent_handler_data {
struct platform_device *dsidev;
struct dsi_data *dsi;
struct completion *completion;
};
......@@ -448,7 +449,7 @@ static inline struct platform_device *dsi_get_dsidev_from_dssdev(struct omap_dss
return to_platform_device(dssdev->dev);
}
static struct platform_device *dsi_get_dsidev_from_id(int module)
static struct dsi_data *dsi_get_dsi_from_id(int module)
{
struct omap_dss_device *out;
enum omap_dss_output_id id;
......@@ -466,13 +467,12 @@ static struct platform_device *dsi_get_dsidev_from_id(int module)
out = omap_dss_get_output(id);
return out ? to_platform_device(out->dev) : NULL;
return out ? dsi_get_dsidrv_data(to_platform_device(out->dev)) : NULL;
}
static inline void dsi_write_reg(struct platform_device *dsidev,
const struct dsi_reg idx, u32 val)
static inline void dsi_write_reg(struct dsi_data *dsi,
const struct dsi_reg idx, u32 val)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
void __iomem *base;
switch(idx.module) {
......@@ -485,10 +485,8 @@ static inline void dsi_write_reg(struct platform_device *dsidev,
__raw_writel(val, base + idx.idx);
}
static inline u32 dsi_read_reg(struct platform_device *dsidev,
const struct dsi_reg idx)
static inline u32 dsi_read_reg(struct dsi_data *dsi, const struct dsi_reg idx)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
void __iomem *base;
switch(idx.module) {
......@@ -517,10 +515,8 @@ static void dsi_bus_unlock(struct omap_dss_device *dssdev)
up(&dsi->bus_lock);
}
static bool dsi_bus_is_locked(struct platform_device *dsidev)
static bool dsi_bus_is_locked(struct dsi_data *dsi)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
return dsi->bus_lock.count == 0;
}
......@@ -529,8 +525,9 @@ static void dsi_completion_handler(void *data, u32 mask)
complete((struct completion *)data);
}
static inline bool wait_for_bit_change(struct platform_device *dsidev,
const struct dsi_reg idx, int bitnum, int value)
static inline bool wait_for_bit_change(struct dsi_data *dsi,
const struct dsi_reg idx,
int bitnum, int value)
{
unsigned long timeout;
ktime_t wait;
......@@ -539,14 +536,14 @@ static inline bool wait_for_bit_change(struct platform_device *dsidev,
/* first busyloop to see if the bit changes right away */
t = 100;
while (t-- > 0) {
if (REG_GET(dsidev, idx, bitnum, bitnum) == value)
if (REG_GET(dsi, idx, bitnum, bitnum) == value)
return true;
}
/* then loop for 500ms, sleeping for 1ms in between */
timeout = jiffies + msecs_to_jiffies(500);
while (time_before(jiffies, timeout)) {
if (REG_GET(dsidev, idx, bitnum, bitnum) == value)
if (REG_GET(dsi, idx, bitnum, bitnum) == value)
return true;
wait = ns_to_ktime(1000 * 1000);
......@@ -574,21 +571,18 @@ static u8 dsi_get_pixel_size(enum omap_dss_dsi_pixel_format fmt)
}
#ifdef DSI_PERF_MEASURE
static void dsi_perf_mark_setup(struct platform_device *dsidev)
static void dsi_perf_mark_setup(struct dsi_data *dsi)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
dsi->perf_setup_time = ktime_get();
}
static void dsi_perf_mark_start(struct platform_device *dsidev)
static void dsi_perf_mark_start(struct dsi_data *dsi)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
dsi->perf_start_time = ktime_get();
}
static void dsi_perf_show(struct platform_device *dsidev, const char *name)
static void dsi_perf_show(struct dsi_data *dsi, const char *name)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
ktime_t t, setup_time, trans_time;
u32 total_bytes;
u32 setup_us, trans_us, total_us;
......@@ -622,16 +616,15 @@ static void dsi_perf_show(struct platform_device *dsidev, const char *name)
total_bytes * 1000 / total_us);
}
#else
static inline void dsi_perf_mark_setup(struct platform_device *dsidev)
static inline void dsi_perf_mark_setup(struct dsi_data *dsi)
{
}
static inline void dsi_perf_mark_start(struct platform_device *dsidev)
static inline void dsi_perf_mark_start(struct dsi_data *dsi)
{
}
static inline void dsi_perf_show(struct platform_device *dsidev,
const char *name)
static inline void dsi_perf_show(struct dsi_data *dsi, const char *name)
{
}
#endif
......@@ -728,10 +721,9 @@ static void print_irq_status_cio(u32 status)
}
#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
static void dsi_collect_irq_stats(struct platform_device *dsidev, u32 irqstatus,
u32 *vcstatus, u32 ciostatus)
static void dsi_collect_irq_stats(struct dsi_data *dsi, u32 irqstatus,
u32 *vcstatus, u32 ciostatus)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
int i;
spin_lock(&dsi->irq_stats_lock);
......@@ -747,15 +739,14 @@ static void dsi_collect_irq_stats(struct platform_device *dsidev, u32 irqstatus,
spin_unlock(&dsi->irq_stats_lock);
}
#else
#define dsi_collect_irq_stats(dsidev, irqstatus, vcstatus, ciostatus)
#define dsi_collect_irq_stats(dsi, irqstatus, vcstatus, ciostatus)
#endif
static int debug_irq;
static void dsi_handle_irq_errors(struct platform_device *dsidev, u32 irqstatus,
u32 *vcstatus, u32 ciostatus)
static void dsi_handle_irq_errors(struct dsi_data *dsi, u32 irqstatus,
u32 *vcstatus, u32 ciostatus)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
int i;
if (irqstatus & DSI_IRQ_ERROR_MASK) {
......@@ -824,20 +815,16 @@ static void dsi_handle_isrs(struct dsi_isr_tables *isr_tables,
static irqreturn_t omap_dsi_irq_handler(int irq, void *arg)
{
struct platform_device *dsidev;
struct dsi_data *dsi;
struct dsi_data *dsi = arg;
u32 irqstatus, vcstatus[4], ciostatus;
int i;
dsidev = (struct platform_device *) arg;
dsi = dsi_get_dsidrv_data(dsidev);
if (!dsi->is_enabled)
return IRQ_NONE;
spin_lock(&dsi->irq_lock);
irqstatus = dsi_read_reg(dsidev, DSI_IRQSTATUS);
irqstatus = dsi_read_reg(dsi, DSI_IRQSTATUS);
/* IRQ is not for us */
if (!irqstatus) {
......@@ -845,9 +832,9 @@ static irqreturn_t omap_dsi_irq_handler(int irq, void *arg)
return IRQ_NONE;
}
dsi_write_reg(dsidev, DSI_IRQSTATUS, irqstatus & ~DSI_IRQ_CHANNEL_MASK);
dsi_write_reg(dsi, DSI_IRQSTATUS, irqstatus & ~DSI_IRQ_CHANNEL_MASK);
/* flush posted write */
dsi_read_reg(dsidev, DSI_IRQSTATUS);
dsi_read_reg(dsi, DSI_IRQSTATUS);
for (i = 0; i < 4; ++i) {
if ((irqstatus & (1 << i)) == 0) {
......@@ -855,19 +842,19 @@ static irqreturn_t omap_dsi_irq_handler(int irq, void *arg)
continue;
}
vcstatus[i] = dsi_read_reg(dsidev, DSI_VC_IRQSTATUS(i));
vcstatus[i] = dsi_read_reg(dsi, DSI_VC_IRQSTATUS(i));
dsi_write_reg(dsidev, DSI_VC_IRQSTATUS(i), vcstatus[i]);
dsi_write_reg(dsi, DSI_VC_IRQSTATUS(i), vcstatus[i]);
/* flush posted write */
dsi_read_reg(dsidev, DSI_VC_IRQSTATUS(i));
dsi_read_reg(dsi, DSI_VC_IRQSTATUS(i));
}
if (irqstatus & DSI_IRQ_COMPLEXIO_ERR) {
ciostatus = dsi_read_reg(dsidev, DSI_COMPLEXIO_IRQ_STATUS);
ciostatus = dsi_read_reg(dsi, DSI_COMPLEXIO_IRQ_STATUS);
dsi_write_reg(dsidev, DSI_COMPLEXIO_IRQ_STATUS, ciostatus);
dsi_write_reg(dsi, DSI_COMPLEXIO_IRQ_STATUS, ciostatus);
/* flush posted write */
dsi_read_reg(dsidev, DSI_COMPLEXIO_IRQ_STATUS);
dsi_read_reg(dsi, DSI_COMPLEXIO_IRQ_STATUS);
} else {
ciostatus = 0;
}
......@@ -886,19 +873,20 @@ static irqreturn_t omap_dsi_irq_handler(int irq, void *arg)
dsi_handle_isrs(&dsi->isr_tables_copy, irqstatus, vcstatus, ciostatus);
dsi_handle_irq_errors(dsidev, irqstatus, vcstatus, ciostatus);
dsi_handle_irq_errors(dsi, irqstatus, vcstatus, ciostatus);
dsi_collect_irq_stats(dsidev, irqstatus, vcstatus, ciostatus);
dsi_collect_irq_stats(dsi, irqstatus, vcstatus, ciostatus);
return IRQ_HANDLED;
}
/* dsi->irq_lock has to be locked by the caller */
static void _omap_dsi_configure_irqs(struct platform_device *dsidev,
struct dsi_isr_data *isr_array,
unsigned int isr_array_size, u32 default_mask,
const struct dsi_reg enable_reg,
const struct dsi_reg status_reg)
static void _omap_dsi_configure_irqs(struct dsi_data *dsi,
struct dsi_isr_data *isr_array,
unsigned int isr_array_size,
u32 default_mask,
const struct dsi_reg enable_reg,
const struct dsi_reg status_reg)
{
struct dsi_isr_data *isr_data;
u32 mask;
......@@ -916,54 +904,48 @@ static void _omap_dsi_configure_irqs(struct platform_device *dsidev,
mask |= isr_data->mask;
}
old_mask = dsi_read_reg(dsidev, enable_reg);
old_mask = dsi_read_reg(dsi, enable_reg);
/* clear the irqstatus for newly enabled irqs */
dsi_write_reg(dsidev, status_reg, (mask ^ old_mask) & mask);
dsi_write_reg(dsidev, enable_reg, mask);
dsi_write_reg(dsi, status_reg, (mask ^ old_mask) & mask);
dsi_write_reg(dsi, enable_reg, mask);
/* flush posted writes */
dsi_read_reg(dsidev, enable_reg);
dsi_read_reg(dsidev, status_reg);
dsi_read_reg(dsi, enable_reg);
dsi_read_reg(dsi, status_reg);
}
/* dsi->irq_lock has to be locked by the caller */
static void _omap_dsi_set_irqs(struct platform_device *dsidev)
static void _omap_dsi_set_irqs(struct dsi_data *dsi)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
u32 mask = DSI_IRQ_ERROR_MASK;
#ifdef DSI_CATCH_MISSING_TE
mask |= DSI_IRQ_TE_TRIGGER;
#endif
_omap_dsi_configure_irqs(dsidev, dsi->isr_tables.isr_table,
_omap_dsi_configure_irqs(dsi, dsi->isr_tables.isr_table,
ARRAY_SIZE(dsi->isr_tables.isr_table), mask,
DSI_IRQENABLE, DSI_IRQSTATUS);
}
/* dsi->irq_lock has to be locked by the caller */
static void _omap_dsi_set_irqs_vc(struct platform_device *dsidev, int vc)
static void _omap_dsi_set_irqs_vc(struct dsi_data *dsi, int vc)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
_omap_dsi_configure_irqs(dsidev, dsi->isr_tables.isr_table_vc[vc],
_omap_dsi_configure_irqs(dsi, dsi->isr_tables.isr_table_vc[vc],
ARRAY_SIZE(dsi->isr_tables.isr_table_vc[vc]),
DSI_VC_IRQ_ERROR_MASK,
DSI_VC_IRQENABLE(vc), DSI_VC_IRQSTATUS(vc));
}
/* dsi->irq_lock has to be locked by the caller */
static void _omap_dsi_set_irqs_cio(struct platform_device *dsidev)
static void _omap_dsi_set_irqs_cio(struct dsi_data *dsi)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
_omap_dsi_configure_irqs(dsidev, dsi->isr_tables.isr_table_cio,
_omap_dsi_configure_irqs(dsi, dsi->isr_tables.isr_table_cio,
ARRAY_SIZE(dsi->isr_tables.isr_table_cio),
DSI_CIO_IRQ_ERROR_MASK,
DSI_COMPLEXIO_IRQ_ENABLE, DSI_COMPLEXIO_IRQ_STATUS);
}
static void _dsi_initialize_irq(struct platform_device *dsidev)
static void _dsi_initialize_irq(struct dsi_data *dsi)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
unsigned long flags;
int vc;
......@@ -971,10 +953,10 @@ static void _dsi_initialize_irq(struct platform_device *dsidev)
memset(&dsi->isr_tables, 0, sizeof(dsi->isr_tables));
_omap_dsi_set_irqs(dsidev);
_omap_dsi_set_irqs(dsi);
for (vc = 0; vc < 4; ++vc)
_omap_dsi_set_irqs_vc(dsidev, vc);
_omap_dsi_set_irqs_cio(dsidev);
_omap_dsi_set_irqs_vc(dsi, vc);
_omap_dsi_set_irqs_cio(dsi);
spin_unlock_irqrestore(&dsi->irq_lock, flags);
}
......@@ -1035,10 +1017,9 @@ static int _dsi_unregister_isr(omap_dsi_isr_t isr, void *arg, u32 mask,
return -EINVAL;
}
static int dsi_register_isr(struct platform_device *dsidev, omap_dsi_isr_t isr,
void *arg, u32 mask)
static int dsi_register_isr(struct dsi_data *dsi, omap_dsi_isr_t isr,
void *arg, u32 mask)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
unsigned long flags;
int r;
......@@ -1048,17 +1029,16 @@ static int dsi_register_isr(struct platform_device *dsidev, omap_dsi_isr_t isr,
ARRAY_SIZE(dsi->isr_tables.isr_table));
if (r == 0)
_omap_dsi_set_irqs(dsidev);
_omap_dsi_set_irqs(dsi);
spin_unlock_irqrestore(&dsi->irq_lock, flags);
return r;
}
static int dsi_unregister_isr(struct platform_device *dsidev,
omap_dsi_isr_t isr, void *arg, u32 mask)
static int dsi_unregister_isr(struct dsi_data *dsi, omap_dsi_isr_t isr,
void *arg, u32 mask)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
unsigned long flags;
int r;
......@@ -1068,17 +1048,16 @@ static int dsi_unregister_isr(struct platform_device *dsidev,
ARRAY_SIZE(dsi->isr_tables.isr_table));
if (r == 0)
_omap_dsi_set_irqs(dsidev);
_omap_dsi_set_irqs(dsi);
spin_unlock_irqrestore(&dsi->irq_lock, flags);
return r;
}
static int dsi_register_isr_vc(struct platform_device *dsidev, int channel,
omap_dsi_isr_t isr, void *arg, u32 mask)
static int dsi_register_isr_vc(struct dsi_data *dsi, int channel,
omap_dsi_isr_t isr, void *arg, u32 mask)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
unsigned long flags;
int r;
......@@ -1089,17 +1068,16 @@ static int dsi_register_isr_vc(struct platform_device *dsidev, int channel,
ARRAY_SIZE(dsi->isr_tables.isr_table_vc[channel]));
if (r == 0)
_omap_dsi_set_irqs_vc(dsidev, channel);
_omap_dsi_set_irqs_vc(dsi, channel);
spin_unlock_irqrestore(&dsi->irq_lock, flags);
return r;
}
static int dsi_unregister_isr_vc(struct platform_device *dsidev, int channel,
omap_dsi_isr_t isr, void *arg, u32 mask)
static int dsi_unregister_isr_vc(struct dsi_data *dsi, int channel,
omap_dsi_isr_t isr, void *arg, u32 mask)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
unsigned long flags;
int r;
......@@ -1110,17 +1088,16 @@ static int dsi_unregister_isr_vc(struct platform_device *dsidev, int channel,
ARRAY_SIZE(dsi->isr_tables.isr_table_vc[channel]));
if (r == 0)
_omap_dsi_set_irqs_vc(dsidev, channel);
_omap_dsi_set_irqs_vc(dsi, channel);
spin_unlock_irqrestore(&dsi->irq_lock, flags);
return r;
}
static int dsi_register_isr_cio(struct platform_device *dsidev,
omap_dsi_isr_t isr, void *arg, u32 mask)
static int dsi_register_isr_cio(struct dsi_data *dsi, omap_dsi_isr_t isr,
void *arg, u32 mask)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
unsigned long flags;
int r;
......@@ -1130,17 +1107,16 @@ static int dsi_register_isr_cio(struct platform_device *dsidev,
ARRAY_SIZE(dsi->isr_tables.isr_table_cio));
if (r == 0)
_omap_dsi_set_irqs_cio(dsidev);
_omap_dsi_set_irqs_cio(dsi);
spin_unlock_irqrestore(&dsi->irq_lock, flags);
return r;
}
static int dsi_unregister_isr_cio(struct platform_device *dsidev,
omap_dsi_isr_t isr, void *arg, u32 mask)
static int dsi_unregister_isr_cio(struct dsi_data *dsi, omap_dsi_isr_t isr,
void *arg, u32 mask)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
unsigned long flags;
int r;
......@@ -1150,18 +1126,18 @@ static int dsi_unregister_isr_cio(struct platform_device *dsidev,
ARRAY_SIZE(dsi->isr_tables.isr_table_cio));
if (r == 0)
_omap_dsi_set_irqs_cio(dsidev);
_omap_dsi_set_irqs_cio(dsi);
spin_unlock_irqrestore(&dsi->irq_lock, flags);
return r;
}
static u32 dsi_get_errors(struct platform_device *dsidev)
static u32 dsi_get_errors(struct dsi_data *dsi)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
unsigned long flags;
u32 e;
spin_lock_irqsave(&dsi->errors_lock, flags);
e = dsi->errors;
dsi->errors = 0;
......@@ -1169,10 +1145,9 @@ static u32 dsi_get_errors(struct platform_device *dsidev)
return e;
}
static int dsi_runtime_get(struct platform_device *dsidev)
static int dsi_runtime_get(struct dsi_data *dsi)
{
int r;
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
DSSDBG("dsi_runtime_get\n");
......@@ -1181,9 +1156,8 @@ static int dsi_runtime_get(struct platform_device *dsidev)
return r < 0 ? r : 0;
}
static void dsi_runtime_put(struct platform_device *dsidev)
static void dsi_runtime_put(struct dsi_data *dsi)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
int r;
DSSDBG("dsi_runtime_put\n");
......@@ -1192,9 +1166,8 @@ static void dsi_runtime_put(struct platform_device *dsidev)
WARN_ON(r < 0 && r != -ENOSYS);
}
static int dsi_regulator_init(struct platform_device *dsidev)
static int dsi_regulator_init(struct dsi_data *dsi)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
struct regulator *vdds_dsi;
if (dsi->vdds_dsi_reg != NULL)
......@@ -1213,16 +1186,15 @@ static int dsi_regulator_init(struct platform_device *dsidev)
return 0;
}
static void _dsi_print_reset_status(struct platform_device *dsidev)
static void _dsi_print_reset_status(struct dsi_data *dsi)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
u32 l;
int b0, b1, b2;
/* A dummy read using the SCP interface to any DSIPHY register is
* required after DSIPHY reset to complete the reset of the DSI complex
* I/O. */
l = dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);
l = dsi_read_reg(dsi, DSI_DSIPHY_CFG5);
if (dsi->data->quirks & DSI_QUIRK_REVERSE_TXCLKESC) {
b0 = 28;
......@@ -1235,7 +1207,7 @@ static void _dsi_print_reset_status(struct platform_device *dsidev)
}
#define DSI_FLD_GET(fld, start, end)\
FLD_GET(dsi_read_reg(dsidev, DSI_##fld), start, end)
FLD_GET(dsi_read_reg(dsi, DSI_##fld), start, end)
pr_debug("DSI resets: PLL (%d) CIO (%d) PHY (%x%x%x, %d, %d, %d)\n",
DSI_FLD_GET(PLL_STATUS, 0, 0),
......@@ -1250,14 +1222,14 @@ static void _dsi_print_reset_status(struct platform_device *dsidev)
#undef DSI_FLD_GET
}
static inline int dsi_if_enable(struct platform_device *dsidev, bool enable)
static inline int dsi_if_enable(struct dsi_data *dsi, bool enable)
{
DSSDBG("dsi_if_enable(%d)\n", enable);
enable = enable ? 1 : 0;
REG_FLD_MOD(dsidev, DSI_CTRL, enable, 0, 0); /* IF_EN */
REG_FLD_MOD(dsi, DSI_CTRL, enable, 0, 0); /* IF_EN */
if (!wait_for_bit_change(dsidev, DSI_CTRL, 0, enable)) {
if (!wait_for_bit_change(dsi, DSI_CTRL, 0, enable)) {
DSSERR("Failed to set dsi_if_enable to %d\n", enable);
return -EIO;
}
......@@ -1265,31 +1237,24 @@ static inline int dsi_if_enable(struct platform_device *dsidev, bool enable)
return 0;
}
static unsigned long dsi_get_pll_hsdiv_dispc_rate(struct platform_device *dsidev)
static unsigned long dsi_get_pll_hsdiv_dispc_rate(struct dsi_data *dsi)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
return dsi->pll.cinfo.clkout[HSDIV_DISPC];
}
static unsigned long dsi_get_pll_hsdiv_dsi_rate(struct platform_device *dsidev)
static unsigned long dsi_get_pll_hsdiv_dsi_rate(struct dsi_data *dsi)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
return dsi->pll.cinfo.clkout[HSDIV_DSI];
}
static unsigned long dsi_get_txbyteclkhs(struct platform_device *dsidev)
static unsigned long dsi_get_txbyteclkhs(struct dsi_data *dsi)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
return dsi->pll.cinfo.clkdco / 16;
}
static unsigned long dsi_fclk_rate(struct platform_device *dsidev)
static unsigned long dsi_fclk_rate(struct dsi_data *dsi)
{
unsigned long r;
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
enum dss_clk_source source;
source = dss_get_dsi_clk_source(dsi->dss, dsi->module_id);
......@@ -1298,7 +1263,7 @@ static unsigned long dsi_fclk_rate(struct platform_device *dsidev)
r = clk_get_rate(dsi->dss_clk);
} else {
/* DSI FCLK source is dsi_pll_hsdiv_dsi_clk */
r = dsi_get_pll_hsdiv_dsi_rate(dsidev);
r = dsi_get_pll_hsdiv_dsi_rate(dsi);
}
return r;
......@@ -1323,9 +1288,8 @@ static int dsi_lp_clock_calc(unsigned long dsi_fclk,
return 0;
}
static int dsi_set_lp_clk_divisor(struct platform_device *dsidev)
static int dsi_set_lp_clk_divisor(struct dsi_data *dsi)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
unsigned long dsi_fclk;
unsigned int lp_clk_div;
unsigned long lp_clk;
......@@ -1337,7 +1301,7 @@ static int dsi_set_lp_clk_divisor(struct platform_device *dsidev)
if (lp_clk_div == 0 || lp_clk_div > lpdiv_max)
return -EINVAL;
dsi_fclk = dsi_fclk_rate(dsidev);
dsi_fclk = dsi_fclk_rate(dsi);
lp_clk = dsi_fclk / 2 / lp_clk_div;
......@@ -1346,29 +1310,25 @@ static int dsi_set_lp_clk_divisor(struct platform_device *dsidev)
dsi->current_lp_cinfo.lp_clk_div = lp_clk_div;
/* LP_CLK_DIVISOR */
REG_FLD_MOD(dsidev, DSI_CLK_CTRL, lp_clk_div, 12, 0);
REG_FLD_MOD(dsi, DSI_CLK_CTRL, lp_clk_div, 12, 0);
/* LP_RX_SYNCHRO_ENABLE */
REG_FLD_MOD(dsidev, DSI_CLK_CTRL, dsi_fclk > 30000000 ? 1 : 0, 21, 21);
REG_FLD_MOD(dsi, DSI_CLK_CTRL, dsi_fclk > 30000000 ? 1 : 0, 21, 21);
return 0;
}
static void dsi_enable_scp_clk(struct platform_device *dsidev)
static void dsi_enable_scp_clk(struct dsi_data *dsi)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
if (dsi->scp_clk_refcount++ == 0)
REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 1, 14, 14); /* CIO_CLK_ICG */
REG_FLD_MOD(dsi, DSI_CLK_CTRL, 1, 14, 14); /* CIO_CLK_ICG */
}
static void dsi_disable_scp_clk(struct platform_device *dsidev)
static void dsi_disable_scp_clk(struct dsi_data *dsi)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
WARN_ON(dsi->scp_clk_refcount == 0);
if (--dsi->scp_clk_refcount == 0)
REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 0, 14, 14); /* CIO_CLK_ICG */
REG_FLD_MOD(dsi, DSI_CLK_CTRL, 0, 14, 14); /* CIO_CLK_ICG */
}
enum dsi_pll_power_state {
......@@ -1378,10 +1338,8 @@ enum dsi_pll_power_state {
DSI_PLL_POWER_ON_DIV = 0x3,
};
static int dsi_pll_power(struct platform_device *dsidev,
enum dsi_pll_power_state state)
static int dsi_pll_power(struct dsi_data *dsi, enum dsi_pll_power_state state)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
int t = 0;
/* DSI-PLL power command 0x3 is not working */
......@@ -1390,10 +1348,10 @@ static int dsi_pll_power(struct platform_device *dsidev,
state = DSI_PLL_POWER_ON_ALL;
/* PLL_PWR_CMD */
REG_FLD_MOD(dsidev, DSI_CLK_CTRL, state, 31, 30);
REG_FLD_MOD(dsi, DSI_CLK_CTRL, state, 31, 30);
/* PLL_PWR_STATUS */
while (FLD_GET(dsi_read_reg(dsidev, DSI_CLK_CTRL), 29, 28) != state) {
while (FLD_GET(dsi_read_reg(dsi, DSI_CLK_CTRL), 29, 28) != state) {
if (++t > 1000) {
DSSERR("Failed to set DSI PLL power mode to %d\n",
state);
......@@ -1420,23 +1378,22 @@ static void dsi_pll_calc_dsi_fck(struct dsi_data *dsi,
static int dsi_pll_enable(struct dss_pll *pll)
{
struct dsi_data *dsi = container_of(pll, struct dsi_data, pll);
struct platform_device *dsidev = dsi->pdev;
int r = 0;
DSSDBG("PLL init\n");
r = dsi_regulator_init(dsidev);
r = dsi_regulator_init(dsi);
if (r)
return r;
r = dsi_runtime_get(dsidev);
r = dsi_runtime_get(dsi);
if (r)
return r;
/*
* Note: SCP CLK is not required on OMAP3, but it is required on OMAP4.
*/
dsi_enable_scp_clk(dsidev);
dsi_enable_scp_clk(dsi);
if (!dsi->vdds_dsi_enabled) {
r = regulator_enable(dsi->vdds_dsi_reg);
......@@ -1448,7 +1405,7 @@ static int dsi_pll_enable(struct dss_pll *pll)
/* XXX PLL does not come out of reset without this... */
dispc_pck_free_enable(1);
if (!wait_for_bit_change(dsidev, DSI_PLL_STATUS, 0, 1)) {
if (!wait_for_bit_change(dsi, DSI_PLL_STATUS, 0, 1)) {
DSSERR("PLL not coming out of reset.\n");
r = -ENODEV;
dispc_pck_free_enable(0);
......@@ -1459,7 +1416,7 @@ static int dsi_pll_enable(struct dss_pll *pll)
* fill the whole display. No idea about this */
dispc_pck_free_enable(0);
r = dsi_pll_power(dsidev, DSI_PLL_POWER_ON_ALL);
r = dsi_pll_power(dsi, DSI_PLL_POWER_ON_ALL);
if (r)
goto err1;
......@@ -1473,24 +1430,22 @@ static int dsi_pll_enable(struct dss_pll *pll)
dsi->vdds_dsi_enabled = false;
}
err0:
dsi_disable_scp_clk(dsidev);
dsi_runtime_put(dsidev);
dsi_disable_scp_clk(dsi);
dsi_runtime_put(dsi);
return r;
}
static void dsi_pll_uninit(struct platform_device *dsidev, bool disconnect_lanes)
static void dsi_pll_uninit(struct dsi_data *dsi, bool disconnect_lanes)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
dsi_pll_power(dsidev, DSI_PLL_POWER_OFF);
dsi_pll_power(dsi, DSI_PLL_POWER_OFF);
if (disconnect_lanes) {
WARN_ON(!dsi->vdds_dsi_enabled);
regulator_disable(dsi->vdds_dsi_reg);
dsi->vdds_dsi_enabled = false;
}
dsi_disable_scp_clk(dsidev);
dsi_runtime_put(dsidev);
dsi_disable_scp_clk(dsi);
dsi_runtime_put(dsi);
DSSDBG("PLL uninit done\n");
}
......@@ -1498,15 +1453,12 @@ static void dsi_pll_uninit(struct platform_device *dsidev, bool disconnect_lanes
static void dsi_pll_disable(struct dss_pll *pll)
{
struct dsi_data *dsi = container_of(pll, struct dsi_data, pll);
struct platform_device *dsidev = dsi->pdev;
dsi_pll_uninit(dsidev, true);
dsi_pll_uninit(dsi, true);
}
static void dsi_dump_dsidev_clocks(struct platform_device *dsidev,
struct seq_file *s)
static void dsi_dump_dsi_clocks(struct dsi_data *dsi, struct seq_file *s)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
struct dss_pll_clock_info *cinfo = &dsi->pll.cinfo;
enum dss_clk_source dispc_clk_src, dsi_clk_src;
int dsi_module = dsi->module_id;
......@@ -1515,7 +1467,7 @@ static void dsi_dump_dsidev_clocks(struct platform_device *dsidev,
dispc_clk_src = dss_get_dispc_clk_source(dsi->dss);
dsi_clk_src = dss_get_dsi_clk_source(dsi->dss, dsi_module);
if (dsi_runtime_get(dsidev))
if (dsi_runtime_get(dsi))
return;
seq_printf(s, "- DSI%d PLL -\n", dsi_module + 1);
......@@ -1550,35 +1502,33 @@ static void dsi_dump_dsidev_clocks(struct platform_device *dsidev,
seq_printf(s, "dsi fclk source = %s\n",
dss_get_clk_source_name(dsi_clk_src));
seq_printf(s, "DSI_FCLK\t%lu\n", dsi_fclk_rate(dsidev));
seq_printf(s, "DSI_FCLK\t%lu\n", dsi_fclk_rate(dsi));
seq_printf(s, "DDR_CLK\t\t%lu\n",
cinfo->clkdco / 4);
seq_printf(s, "TxByteClkHS\t%lu\n", dsi_get_txbyteclkhs(dsidev));
seq_printf(s, "TxByteClkHS\t%lu\n", dsi_get_txbyteclkhs(dsi));
seq_printf(s, "LP_CLK\t\t%lu\n", dsi->current_lp_cinfo.lp_clk);
dsi_runtime_put(dsidev);
dsi_runtime_put(dsi);
}
void dsi_dump_clocks(struct seq_file *s)
{
struct platform_device *dsidev;
struct dsi_data *dsi;
int i;
for (i = 0; i < MAX_NUM_DSI; i++) {
dsidev = dsi_get_dsidev_from_id(i);
if (dsidev)
dsi_dump_dsidev_clocks(dsidev, s);
dsi = dsi_get_dsi_from_id(i);
if (dsi)
dsi_dump_dsi_clocks(dsi, s);
}
}
#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
static void dsi_dump_dsidev_irqs(struct platform_device *dsidev,
struct seq_file *s)
static void dsi_dump_dsi_irqs(struct dsi_data *dsi, struct seq_file *s)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
unsigned long flags;
struct dsi_irq_stats stats;
......@@ -1666,29 +1616,28 @@ static void dsi_dump_dsidev_irqs(struct platform_device *dsidev,
static int dsi1_dump_irqs(struct seq_file *s, void *p)
{
struct platform_device *dsidev = dsi_get_dsidev_from_id(0);
struct dsi_data *dsi = dsi_get_dsi_from_id(0);
dsi_dump_dsidev_irqs(dsidev, s);
dsi_dump_dsi_irqs(dsi, s);
return 0;
}
static int dsi2_dump_irqs(struct seq_file *s, void *p)
{
struct platform_device *dsidev = dsi_get_dsidev_from_id(1);
struct dsi_data *dsi = dsi_get_dsi_from_id(1);
dsi_dump_dsidev_irqs(dsidev, s);
dsi_dump_dsi_irqs(dsi, s);
return 0;
}
#endif
static void dsi_dump_dsidev_regs(struct platform_device *dsidev,
struct seq_file *s)
static void dsi_dump_dsi_regs(struct dsi_data *dsi, struct seq_file *s)
{
#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dsi_read_reg(dsidev, r))
#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dsi_read_reg(dsi, r))
if (dsi_runtime_get(dsidev))
if (dsi_runtime_get(dsi))
return;
dsi_enable_scp_clk(dsidev);
dsi_enable_scp_clk(dsi);
DUMPREG(DSI_REVISION);
DUMPREG(DSI_SYSCONFIG);
......@@ -1760,24 +1709,24 @@ static void dsi_dump_dsidev_regs(struct platform_device *dsidev,
DUMPREG(DSI_PLL_CONFIGURATION1);
DUMPREG(DSI_PLL_CONFIGURATION2);
dsi_disable_scp_clk(dsidev);
dsi_runtime_put(dsidev);
dsi_disable_scp_clk(dsi);
dsi_runtime_put(dsi);
#undef DUMPREG
}
static int dsi1_dump_regs(struct seq_file *s, void *p)
{
struct platform_device *dsidev = dsi_get_dsidev_from_id(0);
struct dsi_data *dsi = dsi_get_dsi_from_id(0);
dsi_dump_dsidev_regs(dsidev, s);
dsi_dump_dsi_regs(dsi, s);
return 0;
}
static int dsi2_dump_regs(struct seq_file *s, void *p)
{
struct platform_device *dsidev = dsi_get_dsidev_from_id(1);
struct dsi_data *dsi = dsi_get_dsi_from_id(1);
dsi_dump_dsidev_regs(dsidev, s);
dsi_dump_dsi_regs(dsi, s);
return 0;
}
......@@ -1787,16 +1736,15 @@ enum dsi_cio_power_state {
DSI_COMPLEXIO_POWER_ULPS = 0x2,
};
static int dsi_cio_power(struct platform_device *dsidev,
enum dsi_cio_power_state state)
static int dsi_cio_power(struct dsi_data *dsi, enum dsi_cio_power_state state)
{
int t = 0;
/* PWR_CMD */
REG_FLD_MOD(dsidev, DSI_COMPLEXIO_CFG1, state, 28, 27);
REG_FLD_MOD(dsi, DSI_COMPLEXIO_CFG1, state, 28, 27);
/* PWR_STATUS */
while (FLD_GET(dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG1),
while (FLD_GET(dsi_read_reg(dsi, DSI_COMPLEXIO_CFG1),
26, 25) != state) {
if (++t > 1000) {
DSSERR("failed to set complexio power state to "
......@@ -1809,9 +1757,8 @@ static int dsi_cio_power(struct platform_device *dsidev,
return 0;
}
static unsigned int dsi_get_line_buf_size(struct platform_device *dsidev)
static unsigned int dsi_get_line_buf_size(struct dsi_data *dsi)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
int val;
/* line buffer on OMAP3 is 1024 x 24bits */
......@@ -1821,7 +1768,7 @@ static unsigned int dsi_get_line_buf_size(struct platform_device *dsidev)
if (!(dsi->data->quirks & DSI_QUIRK_GNQ))
return 1023 * 3;
val = REG_GET(dsidev, DSI_GNQ, 14, 12); /* VP1_LINE_BUFFER_SIZE */
val = REG_GET(dsi, DSI_GNQ, 14, 12); /* VP1_LINE_BUFFER_SIZE */
switch (val) {
case 1:
......@@ -1844,9 +1791,8 @@ static unsigned int dsi_get_line_buf_size(struct platform_device *dsidev)
}
}
static int dsi_set_lane_config(struct platform_device *dsidev)
static int dsi_set_lane_config(struct dsi_data *dsi)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
static const u8 offsets[] = { 0, 4, 8, 12, 16 };
static const enum dsi_lane_function functions[] = {
DSI_LANE_CLK,
......@@ -1858,7 +1804,7 @@ static int dsi_set_lane_config(struct platform_device *dsidev)
u32 r;
int i;
r = dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG1);
r = dsi_read_reg(dsi, DSI_COMPLEXIO_CFG1);
for (i = 0; i < dsi->num_lanes_used; ++i) {
unsigned int offset = offsets[i];
......@@ -1887,33 +1833,28 @@ static int dsi_set_lane_config(struct platform_device *dsidev)
r = FLD_MOD(r, 0, offset + 3, offset + 3);
}
dsi_write_reg(dsidev, DSI_COMPLEXIO_CFG1, r);
dsi_write_reg(dsi, DSI_COMPLEXIO_CFG1, r);
return 0;
}
static inline unsigned int ns2ddr(struct platform_device *dsidev,
unsigned int ns)
static inline unsigned int ns2ddr(struct dsi_data *dsi, unsigned int ns)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
/* convert time in ns to ddr ticks, rounding up */
unsigned long ddr_clk = dsi->pll.cinfo.clkdco / 4;
return (ns * (ddr_clk / 1000 / 1000) + 999) / 1000;
}
static inline unsigned int ddr2ns(struct platform_device *dsidev,
unsigned int ddr)
static inline unsigned int ddr2ns(struct dsi_data *dsi, unsigned int ddr)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
unsigned long ddr_clk = dsi->pll.cinfo.clkdco / 4;
return ddr * 1000 * 1000 / (ddr_clk / 1000);
}
static void dsi_cio_timings(struct platform_device *dsidev)
static void dsi_cio_timings(struct dsi_data *dsi)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
u32 r;
u32 ths_prepare, ths_prepare_ths_zero, ths_trail, ths_exit;
u32 tlpx_half, tclk_trail, tclk_zero;
......@@ -1924,54 +1865,54 @@ static void dsi_cio_timings(struct platform_device *dsidev)
/* 1 * DDR_CLK = 2 * UI */
/* min 40ns + 4*UI max 85ns + 6*UI */
ths_prepare = ns2ddr(dsidev, 70) + 2;
ths_prepare = ns2ddr(dsi, 70) + 2;
/* min 145ns + 10*UI */
ths_prepare_ths_zero = ns2ddr(dsidev, 175) + 2;
ths_prepare_ths_zero = ns2ddr(dsi, 175) + 2;
/* min max(8*UI, 60ns+4*UI) */
ths_trail = ns2ddr(dsidev, 60) + 5;
ths_trail = ns2ddr(dsi, 60) + 5;
/* min 100ns */
ths_exit = ns2ddr(dsidev, 145);
ths_exit = ns2ddr(dsi, 145);
/* tlpx min 50n */
tlpx_half = ns2ddr(dsidev, 25);
tlpx_half = ns2ddr(dsi, 25);
/* min 60ns */
tclk_trail = ns2ddr(dsidev, 60) + 2;
tclk_trail = ns2ddr(dsi, 60) + 2;
/* min 38ns, max 95ns */
tclk_prepare = ns2ddr(dsidev, 65);
tclk_prepare = ns2ddr(dsi, 65);
/* min tclk-prepare + tclk-zero = 300ns */
tclk_zero = ns2ddr(dsidev, 260);
tclk_zero = ns2ddr(dsi, 260);
DSSDBG("ths_prepare %u (%uns), ths_prepare_ths_zero %u (%uns)\n",
ths_prepare, ddr2ns(dsidev, ths_prepare),
ths_prepare_ths_zero, ddr2ns(dsidev, ths_prepare_ths_zero));
ths_prepare, ddr2ns(dsi, ths_prepare),
ths_prepare_ths_zero, ddr2ns(dsi, ths_prepare_ths_zero));
DSSDBG("ths_trail %u (%uns), ths_exit %u (%uns)\n",
ths_trail, ddr2ns(dsidev, ths_trail),
ths_exit, ddr2ns(dsidev, ths_exit));
ths_trail, ddr2ns(dsi, ths_trail),
ths_exit, ddr2ns(dsi, ths_exit));
DSSDBG("tlpx_half %u (%uns), tclk_trail %u (%uns), "
"tclk_zero %u (%uns)\n",
tlpx_half, ddr2ns(dsidev, tlpx_half),
tclk_trail, ddr2ns(dsidev, tclk_trail),
tclk_zero, ddr2ns(dsidev, tclk_zero));
tlpx_half, ddr2ns(dsi, tlpx_half),
tclk_trail, ddr2ns(dsi, tclk_trail),
tclk_zero, ddr2ns(dsi, tclk_zero));
DSSDBG("tclk_prepare %u (%uns)\n",
tclk_prepare, ddr2ns(dsidev, tclk_prepare));
tclk_prepare, ddr2ns(dsi, tclk_prepare));
/* program timings */
r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG0);
r = dsi_read_reg(dsi, DSI_DSIPHY_CFG0);
r = FLD_MOD(r, ths_prepare, 31, 24);
r = FLD_MOD(r, ths_prepare_ths_zero, 23, 16);
r = FLD_MOD(r, ths_trail, 15, 8);
r = FLD_MOD(r, ths_exit, 7, 0);
dsi_write_reg(dsidev, DSI_DSIPHY_CFG0, r);
dsi_write_reg(dsi, DSI_DSIPHY_CFG0, r);
r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG1);
r = dsi_read_reg(dsi, DSI_DSIPHY_CFG1);
r = FLD_MOD(r, tlpx_half, 20, 16);
r = FLD_MOD(r, tclk_trail, 15, 8);
r = FLD_MOD(r, tclk_zero, 7, 0);
......@@ -1982,18 +1923,18 @@ static void dsi_cio_timings(struct platform_device *dsidev)
r = FLD_MOD(r, 1, 23, 23); /* CLKINP_SEL = enable */
}
dsi_write_reg(dsidev, DSI_DSIPHY_CFG1, r);
dsi_write_reg(dsi, DSI_DSIPHY_CFG1, r);
r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG2);
r = dsi_read_reg(dsi, DSI_DSIPHY_CFG2);
r = FLD_MOD(r, tclk_prepare, 7, 0);
dsi_write_reg(dsidev, DSI_DSIPHY_CFG2, r);
dsi_write_reg(dsi, DSI_DSIPHY_CFG2, r);
}
/* lane masks have lane 0 at lsb. mask_p for positive lines, n for negative */
static void dsi_cio_enable_lane_override(struct platform_device *dsidev,
unsigned int mask_p, unsigned int mask_n)
static void dsi_cio_enable_lane_override(struct dsi_data *dsi,
unsigned int mask_p,
unsigned int mask_n)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
int i;
u32 l;
u8 lptxscp_start = dsi->num_lanes_supported == 3 ? 22 : 26;
......@@ -2022,26 +1963,25 @@ static void dsi_cio_enable_lane_override(struct platform_device *dsidev,
/* Set the lane override configuration */
/* REGLPTXSCPDAT4TO0DXDY */
REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, l, lptxscp_start, 17);
REG_FLD_MOD(dsi, DSI_DSIPHY_CFG10, l, lptxscp_start, 17);
/* Enable lane override */
/* ENLPTXSCPDAT */
REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, 1, 27, 27);
REG_FLD_MOD(dsi, DSI_DSIPHY_CFG10, 1, 27, 27);
}
static void dsi_cio_disable_lane_override(struct platform_device *dsidev)
static void dsi_cio_disable_lane_override(struct dsi_data *dsi)
{
/* Disable lane override */
REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, 0, 27, 27); /* ENLPTXSCPDAT */
REG_FLD_MOD(dsi, DSI_DSIPHY_CFG10, 0, 27, 27); /* ENLPTXSCPDAT */
/* Reset the lane override configuration */
/* REGLPTXSCPDAT4TO0DXDY */
REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, 0, 22, 17);
REG_FLD_MOD(dsi, DSI_DSIPHY_CFG10, 0, 22, 17);
}
static int dsi_cio_wait_tx_clk_esc_reset(struct platform_device *dsidev)
static int dsi_cio_wait_tx_clk_esc_reset(struct dsi_data *dsi)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
int t, i;
bool in_use[DSI_MAX_NR_LANES];
static const u8 offsets_old[] = { 28, 27, 26 };
......@@ -2061,7 +2001,7 @@ static int dsi_cio_wait_tx_clk_esc_reset(struct platform_device *dsidev)
u32 l;
int ok;
l = dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);
l = dsi_read_reg(dsi, DSI_DSIPHY_CFG5);
ok = 0;
for (i = 0; i < dsi->num_lanes_supported; ++i) {
......@@ -2088,9 +2028,8 @@ static int dsi_cio_wait_tx_clk_esc_reset(struct platform_device *dsidev)
}
/* return bitmask of enabled lanes, lane0 being the lsb */
static unsigned int dsi_get_lane_mask(struct platform_device *dsidev)
static unsigned int dsi_get_lane_mask(struct dsi_data *dsi)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
unsigned int mask = 0;
int i;
......@@ -2179,42 +2118,41 @@ static void dsi_disable_pads(struct dsi_data *dsi)
dsi_omap5_mux_pads(dsi, 0);
}
static int dsi_cio_init(struct platform_device *dsidev)
static int dsi_cio_init(struct dsi_data *dsi)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
int r;
u32 l;
DSSDBG("DSI CIO init starts");
r = dsi_enable_pads(dsi, dsi_get_lane_mask(dsidev));
r = dsi_enable_pads(dsi, dsi_get_lane_mask(dsi));
if (r)
return r;
dsi_enable_scp_clk(dsidev);
dsi_enable_scp_clk(dsi);
/* A dummy read using the SCP interface to any DSIPHY register is
* required after DSIPHY reset to complete the reset of the DSI complex
* I/O. */
dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);
dsi_read_reg(dsi, DSI_DSIPHY_CFG5);
if (!wait_for_bit_change(dsidev, DSI_DSIPHY_CFG5, 30, 1)) {
if (!wait_for_bit_change(dsi, DSI_DSIPHY_CFG5, 30, 1)) {
DSSERR("CIO SCP Clock domain not coming out of reset.\n");
r = -EIO;
goto err_scp_clk_dom;
}
r = dsi_set_lane_config(dsidev);
r = dsi_set_lane_config(dsi);
if (r)
goto err_scp_clk_dom;
/* set TX STOP MODE timer to maximum for this operation */
l = dsi_read_reg(dsidev, DSI_TIMING1);
l = dsi_read_reg(dsi, DSI_TIMING1);
l = FLD_MOD(l, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */
l = FLD_MOD(l, 1, 14, 14); /* STOP_STATE_X16_IO */
l = FLD_MOD(l, 1, 13, 13); /* STOP_STATE_X4_IO */
l = FLD_MOD(l, 0x1fff, 12, 0); /* STOP_STATE_COUNTER_IO */
dsi_write_reg(dsidev, DSI_TIMING1, l);
dsi_write_reg(dsi, DSI_TIMING1, l);
if (dsi->ulps_enabled) {
unsigned int mask_p;
......@@ -2239,24 +2177,24 @@ static int dsi_cio_init(struct platform_device *dsidev)
mask_p |= 1 << i;
}
dsi_cio_enable_lane_override(dsidev, mask_p, 0);
dsi_cio_enable_lane_override(dsi, mask_p, 0);
}
r = dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_ON);
r = dsi_cio_power(dsi, DSI_COMPLEXIO_POWER_ON);
if (r)
goto err_cio_pwr;
if (!wait_for_bit_change(dsidev, DSI_COMPLEXIO_CFG1, 29, 1)) {
if (!wait_for_bit_change(dsi, DSI_COMPLEXIO_CFG1, 29, 1)) {
DSSERR("CIO PWR clock domain not coming out of reset.\n");
r = -ENODEV;
goto err_cio_pwr_dom;
}
dsi_if_enable(dsidev, true);
dsi_if_enable(dsidev, false);
REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 1, 20, 20); /* LP_CLK_ENABLE */
dsi_if_enable(dsi, true);
dsi_if_enable(dsi, false);
REG_FLD_MOD(dsi, DSI_CLK_CTRL, 1, 20, 20); /* LP_CLK_ENABLE */
r = dsi_cio_wait_tx_clk_esc_reset(dsidev);
r = dsi_cio_wait_tx_clk_esc_reset(dsi);
if (r)
goto err_tx_clk_esc_rst;
......@@ -2268,17 +2206,17 @@ static int dsi_cio_init(struct platform_device *dsidev)
/* Disable the override. The lanes should be set to Mark-11
* state by the HW */
dsi_cio_disable_lane_override(dsidev);
dsi_cio_disable_lane_override(dsi);
}
/* FORCE_TX_STOP_MODE_IO */
REG_FLD_MOD(dsidev, DSI_TIMING1, 0, 15, 15);
REG_FLD_MOD(dsi, DSI_TIMING1, 0, 15, 15);
dsi_cio_timings(dsidev);
dsi_cio_timings(dsi);
if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
/* DDR_CLK_ALWAYS_ON */
REG_FLD_MOD(dsidev, DSI_CLK_CTRL,
REG_FLD_MOD(dsi, DSI_CLK_CTRL,
dsi->vm_timings.ddr_clk_always_on, 13, 13);
}
......@@ -2289,35 +2227,32 @@ static int dsi_cio_init(struct platform_device *dsidev)
return 0;
err_tx_clk_esc_rst:
REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 0, 20, 20); /* LP_CLK_ENABLE */
REG_FLD_MOD(dsi, DSI_CLK_CTRL, 0, 20, 20); /* LP_CLK_ENABLE */
err_cio_pwr_dom:
dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_OFF);
dsi_cio_power(dsi, DSI_COMPLEXIO_POWER_OFF);
err_cio_pwr:
if (dsi->ulps_enabled)
dsi_cio_disable_lane_override(dsidev);
dsi_cio_disable_lane_override(dsi);
err_scp_clk_dom:
dsi_disable_scp_clk(dsidev);
dsi_disable_scp_clk(dsi);
dsi_disable_pads(dsi);
return r;
}
static void dsi_cio_uninit(struct platform_device *dsidev)
static void dsi_cio_uninit(struct dsi_data *dsi)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
/* DDR_CLK_ALWAYS_ON */
REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 0, 13, 13);
REG_FLD_MOD(dsi, DSI_CLK_CTRL, 0, 13, 13);
dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_OFF);
dsi_disable_scp_clk(dsidev);
dsi_cio_power(dsi, DSI_COMPLEXIO_POWER_OFF);
dsi_disable_scp_clk(dsi);
dsi_disable_pads(dsi);
}
static void dsi_config_tx_fifo(struct platform_device *dsidev,
enum fifo_size size1, enum fifo_size size2,
enum fifo_size size3, enum fifo_size size4)
static void dsi_config_tx_fifo(struct dsi_data *dsi,
enum fifo_size size1, enum fifo_size size2,
enum fifo_size size3, enum fifo_size size4)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
u32 r = 0;
int add = 0;
int i;
......@@ -2343,14 +2278,13 @@ static void dsi_config_tx_fifo(struct platform_device *dsidev,
add += size;
}
dsi_write_reg(dsidev, DSI_TX_FIFO_VC_SIZE, r);
dsi_write_reg(dsi, DSI_TX_FIFO_VC_SIZE, r);
}
static void dsi_config_rx_fifo(struct platform_device *dsidev,
static void dsi_config_rx_fifo(struct dsi_data *dsi,
enum fifo_size size1, enum fifo_size size2,
enum fifo_size size3, enum fifo_size size4)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
u32 r = 0;
int add = 0;
int i;
......@@ -2376,18 +2310,18 @@ static void dsi_config_rx_fifo(struct platform_device *dsidev,
add += size;
}
dsi_write_reg(dsidev, DSI_RX_FIFO_VC_SIZE, r);
dsi_write_reg(dsi, DSI_RX_FIFO_VC_SIZE, r);
}
static int dsi_force_tx_stop_mode_io(struct platform_device *dsidev)
static int dsi_force_tx_stop_mode_io(struct dsi_data *dsi)
{
u32 r;
r = dsi_read_reg(dsidev, DSI_TIMING1);
r = dsi_read_reg(dsi, DSI_TIMING1);
r = FLD_MOD(r, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */
dsi_write_reg(dsidev, DSI_TIMING1, r);
dsi_write_reg(dsi, DSI_TIMING1, r);
if (!wait_for_bit_change(dsidev, DSI_TIMING1, 15, 0)) {
if (!wait_for_bit_change(dsi, DSI_TIMING1, 15, 0)) {
DSSERR("TX_STOP bit not going down\n");
return -EIO;
}
......@@ -2395,29 +2329,28 @@ static int dsi_force_tx_stop_mode_io(struct platform_device *dsidev)
return 0;
}
static bool dsi_vc_is_enabled(struct platform_device *dsidev, int channel)
static bool dsi_vc_is_enabled(struct dsi_data *dsi, int channel)
{
return REG_GET(dsidev, DSI_VC_CTRL(channel), 0, 0);
return REG_GET(dsi, DSI_VC_CTRL(channel), 0, 0);
}
static void dsi_packet_sent_handler_vp(void *data, u32 mask)
{
struct dsi_packet_sent_handler_data *vp_data =
(struct dsi_packet_sent_handler_data *) data;
struct dsi_data *dsi = dsi_get_dsidrv_data(vp_data->dsidev);
struct dsi_data *dsi = vp_data->dsi;
const int channel = dsi->update_channel;
u8 bit = dsi->te_enabled ? 30 : 31;
if (REG_GET(vp_data->dsidev, DSI_VC_TE(channel), bit, bit) == 0)
if (REG_GET(dsi, DSI_VC_TE(channel), bit, bit) == 0)
complete(vp_data->completion);
}
static int dsi_sync_vc_vp(struct platform_device *dsidev, int channel)
static int dsi_sync_vc_vp(struct dsi_data *dsi, int channel)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
DECLARE_COMPLETION_ONSTACK(completion);
struct dsi_packet_sent_handler_data vp_data = {
.dsidev = dsidev,
.dsi = dsi,
.completion = &completion
};
int r = 0;
......@@ -2425,13 +2358,13 @@ static int dsi_sync_vc_vp(struct platform_device *dsidev, int channel)
bit = dsi->te_enabled ? 30 : 31;
r = dsi_register_isr_vc(dsidev, channel, dsi_packet_sent_handler_vp,
r = dsi_register_isr_vc(dsi, channel, dsi_packet_sent_handler_vp,
&vp_data, DSI_VC_IRQ_PACKET_SENT);
if (r)
goto err0;
/* Wait for completion only if TE_EN/TE_START is still set */
if (REG_GET(dsidev, DSI_VC_TE(channel), bit, bit)) {
if (REG_GET(dsi, DSI_VC_TE(channel), bit, bit)) {
if (wait_for_completion_timeout(&completion,
msecs_to_jiffies(10)) == 0) {
DSSERR("Failed to complete previous frame transfer\n");
......@@ -2440,12 +2373,12 @@ static int dsi_sync_vc_vp(struct platform_device *dsidev, int channel)
}
}
dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_vp,
dsi_unregister_isr_vc(dsi, channel, dsi_packet_sent_handler_vp,
&vp_data, DSI_VC_IRQ_PACKET_SENT);
return 0;
err1:
dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_vp,
dsi_unregister_isr_vc(dsi, channel, dsi_packet_sent_handler_vp,
&vp_data, DSI_VC_IRQ_PACKET_SENT);
err0:
return r;
......@@ -2455,29 +2388,29 @@ static void dsi_packet_sent_handler_l4(void *data, u32 mask)
{
struct dsi_packet_sent_handler_data *l4_data =
(struct dsi_packet_sent_handler_data *) data;
struct dsi_data *dsi = dsi_get_dsidrv_data(l4_data->dsidev);
struct dsi_data *dsi = l4_data->dsi;
const int channel = dsi->update_channel;
if (REG_GET(l4_data->dsidev, DSI_VC_CTRL(channel), 5, 5) == 0)
if (REG_GET(dsi, DSI_VC_CTRL(channel), 5, 5) == 0)
complete(l4_data->completion);
}
static int dsi_sync_vc_l4(struct platform_device *dsidev, int channel)
static int dsi_sync_vc_l4(struct dsi_data *dsi, int channel)
{
DECLARE_COMPLETION_ONSTACK(completion);
struct dsi_packet_sent_handler_data l4_data = {
.dsidev = dsidev,
.dsi = dsi,
.completion = &completion
};
int r = 0;
r = dsi_register_isr_vc(dsidev, channel, dsi_packet_sent_handler_l4,
r = dsi_register_isr_vc(dsi, channel, dsi_packet_sent_handler_l4,
&l4_data, DSI_VC_IRQ_PACKET_SENT);
if (r)
goto err0;
/* Wait for completion only if TX_FIFO_NOT_EMPTY is still set */
if (REG_GET(dsidev, DSI_VC_CTRL(channel), 5, 5)) {
if (REG_GET(dsi, DSI_VC_CTRL(channel), 5, 5)) {
if (wait_for_completion_timeout(&completion,
msecs_to_jiffies(10)) == 0) {
DSSERR("Failed to complete previous l4 transfer\n");
......@@ -2486,50 +2419,47 @@ static int dsi_sync_vc_l4(struct platform_device *dsidev, int channel)
}
}
dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_l4,
dsi_unregister_isr_vc(dsi, channel, dsi_packet_sent_handler_l4,
&l4_data, DSI_VC_IRQ_PACKET_SENT);
return 0;
err1:
dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_l4,
dsi_unregister_isr_vc(dsi, channel, dsi_packet_sent_handler_l4,
&l4_data, DSI_VC_IRQ_PACKET_SENT);
err0:
return r;
}
static int dsi_sync_vc(struct platform_device *dsidev, int channel)
static int dsi_sync_vc(struct dsi_data *dsi, int channel)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
WARN_ON(!dsi_bus_is_locked(dsidev));
WARN_ON(!dsi_bus_is_locked(dsi));
WARN_ON(in_interrupt());
if (!dsi_vc_is_enabled(dsidev, channel))
if (!dsi_vc_is_enabled(dsi, channel))
return 0;
switch (dsi->vc[channel].source) {
case DSI_VC_SOURCE_VP:
return dsi_sync_vc_vp(dsidev, channel);
return dsi_sync_vc_vp(dsi, channel);
case DSI_VC_SOURCE_L4:
return dsi_sync_vc_l4(dsidev, channel);
return dsi_sync_vc_l4(dsi, channel);
default:
BUG();
return -EINVAL;
}
}
static int dsi_vc_enable(struct platform_device *dsidev, int channel,
bool enable)
static int dsi_vc_enable(struct dsi_data *dsi, int channel, bool enable)
{
DSSDBG("dsi_vc_enable channel %d, enable %d\n",
channel, enable);
enable = enable ? 1 : 0;
REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), enable, 0, 0);
REG_FLD_MOD(dsi, DSI_VC_CTRL(channel), enable, 0, 0);
if (!wait_for_bit_change(dsidev, DSI_VC_CTRL(channel), 0, enable)) {
if (!wait_for_bit_change(dsi, DSI_VC_CTRL(channel), 0, enable)) {
DSSERR("Failed to set dsi_vc_enable to %d\n", enable);
return -EIO;
}
......@@ -2537,14 +2467,13 @@ static int dsi_vc_enable(struct platform_device *dsidev, int channel,
return 0;
}
static void dsi_vc_initial_config(struct platform_device *dsidev, int channel)
static void dsi_vc_initial_config(struct dsi_data *dsi, int channel)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
u32 r;
DSSDBG("Initial config of virtual channel %d", channel);
r = dsi_read_reg(dsidev, DSI_VC_CTRL(channel));
r = dsi_read_reg(dsi, DSI_VC_CTRL(channel));
if (FLD_GET(r, 15, 15)) /* VC_BUSY */
DSSERR("VC(%d) busy when trying to configure it!\n",
......@@ -2563,41 +2492,39 @@ static void dsi_vc_initial_config(struct platform_device *dsidev, int channel)
r = FLD_MOD(r, 4, 29, 27); /* DMA_RX_REQ_NB = no dma */
r = FLD_MOD(r, 4, 23, 21); /* DMA_TX_REQ_NB = no dma */
dsi_write_reg(dsidev, DSI_VC_CTRL(channel), r);
dsi_write_reg(dsi, DSI_VC_CTRL(channel), r);
dsi->vc[channel].source = DSI_VC_SOURCE_L4;
}
static int dsi_vc_config_source(struct platform_device *dsidev, int channel,
enum dsi_vc_source source)
static int dsi_vc_config_source(struct dsi_data *dsi, int channel,
enum dsi_vc_source source)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
if (dsi->vc[channel].source == source)
return 0;
DSSDBG("Source config of virtual channel %d", channel);
dsi_sync_vc(dsidev, channel);
dsi_sync_vc(dsi, channel);
dsi_vc_enable(dsidev, channel, 0);
dsi_vc_enable(dsi, channel, 0);
/* VC_BUSY */
if (!wait_for_bit_change(dsidev, DSI_VC_CTRL(channel), 15, 0)) {
if (!wait_for_bit_change(dsi, DSI_VC_CTRL(channel), 15, 0)) {
DSSERR("vc(%d) busy when trying to config for VP\n", channel);
return -EIO;
}
/* SOURCE, 0 = L4, 1 = video port */
REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), source, 1, 1);
REG_FLD_MOD(dsi, DSI_VC_CTRL(channel), source, 1, 1);
/* DCS_CMD_ENABLE */
if (dsi->data->quirks & DSI_QUIRK_DCS_CMD_CONFIG_VC) {
bool enable = source == DSI_VC_SOURCE_VP;
REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), enable, 30, 30);
REG_FLD_MOD(dsi, DSI_VC_CTRL(channel), enable, 30, 30);
}
dsi_vc_enable(dsidev, channel, 1);
dsi_vc_enable(dsi, channel, 1);
dsi->vc[channel].source = source;
......@@ -2612,28 +2539,28 @@ static void dsi_vc_enable_hs(struct omap_dss_device *dssdev, int channel,
DSSDBG("dsi_vc_enable_hs(%d, %d)\n", channel, enable);
WARN_ON(!dsi_bus_is_locked(dsidev));
WARN_ON(!dsi_bus_is_locked(dsi));
dsi_vc_enable(dsidev, channel, 0);
dsi_if_enable(dsidev, 0);
dsi_vc_enable(dsi, channel, 0);
dsi_if_enable(dsi, 0);
REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), enable, 9, 9);
REG_FLD_MOD(dsi, DSI_VC_CTRL(channel), enable, 9, 9);
dsi_vc_enable(dsidev, channel, 1);
dsi_if_enable(dsidev, 1);
dsi_vc_enable(dsi, channel, 1);
dsi_if_enable(dsi, 1);
dsi_force_tx_stop_mode_io(dsidev);
dsi_force_tx_stop_mode_io(dsi);
/* start the DDR clock by sending a NULL packet */
if (dsi->vm_timings.ddr_clk_always_on && enable)
dsi_vc_send_null(dssdev, channel);
dsi_vc_send_null(dsi, channel);
}
static void dsi_vc_flush_long_data(struct platform_device *dsidev, int channel)
static void dsi_vc_flush_long_data(struct dsi_data *dsi, int channel)
{
while (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) {
while (REG_GET(dsi, DSI_VC_CTRL(channel), 20, 20)) {
u32 val;
val = dsi_read_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel));
val = dsi_read_reg(dsi, DSI_VC_SHORT_PACKET_HEADER(channel));
DSSDBG("\t\tb1 %#02x b2 %#02x b3 %#02x b4 %#02x\n",
(val >> 0) & 0xff,
(val >> 8) & 0xff,
......@@ -2679,14 +2606,13 @@ static void dsi_show_rx_ack_with_err(u16 err)
DSSERR("\t\tDSI Protocol Violation\n");
}
static u16 dsi_vc_flush_receive_data(struct platform_device *dsidev,
int channel)
static u16 dsi_vc_flush_receive_data(struct dsi_data *dsi, int channel)
{
/* RX_FIFO_NOT_EMPTY */
while (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) {
while (REG_GET(dsi, DSI_VC_CTRL(channel), 20, 20)) {
u32 val;
u8 dt;
val = dsi_read_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel));
val = dsi_read_reg(dsi, DSI_VC_SHORT_PACKET_HEADER(channel));
DSSERR("\trawval %#08x\n", val);
dt = FLD_GET(val, 5, 0);
if (dt == MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT) {
......@@ -2701,7 +2627,7 @@ static u16 dsi_vc_flush_receive_data(struct platform_device *dsidev,
} else if (dt == MIPI_DSI_RX_DCS_LONG_READ_RESPONSE) {
DSSERR("\tDCS long response, len %d\n",
FLD_GET(val, 23, 8));
dsi_vc_flush_long_data(dsidev, channel);
dsi_vc_flush_long_data(dsi, channel);
} else {
DSSERR("\tunknown datatype 0x%02x\n", dt);
}
......@@ -2709,25 +2635,23 @@ static u16 dsi_vc_flush_receive_data(struct platform_device *dsidev,
return 0;
}
static int dsi_vc_send_bta(struct platform_device *dsidev, int channel)
static int dsi_vc_send_bta(struct dsi_data *dsi, int channel)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
if (dsi->debug_write || dsi->debug_read)
DSSDBG("dsi_vc_send_bta %d\n", channel);
WARN_ON(!dsi_bus_is_locked(dsidev));
WARN_ON(!dsi_bus_is_locked(dsi));
/* RX_FIFO_NOT_EMPTY */
if (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) {
if (REG_GET(dsi, DSI_VC_CTRL(channel), 20, 20)) {
DSSERR("rx fifo not empty when sending BTA, dumping data:\n");
dsi_vc_flush_receive_data(dsidev, channel);
dsi_vc_flush_receive_data(dsi, channel);
}
REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 1, 6, 6); /* BTA_EN */
REG_FLD_MOD(dsi, DSI_VC_CTRL(channel), 1, 6, 6); /* BTA_EN */
/* flush posted write */
dsi_read_reg(dsidev, DSI_VC_CTRL(channel));
dsi_read_reg(dsi, DSI_VC_CTRL(channel));
return 0;
}
......@@ -2735,21 +2659,22 @@ static int dsi_vc_send_bta(struct platform_device *dsidev, int channel)
static int dsi_vc_send_bta_sync(struct omap_dss_device *dssdev, int channel)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
DECLARE_COMPLETION_ONSTACK(completion);
int r = 0;
u32 err;
r = dsi_register_isr_vc(dsidev, channel, dsi_completion_handler,
r = dsi_register_isr_vc(dsi, channel, dsi_completion_handler,
&completion, DSI_VC_IRQ_BTA);
if (r)
goto err0;
r = dsi_register_isr(dsidev, dsi_completion_handler, &completion,
r = dsi_register_isr(dsi, dsi_completion_handler, &completion,
DSI_IRQ_ERROR_MASK);
if (r)
goto err1;
r = dsi_vc_send_bta(dsidev, channel);
r = dsi_vc_send_bta(dsi, channel);
if (r)
goto err2;
......@@ -2760,41 +2685,40 @@ static int dsi_vc_send_bta_sync(struct omap_dss_device *dssdev, int channel)
goto err2;
}
err = dsi_get_errors(dsidev);
err = dsi_get_errors(dsi);
if (err) {
DSSERR("Error while sending BTA: %x\n", err);
r = -EIO;
goto err2;
}
err2:
dsi_unregister_isr(dsidev, dsi_completion_handler, &completion,
dsi_unregister_isr(dsi, dsi_completion_handler, &completion,
DSI_IRQ_ERROR_MASK);
err1:
dsi_unregister_isr_vc(dsidev, channel, dsi_completion_handler,
dsi_unregister_isr_vc(dsi, channel, dsi_completion_handler,
&completion, DSI_VC_IRQ_BTA);
err0:
return r;
}
static inline void dsi_vc_write_long_header(struct platform_device *dsidev,
int channel, u8 data_type, u16 len, u8 ecc)
static inline void dsi_vc_write_long_header(struct dsi_data *dsi, int channel,
u8 data_type, u16 len, u8 ecc)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
u32 val;
u8 data_id;
WARN_ON(!dsi_bus_is_locked(dsidev));
WARN_ON(!dsi_bus_is_locked(dsi));
data_id = data_type | dsi->vc[channel].vc_id << 6;
val = FLD_VAL(data_id, 7, 0) | FLD_VAL(len, 23, 8) |
FLD_VAL(ecc, 31, 24);
dsi_write_reg(dsidev, DSI_VC_LONG_PACKET_HEADER(channel), val);
dsi_write_reg(dsi, DSI_VC_LONG_PACKET_HEADER(channel), val);
}
static inline void dsi_vc_write_long_payload(struct platform_device *dsidev,
int channel, u8 b1, u8 b2, u8 b3, u8 b4)
static inline void dsi_vc_write_long_payload(struct dsi_data *dsi, int channel,
u8 b1, u8 b2, u8 b3, u8 b4)
{
u32 val;
......@@ -2803,14 +2727,13 @@ static inline void dsi_vc_write_long_payload(struct platform_device *dsidev,
/* DSSDBG("\twriting %02x, %02x, %02x, %02x (%#010x)\n",
b1, b2, b3, b4, val); */
dsi_write_reg(dsidev, DSI_VC_LONG_PACKET_PAYLOAD(channel), val);
dsi_write_reg(dsi, DSI_VC_LONG_PACKET_PAYLOAD(channel), val);
}
static int dsi_vc_send_long(struct platform_device *dsidev, int channel,
u8 data_type, u8 *data, u16 len, u8 ecc)
static int dsi_vc_send_long(struct dsi_data *dsi, int channel, u8 data_type,
u8 *data, u16 len, u8 ecc)
{
/*u32 val; */
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
int i;
u8 *p;
int r = 0;
......@@ -2825,9 +2748,9 @@ static int dsi_vc_send_long(struct platform_device *dsidev, int channel,
return -EINVAL;
}
dsi_vc_config_source(dsidev, channel, DSI_VC_SOURCE_L4);
dsi_vc_config_source(dsi, channel, DSI_VC_SOURCE_L4);
dsi_vc_write_long_header(dsidev, channel, data_type, len, ecc);
dsi_vc_write_long_header(dsi, channel, data_type, len, ecc);
p = data;
for (i = 0; i < len >> 2; i++) {
......@@ -2839,7 +2762,7 @@ static int dsi_vc_send_long(struct platform_device *dsidev, int channel,
b3 = *p++;
b4 = *p++;
dsi_vc_write_long_payload(dsidev, channel, b1, b2, b3, b4);
dsi_vc_write_long_payload(dsi, channel, b1, b2, b3, b4);
}
i = len % 4;
......@@ -2864,29 +2787,28 @@ static int dsi_vc_send_long(struct platform_device *dsidev, int channel,
break;
}
dsi_vc_write_long_payload(dsidev, channel, b1, b2, b3, 0);
dsi_vc_write_long_payload(dsi, channel, b1, b2, b3, 0);
}
return r;
}
static int dsi_vc_send_short(struct platform_device *dsidev, int channel,
u8 data_type, u16 data, u8 ecc)
static int dsi_vc_send_short(struct dsi_data *dsi, int channel, u8 data_type,
u16 data, u8 ecc)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
u32 r;
u8 data_id;
WARN_ON(!dsi_bus_is_locked(dsidev));
WARN_ON(!dsi_bus_is_locked(dsi));
if (dsi->debug_write)
DSSDBG("dsi_vc_send_short(ch%d, dt %#x, b1 %#x, b2 %#x)\n",
channel,
data_type, data & 0xff, (data >> 8) & 0xff);
dsi_vc_config_source(dsidev, channel, DSI_VC_SOURCE_L4);
dsi_vc_config_source(dsi, channel, DSI_VC_SOURCE_L4);
if (FLD_GET(dsi_read_reg(dsidev, DSI_VC_CTRL(channel)), 16, 16)) {
if (FLD_GET(dsi_read_reg(dsi, DSI_VC_CTRL(channel)), 16, 16)) {
DSSERR("ERROR FIFO FULL, aborting transfer\n");
return -EINVAL;
}
......@@ -2895,41 +2817,39 @@ static int dsi_vc_send_short(struct platform_device *dsidev, int channel,
r = (data_id << 0) | (data << 8) | (ecc << 24);
dsi_write_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel), r);
dsi_write_reg(dsi, DSI_VC_SHORT_PACKET_HEADER(channel), r);
return 0;
}
static int dsi_vc_send_null(struct omap_dss_device *dssdev, int channel)
static int dsi_vc_send_null(struct dsi_data *dsi, int channel)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
return dsi_vc_send_long(dsidev, channel, MIPI_DSI_NULL_PACKET, NULL,
0, 0);
return dsi_vc_send_long(dsi, channel, MIPI_DSI_NULL_PACKET, NULL, 0, 0);
}
static int dsi_vc_write_nosync_common(struct platform_device *dsidev,
int channel, u8 *data, int len, enum dss_dsi_content_type type)
static int dsi_vc_write_nosync_common(struct dsi_data *dsi, int channel,
u8 *data, int len,
enum dss_dsi_content_type type)
{
int r;
if (len == 0) {
BUG_ON(type == DSS_DSI_CONTENT_DCS);
r = dsi_vc_send_short(dsidev, channel,
r = dsi_vc_send_short(dsi, channel,
MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM, 0, 0);
} else if (len == 1) {
r = dsi_vc_send_short(dsidev, channel,
r = dsi_vc_send_short(dsi, channel,
type == DSS_DSI_CONTENT_GENERIC ?
MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM :
MIPI_DSI_DCS_SHORT_WRITE, data[0], 0);
} else if (len == 2) {
r = dsi_vc_send_short(dsidev, channel,
r = dsi_vc_send_short(dsi, channel,
type == DSS_DSI_CONTENT_GENERIC ?
MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM :
MIPI_DSI_DCS_SHORT_WRITE_PARAM,
data[0] | (data[1] << 8), 0);
} else {
r = dsi_vc_send_long(dsidev, channel,
r = dsi_vc_send_long(dsi, channel,
type == DSS_DSI_CONTENT_GENERIC ?
MIPI_DSI_GENERIC_LONG_WRITE :
MIPI_DSI_DCS_LONG_WRITE, data, len, 0);
......@@ -2942,8 +2862,9 @@ static int dsi_vc_dcs_write_nosync(struct omap_dss_device *dssdev, int channel,
u8 *data, int len)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
return dsi_vc_write_nosync_common(dsidev, channel, data, len,
return dsi_vc_write_nosync_common(dsi, channel, data, len,
DSS_DSI_CONTENT_DCS);
}
......@@ -2951,18 +2872,21 @@ static int dsi_vc_generic_write_nosync(struct omap_dss_device *dssdev, int chann
u8 *data, int len)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
return dsi_vc_write_nosync_common(dsidev, channel, data, len,
return dsi_vc_write_nosync_common(dsi, channel, data, len,
DSS_DSI_CONTENT_GENERIC);
}
static int dsi_vc_write_common(struct omap_dss_device *dssdev, int channel,
u8 *data, int len, enum dss_dsi_content_type type)
static int dsi_vc_write_common(struct omap_dss_device *dssdev,
int channel, u8 *data, int len,
enum dss_dsi_content_type type)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
int r;
r = dsi_vc_write_nosync_common(dsidev, channel, data, len, type);
r = dsi_vc_write_nosync_common(dsi, channel, data, len, type);
if (r)
goto err;
......@@ -2971,9 +2895,9 @@ static int dsi_vc_write_common(struct omap_dss_device *dssdev, int channel,
goto err;
/* RX_FIFO_NOT_EMPTY */
if (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) {
if (REG_GET(dsi, DSI_VC_CTRL(channel), 20, 20)) {
DSSERR("rx fifo not empty after write, dumping data:\n");
dsi_vc_flush_receive_data(dsidev, channel);
dsi_vc_flush_receive_data(dsi, channel);
r = -EIO;
goto err;
}
......@@ -2999,17 +2923,16 @@ static int dsi_vc_generic_write(struct omap_dss_device *dssdev, int channel, u8
DSS_DSI_CONTENT_GENERIC);
}
static int dsi_vc_dcs_send_read_request(struct platform_device *dsidev,
int channel, u8 dcs_cmd)
static int dsi_vc_dcs_send_read_request(struct dsi_data *dsi, int channel,
u8 dcs_cmd)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
int r;
if (dsi->debug_read)
DSSDBG("dsi_vc_dcs_send_read_request(ch%d, dcs_cmd %x)\n",
channel, dcs_cmd);
r = dsi_vc_send_short(dsidev, channel, MIPI_DSI_DCS_READ, dcs_cmd, 0);
r = dsi_vc_send_short(dsi, channel, MIPI_DSI_DCS_READ, dcs_cmd, 0);
if (r) {
DSSERR("dsi_vc_dcs_send_read_request(ch %d, cmd 0x%02x)"
" failed\n", channel, dcs_cmd);
......@@ -3019,10 +2942,9 @@ static int dsi_vc_dcs_send_read_request(struct platform_device *dsidev,
return 0;
}
static int dsi_vc_generic_send_read_request(struct platform_device *dsidev,
int channel, u8 *reqdata, int reqlen)
static int dsi_vc_generic_send_read_request(struct dsi_data *dsi, int channel,
u8 *reqdata, int reqlen)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
u16 data;
u8 data_type;
int r;
......@@ -3045,7 +2967,7 @@ static int dsi_vc_generic_send_read_request(struct platform_device *dsidev,
return -EINVAL;
}
r = dsi_vc_send_short(dsidev, channel, data_type, data, 0);
r = dsi_vc_send_short(dsi, channel, data_type, data, 0);
if (r) {
DSSERR("dsi_vc_generic_send_read_request(ch %d, reqlen %d)"
" failed\n", channel, reqlen);
......@@ -3055,22 +2977,21 @@ static int dsi_vc_generic_send_read_request(struct platform_device *dsidev,
return 0;
}
static int dsi_vc_read_rx_fifo(struct platform_device *dsidev, int channel,
u8 *buf, int buflen, enum dss_dsi_content_type type)
static int dsi_vc_read_rx_fifo(struct dsi_data *dsi, int channel, u8 *buf,
int buflen, enum dss_dsi_content_type type)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
u32 val;
u8 dt;
int r;
/* RX_FIFO_NOT_EMPTY */
if (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20) == 0) {
if (REG_GET(dsi, DSI_VC_CTRL(channel), 20, 20) == 0) {
DSSERR("RX fifo empty when trying to read.\n");
r = -EIO;
goto err;
}
val = dsi_read_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel));
val = dsi_read_reg(dsi, DSI_VC_SHORT_PACKET_HEADER(channel));
if (dsi->debug_read)
DSSDBG("\theader: %08x\n", val);
dt = FLD_GET(val, 5, 0);
......@@ -3133,7 +3054,7 @@ static int dsi_vc_read_rx_fifo(struct platform_device *dsidev, int channel,
/* two byte checksum ends the packet, not included in len */
for (w = 0; w < len + 2;) {
int b;
val = dsi_read_reg(dsidev,
val = dsi_read_reg(dsi,
DSI_VC_SHORT_PACKET_HEADER(channel));
if (dsi->debug_read)
DSSDBG("\t\t%02x %02x %02x %02x\n",
......@@ -3168,9 +3089,10 @@ static int dsi_vc_dcs_read(struct omap_dss_device *dssdev, int channel, u8 dcs_c
u8 *buf, int buflen)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
int r;
r = dsi_vc_dcs_send_read_request(dsidev, channel, dcs_cmd);
r = dsi_vc_dcs_send_read_request(dsi, channel, dcs_cmd);
if (r)
goto err;
......@@ -3178,7 +3100,7 @@ static int dsi_vc_dcs_read(struct omap_dss_device *dssdev, int channel, u8 dcs_c
if (r)
goto err;
r = dsi_vc_read_rx_fifo(dsidev, channel, buf, buflen,
r = dsi_vc_read_rx_fifo(dsi, channel, buf, buflen,
DSS_DSI_CONTENT_DCS);
if (r < 0)
goto err;
......@@ -3198,9 +3120,10 @@ static int dsi_vc_generic_read(struct omap_dss_device *dssdev, int channel,
u8 *reqdata, int reqlen, u8 *buf, int buflen)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
int r;
r = dsi_vc_generic_send_read_request(dsidev, channel, reqdata, reqlen);
r = dsi_vc_generic_send_read_request(dsi, channel, reqdata, reqlen);
if (r)
return r;
......@@ -3208,7 +3131,7 @@ static int dsi_vc_generic_read(struct omap_dss_device *dssdev, int channel,
if (r)
return r;
r = dsi_vc_read_rx_fifo(dsidev, channel, buf, buflen,
r = dsi_vc_read_rx_fifo(dsi, channel, buf, buflen,
DSS_DSI_CONTENT_GENERIC);
if (r < 0)
return r;
......@@ -3225,21 +3148,21 @@ static int dsi_vc_set_max_rx_packet_size(struct omap_dss_device *dssdev, int cha
u16 len)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
return dsi_vc_send_short(dsidev, channel,
return dsi_vc_send_short(dsi, channel,
MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE, len, 0);
}
static int dsi_enter_ulps(struct platform_device *dsidev)
static int dsi_enter_ulps(struct dsi_data *dsi)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
DECLARE_COMPLETION_ONSTACK(completion);
int r, i;
unsigned int mask;
DSSDBG("Entering ULPS");
WARN_ON(!dsi_bus_is_locked(dsidev));
WARN_ON(!dsi_bus_is_locked(dsi));
WARN_ON(dsi->ulps_enabled);
......@@ -3247,35 +3170,35 @@ static int dsi_enter_ulps(struct platform_device *dsidev)
return 0;
/* DDR_CLK_ALWAYS_ON */
if (REG_GET(dsidev, DSI_CLK_CTRL, 13, 13)) {
dsi_if_enable(dsidev, 0);
REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 0, 13, 13);
dsi_if_enable(dsidev, 1);
if (REG_GET(dsi, DSI_CLK_CTRL, 13, 13)) {
dsi_if_enable(dsi, 0);
REG_FLD_MOD(dsi, DSI_CLK_CTRL, 0, 13, 13);
dsi_if_enable(dsi, 1);
}
dsi_sync_vc(dsidev, 0);
dsi_sync_vc(dsidev, 1);
dsi_sync_vc(dsidev, 2);
dsi_sync_vc(dsidev, 3);
dsi_sync_vc(dsi, 0);
dsi_sync_vc(dsi, 1);
dsi_sync_vc(dsi, 2);
dsi_sync_vc(dsi, 3);
dsi_force_tx_stop_mode_io(dsidev);
dsi_force_tx_stop_mode_io(dsi);
dsi_vc_enable(dsidev, 0, false);
dsi_vc_enable(dsidev, 1, false);
dsi_vc_enable(dsidev, 2, false);
dsi_vc_enable(dsidev, 3, false);
dsi_vc_enable(dsi, 0, false);
dsi_vc_enable(dsi, 1, false);
dsi_vc_enable(dsi, 2, false);
dsi_vc_enable(dsi, 3, false);
if (REG_GET(dsidev, DSI_COMPLEXIO_CFG2, 16, 16)) { /* HS_BUSY */
if (REG_GET(dsi, DSI_COMPLEXIO_CFG2, 16, 16)) { /* HS_BUSY */
DSSERR("HS busy when enabling ULPS\n");
return -EIO;
}
if (REG_GET(dsidev, DSI_COMPLEXIO_CFG2, 17, 17)) { /* LP_BUSY */
if (REG_GET(dsi, DSI_COMPLEXIO_CFG2, 17, 17)) { /* LP_BUSY */
DSSERR("LP busy when enabling ULPS\n");
return -EIO;
}
r = dsi_register_isr_cio(dsidev, dsi_completion_handler, &completion,
r = dsi_register_isr_cio(dsi, dsi_completion_handler, &completion,
DSI_CIO_IRQ_ULPSACTIVENOT_ALL0);
if (r)
return r;
......@@ -3289,10 +3212,10 @@ static int dsi_enter_ulps(struct platform_device *dsidev)
}
/* Assert TxRequestEsc for data lanes and TxUlpsClk for clk lane */
/* LANEx_ULPS_SIG2 */
REG_FLD_MOD(dsidev, DSI_COMPLEXIO_CFG2, mask, 9, 5);
REG_FLD_MOD(dsi, DSI_COMPLEXIO_CFG2, mask, 9, 5);
/* flush posted write and wait for SCP interface to finish the write */
dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG2);
dsi_read_reg(dsi, DSI_COMPLEXIO_CFG2);
if (wait_for_completion_timeout(&completion,
msecs_to_jiffies(1000)) == 0) {
......@@ -3301,31 +3224,31 @@ static int dsi_enter_ulps(struct platform_device *dsidev)
goto err;
}
dsi_unregister_isr_cio(dsidev, dsi_completion_handler, &completion,
dsi_unregister_isr_cio(dsi, dsi_completion_handler, &completion,
DSI_CIO_IRQ_ULPSACTIVENOT_ALL0);
/* Reset LANEx_ULPS_SIG2 */
REG_FLD_MOD(dsidev, DSI_COMPLEXIO_CFG2, 0, 9, 5);
REG_FLD_MOD(dsi, DSI_COMPLEXIO_CFG2, 0, 9, 5);
/* flush posted write and wait for SCP interface to finish the write */
dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG2);
dsi_read_reg(dsi, DSI_COMPLEXIO_CFG2);
dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_ULPS);
dsi_cio_power(dsi, DSI_COMPLEXIO_POWER_ULPS);
dsi_if_enable(dsidev, false);
dsi_if_enable(dsi, false);
dsi->ulps_enabled = true;
return 0;
err:
dsi_unregister_isr_cio(dsidev, dsi_completion_handler, &completion,
dsi_unregister_isr_cio(dsi, dsi_completion_handler, &completion,
DSI_CIO_IRQ_ULPSACTIVENOT_ALL0);
return r;
}
static void dsi_set_lp_rx_timeout(struct platform_device *dsidev,
unsigned int ticks, bool x4, bool x16)
static void dsi_set_lp_rx_timeout(struct dsi_data *dsi, unsigned int ticks,
bool x4, bool x16)
{
unsigned long fck;
unsigned long total_ticks;
......@@ -3334,14 +3257,14 @@ static void dsi_set_lp_rx_timeout(struct platform_device *dsidev,
BUG_ON(ticks > 0x1fff);
/* ticks in DSI_FCK */
fck = dsi_fclk_rate(dsidev);
fck = dsi_fclk_rate(dsi);
r = dsi_read_reg(dsidev, DSI_TIMING2);
r = dsi_read_reg(dsi, DSI_TIMING2);
r = FLD_MOD(r, 1, 15, 15); /* LP_RX_TO */
r = FLD_MOD(r, x16 ? 1 : 0, 14, 14); /* LP_RX_TO_X16 */
r = FLD_MOD(r, x4 ? 1 : 0, 13, 13); /* LP_RX_TO_X4 */
r = FLD_MOD(r, ticks, 12, 0); /* LP_RX_COUNTER */
dsi_write_reg(dsidev, DSI_TIMING2, r);
dsi_write_reg(dsi, DSI_TIMING2, r);
total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1);
......@@ -3351,8 +3274,8 @@ static void dsi_set_lp_rx_timeout(struct platform_device *dsidev,
(total_ticks * 1000) / (fck / 1000 / 1000));
}
static void dsi_set_ta_timeout(struct platform_device *dsidev,
unsigned int ticks, bool x8, bool x16)
static void dsi_set_ta_timeout(struct dsi_data *dsi, unsigned int ticks,
bool x8, bool x16)
{
unsigned long fck;
unsigned long total_ticks;
......@@ -3361,14 +3284,14 @@ static void dsi_set_ta_timeout(struct platform_device *dsidev,
BUG_ON(ticks > 0x1fff);
/* ticks in DSI_FCK */
fck = dsi_fclk_rate(dsidev);
fck = dsi_fclk_rate(dsi);
r = dsi_read_reg(dsidev, DSI_TIMING1);
r = dsi_read_reg(dsi, DSI_TIMING1);
r = FLD_MOD(r, 1, 31, 31); /* TA_TO */
r = FLD_MOD(r, x16 ? 1 : 0, 30, 30); /* TA_TO_X16 */
r = FLD_MOD(r, x8 ? 1 : 0, 29, 29); /* TA_TO_X8 */
r = FLD_MOD(r, ticks, 28, 16); /* TA_TO_COUNTER */
dsi_write_reg(dsidev, DSI_TIMING1, r);
dsi_write_reg(dsi, DSI_TIMING1, r);
total_ticks = ticks * (x16 ? 16 : 1) * (x8 ? 8 : 1);
......@@ -3378,8 +3301,8 @@ static void dsi_set_ta_timeout(struct platform_device *dsidev,
(total_ticks * 1000) / (fck / 1000 / 1000));
}
static void dsi_set_stop_state_counter(struct platform_device *dsidev,
unsigned int ticks, bool x4, bool x16)
static void dsi_set_stop_state_counter(struct dsi_data *dsi, unsigned int ticks,
bool x4, bool x16)
{
unsigned long fck;
unsigned long total_ticks;
......@@ -3388,14 +3311,14 @@ static void dsi_set_stop_state_counter(struct platform_device *dsidev,
BUG_ON(ticks > 0x1fff);
/* ticks in DSI_FCK */
fck = dsi_fclk_rate(dsidev);
fck = dsi_fclk_rate(dsi);
r = dsi_read_reg(dsidev, DSI_TIMING1);
r = dsi_read_reg(dsi, DSI_TIMING1);
r = FLD_MOD(r, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */
r = FLD_MOD(r, x16 ? 1 : 0, 14, 14); /* STOP_STATE_X16_IO */
r = FLD_MOD(r, x4 ? 1 : 0, 13, 13); /* STOP_STATE_X4_IO */
r = FLD_MOD(r, ticks, 12, 0); /* STOP_STATE_COUNTER_IO */
dsi_write_reg(dsidev, DSI_TIMING1, r);
dsi_write_reg(dsi, DSI_TIMING1, r);
total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1);
......@@ -3405,8 +3328,8 @@ static void dsi_set_stop_state_counter(struct platform_device *dsidev,
(total_ticks * 1000) / (fck / 1000 / 1000));
}
static void dsi_set_hs_tx_timeout(struct platform_device *dsidev,
unsigned int ticks, bool x4, bool x16)
static void dsi_set_hs_tx_timeout(struct dsi_data *dsi, unsigned int ticks,
bool x4, bool x16)
{
unsigned long fck;
unsigned long total_ticks;
......@@ -3415,14 +3338,14 @@ static void dsi_set_hs_tx_timeout(struct platform_device *dsidev,
BUG_ON(ticks > 0x1fff);
/* ticks in TxByteClkHS */
fck = dsi_get_txbyteclkhs(dsidev);
fck = dsi_get_txbyteclkhs(dsi);
r = dsi_read_reg(dsidev, DSI_TIMING2);
r = dsi_read_reg(dsi, DSI_TIMING2);
r = FLD_MOD(r, 1, 31, 31); /* HS_TX_TO */
r = FLD_MOD(r, x16 ? 1 : 0, 30, 30); /* HS_TX_TO_X16 */
r = FLD_MOD(r, x4 ? 1 : 0, 29, 29); /* HS_TX_TO_X8 (4 really) */
r = FLD_MOD(r, ticks, 28, 16); /* HS_TX_TO_COUNTER */
dsi_write_reg(dsidev, DSI_TIMING2, r);
dsi_write_reg(dsi, DSI_TIMING2, r);
total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1);
......@@ -3432,9 +3355,8 @@ static void dsi_set_hs_tx_timeout(struct platform_device *dsidev,
(total_ticks * 1000) / (fck / 1000 / 1000));
}
static void dsi_config_vp_num_line_buffers(struct platform_device *dsidev)
static void dsi_config_vp_num_line_buffers(struct dsi_data *dsi)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
int num_line_buffers;
if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
......@@ -3454,12 +3376,11 @@ static void dsi_config_vp_num_line_buffers(struct platform_device *dsidev)
}
/* LINE_BUFFER */
REG_FLD_MOD(dsidev, DSI_CTRL, num_line_buffers, 13, 12);
REG_FLD_MOD(dsi, DSI_CTRL, num_line_buffers, 13, 12);
}
static void dsi_config_vp_sync_events(struct platform_device *dsidev)
static void dsi_config_vp_sync_events(struct dsi_data *dsi)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
bool sync_end;
u32 r;
......@@ -3468,7 +3389,7 @@ static void dsi_config_vp_sync_events(struct platform_device *dsidev)
else
sync_end = false;
r = dsi_read_reg(dsidev, DSI_CTRL);
r = dsi_read_reg(dsi, DSI_CTRL);
r = FLD_MOD(r, 1, 9, 9); /* VP_DE_POL */
r = FLD_MOD(r, 1, 10, 10); /* VP_HSYNC_POL */
r = FLD_MOD(r, 1, 11, 11); /* VP_VSYNC_POL */
......@@ -3476,12 +3397,11 @@ static void dsi_config_vp_sync_events(struct platform_device *dsidev)
r = FLD_MOD(r, sync_end, 16, 16); /* VP_VSYNC_END */
r = FLD_MOD(r, 1, 17, 17); /* VP_HSYNC_START */
r = FLD_MOD(r, sync_end, 18, 18); /* VP_HSYNC_END */
dsi_write_reg(dsidev, DSI_CTRL, r);
dsi_write_reg(dsi, DSI_CTRL, r);
}
static void dsi_config_blanking_modes(struct platform_device *dsidev)
static void dsi_config_blanking_modes(struct dsi_data *dsi)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
int blanking_mode = dsi->vm_timings.blanking_mode;
int hfp_blanking_mode = dsi->vm_timings.hfp_blanking_mode;
int hbp_blanking_mode = dsi->vm_timings.hbp_blanking_mode;
......@@ -3492,12 +3412,12 @@ static void dsi_config_blanking_modes(struct platform_device *dsidev)
* 0 = TX FIFO packets sent or LPS in corresponding blanking periods
* 1 = Long blanking packets are sent in corresponding blanking periods
*/
r = dsi_read_reg(dsidev, DSI_CTRL);
r = dsi_read_reg(dsi, DSI_CTRL);
r = FLD_MOD(r, blanking_mode, 20, 20); /* BLANKING_MODE */
r = FLD_MOD(r, hfp_blanking_mode, 21, 21); /* HFP_BLANKING */
r = FLD_MOD(r, hbp_blanking_mode, 22, 22); /* HBP_BLANKING */
r = FLD_MOD(r, hsa_blanking_mode, 23, 23); /* HSA_BLANKING */
dsi_write_reg(dsidev, DSI_CTRL, r);
dsi_write_reg(dsi, DSI_CTRL, r);
}
/*
......@@ -3562,9 +3482,8 @@ static int dsi_compute_interleave_lp(int blank, int enter_hs, int exit_hs,
return max(lp_inter, 0);
}
static void dsi_config_cmd_mode_interleaving(struct platform_device *dsidev)
static void dsi_config_cmd_mode_interleaving(struct dsi_data *dsi)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
int blanking_mode;
int hfp_blanking_mode, hbp_blanking_mode, hsa_blanking_mode;
int hsa, hfp, hbp, width_bytes, bllp, lp_clk_div;
......@@ -3581,33 +3500,33 @@ static void dsi_config_cmd_mode_interleaving(struct platform_device *dsidev)
int bl_interleave_hs = 0, bl_interleave_lp = 0;
u32 r;
r = dsi_read_reg(dsidev, DSI_CTRL);
r = dsi_read_reg(dsi, DSI_CTRL);
blanking_mode = FLD_GET(r, 20, 20);
hfp_blanking_mode = FLD_GET(r, 21, 21);
hbp_blanking_mode = FLD_GET(r, 22, 22);
hsa_blanking_mode = FLD_GET(r, 23, 23);
r = dsi_read_reg(dsidev, DSI_VM_TIMING1);
r = dsi_read_reg(dsi, DSI_VM_TIMING1);
hbp = FLD_GET(r, 11, 0);
hfp = FLD_GET(r, 23, 12);
hsa = FLD_GET(r, 31, 24);
r = dsi_read_reg(dsidev, DSI_CLK_TIMING);
r = dsi_read_reg(dsi, DSI_CLK_TIMING);
ddr_clk_post = FLD_GET(r, 7, 0);
ddr_clk_pre = FLD_GET(r, 15, 8);
r = dsi_read_reg(dsidev, DSI_VM_TIMING7);
r = dsi_read_reg(dsi, DSI_VM_TIMING7);
exit_hs_mode_lat = FLD_GET(r, 15, 0);
enter_hs_mode_lat = FLD_GET(r, 31, 16);
r = dsi_read_reg(dsidev, DSI_CLK_CTRL);
r = dsi_read_reg(dsi, DSI_CLK_CTRL);
lp_clk_div = FLD_GET(r, 12, 0);
ddr_alwon = FLD_GET(r, 13, 13);
r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG0);
r = dsi_read_reg(dsi, DSI_DSIPHY_CFG0);
ths_exit = FLD_GET(r, 7, 0);
r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG1);
r = dsi_read_reg(dsi, DSI_DSIPHY_CFG1);
tclk_trail = FLD_GET(r, 15, 8);
exiths_clk = ths_exit + tclk_trail;
......@@ -3661,45 +3580,44 @@ static void dsi_config_cmd_mode_interleaving(struct platform_device *dsidev)
hsa_interleave_lp, hfp_interleave_lp, hbp_interleave_lp,
bl_interleave_lp);
r = dsi_read_reg(dsidev, DSI_VM_TIMING4);
r = dsi_read_reg(dsi, DSI_VM_TIMING4);
r = FLD_MOD(r, hsa_interleave_hs, 23, 16);
r = FLD_MOD(r, hfp_interleave_hs, 15, 8);
r = FLD_MOD(r, hbp_interleave_hs, 7, 0);
dsi_write_reg(dsidev, DSI_VM_TIMING4, r);
dsi_write_reg(dsi, DSI_VM_TIMING4, r);
r = dsi_read_reg(dsidev, DSI_VM_TIMING5);
r = dsi_read_reg(dsi, DSI_VM_TIMING5);
r = FLD_MOD(r, hsa_interleave_lp, 23, 16);
r = FLD_MOD(r, hfp_interleave_lp, 15, 8);
r = FLD_MOD(r, hbp_interleave_lp, 7, 0);
dsi_write_reg(dsidev, DSI_VM_TIMING5, r);
dsi_write_reg(dsi, DSI_VM_TIMING5, r);
r = dsi_read_reg(dsidev, DSI_VM_TIMING6);
r = dsi_read_reg(dsi, DSI_VM_TIMING6);
r = FLD_MOD(r, bl_interleave_hs, 31, 15);
r = FLD_MOD(r, bl_interleave_lp, 16, 0);
dsi_write_reg(dsidev, DSI_VM_TIMING6, r);
dsi_write_reg(dsi, DSI_VM_TIMING6, r);
}
static int dsi_proto_config(struct platform_device *dsidev)
static int dsi_proto_config(struct dsi_data *dsi)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
u32 r;
int buswidth = 0;
dsi_config_tx_fifo(dsidev, DSI_FIFO_SIZE_32,
dsi_config_tx_fifo(dsi, DSI_FIFO_SIZE_32,
DSI_FIFO_SIZE_32,
DSI_FIFO_SIZE_32,
DSI_FIFO_SIZE_32);
dsi_config_rx_fifo(dsidev, DSI_FIFO_SIZE_32,
dsi_config_rx_fifo(dsi, DSI_FIFO_SIZE_32,
DSI_FIFO_SIZE_32,
DSI_FIFO_SIZE_32,
DSI_FIFO_SIZE_32);
/* XXX what values for the timeouts? */
dsi_set_stop_state_counter(dsidev, 0x1000, false, false);
dsi_set_ta_timeout(dsidev, 0x1fff, true, true);
dsi_set_lp_rx_timeout(dsidev, 0x1fff, true, true);
dsi_set_hs_tx_timeout(dsidev, 0x1fff, true, true);
dsi_set_stop_state_counter(dsi, 0x1000, false, false);
dsi_set_ta_timeout(dsi, 0x1fff, true, true);
dsi_set_lp_rx_timeout(dsi, 0x1fff, true, true);
dsi_set_hs_tx_timeout(dsi, 0x1fff, true, true);
switch (dsi_get_pixel_size(dsi->pix_fmt)) {
case 16:
......@@ -3716,7 +3634,7 @@ static int dsi_proto_config(struct platform_device *dsidev)
return -EINVAL;
}
r = dsi_read_reg(dsidev, DSI_CTRL);
r = dsi_read_reg(dsi, DSI_CTRL);
r = FLD_MOD(r, 1, 1, 1); /* CS_RX_EN */
r = FLD_MOD(r, 1, 2, 2); /* ECC_RX_EN */
r = FLD_MOD(r, 1, 3, 3); /* TX_FIFO_ARBITRATION */
......@@ -3731,27 +3649,26 @@ static int dsi_proto_config(struct platform_device *dsidev)
r = FLD_MOD(r, 0, 25, 25);
}
dsi_write_reg(dsidev, DSI_CTRL, r);
dsi_write_reg(dsi, DSI_CTRL, r);
dsi_config_vp_num_line_buffers(dsidev);
dsi_config_vp_num_line_buffers(dsi);
if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
dsi_config_vp_sync_events(dsidev);
dsi_config_blanking_modes(dsidev);
dsi_config_cmd_mode_interleaving(dsidev);
dsi_config_vp_sync_events(dsi);
dsi_config_blanking_modes(dsi);
dsi_config_cmd_mode_interleaving(dsi);
}
dsi_vc_initial_config(dsidev, 0);
dsi_vc_initial_config(dsidev, 1);
dsi_vc_initial_config(dsidev, 2);
dsi_vc_initial_config(dsidev, 3);
dsi_vc_initial_config(dsi, 0);
dsi_vc_initial_config(dsi, 1);
dsi_vc_initial_config(dsi, 2);
dsi_vc_initial_config(dsi, 3);
return 0;
}
static void dsi_proto_timings(struct platform_device *dsidev)
static void dsi_proto_timings(struct dsi_data *dsi)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
unsigned int tlpx, tclk_zero, tclk_prepare, tclk_trail;
unsigned int tclk_pre, tclk_post;
unsigned int ths_prepare, ths_prepare_ths_zero, ths_zero;
......@@ -3762,25 +3679,25 @@ static void dsi_proto_timings(struct platform_device *dsidev)
int ndl = dsi->num_lanes_used - 1;
u32 r;
r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG0);
r = dsi_read_reg(dsi, DSI_DSIPHY_CFG0);
ths_prepare = FLD_GET(r, 31, 24);
ths_prepare_ths_zero = FLD_GET(r, 23, 16);
ths_zero = ths_prepare_ths_zero - ths_prepare;
ths_trail = FLD_GET(r, 15, 8);
ths_exit = FLD_GET(r, 7, 0);
r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG1);
r = dsi_read_reg(dsi, DSI_DSIPHY_CFG1);
tlpx = FLD_GET(r, 20, 16) * 2;
tclk_trail = FLD_GET(r, 15, 8);
tclk_zero = FLD_GET(r, 7, 0);
r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG2);
r = dsi_read_reg(dsi, DSI_DSIPHY_CFG2);
tclk_prepare = FLD_GET(r, 7, 0);
/* min 8*UI */
tclk_pre = 20;
/* min 60ns + 52*UI */
tclk_post = ns2ddr(dsidev, 60) + 26;
tclk_post = ns2ddr(dsi, 60) + 26;
ths_eot = DIV_ROUND_UP(4, ndl);
......@@ -3791,10 +3708,10 @@ static void dsi_proto_timings(struct platform_device *dsidev)
BUG_ON(ddr_clk_pre == 0 || ddr_clk_pre > 255);
BUG_ON(ddr_clk_post == 0 || ddr_clk_post > 255);
r = dsi_read_reg(dsidev, DSI_CLK_TIMING);
r = dsi_read_reg(dsi, DSI_CLK_TIMING);
r = FLD_MOD(r, ddr_clk_pre, 15, 8);
r = FLD_MOD(r, ddr_clk_post, 7, 0);
dsi_write_reg(dsidev, DSI_CLK_TIMING, r);
dsi_write_reg(dsi, DSI_CLK_TIMING, r);
DSSDBG("ddr_clk_pre %u, ddr_clk_post %u\n",
ddr_clk_pre,
......@@ -3808,7 +3725,7 @@ static void dsi_proto_timings(struct platform_device *dsidev)
r = FLD_VAL(enter_hs_mode_lat, 31, 16) |
FLD_VAL(exit_hs_mode_lat, 15, 0);
dsi_write_reg(dsidev, DSI_VM_TIMING7, r);
dsi_write_reg(dsi, DSI_VM_TIMING7, r);
DSSDBG("enter_hs_mode_lat %u, exit_hs_mode_lat %u\n",
enter_hs_mode_lat, exit_hs_mode_lat);
......@@ -3842,23 +3759,23 @@ static void dsi_proto_timings(struct platform_device *dsidev)
DSSDBG("VBP: %d, VFP: %d, VSA: %d, VACT: %d lines\n", vbp, vfp,
vsa, vm->vactive);
r = dsi_read_reg(dsidev, DSI_VM_TIMING1);
r = dsi_read_reg(dsi, DSI_VM_TIMING1);
r = FLD_MOD(r, hbp, 11, 0); /* HBP */
r = FLD_MOD(r, hfp, 23, 12); /* HFP */
r = FLD_MOD(r, hsync_end ? hsa : 0, 31, 24); /* HSA */
dsi_write_reg(dsidev, DSI_VM_TIMING1, r);
dsi_write_reg(dsi, DSI_VM_TIMING1, r);
r = dsi_read_reg(dsidev, DSI_VM_TIMING2);
r = dsi_read_reg(dsi, DSI_VM_TIMING2);
r = FLD_MOD(r, vbp, 7, 0); /* VBP */
r = FLD_MOD(r, vfp, 15, 8); /* VFP */
r = FLD_MOD(r, vsa, 23, 16); /* VSA */
r = FLD_MOD(r, window_sync, 27, 24); /* WINDOW_SYNC */
dsi_write_reg(dsidev, DSI_VM_TIMING2, r);
dsi_write_reg(dsi, DSI_VM_TIMING2, r);
r = dsi_read_reg(dsidev, DSI_VM_TIMING3);
r = dsi_read_reg(dsi, DSI_VM_TIMING3);
r = FLD_MOD(r, vm->vactive, 14, 0); /* VACT */
r = FLD_MOD(r, tl, 31, 16); /* TL */
dsi_write_reg(dsidev, DSI_VM_TIMING3, r);
dsi_write_reg(dsi, DSI_VM_TIMING3, r);
}
}
......@@ -3945,7 +3862,7 @@ static int dsi_enable_video_output(struct omap_dss_device *dssdev, int channel)
return -ENODEV;
}
r = dsi_display_init_dispc(dsidev, dispc_channel);
r = dsi_display_init_dispc(dsi, dispc_channel);
if (r)
goto err_init_dispc;
......@@ -3968,19 +3885,19 @@ static int dsi_enable_video_output(struct omap_dss_device *dssdev, int channel)
goto err_pix_fmt;
}
dsi_if_enable(dsidev, false);
dsi_vc_enable(dsidev, channel, false);
dsi_if_enable(dsi, false);
dsi_vc_enable(dsi, channel, false);
/* MODE, 1 = video mode */
REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 1, 4, 4);
REG_FLD_MOD(dsi, DSI_VC_CTRL(channel), 1, 4, 4);
word_count = DIV_ROUND_UP(dsi->vm.hactive * bpp, 8);
dsi_vc_write_long_header(dsidev, channel, data_type,
dsi_vc_write_long_header(dsi, channel, data_type,
word_count, 0);
dsi_vc_enable(dsidev, channel, true);
dsi_if_enable(dsidev, true);
dsi_vc_enable(dsi, channel, true);
dsi_if_enable(dsi, true);
}
r = dss_mgr_enable(dispc_channel);
......@@ -3991,11 +3908,11 @@ static int dsi_enable_video_output(struct omap_dss_device *dssdev, int channel)
err_mgr_enable:
if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
dsi_if_enable(dsidev, false);
dsi_vc_enable(dsidev, channel, false);
dsi_if_enable(dsi, false);
dsi_vc_enable(dsi, channel, false);
}
err_pix_fmt:
dsi_display_uninit_dispc(dsidev, dispc_channel);
dsi_display_uninit_dispc(dsi, dispc_channel);
err_init_dispc:
return r;
}
......@@ -4007,24 +3924,23 @@ static void dsi_disable_video_output(struct omap_dss_device *dssdev, int channel
enum omap_channel dispc_channel = dssdev->dispc_channel;
if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
dsi_if_enable(dsidev, false);
dsi_vc_enable(dsidev, channel, false);
dsi_if_enable(dsi, false);
dsi_vc_enable(dsi, channel, false);
/* MODE, 0 = command mode */
REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 0, 4, 4);
REG_FLD_MOD(dsi, DSI_VC_CTRL(channel), 0, 4, 4);
dsi_vc_enable(dsidev, channel, true);
dsi_if_enable(dsidev, true);
dsi_vc_enable(dsi, channel, true);
dsi_if_enable(dsi, true);
}
dss_mgr_disable(dispc_channel);
dsi_display_uninit_dispc(dsidev, dispc_channel);
dsi_display_uninit_dispc(dsi, dispc_channel);
}
static void dsi_update_screen_dispc(struct platform_device *dsidev)
static void dsi_update_screen_dispc(struct dsi_data *dsi)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
enum omap_channel dispc_channel = dsi->output.dispc_channel;
unsigned int bytespp;
unsigned int bytespl;
......@@ -4041,7 +3957,7 @@ static void dsi_update_screen_dispc(struct platform_device *dsidev)
DSSDBG("dsi_update_screen_dispc(%dx%d)\n", w, h);
dsi_vc_config_source(dsidev, channel, DSI_VC_SOURCE_VP);
dsi_vc_config_source(dsi, channel, DSI_VC_SOURCE_VP);
bytespp = dsi_get_pixel_size(dsi->pix_fmt) / 8;
bytespl = w * bytespp;
......@@ -4062,16 +3978,16 @@ static void dsi_update_screen_dispc(struct platform_device *dsidev)
total_len += (bytespf % packet_payload) + 1;
l = FLD_VAL(total_len, 23, 0); /* TE_SIZE */
dsi_write_reg(dsidev, DSI_VC_TE(channel), l);
dsi_write_reg(dsi, DSI_VC_TE(channel), l);
dsi_vc_write_long_header(dsidev, channel, MIPI_DSI_DCS_LONG_WRITE,
dsi_vc_write_long_header(dsi, channel, MIPI_DSI_DCS_LONG_WRITE,
packet_len, 0);
if (dsi->te_enabled)
l = FLD_MOD(l, 1, 30, 30); /* TE_EN */
else
l = FLD_MOD(l, 1, 31, 31); /* TE_START */
dsi_write_reg(dsidev, DSI_VC_TE(channel), l);
dsi_write_reg(dsi, DSI_VC_TE(channel), l);
/* We put SIDLEMODE to no-idle for the duration of the transfer,
* because DSS interrupts are not capable of waking up the CPU and the
......@@ -4081,7 +3997,7 @@ static void dsi_update_screen_dispc(struct platform_device *dsidev)
*/
dispc_disable_sidle();
dsi_perf_mark_start(dsidev);
dsi_perf_mark_start(dsi);
r = schedule_delayed_work(&dsi->framedone_timeout_work,
msecs_to_jiffies(250));
......@@ -4094,9 +4010,9 @@ static void dsi_update_screen_dispc(struct platform_device *dsidev)
if (dsi->te_enabled) {
/* disable LP_RX_TO, so that we can receive TE. Time to wait
* for TE is longer than the timer allows */
REG_FLD_MOD(dsidev, DSI_TIMING2, 0, 15, 15); /* LP_RX_TO */
REG_FLD_MOD(dsi, DSI_TIMING2, 0, 15, 15); /* LP_RX_TO */
dsi_vc_send_bta(dsidev, channel);
dsi_vc_send_bta(dsi, channel);
#ifdef DSI_CATCH_MISSING_TE
mod_timer(&dsi->te_timer, jiffies + msecs_to_jiffies(250));
......@@ -4111,22 +4027,20 @@ static void dsi_te_timeout(struct timer_list *unused)
}
#endif
static void dsi_handle_framedone(struct platform_device *dsidev, int error)
static void dsi_handle_framedone(struct dsi_data *dsi, int error)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
/* SIDLEMODE back to smart-idle */
dispc_enable_sidle();
if (dsi->te_enabled) {
/* enable LP_RX_TO again after the TE */
REG_FLD_MOD(dsidev, DSI_TIMING2, 1, 15, 15); /* LP_RX_TO */
REG_FLD_MOD(dsi, DSI_TIMING2, 1, 15, 15); /* LP_RX_TO */
}
dsi->framedone_callback(error, dsi->framedone_data);
if (!error)
dsi_perf_show(dsidev, "DISPC");
dsi_perf_show(dsi, "DISPC");
}
static void dsi_framedone_timeout_work_callback(struct work_struct *work)
......@@ -4142,13 +4056,12 @@ static void dsi_framedone_timeout_work_callback(struct work_struct *work)
DSSERR("Framedone not received for 250ms!\n");
dsi_handle_framedone(dsi->pdev, -ETIMEDOUT);
dsi_handle_framedone(dsi, -ETIMEDOUT);
}
static void dsi_framedone_irq_callback(void *data)
{
struct platform_device *dsidev = (struct platform_device *) data;
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
struct dsi_data *dsi = data;
/* Note: We get FRAMEDONE when DISPC has finished sending pixels and
* turns itself off. However, DSI still has the pixels in its buffers,
......@@ -4157,7 +4070,7 @@ static void dsi_framedone_irq_callback(void *data)
cancel_delayed_work(&dsi->framedone_timeout_work);
dsi_handle_framedone(dsidev, 0);
dsi_handle_framedone(dsi, 0);
}
static int dsi_update(struct omap_dss_device *dssdev, int channel,
......@@ -4167,7 +4080,7 @@ static int dsi_update(struct omap_dss_device *dssdev, int channel,
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
u16 dw, dh;
dsi_perf_mark_setup(dsidev);
dsi_perf_mark_setup(dsi);
dsi->update_channel = channel;
......@@ -4181,21 +4094,20 @@ static int dsi_update(struct omap_dss_device *dssdev, int channel,
dsi->update_bytes = dw * dh *
dsi_get_pixel_size(dsi->pix_fmt) / 8;
#endif
dsi_update_screen_dispc(dsidev);
dsi_update_screen_dispc(dsi);
return 0;
}
/* Display funcs */
static int dsi_configure_dispc_clocks(struct platform_device *dsidev)
static int dsi_configure_dispc_clocks(struct dsi_data *dsi)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
struct dispc_clock_info dispc_cinfo;
int r;
unsigned long fck;
fck = dsi_get_pll_hsdiv_dispc_rate(dsidev);
fck = dsi_get_pll_hsdiv_dispc_rate(dsi);
dispc_cinfo.lck_div = dsi->user_dispc_cinfo.lck_div;
dispc_cinfo.pck_div = dsi->user_dispc_cinfo.pck_div;
......@@ -4211,10 +4123,9 @@ static int dsi_configure_dispc_clocks(struct platform_device *dsidev)
return 0;
}
static int dsi_display_init_dispc(struct platform_device *dsidev,
enum omap_channel channel)
static int dsi_display_init_dispc(struct dsi_data *dsi,
enum omap_channel channel)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
int r;
dss_select_lcd_clk_source(dsi->dss, channel, dsi->module_id == 0 ?
......@@ -4223,7 +4134,7 @@ static int dsi_display_init_dispc(struct platform_device *dsidev,
if (dsi->mode == OMAP_DSS_DSI_CMD_MODE) {
r = dss_mgr_register_framedone_handler(channel,
dsi_framedone_irq_callback, dsidev);
dsi_framedone_irq_callback, dsi);
if (r) {
DSSERR("can't register FRAMEDONE handler\n");
goto err;
......@@ -4254,7 +4165,7 @@ static int dsi_display_init_dispc(struct platform_device *dsidev,
dss_mgr_set_timings(channel, &dsi->vm);
r = dsi_configure_dispc_clocks(dsidev);
r = dsi_configure_dispc_clocks(dsi);
if (r)
goto err1;
......@@ -4269,27 +4180,24 @@ static int dsi_display_init_dispc(struct platform_device *dsidev,
err1:
if (dsi->mode == OMAP_DSS_DSI_CMD_MODE)
dss_mgr_unregister_framedone_handler(channel,
dsi_framedone_irq_callback, dsidev);
dsi_framedone_irq_callback, dsi);
err:
dss_select_lcd_clk_source(dsi->dss, channel, DSS_CLK_SRC_FCK);
return r;
}
static void dsi_display_uninit_dispc(struct platform_device *dsidev,
enum omap_channel channel)
static void dsi_display_uninit_dispc(struct dsi_data *dsi,
enum omap_channel channel)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
if (dsi->mode == OMAP_DSS_DSI_CMD_MODE)
dss_mgr_unregister_framedone_handler(channel,
dsi_framedone_irq_callback, dsidev);
dsi_framedone_irq_callback, dsi);
dss_select_lcd_clk_source(dsi->dss, channel, DSS_CLK_SRC_FCK);
}
static int dsi_configure_dsi_clocks(struct platform_device *dsidev)
static int dsi_configure_dsi_clocks(struct dsi_data *dsi)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
struct dss_pll_clock_info cinfo;
int r;
......@@ -4304,16 +4212,15 @@ static int dsi_configure_dsi_clocks(struct platform_device *dsidev)
return 0;
}
static int dsi_display_init_dsi(struct platform_device *dsidev)
static int dsi_display_init_dsi(struct dsi_data *dsi)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
int r;
r = dss_pll_enable(&dsi->pll);
if (r)
goto err0;
r = dsi_configure_dsi_clocks(dsidev);
r = dsi_configure_dsi_clocks(dsi);
if (r)
goto err1;
......@@ -4323,33 +4230,33 @@ static int dsi_display_init_dsi(struct platform_device *dsidev)
DSSDBG("PLL OK\n");
r = dsi_cio_init(dsidev);
r = dsi_cio_init(dsi);
if (r)
goto err2;
_dsi_print_reset_status(dsidev);
_dsi_print_reset_status(dsi);
dsi_proto_timings(dsidev);
dsi_set_lp_clk_divisor(dsidev);
dsi_proto_timings(dsi);
dsi_set_lp_clk_divisor(dsi);
if (1)
_dsi_print_reset_status(dsidev);
_dsi_print_reset_status(dsi);
r = dsi_proto_config(dsidev);
r = dsi_proto_config(dsi);
if (r)
goto err3;
/* enable interface */
dsi_vc_enable(dsidev, 0, 1);
dsi_vc_enable(dsidev, 1, 1);
dsi_vc_enable(dsidev, 2, 1);
dsi_vc_enable(dsidev, 3, 1);
dsi_if_enable(dsidev, 1);
dsi_force_tx_stop_mode_io(dsidev);
dsi_vc_enable(dsi, 0, 1);
dsi_vc_enable(dsi, 1, 1);
dsi_vc_enable(dsi, 2, 1);
dsi_vc_enable(dsi, 3, 1);
dsi_if_enable(dsi, 1);
dsi_force_tx_stop_mode_io(dsi);
return 0;
err3:
dsi_cio_uninit(dsidev);
dsi_cio_uninit(dsi);
err2:
dss_select_dsi_clk_source(dsi->dss, dsi->module_id, DSS_CLK_SRC_FCK);
err1:
......@@ -4358,24 +4265,22 @@ static int dsi_display_init_dsi(struct platform_device *dsidev)
return r;
}
static void dsi_display_uninit_dsi(struct platform_device *dsidev,
bool disconnect_lanes, bool enter_ulps)
static void dsi_display_uninit_dsi(struct dsi_data *dsi, bool disconnect_lanes,
bool enter_ulps)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
if (enter_ulps && !dsi->ulps_enabled)
dsi_enter_ulps(dsidev);
dsi_enter_ulps(dsi);
/* disable interface */
dsi_if_enable(dsidev, 0);
dsi_vc_enable(dsidev, 0, 0);
dsi_vc_enable(dsidev, 1, 0);
dsi_vc_enable(dsidev, 2, 0);
dsi_vc_enable(dsidev, 3, 0);
dsi_if_enable(dsi, 0);
dsi_vc_enable(dsi, 0, 0);
dsi_vc_enable(dsi, 1, 0);
dsi_vc_enable(dsi, 2, 0);
dsi_vc_enable(dsi, 3, 0);
dss_select_dsi_clk_source(dsi->dss, dsi->module_id, DSS_CLK_SRC_FCK);
dsi_cio_uninit(dsidev);
dsi_pll_uninit(dsidev, disconnect_lanes);
dsi_cio_uninit(dsi);
dsi_pll_uninit(dsi, disconnect_lanes);
}
static int dsi_display_enable(struct omap_dss_device *dssdev)
......@@ -4386,17 +4291,17 @@ static int dsi_display_enable(struct omap_dss_device *dssdev)
DSSDBG("dsi_display_enable\n");
WARN_ON(!dsi_bus_is_locked(dsidev));
WARN_ON(!dsi_bus_is_locked(dsi));
mutex_lock(&dsi->lock);
r = dsi_runtime_get(dsidev);
r = dsi_runtime_get(dsi);
if (r)
goto err_get_dsi;
_dsi_initialize_irq(dsidev);
_dsi_initialize_irq(dsi);
r = dsi_display_init_dsi(dsidev);
r = dsi_display_init_dsi(dsi);
if (r)
goto err_init_dsi;
......@@ -4405,7 +4310,7 @@ static int dsi_display_enable(struct omap_dss_device *dssdev)
return 0;
err_init_dsi:
dsi_runtime_put(dsidev);
dsi_runtime_put(dsi);
err_get_dsi:
mutex_unlock(&dsi->lock);
DSSDBG("dsi_display_enable FAILED\n");
......@@ -4420,18 +4325,18 @@ static void dsi_display_disable(struct omap_dss_device *dssdev,
DSSDBG("dsi_display_disable\n");
WARN_ON(!dsi_bus_is_locked(dsidev));
WARN_ON(!dsi_bus_is_locked(dsi));
mutex_lock(&dsi->lock);
dsi_sync_vc(dsidev, 0);
dsi_sync_vc(dsidev, 1);
dsi_sync_vc(dsidev, 2);
dsi_sync_vc(dsidev, 3);
dsi_sync_vc(dsi, 0);
dsi_sync_vc(dsi, 1);
dsi_sync_vc(dsi, 2);
dsi_sync_vc(dsi, 3);
dsi_display_uninit_dsi(dsidev, disconnect_lanes, enter_ulps);
dsi_display_uninit_dsi(dsi, disconnect_lanes, enter_ulps);
dsi_runtime_put(dsidev);
dsi_runtime_put(dsi);
mutex_unlock(&dsi->lock);
}
......@@ -4568,7 +4473,7 @@ static bool dsi_cm_calc_pll_cb(int n, int m, unsigned long fint,
unsigned long clkdco, void *data)
{
struct dsi_clk_calc_ctx *ctx = data;
struct dsi_data *dsi = dsi_get_dsidrv_data(ctx->dsidev);
struct dsi_data *dsi = ctx->dsi;
ctx->dsi_cinfo.n = n;
ctx->dsi_cinfo.m = m;
......@@ -4604,7 +4509,7 @@ static bool dsi_cm_calc(struct dsi_data *dsi,
txbyteclk = pck * bitspp / 8 / ndl;
memset(ctx, 0, sizeof(*ctx));
ctx->dsidev = dsi->pdev;
ctx->dsi = dsi;
ctx->pll = &dsi->pll;
ctx->config = cfg;
ctx->req_pck_min = pck;
......@@ -4621,7 +4526,7 @@ static bool dsi_cm_calc(struct dsi_data *dsi,
static bool dsi_vm_calc_blanking(struct dsi_clk_calc_ctx *ctx)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(ctx->dsidev);
struct dsi_data *dsi = ctx->dsi;
const struct omap_dss_dsi_config *cfg = ctx->config;
int bitspp = dsi_get_pixel_size(cfg->pixel_format);
int ndl = dsi->num_lanes_used - 1;
......@@ -4868,7 +4773,7 @@ static bool dsi_vm_calc_pll_cb(int n, int m, unsigned long fint,
unsigned long clkdco, void *data)
{
struct dsi_clk_calc_ctx *ctx = data;
struct dsi_data *dsi = dsi_get_dsidrv_data(ctx->dsidev);
struct dsi_data *dsi = ctx->dsi;
ctx->dsi_cinfo.n = n;
ctx->dsi_cinfo.m = m;
......@@ -4895,7 +4800,7 @@ static bool dsi_vm_calc(struct dsi_data *dsi,
clkin = clk_get_rate(dsi->pll.clkin);
memset(ctx, 0, sizeof(*ctx));
ctx->dsidev = dsi->pdev;
ctx->dsi = dsi;
ctx->pll = &dsi->pll;
ctx->config = cfg;
......@@ -5068,12 +4973,11 @@ static void dsi_release_vc(struct omap_dss_device *dssdev, int channel)
}
static int dsi_get_clocks(struct platform_device *dsidev)
static int dsi_get_clocks(struct dsi_data *dsi)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
struct clk *clk;
clk = devm_clk_get(&dsidev->dev, "fck");
clk = devm_clk_get(&dsi->pdev->dev, "fck");
if (IS_ERR(clk)) {
DSSERR("can't get fck\n");
return PTR_ERR(clk);
......@@ -5088,10 +4992,11 @@ static int dsi_connect(struct omap_dss_device *dssdev,
struct omap_dss_device *dst)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
enum omap_channel dispc_channel = dssdev->dispc_channel;
int r;
r = dsi_regulator_init(dsidev);
r = dsi_regulator_init(dsi);
if (r)
return r;
......@@ -5164,12 +5069,11 @@ static const struct omapdss_dsi_ops dsi_ops = {
.set_max_rx_packet_size = dsi_vc_set_max_rx_packet_size,
};
static void dsi_init_output(struct platform_device *dsidev)
static void dsi_init_output(struct dsi_data *dsi)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
struct omap_dss_device *out = &dsi->output;
out->dev = &dsidev->dev;
out->dev = &dsi->pdev->dev;
out->id = dsi->module_id == 0 ?
OMAP_DSS_OUTPUT_DSI1 : OMAP_DSS_OUTPUT_DSI2;
......@@ -5182,18 +5086,16 @@ static void dsi_init_output(struct platform_device *dsidev)
omapdss_register_output(out);
}
static void dsi_uninit_output(struct platform_device *dsidev)
static void dsi_uninit_output(struct dsi_data *dsi)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
struct omap_dss_device *out = &dsi->output;
omapdss_unregister_output(out);
}
static int dsi_probe_of(struct platform_device *pdev)
static int dsi_probe_of(struct dsi_data *dsi)
{
struct device_node *node = pdev->dev.of_node;
struct dsi_data *dsi = dsi_get_dsidrv_data(pdev);
struct device_node *node = dsi->pdev->dev.of_node;
struct property *prop;
u32 lane_arr[10];
int len, num_pins;
......@@ -5207,7 +5109,7 @@ static int dsi_probe_of(struct platform_device *pdev)
prop = of_find_property(ep, "lanes", &len);
if (prop == NULL) {
dev_err(&pdev->dev, "failed to find lane data\n");
dev_err(&dsi->pdev->dev, "failed to find lane data\n");
r = -EINVAL;
goto err;
}
......@@ -5216,14 +5118,14 @@ static int dsi_probe_of(struct platform_device *pdev)
if (num_pins < 4 || num_pins % 2 != 0 ||
num_pins > dsi->num_lanes_supported * 2) {
dev_err(&pdev->dev, "bad number of lanes\n");
dev_err(&dsi->pdev->dev, "bad number of lanes\n");
r = -EINVAL;
goto err;
}
r = of_property_read_u32_array(ep, "lanes", lane_arr, num_pins);
if (r) {
dev_err(&pdev->dev, "failed to read lane data\n");
dev_err(&dsi->pdev->dev, "failed to read lane data\n");
goto err;
}
......@@ -5233,7 +5135,7 @@ static int dsi_probe_of(struct platform_device *pdev)
r = dsi_configure_pins(&dsi->output, &pin_cfg);
if (r) {
dev_err(&pdev->dev, "failed to configure pins");
dev_err(&dsi->pdev->dev, "failed to configure pins");
goto err;
}
......@@ -5333,15 +5235,13 @@ static const struct dss_pll_hw dss_omap5_dsi_pll_hw = {
.has_refsel = true,
};
static int dsi_init_pll_data(struct dss_device *dss,
struct platform_device *dsidev)
static int dsi_init_pll_data(struct dss_device *dss, struct dsi_data *dsi)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
struct dss_pll *pll = &dsi->pll;
struct clk *clk;
int r;
clk = devm_clk_get(&dsidev->dev, "sys_clk");
clk = devm_clk_get(&dsi->pdev->dev, "sys_clk");
if (IS_ERR(clk)) {
DSSERR("can't get sys_clk\n");
return PTR_ERR(clk);
......@@ -5487,7 +5387,7 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
}
r = devm_request_irq(&dsidev->dev, dsi->irq, omap_dsi_irq_handler,
IRQF_SHARED, dev_name(&dsidev->dev), dsi->pdev);
IRQF_SHARED, dev_name(&dsidev->dev), dsi);
if (r < 0) {
DSSERR("request_irq failed\n");
return r;
......@@ -5535,19 +5435,19 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
dsi->vc[i].vc_id = 0;
}
r = dsi_get_clocks(dsidev);
r = dsi_get_clocks(dsi);
if (r)
return r;
dsi_init_pll_data(dss, dsidev);
dsi_init_pll_data(dss, dsi);
pm_runtime_enable(&dsidev->dev);
r = dsi_runtime_get(dsidev);
r = dsi_runtime_get(dsi);
if (r)
goto err_runtime_get;
rev = dsi_read_reg(dsidev, DSI_REVISION);
rev = dsi_read_reg(dsi, DSI_REVISION);
dev_dbg(&dsidev->dev, "OMAP DSI rev %d.%d\n",
FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
......@@ -5555,15 +5455,15 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
* of data to 3 by default */
if (dsi->data->quirks & DSI_QUIRK_GNQ)
/* NB_DATA_LANES */
dsi->num_lanes_supported = 1 + REG_GET(dsidev, DSI_GNQ, 11, 9);
dsi->num_lanes_supported = 1 + REG_GET(dsi, DSI_GNQ, 11, 9);
else
dsi->num_lanes_supported = 3;
dsi->line_buffer_size = dsi_get_line_buf_size(dsidev);
dsi->line_buffer_size = dsi_get_line_buf_size(dsi);
dsi_init_output(dsidev);
dsi_init_output(dsi);
r = dsi_probe_of(dsidev);
r = dsi_probe_of(dsi);
if (r) {
DSSERR("Invalid DSI DT data\n");
goto err_probe_of;
......@@ -5573,7 +5473,7 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
if (r)
DSSERR("Failed to populate DSI child devices: %d\n", r);
dsi_runtime_put(dsidev);
dsi_runtime_put(dsi);
if (dsi->module_id == 0)
dsi->debugfs.regs = dss_debugfs_create_file(dss, "dsi1_regs",
......@@ -5597,8 +5497,8 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
return 0;
err_probe_of:
dsi_uninit_output(dsidev);
dsi_runtime_put(dsidev);
dsi_uninit_output(dsi);
dsi_runtime_put(dsi);
err_runtime_get:
pm_runtime_disable(&dsidev->dev);
......@@ -5619,7 +5519,7 @@ static void dsi_unbind(struct device *dev, struct device *master, void *data)
dss_pll_unregister(&dsi->pll);
dsi_uninit_output(dsidev);
dsi_uninit_output(dsi);
pm_runtime_disable(&dsidev->dev);
......
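
For reference, a minimal standalone sketch (not the driver code itself) of the calling convention this diff converges on: as the hunks show, each converted helper drops its local "struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);" line, so the drvdata lookup happens only in the omap_dss_device-facing entry points, and every internal helper takes the struct dsi_data pointer directly instead of the platform_device pointer. All types below are stubbed placeholders, and dsi_helper_old(), dsi_helper_new() and dsi_entry_point() are hypothetical names used only to illustrate the before/after shape of the calls.

#include <stddef.h>

struct platform_device { void *drvdata; };
struct omap_dss_device { struct platform_device *pdev; };
struct dsi_data { int debug_write; };

static struct dsi_data *dsi_get_dsidrv_data(struct platform_device *pdev)
{
        return pdev->drvdata;   /* stand-in for the real drvdata lookup */
}

/* Old style: every helper re-derived dsi_data from the platform_device. */
static int dsi_helper_old(struct platform_device *dsidev, int channel)
{
        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

        return dsi->debug_write + channel;
}

/* New style: the helper takes the dsi_data pointer directly. */
static int dsi_helper_new(struct dsi_data *dsi, int channel)
{
        return dsi->debug_write + channel;
}

/* Only the dssdev-facing entry point resolves dsi_data, exactly once. */
static int dsi_entry_point(struct omap_dss_device *dssdev, int channel)
{
        struct dsi_data *dsi = dsi_get_dsidrv_data(dssdev->pdev);

        (void)dsi_helper_old(dssdev->pdev, channel);  /* before this patch */
        return dsi_helper_new(dsi, channel);          /* after this patch */
}

int main(void)
{
        struct dsi_data data = { .debug_write = 0 };
        struct platform_device pdev = { .drvdata = &data };
        struct omap_dss_device dssdev = { .pdev = &pdev };

        return dsi_entry_point(&dssdev, 0);
}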