Commit c51f7167 authored by Dave Airlie

Merge tag 'drm-intel-next-2014-07-11' of git://anongit.freedesktop.org/drm-intel into drm-next

- fbc improvements when stolen memory is tight (Ben)
- cdclk handling improvements for vlv/chv (Ville)
- proper fix for stuck primary planes on gmch platforms with cxsr (Imre & Egbert
  Eich)
- gen8 hw semaphore support (Ben)
- more execlist prep work from Oscar Mateo
- locking fixes for primary planes (Matt Roper)
- code rework to support runtime pm for dpms on hsw/bdw (Paulo, Imre & me), but
  not yet enabled because some fixes from Paulo haven't made the cut
- more gpu boost tuning from Chris
- as usual piles of little things all over

* tag 'drm-intel-next-2014-07-11' of git://anongit.freedesktop.org/drm-intel: (93 commits)
  drm/i915: Make the RPS interrupt generation mask handle the vlv wa
  drm/i915: Move RPS evaluation interval counters to i915->rps
  drm/i915: Don't cast a pointer to void* unnecessarily
  drm/i915: don't read LVDS regs at compute_config time
  drm/i915: check the power domains in intel_lvds_get_hw_state()
  drm/i915: check the power domains in ironlake_get_pipe_config()
  drm/i915: don't skip shared DPLL assertion on LPT
  drm/i915: Only touch WRPLL hw state in enable/disable hooks
  drm/i915: Switch to common shared dpll framework for WRPLLs
  drm/i915: ->enable hook for WRPLLs
  drm/i915: ->disable hook for WRPLLs
  drm/i915: State readout support for WRPLLs
  drm/i915: add POWER_DOMAIN_PLLS
  drm/i915: Document that the pll->mode_set hook is optional
  drm/i915: Basic shared dpll support for WRPLLs
  drm/i915: Precompute static ddi_pll_sel values in encoders
  drm/i915: BDW also has special-purpose DP DDI clocks
  drm/i915: State readout and cross-checking for ddi_pll_sel
  drm/i915: Move ddi_pll_sel into the pipe config
  drm/i915: Add a debugfs file for the shared dpll state
  ...
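
Aside before the diff: this merge adds two debugfs files, i915_semaphore_status and i915_shared_dplls_info, which can be read like any other i915 debugfs entry. A minimal userspace sketch, assuming debugfs is mounted at /sys/kernel/debug and the device is DRM minor 0 (both are assumptions; adjust for your system):

	#include <stdio.h>

	int main(void)
	{
		/* Path is an assumption: debugfs mount point and minor vary. */
		FILE *f = fopen("/sys/kernel/debug/dri/0/i915_semaphore_status", "r");
		char line[256];

		if (!f) {
			perror("fopen");
			return 1;
		}
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);
		fclose(f);
		return 0;
	}
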
@@ -176,7 +176,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 static void describe_ctx(struct seq_file *m, struct intel_context *ctx)
 {
-	seq_putc(m, ctx->is_initialized ? 'I' : 'i');
+	seq_putc(m, ctx->legacy_hw_ctx.initialized ? 'I' : 'i');
 	seq_putc(m, ctx->remap_slice ? 'R' : 'r');
 	seq_putc(m, ' ');
 }
@@ -994,29 +994,6 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
 			i915_next_seqno_get, i915_next_seqno_set,
 			"0x%llx\n");
 
-static int i915_rstdby_delays(struct seq_file *m, void *unused)
-{
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u16 crstanddelay;
-	int ret;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-	intel_runtime_pm_get(dev_priv);
-
-	crstanddelay = I915_READ16(CRSTANDVID);
-
-	intel_runtime_pm_put(dev_priv);
-	mutex_unlock(&dev->struct_mutex);
-
-	seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));
-
-	return 0;
-}
-
 static int i915_frequency_info(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = m->private;
@@ -1158,61 +1135,6 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 	return ret;
 }
 
-static int i915_delayfreq_table(struct seq_file *m, void *unused)
-{
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 delayfreq;
-	int ret, i;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-	intel_runtime_pm_get(dev_priv);
-
-	for (i = 0; i < 16; i++) {
-		delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
-		seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
-			   (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
-	}
-
-	intel_runtime_pm_put(dev_priv);
-	mutex_unlock(&dev->struct_mutex);
-
-	return 0;
-}
-
-static inline int MAP_TO_MV(int map)
-{
-	return 1250 - (map * 25);
-}
-
-static int i915_inttoext_table(struct seq_file *m, void *unused)
-{
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 inttoext;
-	int ret, i;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-	intel_runtime_pm_get(dev_priv);
-
-	for (i = 1; i <= 32; i++) {
-		inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
-		seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
-	}
-
-	intel_runtime_pm_put(dev_priv);
-	mutex_unlock(&dev->struct_mutex);
-
-	return 0;
-}
-
 static int ironlake_drpc_info(struct seq_file *m)
 {
 	struct drm_info_node *node = m->private;
@@ -1523,10 +1445,17 @@ static int i915_ips_status(struct seq_file *m, void *unused)
 
 	intel_runtime_pm_get(dev_priv);
 
-	if (IS_BROADWELL(dev) || I915_READ(IPS_CTL) & IPS_ENABLE)
-		seq_puts(m, "enabled\n");
-	else
-		seq_puts(m, "disabled\n");
+	seq_printf(m, "Enabled by kernel parameter: %s\n",
+		   yesno(i915.enable_ips));
+
+	if (INTEL_INFO(dev)->gen >= 8) {
+		seq_puts(m, "Currently: unknown\n");
+	} else {
+		if (I915_READ(IPS_CTL) & IPS_ENABLE)
+			seq_puts(m, "Currently: enabled\n");
+		else
+			seq_puts(m, "Currently: disabled\n");
+	}
 
 	intel_runtime_pm_put(dev_priv);
 
@@ -1630,26 +1559,6 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
 	return ret;
 }
 
-static int i915_gfxec(struct seq_file *m, void *unused)
-{
-	struct drm_info_node *node = m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-	intel_runtime_pm_get(dev_priv);
-
-	seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));
-
-	intel_runtime_pm_put(dev_priv);
-	mutex_unlock(&dev->struct_mutex);
-
-	return 0;
-}
-
 static int i915_opregion(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = m->private;
@@ -1746,7 +1655,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
 	}
 
 	list_for_each_entry(ctx, &dev_priv->context_list, link) {
-		if (ctx->obj == NULL)
+		if (ctx->legacy_hw_ctx.rcs_state == NULL)
 			continue;
 
 		seq_puts(m, "HW context ");
@@ -1755,7 +1664,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
 		if (ring->default_context == ctx)
 			seq_printf(m, "(default context %s) ", ring->name);
 
-		describe_obj(m, ctx->obj);
+		describe_obj(m, ctx->legacy_hw_ctx.rcs_state);
 		seq_putc(m, '\n');
 	}
 
@@ -1869,7 +1778,7 @@ static int per_file_ctx(int id, void *ptr, void *data)
 	if (i915_gem_context_is_default(ctx))
 		seq_puts(m, "  default context:\n");
 	else
-		seq_printf(m, "  context %d:\n", ctx->id);
+		seq_printf(m, "  context %d:\n", ctx->user_handle);
 	ppgtt->debug_dump(ppgtt, m);
 
 	return 0;
@@ -2134,6 +2043,8 @@ static const char *power_domain_str(enum intel_display_power_domain domain)
 		return "VGA";
 	case POWER_DOMAIN_AUDIO:
 		return "AUDIO";
+	case POWER_DOMAIN_PLLS:
+		return "PLLS";
 	case POWER_DOMAIN_INIT:
 		return "INIT";
 	default:
@@ -2358,17 +2269,17 @@ static int i915_display_info(struct seq_file *m, void *unused)
 		bool active;
 		int x, y;
 
-		seq_printf(m, "CRTC %d: pipe: %c, active: %s\n",
+		seq_printf(m, "CRTC %d: pipe: %c, active=%s (size=%dx%d)\n",
 			   crtc->base.base.id, pipe_name(crtc->pipe),
-			   yesno(crtc->active));
+			   yesno(crtc->active), crtc->config.pipe_src_w, crtc->config.pipe_src_h);
 		if (crtc->active) {
 			intel_crtc_info(m, crtc);
 
 			active = cursor_position(dev, crtc->pipe, &x, &y);
-			seq_printf(m, "\tcursor visible? %s, position (%d, %d), addr 0x%08x, active? %s\n",
+			seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x, active? %s\n",
				   yesno(crtc->cursor_base),
-				   x, y, crtc->cursor_addr,
-				   yesno(active));
+				   x, y, crtc->cursor_width, crtc->cursor_height,
+				   crtc->cursor_addr, yesno(active));
 		}
 
 		seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
@@ -2388,6 +2299,104 @@ static int i915_display_info(struct seq_file *m, void *unused)
 	return 0;
 }
 
+static int i915_semaphore_status(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *ring;
+	int num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+	int i, j, ret;
+
+	if (!i915_semaphore_is_enabled(dev)) {
+		seq_puts(m, "Semaphores are disabled\n");
+		return 0;
+	}
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+	intel_runtime_pm_get(dev_priv);
+
+	if (IS_BROADWELL(dev)) {
+		struct page *page;
+		uint64_t *seqno;
+
+		page = i915_gem_object_get_page(dev_priv->semaphore_obj, 0);
+
+		seqno = (uint64_t *)kmap_atomic(page);
+		for_each_ring(ring, dev_priv, i) {
+			uint64_t offset;
+
+			seq_printf(m, "%s\n", ring->name);
+
+			seq_puts(m, " Last signal:");
+			for (j = 0; j < num_rings; j++) {
+				offset = i * I915_NUM_RINGS + j;
+				seq_printf(m, "0x%08llx (0x%02llx) ",
+					   seqno[offset], offset * 8);
+			}
+			seq_putc(m, '\n');
+
+			seq_puts(m, " Last wait: ");
+			for (j = 0; j < num_rings; j++) {
+				offset = i + (j * I915_NUM_RINGS);
+				seq_printf(m, "0x%08llx (0x%02llx) ",
+					   seqno[offset], offset * 8);
+			}
+			seq_putc(m, '\n');
+		}
+		kunmap_atomic(seqno);
+	} else {
+		seq_puts(m, " Last signal:");
+		for_each_ring(ring, dev_priv, i)
+			for (j = 0; j < num_rings; j++)
+				seq_printf(m, "0x%08x\n",
+					   I915_READ(ring->semaphore.mbox.signal[j]));
+		seq_putc(m, '\n');
+	}
+
+	seq_puts(m, "\nSync seqno:\n");
+	for_each_ring(ring, dev_priv, i) {
+		for (j = 0; j < num_rings; j++) {
+			seq_printf(m, " 0x%08x ", ring->semaphore.sync_seqno[j]);
+		}
+		seq_putc(m, '\n');
+	}
+	seq_putc(m, '\n');
+
+	intel_runtime_pm_put(dev_priv);
+	mutex_unlock(&dev->struct_mutex);
+	return 0;
+}
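
Aside: on Broadwell the dump above walks a single semaphore page as a matrix of 64-bit seqnos, indexed one way for "Last signal" and transposed for "Last wait". A standalone sketch of that offset arithmetic (the ring indices are illustrative; I915_NUM_RINGS is 5 in this kernel):

	#include <stdint.h>
	#include <stdio.h>

	#define I915_NUM_RINGS 5

	int main(void)
	{
		int i = 1, j = 3;	/* e.g. ring 1 as signaller, ring 3 as waiter */
		uint64_t signal = (uint64_t)i * I915_NUM_RINGS + j;
		uint64_t wait = (uint64_t)i + (uint64_t)j * I915_NUM_RINGS;

		/* Each slot holds a 64-bit seqno, hence the "offset * 8"
		 * byte addresses printed by the debugfs code. */
		printf("signal slot %llu -> byte 0x%02llx\n",
		       (unsigned long long)signal,
		       (unsigned long long)(signal * 8));
		printf("wait slot   %llu -> byte 0x%02llx\n",
		       (unsigned long long)wait,
		       (unsigned long long)(wait * 8));
		return 0;
	}
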
+
+static int i915_shared_dplls_info(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
+
+	drm_modeset_lock_all(dev);
+	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
+
+		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id);
+		seq_printf(m, " refcount: %i, active: %i, on: %s\n", pll->refcount,
+			   pll->active, yesno(pll->on));
+		seq_printf(m, " tracked hardware state:\n");
+		seq_printf(m, " dpll: 0x%08x\n", pll->hw_state.dpll);
+		seq_printf(m, " dpll_md: 0x%08x\n", pll->hw_state.dpll_md);
+		seq_printf(m, " fp0: 0x%08x\n", pll->hw_state.fp0);
+		seq_printf(m, " fp1: 0x%08x\n", pll->hw_state.fp1);
+		seq_printf(m, " wrpll: 0x%08x\n", pll->hw_state.wrpll);
+	}
+	drm_modeset_unlock_all(dev);
+
+	return 0;
+}
+
 struct pipe_crc_info {
 	const char *name;
 	struct drm_device *dev;
@@ -2860,7 +2869,60 @@ static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
 	return 0;
 }
 
-static int ivb_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
+static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *crtc =
+		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
+
+	drm_modeset_lock_all(dev);
+	/*
+	 * If we use the eDP transcoder we need to make sure that we don't
+	 * bypass the pfit, since otherwise the pipe CRC source won't work. Only
+	 * relevant on hsw with pipe A when using the always-on power well
+	 * routing.
+	 */
+	if (crtc->config.cpu_transcoder == TRANSCODER_EDP &&
+	    !crtc->config.pch_pfit.enabled) {
+		crtc->config.pch_pfit.force_thru = true;
+
+		intel_display_power_get(dev_priv,
+					POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A));
+
+		dev_priv->display.crtc_disable(&crtc->base);
+		dev_priv->display.crtc_enable(&crtc->base);
+	}
+	drm_modeset_unlock_all(dev);
+}
+
+static void hsw_undo_trans_edp_pipe_A_crc_wa(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *crtc =
+		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
+
+	drm_modeset_lock_all(dev);
+	/*
+	 * If we use the eDP transcoder we need to make sure that we don't
+	 * bypass the pfit, since otherwise the pipe CRC source won't work. Only
+	 * relevant on hsw with pipe A when using the always-on power well
+	 * routing.
+	 */
+	if (crtc->config.pch_pfit.force_thru) {
+		crtc->config.pch_pfit.force_thru = false;
+
+		dev_priv->display.crtc_disable(&crtc->base);
+		dev_priv->display.crtc_enable(&crtc->base);
+
+		intel_display_power_put(dev_priv,
+					POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A));
+	}
+	drm_modeset_unlock_all(dev);
+}
+
+static int ivb_pipe_crc_ctl_reg(struct drm_device *dev,
+				enum pipe pipe,
+				enum intel_pipe_crc_source *source,
 				uint32_t *val)
 {
 	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
@@ -2874,6 +2936,9 @@ static int ivb_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
 		break;
 	case INTEL_PIPE_CRC_SOURCE_PF:
+		if (IS_HASWELL(dev) && pipe == PIPE_A)
+			hsw_trans_edp_pipe_A_crc_wa(dev);
+
 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
 		break;
 	case INTEL_PIPE_CRC_SOURCE_NONE:
@@ -2906,11 +2971,11 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
 	else if (INTEL_INFO(dev)->gen < 5)
 		ret = i9xx_pipe_crc_ctl_reg(dev, pipe, &source, &val);
 	else if (IS_VALLEYVIEW(dev))
-		ret = vlv_pipe_crc_ctl_reg(dev,pipe, &source, &val);
+		ret = vlv_pipe_crc_ctl_reg(dev, pipe, &source, &val);
 	else if (IS_GEN5(dev) || IS_GEN6(dev))
 		ret = ilk_pipe_crc_ctl_reg(&source, &val);
 	else
-		ret = ivb_pipe_crc_ctl_reg(&source, &val);
+		ret = ivb_pipe_crc_ctl_reg(dev, pipe, &source, &val);
 
 	if (ret != 0)
 		return ret;
@@ -2962,6 +3027,8 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
 			g4x_undo_pipe_scramble_reset(dev, pipe);
 		else if (IS_VALLEYVIEW(dev))
 			vlv_undo_pipe_scramble_reset(dev, pipe);
+		else if (IS_HASWELL(dev) && pipe == PIPE_A)
+			hsw_undo_trans_edp_pipe_A_crc_wa(dev);
 	}
 
 	return 0;
@@ -3815,14 +3882,10 @@ static const struct drm_info_list i915_debugfs_list[] = {
 	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
 	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
 	{"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
-	{"i915_rstdby_delays", i915_rstdby_delays, 0},
 	{"i915_frequency_info", i915_frequency_info, 0},
-	{"i915_delayfreq_table", i915_delayfreq_table, 0},
-	{"i915_inttoext_table", i915_inttoext_table, 0},
 	{"i915_drpc_info", i915_drpc_info, 0},
 	{"i915_emon_status", i915_emon_status, 0},
 	{"i915_ring_freq_table", i915_ring_freq_table, 0},
-	{"i915_gfxec", i915_gfxec, 0},
 	{"i915_fbc_status", i915_fbc_status, 0},
 	{"i915_ips_status", i915_ips_status, 0},
 	{"i915_sr_status", i915_sr_status, 0},
@@ -3839,6 +3902,8 @@ static const struct drm_info_list i915_debugfs_list[] = {
 	{"i915_pc8_status", i915_pc8_status, 0},
 	{"i915_power_domain_info", i915_power_domain_info, 0},
 	{"i915_display_info", i915_display_info, 0},
+	{"i915_semaphore_status", i915_semaphore_status, 0},
+	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
 };
 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
......
@@ -1593,7 +1593,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	if (dev_priv == NULL)
 		return -ENOMEM;
 
-	dev->dev_private = (void *)dev_priv;
+	dev->dev_private = dev_priv;
 	dev_priv->dev = dev;
 
 	/* copy initial configuration to dev_priv->info */
@@ -1954,11 +1954,11 @@ void i915_driver_lastclose(struct drm_device *dev)
 	i915_dma_cleanup(dev);
 }
 
-void i915_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
+void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
 {
 	mutex_lock(&dev->struct_mutex);
-	i915_gem_context_close(dev, file_priv);
-	i915_gem_release(dev, file_priv);
+	i915_gem_context_close(dev, file);
+	i915_gem_release(dev, file);
 	mutex_unlock(&dev->struct_mutex);
 }
......
@@ -477,10 +477,6 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
 	if (i915.semaphores >= 0)
 		return i915.semaphores;
 
-	/* Until we get further testing... */
-	if (IS_GEN8(dev))
-		return false;
-
 #ifdef CONFIG_INTEL_IOMMU
 	/* Enable semaphores on SNB when IO remapping is off */
 	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
@@ -520,6 +516,8 @@ static int i915_drm_freeze(struct drm_device *dev)
 			return error;
 		}
 
+		flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 		intel_runtime_pm_disable_interrupts(dev);
 
 		intel_suspend_gt_powersave(dev);
@@ -541,10 +539,11 @@ static int i915_drm_freeze(struct drm_device *dev)
 	i915_save_state(dev);
 
-	if (acpi_target_system_state() >= ACPI_STATE_S3)
-		opregion_target_state = PCI_D3cold;
-	else
+	opregion_target_state = PCI_D3cold;
+#if IS_ENABLED(CONFIG_ACPI_SLEEP)
+	if (acpi_target_system_state() < ACPI_STATE_S3)
 		opregion_target_state = PCI_D1;
+#endif
 	intel_opregion_notify_adapter(dev, opregion_target_state);
 
 	intel_uncore_forcewake_reset(dev, false);
......
@@ -129,6 +129,7 @@ enum intel_display_power_domain {
 	POWER_DOMAIN_PORT_OTHER,
 	POWER_DOMAIN_VGA,
 	POWER_DOMAIN_AUDIO,
+	POWER_DOMAIN_PLLS,
 	POWER_DOMAIN_INIT,
 
 	POWER_DOMAIN_NUM,
@@ -184,8 +185,10 @@ struct i915_mmu_object;
 enum intel_dpll_id {
 	DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
 	/* real shared dpll ids must be >= 0 */
-	DPLL_ID_PCH_PLL_A,
-	DPLL_ID_PCH_PLL_B,
+	DPLL_ID_PCH_PLL_A = 0,
+	DPLL_ID_PCH_PLL_B = 1,
+	DPLL_ID_WRPLL1 = 0,
+	DPLL_ID_WRPLL2 = 1,
 };
 #define I915_NUM_PLLS 2
 
@@ -194,6 +197,7 @@ struct intel_dpll_hw_state {
 	uint32_t dpll_md;
 	uint32_t fp0;
 	uint32_t fp1;
+	uint32_t wrpll;
 };
 
 struct intel_shared_dpll {
@@ -204,6 +208,8 @@ struct intel_shared_dpll {
 	/* should match the index in the dev_priv->shared_dplls array */
 	enum intel_dpll_id id;
 	struct intel_dpll_hw_state hw_state;
+	/* The mode_set hook is optional and should be used together with the
+	 * intel_prepare_shared_dpll function. */
 	void (*mode_set)(struct drm_i915_private *dev_priv,
 			 struct intel_shared_dpll *pll);
 	void (*enable)(struct drm_i915_private *dev_priv,
@@ -228,12 +234,6 @@ void intel_link_compute_m_n(int bpp, int nlanes,
 			    int pixel_clock, int link_clock,
 			    struct intel_link_m_n *m_n);
 
-struct intel_ddi_plls {
-	int spll_refcount;
-	int wrpll1_refcount;
-	int wrpll2_refcount;
-};
-
 /* Interface history:
  *
  * 1.1: Original.
@@ -324,6 +324,7 @@ struct drm_i915_error_state {
 	u64 fence[I915_MAX_NUM_FENCES];
 	struct intel_overlay_error_state *overlay;
 	struct intel_display_error_state *display;
+	struct drm_i915_error_object *semaphore_obj;
 
 	struct drm_i915_error_ring {
 		bool valid;
@@ -584,27 +585,48 @@ struct i915_ctx_hang_stats {
 };
 
 /* This must match up with the value previously used for execbuf2.rsvd1. */
-#define DEFAULT_CONTEXT_ID 0
+#define DEFAULT_CONTEXT_HANDLE 0
+
+/**
+ * struct intel_context - as the name implies, represents a context.
+ * @ref: reference count.
+ * @user_handle: userspace tracking identity for this context.
+ * @remap_slice: l3 row remapping information.
+ * @file_priv: filp associated with this context (NULL for global default
+ *	       context).
+ * @hang_stats: information about the role of this context in possible GPU
+ *		hangs.
+ * @vm: virtual memory space used by this context.
+ * @legacy_hw_ctx: render context backing object and whether it is correctly
+ *		   initialized (legacy ring submission mechanism only).
+ * @link: link in the global list of contexts.
+ *
+ * Contexts are memory images used by the hardware to store copies of their
+ * internal state.
+ */
 struct intel_context {
 	struct kref ref;
-	int id;
-	bool is_initialized;
+	int user_handle;
 	uint8_t remap_slice;
 	struct drm_i915_file_private *file_priv;
-	struct drm_i915_gem_object *obj;
 	struct i915_ctx_hang_stats hang_stats;
 	struct i915_address_space *vm;
+
+	struct {
+		struct drm_i915_gem_object *rcs_state;
+		bool initialized;
+	} legacy_hw_ctx;
+
 	struct list_head link;
 };
 
 struct i915_fbc {
 	unsigned long size;
+	unsigned threshold;
 	unsigned int fb_id;
 	enum plane plane;
 	int y;
 
-	struct drm_mm_node *compressed_fb;
+	struct drm_mm_node compressed_fb;
 	struct drm_mm_node *compressed_llb;
 
 	struct intel_fbc_work {
@@ -880,6 +902,12 @@ struct vlv_s0ix_state {
 	u32 clock_gate_dis2;
 };
 
+struct intel_rps_ei {
+	u32 cz_clock;
+	u32 render_c0;
+	u32 media_c0;
+};
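
Aside: the two intel_rps_ei snapshots added below (up_ei/down_ei) exist so the RPS workaround can compute C0 residency over an evaluation interval by differencing counters. A hypothetical sketch of that kind of calculation (the field usage and values are assumptions for illustration; the real logic lives in the RPS interrupt path, not here):

	#include <stdint.h>
	#include <stdio.h>

	struct intel_rps_ei {
		uint32_t cz_clock;
		uint32_t render_c0;
		uint32_t media_c0;
	};

	/* Unsigned subtraction handles counter wrap-around. */
	static unsigned int render_c0_pct(const struct intel_rps_ei *prev,
					  const struct intel_rps_ei *now)
	{
		uint32_t time = now->cz_clock - prev->cz_clock;
		uint32_t busy = now->render_c0 - prev->render_c0;

		return time ? (unsigned int)(100ull * busy / time) : 0;
	}

	int main(void)
	{
		struct intel_rps_ei a = { 1000, 200, 50 };
		struct intel_rps_ei b = { 2000, 900, 60 };

		printf("render C0 residency: %u%%\n", render_c0_pct(&a, &b));
		return 0;
	}
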
+
 struct intel_gen6_power_mgmt {
 	/* work and pm_iir are protected by dev_priv->irq_lock */
 	struct work_struct work;
@@ -904,12 +932,17 @@ struct intel_gen6_power_mgmt {
 	u8 rp1_freq;		/* "less than" RP0 power/freqency */
 	u8 rp0_freq;		/* Non-overclocked max frequency. */
 
+	u32 ei_interrupt_count;
+
 	int last_adj;
 	enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
 
 	bool enabled;
 	struct delayed_work delayed_resume_work;
 
+	/* manual wa residency calculations */
+	struct intel_rps_ei up_ei, down_ei;
+
 	/*
	 * Protects RPS/RC6 register access and PCU communication.
	 * Must be taken after struct_mutex if nested.
@@ -1374,6 +1407,7 @@ struct drm_i915_private {
 	struct pci_dev *bridge_dev;
 	struct intel_engine_cs ring[I915_NUM_RINGS];
+	struct drm_i915_gem_object *semaphore_obj;
 	uint32_t last_seqno, next_seqno;
 
 	drm_dma_handle_t *status_page_dmah;
@@ -1480,7 +1514,6 @@ struct drm_i915_private {
 
 	int num_shared_dpll;
 	struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
-	struct intel_ddi_plls ddi_plls;
 	int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
 
 	/* Reclocking support */
@@ -1557,6 +1590,11 @@ struct drm_i915_private {
 
 	struct i915_runtime_pm pm;
 
+	struct intel_digital_port *hpd_irq_port[I915_MAX_PORTS];
+	u32 long_hpd_port_mask;
+	u32 short_hpd_port_mask;
+	struct work_struct dig_port_work;
+
 	/* Old dri1 support infrastructure, beware the dragons ya fools entering
 	 * here! */
 	struct i915_dri1_state dri1;
@@ -2097,12 +2135,12 @@ void i915_update_dri1_breadcrumb(struct drm_device *dev);
 extern void i915_kernel_lost_context(struct drm_device * dev);
 extern int i915_driver_load(struct drm_device *, unsigned long flags);
 extern int i915_driver_unload(struct drm_device *);
-extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv);
+extern int i915_driver_open(struct drm_device *dev, struct drm_file *file);
 extern void i915_driver_lastclose(struct drm_device * dev);
 extern void i915_driver_preclose(struct drm_device *dev,
-				 struct drm_file *file_priv);
+				 struct drm_file *file);
 extern void i915_driver_postclose(struct drm_device *dev,
-				  struct drm_file *file_priv);
+				  struct drm_file *file);
 extern int i915_driver_device_is_agp(struct drm_device * dev);
 #ifdef CONFIG_COMPAT
 extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
@@ -2457,7 +2495,7 @@ static inline void i915_gem_context_unreference(struct intel_context *ctx)
 
 static inline bool i915_gem_context_is_default(const struct intel_context *c)
 {
-	return c->id == DEFAULT_CONTEXT_ID;
+	return c->user_handle == DEFAULT_CONTEXT_HANDLE;
 }
 
 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
@@ -2488,7 +2526,7 @@ static inline void i915_gem_chipset_flush(struct drm_device *dev)
 
 /* i915_gem_stolen.c */
 int i915_gem_init_stolen(struct drm_device *dev);
-int i915_gem_stolen_setup_compression(struct drm_device *dev, int size);
+int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_cpp);
 void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
 void i915_gem_cleanup_stolen(struct drm_device *dev);
 struct drm_i915_gem_object *
@@ -2647,6 +2685,8 @@ extern void gen6_set_rps(struct drm_device *dev, u8 val);
 extern void valleyview_set_rps(struct drm_device *dev, u8 val);
 extern int valleyview_rps_max_freq(struct drm_i915_private *dev_priv);
 extern int valleyview_rps_min_freq(struct drm_i915_private *dev_priv);
+extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
+				  bool enable);
 extern void intel_detect_pch(struct drm_device *dev);
 extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
 extern int intel_enable_rc6(const struct drm_device *dev);
......
@@ -1168,7 +1168,7 @@ static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
 	timeout_expire = timeout ? jiffies + timespec_to_jiffies_timeout(timeout) : 0;
 
-	if (INTEL_INFO(dev)->gen >= 6 && can_wait_boost(file_priv)) {
+	if (INTEL_INFO(dev)->gen >= 6 && ring->id == RCS && can_wait_boost(file_priv)) {
 		gen6_rps_boost(dev_priv);
 		if (file_priv)
 			mod_delayed_work(dev_priv->wq,
@@ -2330,7 +2330,7 @@ int __i915_add_request(struct intel_engine_cs *ring,
 	u32 request_ring_position, request_start;
 	int ret;
 
-	request_start = intel_ring_get_tail(ring);
+	request_start = intel_ring_get_tail(ring->buffer);
 	/*
	 * Emit any outstanding flushes - execbuf can fail to emit the flush
	 * after having emitted the batchbuffer command. Hence we need to fix
@@ -2351,7 +2351,7 @@ int __i915_add_request(struct intel_engine_cs *ring,
	 * GPU processing the request, we never over-estimate the
	 * position of the head.
	 */
-	request_ring_position = intel_ring_get_tail(ring);
+	request_ring_position = intel_ring_get_tail(ring->buffer);
 
 	ret = ring->add_request(ring);
 	if (ret)
@@ -2842,6 +2842,8 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
 	idx = intel_ring_sync_index(from, to);
 
 	seqno = obj->last_read_seqno;
+	/* Optimization: Avoid semaphore sync when we are sure we already
+	 * waited for an object with higher seqno */
 	if (seqno <= from->semaphore.sync_seqno[idx])
 		return 0;
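
Aside: the new comment above documents why i915_gem_object_sync can bail out early. A minimal standalone sketch of that seqno comparison, with made-up values:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t obj_last_read_seqno = 100;	/* obj->last_read_seqno */
		uint32_t synced_up_to = 120;	/* from->semaphore.sync_seqno[idx] */

		/* If this ring pair already waited past the object's seqno,
		 * emitting another semaphore wait would be redundant. */
		if (obj_last_read_seqno <= synced_up_to)
			printf("skip semaphore sync\n");
		else
			printf("emit semaphore wait\n");
		return 0;
	}
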
......
@@ -182,14 +182,14 @@ void i915_gem_context_free(struct kref *ctx_ref)
 						   typeof(*ctx), ref);
 	struct i915_hw_ppgtt *ppgtt = NULL;
 
-	if (ctx->obj) {
+	if (ctx->legacy_hw_ctx.rcs_state) {
 		/* We refcount even the aliasing PPGTT to keep the code symmetric */
-		if (USES_PPGTT(ctx->obj->base.dev))
+		if (USES_PPGTT(ctx->legacy_hw_ctx.rcs_state->base.dev))
 			ppgtt = ctx_to_ppgtt(ctx);
 
 		/* XXX: Free up the object before tearing down the address space, in
		 * case we're bound in the PPGTT */
-		drm_gem_object_unreference(&ctx->obj->base);
+		drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base);
 	}
 
 	if (ppgtt)
@@ -198,6 +198,36 @@ void i915_gem_context_free(struct kref *ctx_ref)
 	kfree(ctx);
 }
 
+static struct drm_i915_gem_object *
+i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
+{
+	struct drm_i915_gem_object *obj;
+	int ret;
+
+	obj = i915_gem_alloc_object(dev, size);
+	if (obj == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	/*
+	 * Try to make the context utilize L3 as well as LLC.
+	 *
+	 * On VLV we don't have L3 controls in the PTEs so we
+	 * shouldn't touch the cache level, especially as that
+	 * would make the object snooped which might have a
+	 * negative performance impact.
+	 */
+	if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev)) {
+		ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
+		/* Failure shouldn't ever happen this early */
+		if (WARN_ON(ret)) {
+			drm_gem_object_unreference(&obj->base);
+			return ERR_PTR(ret);
+		}
+	}
+
+	return obj;
+}
+
 static struct i915_hw_ppgtt *
 create_vm_for_ctx(struct drm_device *dev, struct intel_context *ctx)
 {
@@ -234,40 +264,26 @@ __create_hw_context(struct drm_device *dev,
 	list_add_tail(&ctx->link, &dev_priv->context_list);
 
 	if (dev_priv->hw_context_size) {
-		ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size);
-		if (ctx->obj == NULL) {
-			ret = -ENOMEM;
+		struct drm_i915_gem_object *obj =
+				i915_gem_alloc_context_obj(dev, dev_priv->hw_context_size);
+		if (IS_ERR(obj)) {
+			ret = PTR_ERR(obj);
 			goto err_out;
 		}
-
-		/*
-		 * Try to make the context utilize L3 as well as LLC.
-		 *
-		 * On VLV we don't have L3 controls in the PTEs so we
-		 * shouldn't touch the cache level, especially as that
-		 * would make the object snooped which might have a
-		 * negative performance impact.
-		 */
-		if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev)) {
-			ret = i915_gem_object_set_cache_level(ctx->obj,
-							      I915_CACHE_L3_LLC);
-			/* Failure shouldn't ever happen this early */
-			if (WARN_ON(ret))
-				goto err_out;
-		}
+		ctx->legacy_hw_ctx.rcs_state = obj;
 	}
 
 	/* Default context will never have a file_priv */
 	if (file_priv != NULL) {
 		ret = idr_alloc(&file_priv->context_idr, ctx,
-				DEFAULT_CONTEXT_ID, 0, GFP_KERNEL);
+				DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
 		if (ret < 0)
 			goto err_out;
 	} else
-		ret = DEFAULT_CONTEXT_ID;
+		ret = DEFAULT_CONTEXT_HANDLE;
 
 	ctx->file_priv = file_priv;
-	ctx->id = ret;
+	ctx->user_handle = ret;
 
 	/* NB: Mark all slices as needing a remap so that when the context first
	 * loads it will restore whatever remap state already exists. If there
	 * is no remap info, it will be a NOP. */
@@ -301,7 +317,7 @@ i915_gem_create_context(struct drm_device *dev,
 	if (IS_ERR(ctx))
 		return ctx;
 
-	if (is_global_default_ctx && ctx->obj) {
+	if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state) {
 		/* We may need to do things with the shrinker which
		 * require us to immediately switch back to the default
		 * context. This can cause a problem as pinning the
@@ -309,7 +325,7 @@ i915_gem_create_context(struct drm_device *dev,
		 * be available. To avoid this we always pin the default
		 * context.
		 */
-		ret = i915_gem_obj_ggtt_pin(ctx->obj,
+		ret = i915_gem_obj_ggtt_pin(ctx->legacy_hw_ctx.rcs_state,
 					    get_context_alignment(dev), 0);
 		if (ret) {
 			DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
@@ -349,8 +365,8 @@ i915_gem_create_context(struct drm_device *dev,
 	return ctx;
 
 err_unpin:
-	if (is_global_default_ctx && ctx->obj)
-		i915_gem_object_ggtt_unpin(ctx->obj);
+	if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state)
+		i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
 err_destroy:
 	i915_gem_context_unreference(ctx);
 	return ERR_PTR(ret);
@@ -366,23 +382,27 @@ void i915_gem_context_reset(struct drm_device *dev)
 	for (i = 0; i < I915_NUM_RINGS; i++) {
 		struct intel_engine_cs *ring = &dev_priv->ring[i];
 		struct intel_context *dctx = ring->default_context;
+		struct intel_context *lctx = ring->last_context;
 
 		/* Do a fake switch to the default context */
-		if (ring->last_context == dctx)
+		if (lctx == dctx)
 			continue;
 
-		if (!ring->last_context)
+		if (!lctx)
 			continue;
 
-		if (dctx->obj && i == RCS) {
-			WARN_ON(i915_gem_obj_ggtt_pin(dctx->obj,
+		if (dctx->legacy_hw_ctx.rcs_state && i == RCS) {
+			WARN_ON(i915_gem_obj_ggtt_pin(dctx->legacy_hw_ctx.rcs_state,
 						      get_context_alignment(dev), 0));
 			/* Fake a finish/inactive */
-			dctx->obj->base.write_domain = 0;
-			dctx->obj->active = 0;
+			dctx->legacy_hw_ctx.rcs_state->base.write_domain = 0;
+			dctx->legacy_hw_ctx.rcs_state->active = 0;
 		}
 
-		i915_gem_context_unreference(ring->last_context);
+		if (lctx->legacy_hw_ctx.rcs_state && i == RCS)
+			i915_gem_object_ggtt_unpin(lctx->legacy_hw_ctx.rcs_state);
+
+		i915_gem_context_unreference(lctx);
 		i915_gem_context_reference(dctx);
 		ring->last_context = dctx;
 	}
@@ -429,7 +449,7 @@ void i915_gem_context_fini(struct drm_device *dev)
 	struct intel_context *dctx = dev_priv->ring[RCS].default_context;
 	int i;
 
-	if (dctx->obj) {
+	if (dctx->legacy_hw_ctx.rcs_state) {
 		/* The only known way to stop the gpu from accessing the hw context is
		 * to reset it. Do this as the very last operation to avoid confusing
		 * other code, leading to spurious errors. */
@@ -444,13 +464,13 @@ void i915_gem_context_fini(struct drm_device *dev)
 		WARN_ON(!dev_priv->ring[RCS].last_context);
 		if (dev_priv->ring[RCS].last_context == dctx) {
 			/* Fake switch to NULL context */
-			WARN_ON(dctx->obj->active);
-			i915_gem_object_ggtt_unpin(dctx->obj);
+			WARN_ON(dctx->legacy_hw_ctx.rcs_state->active);
+			i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
 			i915_gem_context_unreference(dctx);
 			dev_priv->ring[RCS].last_context = NULL;
 		}
 
-		i915_gem_object_ggtt_unpin(dctx->obj);
+		i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
 	}
 
 	for (i = 0; i < I915_NUM_RINGS; i++) {
@@ -570,7 +590,7 @@ mi_set_context(struct intel_engine_cs *ring,
 	intel_ring_emit(ring, MI_NOOP);
 	intel_ring_emit(ring, MI_SET_CONTEXT);
-	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->obj) |
+	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->legacy_hw_ctx.rcs_state) |
 			MI_MM_SPACE_GTT |
 			MI_SAVE_EXT_STATE_EN |
 			MI_RESTORE_EXT_STATE_EN |
@@ -602,8 +622,8 @@ static int do_switch(struct intel_engine_cs *ring,
 	int ret, i;
 
 	if (from != NULL && ring == &dev_priv->ring[RCS]) {
-		BUG_ON(from->obj == NULL);
-		BUG_ON(!i915_gem_obj_is_pinned(from->obj));
+		BUG_ON(from->legacy_hw_ctx.rcs_state == NULL);
+		BUG_ON(!i915_gem_obj_is_pinned(from->legacy_hw_ctx.rcs_state));
 	}
 
 	if (from == to && !to->remap_slice)
@@ -611,7 +631,7 @@ static int do_switch(struct intel_engine_cs *ring,
 
 	/* Trying to pin first makes error handling easier. */
 	if (ring == &dev_priv->ring[RCS]) {
-		ret = i915_gem_obj_ggtt_pin(to->obj,
+		ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
 					    get_context_alignment(ring->dev), 0);
 		if (ret)
 			return ret;
@@ -644,17 +664,17 @@ static int do_switch(struct intel_engine_cs *ring,
	 *
	 * XXX: We need a real interface to do this instead of trickery.
	 */
-	ret = i915_gem_object_set_to_gtt_domain(to->obj, false);
+	ret = i915_gem_object_set_to_gtt_domain(to->legacy_hw_ctx.rcs_state, false);
 	if (ret)
 		goto unpin_out;
 
-	if (!to->obj->has_global_gtt_mapping) {
-		struct i915_vma *vma = i915_gem_obj_to_vma(to->obj,
-							   &dev_priv->gtt.base);
-		vma->bind_vma(vma, to->obj->cache_level, GLOBAL_BIND);
+	if (!to->legacy_hw_ctx.rcs_state->has_global_gtt_mapping) {
+		struct i915_vma *vma = i915_gem_obj_to_vma(to->legacy_hw_ctx.rcs_state,
+							   &dev_priv->gtt.base);
+		vma->bind_vma(vma, to->legacy_hw_ctx.rcs_state->cache_level, GLOBAL_BIND);
 	}
 
-	if (!to->is_initialized || i915_gem_context_is_default(to))
+	if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to))
 		hw_flags |= MI_RESTORE_INHIBIT;
 
 	ret = mi_set_context(ring, to, hw_flags);
@@ -680,8 +700,8 @@ static int do_switch(struct intel_engine_cs *ring,
	 * MI_SET_CONTEXT instead of when the next seqno has completed.
	 */
 	if (from != NULL) {
-		from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
-		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->obj), ring);
+		from->legacy_hw_ctx.rcs_state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
+		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), ring);
 		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
		 * whole damn pipeline, we don't need to explicitly mark the
		 * object dirty. The only exception is that the context must be
@@ -689,16 +709,16 @@ static int do_switch(struct intel_engine_cs *ring,
		 * able to defer doing this until we know the object would be
		 * swapped, but there is no way to do that yet.
		 */
-		from->obj->dirty = 1;
-		BUG_ON(from->obj->ring != ring);
+		from->legacy_hw_ctx.rcs_state->dirty = 1;
+		BUG_ON(from->legacy_hw_ctx.rcs_state->ring != ring);
 
 		/* obj is kept alive until the next request by its active ref */
-		i915_gem_object_ggtt_unpin(from->obj);
+		i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
 		i915_gem_context_unreference(from);
 	}
 
-	uninitialized = !to->is_initialized && from == NULL;
-	to->is_initialized = true;
+	uninitialized = !to->legacy_hw_ctx.initialized && from == NULL;
+	to->legacy_hw_ctx.initialized = true;
 
 done:
 	i915_gem_context_reference(to);
@@ -714,7 +734,7 @@ static int do_switch(struct intel_engine_cs *ring,
 
 unpin_out:
 	if (ring->id == RCS)
-		i915_gem_object_ggtt_unpin(to->obj);
+		i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state);
 	return ret;
 }
 
@@ -735,7 +755,7 @@ int i915_switch_context(struct intel_engine_cs *ring,
 
 	WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
 
-	if (to->obj == NULL) { /* We have the fake context */
+	if (to->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
 		if (to != ring->last_context) {
 			i915_gem_context_reference(to);
 			if (ring->last_context)
@@ -773,7 +793,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
 
-	args->ctx_id = ctx->id;
+	args->ctx_id = ctx->user_handle;
 	DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id);
 
 	return 0;
@@ -787,7 +807,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
 	struct intel_context *ctx;
 	int ret;
 
-	if (args->ctx_id == DEFAULT_CONTEXT_ID)
+	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
 		return -ENOENT;
 
 	ret = i915_mutex_lock_interruptible(dev);
@@ -800,7 +820,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
 		return PTR_ERR(ctx);
 	}
 
-	idr_remove(&ctx->file_priv->context_idr, ctx->id);
+	idr_remove(&ctx->file_priv->context_idr, ctx->user_handle);
 	i915_gem_context_unreference(ctx);
 	mutex_unlock(&dev->struct_mutex);
......
...@@ -938,7 +938,7 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file, ...@@ -938,7 +938,7 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
struct intel_context *ctx = NULL; struct intel_context *ctx = NULL;
struct i915_ctx_hang_stats *hs; struct i915_ctx_hang_stats *hs;
if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_ID) if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
ctx = i915_gem_context_get(file->driver_priv, ctx_id); ctx = i915_gem_context_get(file->driver_priv, ctx_id);
...@@ -1026,6 +1026,163 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev, ...@@ -1026,6 +1026,163 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
return 0; return 0;
} }
static int
legacy_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
struct intel_engine_cs *ring,
struct intel_context *ctx,
struct drm_i915_gem_execbuffer2 *args,
struct list_head *vmas,
struct drm_i915_gem_object *batch_obj,
u64 exec_start, u32 flags)
{
struct drm_clip_rect *cliprects = NULL;
struct drm_i915_private *dev_priv = dev->dev_private;
u64 exec_len;
int instp_mode;
u32 instp_mask;
int i, ret = 0;
if (args->num_cliprects != 0) {
if (ring != &dev_priv->ring[RCS]) {
DRM_DEBUG("clip rectangles are only valid with the render ring\n");
return -EINVAL;
}
if (INTEL_INFO(dev)->gen >= 5) {
DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
return -EINVAL;
}
if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
DRM_DEBUG("execbuf with %u cliprects\n",
args->num_cliprects);
return -EINVAL;
}
cliprects = kcalloc(args->num_cliprects,
sizeof(*cliprects),
GFP_KERNEL);
if (cliprects == NULL) {
ret = -ENOMEM;
goto error;
}
if (copy_from_user(cliprects,
to_user_ptr(args->cliprects_ptr),
sizeof(*cliprects)*args->num_cliprects)) {
ret = -EFAULT;
goto error;
}
} else {
if (args->DR4 == 0xffffffff) {
DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
args->DR4 = 0;
}
if (args->DR1 || args->DR4 || args->cliprects_ptr) {
DRM_DEBUG("0 cliprects but dirt in cliprects fields\n");
return -EINVAL;
}
}
ret = i915_gem_execbuffer_move_to_gpu(ring, vmas);
if (ret)
goto error;
ret = i915_switch_context(ring, ctx);
if (ret)
goto error;
instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
instp_mask = I915_EXEC_CONSTANTS_MASK;
switch (instp_mode) {
case I915_EXEC_CONSTANTS_REL_GENERAL:
case I915_EXEC_CONSTANTS_ABSOLUTE:
case I915_EXEC_CONSTANTS_REL_SURFACE:
if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
ret = -EINVAL;
goto error;
}
if (instp_mode != dev_priv->relative_constants_mode) {
if (INTEL_INFO(dev)->gen < 4) {
DRM_DEBUG("no rel constants on pre-gen4\n");
ret = -EINVAL;
goto error;
}
if (INTEL_INFO(dev)->gen > 5 &&
instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
ret = -EINVAL;
goto error;
}
/* The HW changed the meaning on this bit on gen6 */
if (INTEL_INFO(dev)->gen >= 6)
instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
}
break;
default:
DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
ret = -EINVAL;
goto error;
}
if (ring == &dev_priv->ring[RCS] &&
instp_mode != dev_priv->relative_constants_mode) {
ret = intel_ring_begin(ring, 4);
if (ret)
goto error;
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, INSTPM);
intel_ring_emit(ring, instp_mask << 16 | instp_mode);
intel_ring_advance(ring);
dev_priv->relative_constants_mode = instp_mode;
}
if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
ret = i915_reset_gen7_sol_offsets(dev, ring);
if (ret)
goto error;
}
exec_len = args->batch_len;
if (cliprects) {
for (i = 0; i < args->num_cliprects; i++) {
ret = i915_emit_box(dev, &cliprects[i],
args->DR1, args->DR4);
if (ret)
goto error;
ret = ring->dispatch_execbuffer(ring,
exec_start, exec_len,
flags);
if (ret)
goto error;
}
} else {
ret = ring->dispatch_execbuffer(ring,
exec_start, exec_len,
flags);
if (ret)
return ret;
}
trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
i915_gem_execbuffer_move_to_active(vmas, ring);
i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
error:
kfree(cliprects);
return ret;
}
/** /**
* Find one BSD ring to dispatch the corresponding BSD command. * Find one BSD ring to dispatch the corresponding BSD command.
* The Ring ID is returned. * The Ring ID is returned.
...@@ -1085,14 +1242,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, ...@@ -1085,14 +1242,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct eb_vmas *eb; struct eb_vmas *eb;
struct drm_i915_gem_object *batch_obj; struct drm_i915_gem_object *batch_obj;
struct drm_clip_rect *cliprects = NULL;
struct intel_engine_cs *ring; struct intel_engine_cs *ring;
struct intel_context *ctx; struct intel_context *ctx;
struct i915_address_space *vm; struct i915_address_space *vm;
const u32 ctx_id = i915_execbuffer2_get_context_id(*args); const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
u64 exec_start = args->batch_start_offset, exec_len; u64 exec_start = args->batch_start_offset;
u32 mask, flags; u32 flags;
int ret, mode, i; int ret;
bool need_relocs; bool need_relocs;
if (!i915_gem_check_execbuffer(args)) if (!i915_gem_check_execbuffer(args))
...@@ -1136,87 +1292,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, ...@@ -1136,87 +1292,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
return -EINVAL; return -EINVAL;
} }
mode = args->flags & I915_EXEC_CONSTANTS_MASK;
mask = I915_EXEC_CONSTANTS_MASK;
switch (mode) {
case I915_EXEC_CONSTANTS_REL_GENERAL:
case I915_EXEC_CONSTANTS_ABSOLUTE:
case I915_EXEC_CONSTANTS_REL_SURFACE:
if (mode != 0 && ring != &dev_priv->ring[RCS]) {
DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
return -EINVAL;
}
if (mode != dev_priv->relative_constants_mode) {
if (INTEL_INFO(dev)->gen < 4) {
DRM_DEBUG("no rel constants on pre-gen4\n");
return -EINVAL;
}
if (INTEL_INFO(dev)->gen > 5 &&
mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
return -EINVAL;
}
/* The HW changed the meaning of this bit on gen6 */
if (INTEL_INFO(dev)->gen >= 6)
mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
}
break;
default:
DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
return -EINVAL;
}
if (args->buffer_count < 1) { if (args->buffer_count < 1) {
DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count); DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
return -EINVAL; return -EINVAL;
} }
if (args->num_cliprects != 0) {
if (ring != &dev_priv->ring[RCS]) {
DRM_DEBUG("clip rectangles are only valid with the render ring\n");
return -EINVAL;
}
if (INTEL_INFO(dev)->gen >= 5) {
DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
return -EINVAL;
}
if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
DRM_DEBUG("execbuf with %u cliprects\n",
args->num_cliprects);
return -EINVAL;
}
cliprects = kcalloc(args->num_cliprects,
sizeof(*cliprects),
GFP_KERNEL);
if (cliprects == NULL) {
ret = -ENOMEM;
goto pre_mutex_err;
}
if (copy_from_user(cliprects,
to_user_ptr(args->cliprects_ptr),
sizeof(*cliprects)*args->num_cliprects)) {
ret = -EFAULT;
goto pre_mutex_err;
}
} else {
if (args->DR4 == 0xffffffff) {
DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
args->DR4 = 0;
}
if (args->DR1 || args->DR4 || args->cliprects_ptr) {
DRM_DEBUG("0 cliprects but dirt in cliprects fields\n");
return -EINVAL;
}
}
intel_runtime_pm_get(dev_priv); intel_runtime_pm_get(dev_priv);
ret = i915_mutex_lock_interruptible(dev); ret = i915_mutex_lock_interruptible(dev);
...@@ -1320,63 +1400,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, ...@@ -1320,63 +1400,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
else else
exec_start += i915_gem_obj_offset(batch_obj, vm); exec_start += i915_gem_obj_offset(batch_obj, vm);
ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->vmas); ret = legacy_ringbuffer_submission(dev, file, ring, ctx,
args, &eb->vmas, batch_obj, exec_start, flags);
if (ret) if (ret)
goto err; goto err;
ret = i915_switch_context(ring, ctx);
if (ret)
goto err;
if (ring == &dev_priv->ring[RCS] &&
mode != dev_priv->relative_constants_mode) {
ret = intel_ring_begin(ring, 4);
if (ret)
goto err;
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, INSTPM);
intel_ring_emit(ring, mask << 16 | mode);
intel_ring_advance(ring);
dev_priv->relative_constants_mode = mode;
}
if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
ret = i915_reset_gen7_sol_offsets(dev, ring);
if (ret)
goto err;
}
exec_len = args->batch_len;
if (cliprects) {
for (i = 0; i < args->num_cliprects; i++) {
ret = i915_emit_box(dev, &cliprects[i],
args->DR1, args->DR4);
if (ret)
goto err;
ret = ring->dispatch_execbuffer(ring,
exec_start, exec_len,
flags);
if (ret)
goto err;
}
} else {
ret = ring->dispatch_execbuffer(ring,
exec_start, exec_len,
flags);
if (ret)
goto err;
}
trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
i915_gem_execbuffer_move_to_active(&eb->vmas, ring);
i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
err: err:
/* the request owns the ref now */ /* the request owns the ref now */
i915_gem_context_unreference(ctx); i915_gem_context_unreference(ctx);
...@@ -1385,8 +1413,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, ...@@ -1385,8 +1413,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
pre_mutex_err: pre_mutex_err:
kfree(cliprects);
/* intel_gpu_busy should also get a ref, so it will free when the device /* intel_gpu_busy should also get a ref, so it will free when the device
* is really idle. */ * is really idle. */
intel_runtime_pm_put(dev_priv); intel_runtime_pm_put(dev_priv);
......
...@@ -103,30 +103,68 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev) ...@@ -103,30 +103,68 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
return base; return base;
} }
static int i915_setup_compression(struct drm_device *dev, int size) static int find_compression_threshold(struct drm_device *dev,
struct drm_mm_node *node,
int size,
int fb_cpp)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb); int compression_threshold = 1;
int ret; int ret;
compressed_fb = kzalloc(sizeof(*compressed_fb), GFP_KERNEL); /* HACK: This code depends on what we will do in *_enable_fbc. If that
if (!compressed_fb) * code changes, this code needs to change as well.
goto err_llb; *
* The enable_fbc code will attempt to use one of our 2 compression
* thresholds, so in that case we are left with only one fallback.
*/
/* Try to over-allocate to reduce reallocations and fragmentation */ /* Try to over-allocate to reduce reallocations and fragmentation. */
ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_fb, ret = drm_mm_insert_node(&dev_priv->mm.stolen, node,
size <<= 1, 4096, DRM_MM_SEARCH_DEFAULT); size <<= 1, 4096, DRM_MM_SEARCH_DEFAULT);
if (ret) if (ret == 0)
ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_fb, return compression_threshold;
size >>= 1, 4096,
DRM_MM_SEARCH_DEFAULT); again:
if (ret) /* HW's ability to limit the CFB is 1:4 */
if (compression_threshold > 4 ||
(fb_cpp == 2 && compression_threshold == 2))
return 0;
ret = drm_mm_insert_node(&dev_priv->mm.stolen, node,
size >>= 1, 4096,
DRM_MM_SEARCH_DEFAULT);
if (ret && INTEL_INFO(dev)->gen <= 4) {
return 0;
} else if (ret) {
compression_threshold <<= 1;
goto again;
} else {
return compression_threshold;
}
}
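find_compression_threshold() trades CFB size against the hardware's compression limit: each failed stolen-memory allocation halves the buffer and doubles the threshold, capped at 1:4 (1:2 for 16 bpp). A standalone sketch of that loop, with try_alloc() as a stand-in for drm_mm_insert_node() and the pre-gen5 bail-out omitted:

#include <stdbool.h>
#include <stdio.h>

static bool try_alloc(int size, int free_space)
{
	return size <= free_space;   /* fake allocator for the sketch */
}

static int find_threshold(int size, int fb_cpp, int free_space)
{
	int threshold = 1;

	if (try_alloc(size <<= 1, free_space))   /* over-allocate first */
		return threshold;

	for (;;) {
		/* HW can limit the CFB to at most 1:4 (1:2 at 16 bpp) */
		if (threshold > 4 || (fb_cpp == 2 && threshold == 2))
			return 0;
		if (try_alloc(size >>= 1, free_space))
			return threshold;
		threshold <<= 1;
	}
}

int main(void)
{
	/* 8 MiB request against 3 MiB free ends up at threshold 4. */
	printf("threshold = %d\n", find_threshold(8 << 20, 4, 3 << 20));
	return 0;
}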
static int i915_setup_compression(struct drm_device *dev, int size, int fb_cpp)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_mm_node *uninitialized_var(compressed_llb);
int ret;
ret = find_compression_threshold(dev, &dev_priv->fbc.compressed_fb,
size, fb_cpp);
if (!ret)
goto err_llb; goto err_llb;
else if (ret > 1) {
DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
}
dev_priv->fbc.threshold = ret;
if (HAS_PCH_SPLIT(dev)) if (HAS_PCH_SPLIT(dev))
I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start); I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
else if (IS_GM45(dev)) { else if (IS_GM45(dev)) {
I915_WRITE(DPFC_CB_BASE, compressed_fb->start); I915_WRITE(DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
} else { } else {
compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL); compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
if (!compressed_llb) if (!compressed_llb)
...@@ -140,13 +178,12 @@ static int i915_setup_compression(struct drm_device *dev, int size) ...@@ -140,13 +178,12 @@ static int i915_setup_compression(struct drm_device *dev, int size)
dev_priv->fbc.compressed_llb = compressed_llb; dev_priv->fbc.compressed_llb = compressed_llb;
I915_WRITE(FBC_CFB_BASE, I915_WRITE(FBC_CFB_BASE,
dev_priv->mm.stolen_base + compressed_fb->start); dev_priv->mm.stolen_base + dev_priv->fbc.compressed_fb.start);
I915_WRITE(FBC_LL_BASE, I915_WRITE(FBC_LL_BASE,
dev_priv->mm.stolen_base + compressed_llb->start); dev_priv->mm.stolen_base + compressed_llb->start);
} }
dev_priv->fbc.compressed_fb = compressed_fb; dev_priv->fbc.size = size / dev_priv->fbc.threshold;
dev_priv->fbc.size = size;
DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n", DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
size); size);
...@@ -155,14 +192,13 @@ static int i915_setup_compression(struct drm_device *dev, int size) ...@@ -155,14 +192,13 @@ static int i915_setup_compression(struct drm_device *dev, int size)
err_fb: err_fb:
kfree(compressed_llb); kfree(compressed_llb);
drm_mm_remove_node(compressed_fb); drm_mm_remove_node(&dev_priv->fbc.compressed_fb);
err_llb: err_llb:
kfree(compressed_fb);
pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size); pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
return -ENOSPC; return -ENOSPC;
} }
int i915_gem_stolen_setup_compression(struct drm_device *dev, int size) int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_cpp)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
...@@ -175,7 +211,7 @@ int i915_gem_stolen_setup_compression(struct drm_device *dev, int size) ...@@ -175,7 +211,7 @@ int i915_gem_stolen_setup_compression(struct drm_device *dev, int size)
/* Release any current block */ /* Release any current block */
i915_gem_stolen_cleanup_compression(dev); i915_gem_stolen_cleanup_compression(dev);
return i915_setup_compression(dev, size); return i915_setup_compression(dev, size, fb_cpp);
} }
void i915_gem_stolen_cleanup_compression(struct drm_device *dev) void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
...@@ -185,10 +221,7 @@ void i915_gem_stolen_cleanup_compression(struct drm_device *dev) ...@@ -185,10 +221,7 @@ void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
if (dev_priv->fbc.size == 0) if (dev_priv->fbc.size == 0)
return; return;
if (dev_priv->fbc.compressed_fb) { drm_mm_remove_node(&dev_priv->fbc.compressed_fb);
drm_mm_remove_node(dev_priv->fbc.compressed_fb);
kfree(dev_priv->fbc.compressed_fb);
}
if (dev_priv->fbc.compressed_llb) { if (dev_priv->fbc.compressed_llb) {
drm_mm_remove_node(dev_priv->fbc.compressed_llb); drm_mm_remove_node(dev_priv->fbc.compressed_llb);
......
...@@ -327,6 +327,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, ...@@ -327,6 +327,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
struct drm_device *dev = error_priv->dev; struct drm_device *dev = error_priv->dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_error_state *error = error_priv->error; struct drm_i915_error_state *error = error_priv->error;
struct drm_i915_error_object *obj;
int i, j, offset, elt; int i, j, offset, elt;
int max_hangcheck_score; int max_hangcheck_score;
...@@ -395,8 +396,6 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, ...@@ -395,8 +396,6 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
error->pinned_bo_count[0]); error->pinned_bo_count[0]);
for (i = 0; i < ARRAY_SIZE(error->ring); i++) { for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
struct drm_i915_error_object *obj;
obj = error->ring[i].batchbuffer; obj = error->ring[i].batchbuffer;
if (obj) { if (obj) {
err_puts(m, dev_priv->ring[i].name); err_puts(m, dev_priv->ring[i].name);
...@@ -459,6 +458,18 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, ...@@ -459,6 +458,18 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
} }
} }
if ((obj = error->semaphore_obj)) {
err_printf(m, "Semaphore page = 0x%08x\n", obj->gtt_offset);
for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
err_printf(m, "[%04x] %08x %08x %08x %08x\n",
elt * 4,
obj->pages[0][elt],
obj->pages[0][elt+1],
obj->pages[0][elt+2],
obj->pages[0][elt+3]);
}
}
if (error->overlay) if (error->overlay)
intel_overlay_print_error_state(m, error->overlay); intel_overlay_print_error_state(m, error->overlay);
...@@ -529,6 +540,7 @@ static void i915_error_state_free(struct kref *error_ref) ...@@ -529,6 +540,7 @@ static void i915_error_state_free(struct kref *error_ref)
kfree(error->ring[i].requests); kfree(error->ring[i].requests);
} }
i915_error_object_free(error->semaphore_obj);
kfree(error->active_bo); kfree(error->active_bo);
kfree(error->overlay); kfree(error->overlay);
kfree(error->display); kfree(error->display);
...@@ -746,7 +758,52 @@ static void i915_gem_record_fences(struct drm_device *dev, ...@@ -746,7 +758,52 @@ static void i915_gem_record_fences(struct drm_device *dev,
} }
} }
static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error,
struct intel_engine_cs *ring,
struct drm_i915_error_ring *ering)
{
struct intel_engine_cs *useless;
int i;
if (!i915_semaphore_is_enabled(dev_priv->dev))
return;
if (!error->semaphore_obj)
error->semaphore_obj =
i915_error_object_create(dev_priv,
dev_priv->semaphore_obj,
&dev_priv->gtt.base);
for_each_ring(useless, dev_priv, i) {
u16 signal_offset =
(GEN8_SIGNAL_OFFSET(ring, i) & (PAGE_SIZE - 1)) / 4;
u32 *tmp = error->semaphore_obj->pages[0];
ering->semaphore_mboxes[i] = tmp[signal_offset];
ering->semaphore_seqno[i] = ring->semaphore.sync_seqno[i];
}
}
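On gen8 every ring signals each other ring through a fixed slot in a single GGTT semaphore page, so the error capture only needs to translate the per-pair GGTT address into an index into its u32 copy of that page. A sketch of that conversion, using a hypothetical slot packing (the real GEN8_SIGNAL_OFFSET() layout may differ) and keeping only the offset within the page:

#include <stdint.h>
#include <stdio.h>

#define NUM_RINGS 5

/* Hypothetical packing: one 8-byte slot per (signaller, waiter) pair. */
static uint32_t gen8_signal_offset(uint32_t page_base, int signaller, int waiter)
{
	return page_base + (uint32_t)(signaller * NUM_RINGS + waiter) * 8;
}

int main(void)
{
	uint32_t page[1024] = { 0 };   /* u32 copy of the 4 KiB semaphore page */
	uint32_t ggtt = gen8_signal_offset(0x12345000u, 0, 2);

	/* Keep only the offset within the page, then index the u32 copy,
	 * mirroring the (offset & (PAGE_SIZE - 1)) / 4 conversion above. */
	uint32_t idx = (ggtt & 0xfffu) / 4;
	printf("mbox slot index %u -> 0x%08x\n", idx, page[idx]);
	return 0;
}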
static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv,
struct intel_engine_cs *ring,
struct drm_i915_error_ring *ering)
{
ering->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(ring->mmio_base));
ering->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(ring->mmio_base));
ering->semaphore_seqno[0] = ring->semaphore.sync_seqno[0];
ering->semaphore_seqno[1] = ring->semaphore.sync_seqno[1];
if (HAS_VEBOX(dev_priv->dev)) {
ering->semaphore_mboxes[2] =
I915_READ(RING_SYNC_2(ring->mmio_base));
ering->semaphore_seqno[2] = ring->semaphore.sync_seqno[2];
}
}
static void i915_record_ring_state(struct drm_device *dev, static void i915_record_ring_state(struct drm_device *dev,
struct drm_i915_error_state *error,
struct intel_engine_cs *ring, struct intel_engine_cs *ring,
struct drm_i915_error_ring *ering) struct drm_i915_error_ring *ering)
{ {
...@@ -755,18 +812,10 @@ static void i915_record_ring_state(struct drm_device *dev, ...@@ -755,18 +812,10 @@ static void i915_record_ring_state(struct drm_device *dev,
if (INTEL_INFO(dev)->gen >= 6) { if (INTEL_INFO(dev)->gen >= 6) {
ering->rc_psmi = I915_READ(ring->mmio_base + 0x50); ering->rc_psmi = I915_READ(ring->mmio_base + 0x50);
ering->fault_reg = I915_READ(RING_FAULT_REG(ring)); ering->fault_reg = I915_READ(RING_FAULT_REG(ring));
ering->semaphore_mboxes[0] if (INTEL_INFO(dev)->gen >= 8)
= I915_READ(RING_SYNC_0(ring->mmio_base)); gen8_record_semaphore_state(dev_priv, error, ring, ering);
ering->semaphore_mboxes[1] else
= I915_READ(RING_SYNC_1(ring->mmio_base)); gen6_record_semaphore_state(dev_priv, ring, ering);
ering->semaphore_seqno[0] = ring->semaphore.sync_seqno[0];
ering->semaphore_seqno[1] = ring->semaphore.sync_seqno[1];
}
if (HAS_VEBOX(dev)) {
ering->semaphore_mboxes[2] =
I915_READ(RING_SYNC_2(ring->mmio_base));
ering->semaphore_seqno[2] = ring->semaphore.sync_seqno[2];
} }
if (INTEL_INFO(dev)->gen >= 4) { if (INTEL_INFO(dev)->gen >= 4) {
...@@ -895,7 +944,7 @@ static void i915_gem_record_rings(struct drm_device *dev, ...@@ -895,7 +944,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
error->ring[i].valid = true; error->ring[i].valid = true;
i915_record_ring_state(dev, ring, &error->ring[i]); i915_record_ring_state(dev, error, ring, &error->ring[i]);
request = i915_gem_find_active_request(ring); request = i915_gem_find_active_request(ring);
if (request) { if (request) {
......
...@@ -1090,6 +1090,53 @@ static bool intel_hpd_irq_event(struct drm_device *dev, ...@@ -1090,6 +1090,53 @@ static bool intel_hpd_irq_event(struct drm_device *dev,
return true; return true;
} }
static void i915_digport_work_func(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, struct drm_i915_private, dig_port_work);
unsigned long irqflags;
u32 long_port_mask, short_port_mask;
struct intel_digital_port *intel_dig_port;
int i, ret;
u32 old_bits = 0;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
long_port_mask = dev_priv->long_hpd_port_mask;
dev_priv->long_hpd_port_mask = 0;
short_port_mask = dev_priv->short_hpd_port_mask;
dev_priv->short_hpd_port_mask = 0;
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
for (i = 0; i < I915_MAX_PORTS; i++) {
bool valid = false;
bool long_hpd = false;
intel_dig_port = dev_priv->hpd_irq_port[i];
if (!intel_dig_port || !intel_dig_port->hpd_pulse)
continue;
if (long_port_mask & (1 << i)) {
valid = true;
long_hpd = true;
} else if (short_port_mask & (1 << i))
valid = true;
if (valid) {
ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
if (ret == true) {
/* if we get true, fall back to old-school hpd */
old_bits |= (1 << intel_dig_port->base.hpd_pin);
}
}
}
if (old_bits) {
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
dev_priv->hpd_event_bits |= old_bits;
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
schedule_work(&dev_priv->hotplug_work);
}
}
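The work function uses the usual snapshot-and-clear handoff: copy the pending port masks under the IRQ lock, zero them so the interrupt handler can keep accumulating, then process the snapshot unlocked. A minimal userspace sketch of the pattern, with a pthread spinlock standing in for dev_priv->irq_lock:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_spinlock_t irq_lock;
static uint32_t long_mask, short_mask;   /* written from "irq" context */

static void work_fn(void)
{
	uint32_t lm, sm;

	pthread_spin_lock(&irq_lock);
	lm = long_mask;  long_mask = 0;      /* snapshot and clear ...   */
	sm = short_mask; short_mask = 0;     /* ... so new IRQs accumulate */
	pthread_spin_unlock(&irq_lock);

	for (int i = 0; i < 32; i++)
		if ((lm | sm) & (1u << i))
			printf("port %d: %s pulse\n", i,
			       (lm & (1u << i)) ? "long" : "short");
}

int main(void)
{
	pthread_spin_init(&irq_lock, PTHREAD_PROCESS_PRIVATE);
	long_mask = 1 << 3;
	short_mask = 1 << 1;
	work_fn();
	return 0;
}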
/* /*
* Handle hotplug events outside the interrupt handler proper. * Handle hotplug events outside the interrupt handler proper.
*/ */
...@@ -1221,6 +1268,131 @@ static void notify_ring(struct drm_device *dev, ...@@ -1221,6 +1268,131 @@ static void notify_ring(struct drm_device *dev,
i915_queue_hangcheck(dev); i915_queue_hangcheck(dev);
} }
static u32 vlv_c0_residency(struct drm_i915_private *dev_priv,
struct intel_rps_ei *rps_ei)
{
u32 cz_ts, cz_freq_khz;
u32 render_count, media_count;
u32 elapsed_render, elapsed_media, elapsed_time;
u32 residency = 0;
cz_ts = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
cz_freq_khz = DIV_ROUND_CLOSEST(dev_priv->mem_freq * 1000, 4);
render_count = I915_READ(VLV_RENDER_C0_COUNT_REG);
media_count = I915_READ(VLV_MEDIA_C0_COUNT_REG);
if (rps_ei->cz_clock == 0) {
rps_ei->cz_clock = cz_ts;
rps_ei->render_c0 = render_count;
rps_ei->media_c0 = media_count;
return dev_priv->rps.cur_freq;
}
elapsed_time = cz_ts - rps_ei->cz_clock;
rps_ei->cz_clock = cz_ts;
elapsed_render = render_count - rps_ei->render_c0;
rps_ei->render_c0 = render_count;
elapsed_media = media_count - rps_ei->media_c0;
rps_ei->media_c0 = media_count;
/* Convert all the counters into a common unit of milliseconds */
elapsed_time /= VLV_CZ_CLOCK_TO_MILLI_SEC;
elapsed_render /= cz_freq_khz;
elapsed_media /= cz_freq_khz;
/*
* Calculate overall C0 residency percentage
* only if the elapsed time is non-zero
*/
if (elapsed_time) {
residency =
((max(elapsed_render, elapsed_media) * 100)
/ elapsed_time);
}
return residency;
}
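Worked numbers make the unit conversion easier to follow: the CZ timestamp delta is divided down to milliseconds, the C0 count deltas are divided by the derived clock in kHz, and the residency is the busier well's share of wall time. A self-contained example with made-up counter deltas (mem_freq is an assumed 1600):

#include <stdint.h>
#include <stdio.h>

#define CZ_CLOCK_TO_MILLI_SEC 100000   /* mirrors VLV_CZ_CLOCK_TO_MILLI_SEC */

int main(void)
{
	uint32_t mem_freq = 1600;                          /* assumed */
	uint32_t cz_freq_khz = (mem_freq * 1000 + 2) / 4;  /* round-closest */

	uint32_t elapsed_time = 3200000;     /* CZ timestamp delta */
	uint32_t elapsed_render = 11200000;  /* render C0 count delta */
	uint32_t elapsed_media = 4000000;    /* media C0 count delta */

	elapsed_time /= CZ_CLOCK_TO_MILLI_SEC;   /* -> 32 ms */
	elapsed_render /= cz_freq_khz;           /* -> 28 ms */
	elapsed_media /= cz_freq_khz;            /* -> 10 ms */

	uint32_t busiest = elapsed_render > elapsed_media ?
			   elapsed_render : elapsed_media;
	if (elapsed_time)
		printf("C0 residency: %u%%\n", busiest * 100 / elapsed_time);
	return 0;
}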
/**
* vlv_calc_delay_from_C0_counters - Increase/Decrease freq based on GPU
* busy-ness calculated from C0 counters of render & media power wells
* @dev_priv: DRM device private
*
*/
static u32 vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv)
{
u32 residency_C0_up = 0, residency_C0_down = 0;
u8 new_delay, adj;
dev_priv->rps.ei_interrupt_count++;
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
if (dev_priv->rps.up_ei.cz_clock == 0) {
vlv_c0_residency(dev_priv, &dev_priv->rps.up_ei);
vlv_c0_residency(dev_priv, &dev_priv->rps.down_ei);
return dev_priv->rps.cur_freq;
}
/*
* To throttle down, C0 residency should stay below the down threshold
* for consecutive EI intervals. So calculate the down EI counters
* once every VLV_INT_COUNT_FOR_DOWN_EI intervals
*/
if (dev_priv->rps.ei_interrupt_count == VLV_INT_COUNT_FOR_DOWN_EI) {
dev_priv->rps.ei_interrupt_count = 0;
residency_C0_down = vlv_c0_residency(dev_priv,
&dev_priv->rps.down_ei);
} else {
residency_C0_up = vlv_c0_residency(dev_priv,
&dev_priv->rps.up_ei);
}
new_delay = dev_priv->rps.cur_freq;
adj = dev_priv->rps.last_adj;
/* C0 residency is greater than UP threshold. Increase Frequency */
if (residency_C0_up >= VLV_RP_UP_EI_THRESHOLD) {
if (adj > 0)
adj *= 2;
else
adj = 1;
if (dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit)
new_delay = dev_priv->rps.cur_freq + adj;
/*
* For better performance, jump directly
* to RPe if we're below it.
*/
if (new_delay < dev_priv->rps.efficient_freq)
new_delay = dev_priv->rps.efficient_freq;
} else if (!dev_priv->rps.ei_interrupt_count &&
(residency_C0_down < VLV_RP_DOWN_EI_THRESHOLD)) {
if (adj < 0)
adj *= 2;
else
adj = -1;
/*
* This means C0 residency stayed below the down threshold over
* a period of VLV_INT_COUNT_FOR_DOWN_EI intervals, so reduce the freq
*/
if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
new_delay = dev_priv->rps.cur_freq + adj;
}
return new_delay;
}
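Stripped of the locking and bookkeeping, the policy is: at least 90% C0 residency in an up interval doubles a positive adjustment, under 70% across the longer down window doubles a negative one, and anything in between holds the current frequency (the jump-to-RPe clamp is omitted here). A sketch of that decision in isolation:

#include <stdio.h>

static int step(int cur, int *adj, unsigned up, unsigned down,
		int down_valid, int min, int max)
{
	int next = cur;

	if (up >= 90) {                          /* VLV_RP_UP_EI_THRESHOLD */
		*adj = *adj > 0 ? *adj * 2 : 1;
		if (cur < max)
			next = cur + *adj;
	} else if (down_valid && down < 70) {    /* VLV_RP_DOWN_EI_THRESHOLD */
		*adj = *adj < 0 ? *adj * 2 : -1;
		if (cur > min)
			next = cur + *adj;
	}
	return next;
}

int main(void)
{
	int adj = 0, freq = 300;

	/* Four busy intervals: the step doubles each time (1, 2, 4, 8). */
	for (int i = 0; i < 4; i++)
		printf("freq -> %d\n",
		       freq = step(freq, &adj, 95, 0, 0, 200, 400));
	return 0;
}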
static void gen6_pm_rps_work(struct work_struct *work) static void gen6_pm_rps_work(struct work_struct *work)
{ {
struct drm_i915_private *dev_priv = struct drm_i915_private *dev_priv =
...@@ -1269,6 +1441,8 @@ static void gen6_pm_rps_work(struct work_struct *work) ...@@ -1269,6 +1441,8 @@ static void gen6_pm_rps_work(struct work_struct *work)
else else
new_delay = dev_priv->rps.min_freq_softlimit; new_delay = dev_priv->rps.min_freq_softlimit;
adj = 0; adj = 0;
} else if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
new_delay = vlv_calc_delay_from_C0_counters(dev_priv);
} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) { } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
if (adj < 0) if (adj < 0)
adj *= 2; adj *= 2;
...@@ -1517,23 +1691,104 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev, ...@@ -1517,23 +1691,104 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
#define HPD_STORM_DETECT_PERIOD 1000 #define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5 #define HPD_STORM_THRESHOLD 5
static int ilk_port_to_hotplug_shift(enum port port)
{
switch (port) {
case PORT_A:
case PORT_E:
default:
return -1;
case PORT_B:
return 0;
case PORT_C:
return 8;
case PORT_D:
return 16;
}
}
static int g4x_port_to_hotplug_shift(enum port port)
{
switch (port) {
case PORT_A:
case PORT_E:
default:
return -1;
case PORT_B:
return 17;
case PORT_C:
return 19;
case PORT_D:
return 21;
}
}
static inline enum port get_port_from_pin(enum hpd_pin pin)
{
switch (pin) {
case HPD_PORT_B:
return PORT_B;
case HPD_PORT_C:
return PORT_C;
case HPD_PORT_D:
return PORT_D;
default:
return PORT_A; /* no hpd */
}
}
static inline void intel_hpd_irq_handler(struct drm_device *dev, static inline void intel_hpd_irq_handler(struct drm_device *dev,
u32 hotplug_trigger, u32 hotplug_trigger,
u32 dig_hotplug_reg,
const u32 *hpd) const u32 *hpd)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
int i; int i;
enum port port;
bool storm_detected = false; bool storm_detected = false;
bool queue_dig = false, queue_hp = false;
u32 dig_shift;
u32 dig_port_mask = 0;
if (!hotplug_trigger) if (!hotplug_trigger)
return; return;
DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x\n",
hotplug_trigger); hotplug_trigger, dig_hotplug_reg);
spin_lock(&dev_priv->irq_lock); spin_lock(&dev_priv->irq_lock);
for (i = 1; i < HPD_NUM_PINS; i++) { for (i = 1; i < HPD_NUM_PINS; i++) {
if (!(hpd[i] & hotplug_trigger))
continue;
port = get_port_from_pin(i);
if (port && dev_priv->hpd_irq_port[port]) {
bool long_hpd;
if (IS_G4X(dev)) {
dig_shift = g4x_port_to_hotplug_shift(port);
long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
} else {
dig_shift = ilk_port_to_hotplug_shift(port);
long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
}
DRM_DEBUG_DRIVER("digital hpd port %d %d\n", port, long_hpd);
/* For long HPD pulses we want the digital queue to run,
but we still want HPD storm detection to function. */
if (long_hpd) {
dev_priv->long_hpd_port_mask |= (1 << port);
dig_port_mask |= hpd[i];
} else {
/* for short HPD just trigger the digital queue */
dev_priv->short_hpd_port_mask |= (1 << port);
hotplug_trigger &= ~hpd[i];
}
queue_dig = true;
}
}
for (i = 1; i < HPD_NUM_PINS; i++) {
if (hpd[i] & hotplug_trigger && if (hpd[i] & hotplug_trigger &&
dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) { dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
/* /*
...@@ -1553,7 +1808,11 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev, ...@@ -1553,7 +1808,11 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED) dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
continue; continue;
dev_priv->hpd_event_bits |= (1 << i); if (!(dig_port_mask & hpd[i])) {
dev_priv->hpd_event_bits |= (1 << i);
queue_hp = true;
}
if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies, if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
dev_priv->hpd_stats[i].hpd_last_jiffies dev_priv->hpd_stats[i].hpd_last_jiffies
+ msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) { + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
...@@ -1582,7 +1841,10 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev, ...@@ -1582,7 +1841,10 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
* queue for otherwise the flush_work in the pageflip code will * queue for otherwise the flush_work in the pageflip code will
* deadlock. * deadlock.
*/ */
schedule_work(&dev_priv->hotplug_work); if (queue_dig)
schedule_work(&dev_priv->dig_port_work);
if (queue_hp)
schedule_work(&dev_priv->hotplug_work);
} }
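The long-versus-short split keys off one bit: the per-port shift selects the port's 2-bit hotplug status field, and the handler tests the long-detect flag in it, assumed here to sit at bit 1 per PORTB_HOTPLUG_LONG_DETECT. A small sketch using the ILK shift for port C from ilk_port_to_hotplug_shift() above:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define HOTPLUG_LONG_DETECT (1u << 1)   /* assumed PORTB_HOTPLUG_LONG_DETECT */

static bool is_long_pulse(uint32_t dig_hotplug_reg, int shift)
{
	return (dig_hotplug_reg >> shift) & HOTPLUG_LONG_DETECT;
}

int main(void)
{
	/* Port C on an ILK-style PCH uses shift 8 (see the helper above). */
	uint32_t reg = 0x2u << 8;   /* long-detect bit set for port C */

	printf("port C long pulse: %d\n", is_long_pulse(reg, 8));
	return 0;
}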
static void gmbus_irq_handler(struct drm_device *dev) static void gmbus_irq_handler(struct drm_device *dev)
...@@ -1823,11 +2085,11 @@ static void i9xx_hpd_irq_handler(struct drm_device *dev) ...@@ -1823,11 +2085,11 @@ static void i9xx_hpd_irq_handler(struct drm_device *dev)
if (IS_G4X(dev)) { if (IS_G4X(dev)) {
u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_g4x); intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x);
} else { } else {
u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915); intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_i915);
} }
if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) && if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
...@@ -1925,8 +2187,12 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) ...@@ -1925,8 +2187,12 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
int pipe; int pipe;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
u32 dig_hotplug_reg;
dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx); intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_ibx);
if (pch_iir & SDE_AUDIO_POWER_MASK) { if (pch_iir & SDE_AUDIO_POWER_MASK) {
int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
...@@ -2032,8 +2298,12 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) ...@@ -2032,8 +2298,12 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
int pipe; int pipe;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
u32 dig_hotplug_reg;
intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt); dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_cpt);
if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
...@@ -2780,12 +3050,7 @@ static bool ...@@ -2780,12 +3050,7 @@ static bool
ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr) ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
{ {
if (INTEL_INFO(dev)->gen >= 8) { if (INTEL_INFO(dev)->gen >= 8) {
/* return (ipehr >> 23) == 0x1c;
* FIXME: gen8 semaphore support - currently we don't emit
* semaphores on bdw anyway, but this needs to be addressed when
* we merge that code.
*/
return false;
} else { } else {
ipehr &= ~MI_SEMAPHORE_SYNC_MASK; ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
...@@ -2794,19 +3059,20 @@ ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr) ...@@ -2794,19 +3059,20 @@ ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
} }
static struct intel_engine_cs * static struct intel_engine_cs *
semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr) semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
{ {
struct drm_i915_private *dev_priv = ring->dev->dev_private; struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct intel_engine_cs *signaller; struct intel_engine_cs *signaller;
int i; int i;
if (INTEL_INFO(dev_priv->dev)->gen >= 8) { if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
/* for_each_ring(signaller, dev_priv, i) {
* FIXME: gen8 semaphore support - currently we don't emit if (ring == signaller)
* semaphores on bdw anyway, but this needs to be addressed when continue;
* we merge that code.
*/ if (offset == signaller->semaphore.signal_ggtt[ring->id])
return NULL; return signaller;
}
} else { } else {
u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK; u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
...@@ -2819,8 +3085,8 @@ semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr) ...@@ -2819,8 +3085,8 @@ semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr)
} }
} }
DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x\n", DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
ring->id, ipehr); ring->id, ipehr, offset);
return NULL; return NULL;
} }
...@@ -2830,7 +3096,8 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno) ...@@ -2830,7 +3096,8 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
{ {
struct drm_i915_private *dev_priv = ring->dev->dev_private; struct drm_i915_private *dev_priv = ring->dev->dev_private;
u32 cmd, ipehr, head; u32 cmd, ipehr, head;
int i; u64 offset = 0;
int i, backwards;
ipehr = I915_READ(RING_IPEHR(ring->mmio_base)); ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
if (!ipehr_is_semaphore_wait(ring->dev, ipehr)) if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
...@@ -2839,13 +3106,15 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno) ...@@ -2839,13 +3106,15 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
/* /*
* HEAD is likely pointing to the dword after the actual command, * HEAD is likely pointing to the dword after the actual command,
* so scan backwards until we find the MBOX. But limit it to just 3 * so scan backwards until we find the MBOX. But limit it to just 3
* dwords. Note that we don't care about ACTHD here since that might * or 4 dwords depending on the semaphore wait command size.
* Note that we don't care about ACTHD here since that might
* point at a batch, and semaphores are always emitted into the * point at a batch, and semaphores are always emitted into the
* ringbuffer itself. * ringbuffer itself.
*/ */
head = I915_READ_HEAD(ring) & HEAD_ADDR; head = I915_READ_HEAD(ring) & HEAD_ADDR;
backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
for (i = 4; i; --i) { for (i = backwards; i; --i) {
/* /*
* Be paranoid and presume the hw has gone off into the wild - * Be paranoid and presume the hw has gone off into the wild -
* our ring is smaller than what the hardware (and hence * our ring is smaller than what the hardware (and hence
...@@ -2865,7 +3134,12 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno) ...@@ -2865,7 +3134,12 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
return NULL; return NULL;
*seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1; *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
return semaphore_wait_to_signaller_ring(ring, ipehr); if (INTEL_INFO(ring->dev)->gen >= 8) {
offset = ioread32(ring->buffer->virtual_start + head + 12);
offset <<= 32;
offset |= ioread32(ring->buffer->virtual_start + head + 8);
}
return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
} }
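The gen8 branch reassembles the 64-bit semaphore address from the two dwords that follow the MI_SEMAPHORE_WAIT header: the high dword sits at head + 12 and the low dword at head + 8, so the second read must OR into the shifted value rather than overwrite it. A standalone sketch of that assembly:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t ring[4] = { /* cmd */ 0, /* seqno */ 0,
			     /* addr lo */ 0x00001000, /* addr hi */ 0x00000001 };

	uint64_t offset = ring[3];   /* dword at head + 12 */
	offset <<= 32;
	offset |= ring[2];           /* dword at head + 8 */

	printf("semaphore ggtt offset: 0x%016llx\n",
	       (unsigned long long)offset);
	return 0;
}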
static int semaphore_passed(struct intel_engine_cs *ring) static int semaphore_passed(struct intel_engine_cs *ring)
...@@ -4354,12 +4628,17 @@ void intel_irq_init(struct drm_device *dev) ...@@ -4354,12 +4628,17 @@ void intel_irq_init(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func); INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work); INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
/* Let's track the enabled rps events */ /* Let's track the enabled rps events */
dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS; if (IS_VALLEYVIEW(dev))
/* WaGsvRC0ResidenncyMethod:VLV */
dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
else
dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
setup_timer(&dev_priv->gpu_error.hangcheck_timer, setup_timer(&dev_priv->gpu_error.hangcheck_timer,
i915_hangcheck_elapsed, i915_hangcheck_elapsed,
......
...@@ -240,7 +240,7 @@ ...@@ -240,7 +240,7 @@
#define MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19) #define MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19)
#define MI_DISPLAY_FLIP_IVB_PLANE_C (4 << 19) #define MI_DISPLAY_FLIP_IVB_PLANE_C (4 << 19)
#define MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19) #define MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19)
#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */ #define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6, gen7 */
#define MI_SEMAPHORE_GLOBAL_GTT (1<<22) #define MI_SEMAPHORE_GLOBAL_GTT (1<<22)
#define MI_SEMAPHORE_UPDATE (1<<21) #define MI_SEMAPHORE_UPDATE (1<<21)
#define MI_SEMAPHORE_COMPARE (1<<20) #define MI_SEMAPHORE_COMPARE (1<<20)
...@@ -266,6 +266,11 @@ ...@@ -266,6 +266,11 @@
#define MI_RESTORE_EXT_STATE_EN (1<<2) #define MI_RESTORE_EXT_STATE_EN (1<<2)
#define MI_FORCE_RESTORE (1<<1) #define MI_FORCE_RESTORE (1<<1)
#define MI_RESTORE_INHIBIT (1<<0) #define MI_RESTORE_INHIBIT (1<<0)
#define MI_SEMAPHORE_SIGNAL MI_INSTR(0x1b, 0) /* GEN8+ */
#define MI_SEMAPHORE_TARGET(engine) ((engine)<<15)
#define MI_SEMAPHORE_WAIT MI_INSTR(0x1c, 2) /* GEN8+ */
#define MI_SEMAPHORE_POLL (1<<15)
#define MI_SEMAPHORE_SAD_GTE_SDD (1<<12)
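MI_INSTR() places the opcode in bits 28:23 of the command header, so a gen8 semaphore wait can be recognized by (ipehr >> 23) == 0x1c no matter which poll or compare-mode flags are set, which is exactly the hangcheck test introduced earlier in this series. A compile-and-run check of that decoding:

#include <stdint.h>
#include <stdio.h>

#define MI_INSTR(opcode, flags) (((opcode) << 23) | (flags))
#define MI_SEMAPHORE_WAIT        MI_INSTR(0x1c, 2)   /* len 2 -> 4 dwords */
#define MI_SEMAPHORE_POLL        (1u << 15)
#define MI_SEMAPHORE_SAD_GTE_SDD (1u << 12)

int main(void)
{
	uint32_t ipehr = MI_SEMAPHORE_WAIT | MI_SEMAPHORE_POLL |
			 MI_SEMAPHORE_SAD_GTE_SDD;

	/* Flag bits live below bit 23, so the shift isolates the opcode. */
	printf("is gen8 semaphore wait: %d\n", (ipehr >> 23) == 0x1c);
	return 0;
}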
#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1) #define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1)
#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */ #define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */
#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1) #define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
...@@ -360,6 +365,7 @@ ...@@ -360,6 +365,7 @@
#define PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE (1<<10) /* GM45+ only */ #define PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE (1<<10) /* GM45+ only */
#define PIPE_CONTROL_INDIRECT_STATE_DISABLE (1<<9) #define PIPE_CONTROL_INDIRECT_STATE_DISABLE (1<<9)
#define PIPE_CONTROL_NOTIFY (1<<8) #define PIPE_CONTROL_NOTIFY (1<<8)
#define PIPE_CONTROL_FLUSH_ENABLE (1<<7) /* gen7+ */
#define PIPE_CONTROL_VF_CACHE_INVALIDATE (1<<4) #define PIPE_CONTROL_VF_CACHE_INVALIDATE (1<<4)
#define PIPE_CONTROL_CONST_CACHE_INVALIDATE (1<<3) #define PIPE_CONTROL_CONST_CACHE_INVALIDATE (1<<3)
#define PIPE_CONTROL_STATE_CACHE_INVALIDATE (1<<2) #define PIPE_CONTROL_STATE_CACHE_INVALIDATE (1<<2)
...@@ -525,6 +531,7 @@ enum punit_power_well { ...@@ -525,6 +531,7 @@ enum punit_power_well {
#define PUNIT_REG_GPU_FREQ_STS 0xd8 #define PUNIT_REG_GPU_FREQ_STS 0xd8
#define GENFREQSTATUS (1<<0) #define GENFREQSTATUS (1<<0)
#define PUNIT_REG_MEDIA_TURBO_FREQ_REQ 0xdc #define PUNIT_REG_MEDIA_TURBO_FREQ_REQ 0xdc
#define PUNIT_REG_CZ_TIMESTAMP 0xce
#define PUNIT_FUSE_BUS2 0xf6 /* bits 47:40 */ #define PUNIT_FUSE_BUS2 0xf6 /* bits 47:40 */
#define PUNIT_FUSE_BUS1 0xf5 /* bits 55:48 */ #define PUNIT_FUSE_BUS1 0xf5 /* bits 55:48 */
...@@ -550,6 +557,11 @@ enum punit_power_well { ...@@ -550,6 +557,11 @@ enum punit_power_well {
#define FB_FMAX_VMIN_FREQ_LO_SHIFT 27 #define FB_FMAX_VMIN_FREQ_LO_SHIFT 27
#define FB_FMAX_VMIN_FREQ_LO_MASK 0xf8000000 #define FB_FMAX_VMIN_FREQ_LO_MASK 0xf8000000
#define VLV_CZ_CLOCK_TO_MILLI_SEC 100000
#define VLV_RP_UP_EI_THRESHOLD 90
#define VLV_RP_DOWN_EI_THRESHOLD 70
#define VLV_INT_COUNT_FOR_DOWN_EI 5
/* vlv2 north clock has */ /* vlv2 north clock has */
#define CCK_FUSE_REG 0x8 #define CCK_FUSE_REG 0x8
#define CCK_FUSE_HPLL_FREQ_MASK 0x3 #define CCK_FUSE_HPLL_FREQ_MASK 0x3
...@@ -584,6 +596,11 @@ enum punit_power_well { ...@@ -584,6 +596,11 @@ enum punit_power_well {
#define DSI_PLL_M1_DIV_SHIFT 0 #define DSI_PLL_M1_DIV_SHIFT 0
#define DSI_PLL_M1_DIV_MASK (0x1ff << 0) #define DSI_PLL_M1_DIV_MASK (0x1ff << 0)
#define CCK_DISPLAY_CLOCK_CONTROL 0x6b #define CCK_DISPLAY_CLOCK_CONTROL 0x6b
#define DISPLAY_TRUNK_FORCE_ON (1 << 17)
#define DISPLAY_TRUNK_FORCE_OFF (1 << 16)
#define DISPLAY_FREQUENCY_STATUS (0x1f << 8)
#define DISPLAY_FREQUENCY_STATUS_SHIFT 8
#define DISPLAY_FREQUENCY_VALUES (0x1f << 0)
/** /**
* DOC: DPIO * DOC: DPIO
...@@ -5383,6 +5400,7 @@ enum punit_power_well { ...@@ -5383,6 +5400,7 @@ enum punit_power_well {
#define VLV_GTLC_ALLOWWAKEERR (1 << 1) #define VLV_GTLC_ALLOWWAKEERR (1 << 1)
#define VLV_GTLC_PW_MEDIA_STATUS_MASK (1 << 5) #define VLV_GTLC_PW_MEDIA_STATUS_MASK (1 << 5)
#define VLV_GTLC_PW_RENDER_STATUS_MASK (1 << 7) #define VLV_GTLC_PW_RENDER_STATUS_MASK (1 << 7)
#define VLV_GTLC_SURVIVABILITY_REG 0x130098
#define FORCEWAKE_MT 0xa188 /* multi-threaded */ #define FORCEWAKE_MT 0xa188 /* multi-threaded */
#define FORCEWAKE_KERNEL 0x1 #define FORCEWAKE_KERNEL 0x1
#define FORCEWAKE_USER 0x2 #define FORCEWAKE_USER 0x2
...@@ -5530,6 +5548,8 @@ enum punit_power_well { ...@@ -5530,6 +5548,8 @@ enum punit_power_well {
#define GEN6_GT_GFX_RC6_LOCKED 0x138104 #define GEN6_GT_GFX_RC6_LOCKED 0x138104
#define VLV_COUNTER_CONTROL 0x138104 #define VLV_COUNTER_CONTROL 0x138104
#define VLV_COUNT_RANGE_HIGH (1<<15) #define VLV_COUNT_RANGE_HIGH (1<<15)
#define VLV_MEDIA_RC0_COUNT_EN (1<<5)
#define VLV_RENDER_RC0_COUNT_EN (1<<4)
#define VLV_MEDIA_RC6_COUNT_EN (1<<1) #define VLV_MEDIA_RC6_COUNT_EN (1<<1)
#define VLV_RENDER_RC6_COUNT_EN (1<<0) #define VLV_RENDER_RC6_COUNT_EN (1<<0)
#define GEN6_GT_GFX_RC6 0x138108 #define GEN6_GT_GFX_RC6 0x138108
...@@ -5538,6 +5558,8 @@ enum punit_power_well { ...@@ -5538,6 +5558,8 @@ enum punit_power_well {
#define GEN6_GT_GFX_RC6p 0x13810C #define GEN6_GT_GFX_RC6p 0x13810C
#define GEN6_GT_GFX_RC6pp 0x138110 #define GEN6_GT_GFX_RC6pp 0x138110
#define VLV_RENDER_C0_COUNT_REG 0x138118
#define VLV_MEDIA_C0_COUNT_REG 0x13811C
#define GEN6_PCODE_MAILBOX 0x138124 #define GEN6_PCODE_MAILBOX 0x138124
#define GEN6_PCODE_READY (1<<31) #define GEN6_PCODE_READY (1<<31)
...@@ -5772,6 +5794,7 @@ enum punit_power_well { ...@@ -5772,6 +5794,7 @@ enum punit_power_well {
#define TRANS_DDI_FUNC_ENABLE (1<<31) #define TRANS_DDI_FUNC_ENABLE (1<<31)
/* Those bits are ignored by pipe EDP since it can only connect to DDI A */ /* Those bits are ignored by pipe EDP since it can only connect to DDI A */
#define TRANS_DDI_PORT_MASK (7<<28) #define TRANS_DDI_PORT_MASK (7<<28)
#define TRANS_DDI_PORT_SHIFT 28
#define TRANS_DDI_SELECT_PORT(x) ((x)<<28) #define TRANS_DDI_SELECT_PORT(x) ((x)<<28)
#define TRANS_DDI_PORT_NONE (0<<28) #define TRANS_DDI_PORT_NONE (0<<28)
#define TRANS_DDI_MODE_SELECT_MASK (7<<24) #define TRANS_DDI_MODE_SELECT_MASK (7<<24)
...@@ -5899,10 +5922,12 @@ enum punit_power_well { ...@@ -5899,10 +5922,12 @@ enum punit_power_well {
/* WRPLL */ /* WRPLL */
#define WRPLL_CTL1 0x46040 #define WRPLL_CTL1 0x46040
#define WRPLL_CTL2 0x46060 #define WRPLL_CTL2 0x46060
#define WRPLL_CTL(pll) (pll == 0 ? WRPLL_CTL1 : WRPLL_CTL2)
#define WRPLL_PLL_ENABLE (1<<31) #define WRPLL_PLL_ENABLE (1<<31)
#define WRPLL_PLL_SELECT_SSC (0x01<<28) #define WRPLL_PLL_SSC (1<<28)
#define WRPLL_PLL_SELECT_NON_SSC (0x02<<28) #define WRPLL_PLL_NON_SSC (2<<28)
#define WRPLL_PLL_SELECT_LCPLL_2700 (0x03<<28) #define WRPLL_PLL_LCPLL (3<<28)
#define WRPLL_PLL_REF_MASK (3<<28)
/* WRPLL divider programming */ /* WRPLL divider programming */
#define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0) #define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0)
#define WRPLL_DIVIDER_REF_MASK (0xff) #define WRPLL_DIVIDER_REF_MASK (0xff)
...@@ -5921,6 +5946,7 @@ enum punit_power_well { ...@@ -5921,6 +5946,7 @@ enum punit_power_well {
#define PORT_CLK_SEL_LCPLL_1350 (1<<29) #define PORT_CLK_SEL_LCPLL_1350 (1<<29)
#define PORT_CLK_SEL_LCPLL_810 (2<<29) #define PORT_CLK_SEL_LCPLL_810 (2<<29)
#define PORT_CLK_SEL_SPLL (3<<29) #define PORT_CLK_SEL_SPLL (3<<29)
#define PORT_CLK_SEL_WRPLL(pll) (((pll)+4)<<29)
#define PORT_CLK_SEL_WRPLL1 (4<<29) #define PORT_CLK_SEL_WRPLL1 (4<<29)
#define PORT_CLK_SEL_WRPLL2 (5<<29) #define PORT_CLK_SEL_WRPLL2 (5<<29)
#define PORT_CLK_SEL_NONE (7<<29) #define PORT_CLK_SEL_NONE (7<<29)
...@@ -5962,7 +5988,10 @@ enum punit_power_well { ...@@ -5962,7 +5988,10 @@ enum punit_power_well {
#define LCPLL_CD_SOURCE_FCLK (1<<21) #define LCPLL_CD_SOURCE_FCLK (1<<21)
#define LCPLL_CD_SOURCE_FCLK_DONE (1<<19) #define LCPLL_CD_SOURCE_FCLK_DONE (1<<19)
#define D_COMP (MCHBAR_MIRROR_BASE_SNB + 0x5F0C) /* Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register,
* since on HSW we can't write to it using I915_WRITE. */
#define D_COMP_HSW (MCHBAR_MIRROR_BASE_SNB + 0x5F0C)
#define D_COMP_BDW 0x138144
#define D_COMP_RCOMP_IN_PROGRESS (1<<9) #define D_COMP_RCOMP_IN_PROGRESS (1<<9)
#define D_COMP_COMP_FORCE (1<<8) #define D_COMP_COMP_FORCE (1<<8)
#define D_COMP_COMP_DISABLE (1<<0) #define D_COMP_COMP_DISABLE (1<<0)
......
...@@ -137,6 +137,18 @@ static void hsw_crt_get_config(struct intel_encoder *encoder, ...@@ -137,6 +137,18 @@ static void hsw_crt_get_config(struct intel_encoder *encoder,
pipe_config->adjusted_mode.flags |= intel_crt_get_flags(encoder); pipe_config->adjusted_mode.flags |= intel_crt_get_flags(encoder);
} }
static void hsw_crt_pre_enable(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL already enabled\n");
I915_WRITE(SPLL_CTL,
SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC);
POSTING_READ(SPLL_CTL);
udelay(20);
}
/* Note: The caller is required to filter out dpms modes not supported by the /* Note: The caller is required to filter out dpms modes not supported by the
* platform. */ * platform. */
static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode) static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
...@@ -194,6 +206,20 @@ static void intel_disable_crt(struct intel_encoder *encoder) ...@@ -194,6 +206,20 @@ static void intel_disable_crt(struct intel_encoder *encoder)
intel_crt_set_dpms(encoder, DRM_MODE_DPMS_OFF); intel_crt_set_dpms(encoder, DRM_MODE_DPMS_OFF);
} }
static void hsw_crt_post_disable(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t val;
DRM_DEBUG_KMS("Disabling SPLL\n");
val = I915_READ(SPLL_CTL);
WARN_ON(!(val & SPLL_PLL_ENABLE));
I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
POSTING_READ(SPLL_CTL);
}
static void intel_enable_crt(struct intel_encoder *encoder) static void intel_enable_crt(struct intel_encoder *encoder)
{ {
struct intel_crt *crt = intel_encoder_to_crt(encoder); struct intel_crt *crt = intel_encoder_to_crt(encoder);
...@@ -289,8 +315,10 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder, ...@@ -289,8 +315,10 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
pipe_config->pipe_bpp = 24; pipe_config->pipe_bpp = 24;
/* FDI must always be 2.7 GHz */ /* FDI must always be 2.7 GHz */
if (HAS_DDI(dev)) if (HAS_DDI(dev)) {
pipe_config->ddi_pll_sel = PORT_CLK_SEL_SPLL;
pipe_config->port_clock = 135000 * 2; pipe_config->port_clock = 135000 * 2;
}
return true; return true;
} }
...@@ -860,6 +888,8 @@ void intel_crt_init(struct drm_device *dev) ...@@ -860,6 +888,8 @@ void intel_crt_init(struct drm_device *dev)
if (HAS_DDI(dev)) { if (HAS_DDI(dev)) {
crt->base.get_config = hsw_crt_get_config; crt->base.get_config = hsw_crt_get_config;
crt->base.get_hw_state = intel_ddi_get_hw_state; crt->base.get_hw_state = intel_ddi_get_hw_state;
crt->base.pre_enable = hsw_crt_pre_enable;
crt->base.post_disable = hsw_crt_post_disable;
} else { } else {
crt->base.get_config = intel_crt_get_config; crt->base.get_config = intel_crt_get_config;
crt->base.get_hw_state = intel_crt_get_hw_state; crt->base.get_hw_state = intel_crt_get_hw_state;
......
...@@ -277,7 +277,8 @@ void hsw_fdi_link_train(struct drm_crtc *crtc) ...@@ -277,7 +277,8 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
I915_WRITE(_FDI_RXA_CTL, rx_ctl_val); I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
/* Configure Port Clock Select */ /* Configure Port Clock Select */
I915_WRITE(PORT_CLK_SEL(PORT_E), intel_crtc->ddi_pll_sel); I915_WRITE(PORT_CLK_SEL(PORT_E), intel_crtc->config.ddi_pll_sel);
WARN_ON(intel_crtc->config.ddi_pll_sel != PORT_CLK_SEL_SPLL);
/* Start the training iterating through available voltages and emphasis, /* Start the training iterating through available voltages and emphasis,
* testing each value twice. */ * testing each value twice. */
...@@ -385,53 +386,6 @@ intel_ddi_get_crtc_encoder(struct drm_crtc *crtc) ...@@ -385,53 +386,6 @@ intel_ddi_get_crtc_encoder(struct drm_crtc *crtc)
return ret; return ret;
} }
void intel_ddi_put_crtc_pll(struct drm_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->dev->dev_private;
struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t val;
switch (intel_crtc->ddi_pll_sel) {
case PORT_CLK_SEL_SPLL:
plls->spll_refcount--;
if (plls->spll_refcount == 0) {
DRM_DEBUG_KMS("Disabling SPLL\n");
val = I915_READ(SPLL_CTL);
WARN_ON(!(val & SPLL_PLL_ENABLE));
I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
POSTING_READ(SPLL_CTL);
}
break;
case PORT_CLK_SEL_WRPLL1:
plls->wrpll1_refcount--;
if (plls->wrpll1_refcount == 0) {
DRM_DEBUG_KMS("Disabling WRPLL 1\n");
val = I915_READ(WRPLL_CTL1);
WARN_ON(!(val & WRPLL_PLL_ENABLE));
I915_WRITE(WRPLL_CTL1, val & ~WRPLL_PLL_ENABLE);
POSTING_READ(WRPLL_CTL1);
}
break;
case PORT_CLK_SEL_WRPLL2:
plls->wrpll2_refcount--;
if (plls->wrpll2_refcount == 0) {
DRM_DEBUG_KMS("Disabling WRPLL 2\n");
val = I915_READ(WRPLL_CTL2);
WARN_ON(!(val & WRPLL_PLL_ENABLE));
I915_WRITE(WRPLL_CTL2, val & ~WRPLL_PLL_ENABLE);
POSTING_READ(WRPLL_CTL2);
}
break;
}
WARN(plls->spll_refcount < 0, "Invalid SPLL refcount\n");
WARN(plls->wrpll1_refcount < 0, "Invalid WRPLL1 refcount\n");
WARN(plls->wrpll2_refcount < 0, "Invalid WRPLL2 refcount\n");
intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE;
}
#define LC_FREQ 2700 #define LC_FREQ 2700
#define LC_FREQ_2K (LC_FREQ * 2000) #define LC_FREQ_2K (LC_FREQ * 2000)
...@@ -592,9 +546,9 @@ static int intel_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv, ...@@ -592,9 +546,9 @@ static int intel_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
u32 wrpll; u32 wrpll;
wrpll = I915_READ(reg); wrpll = I915_READ(reg);
switch (wrpll & SPLL_PLL_REF_MASK) { switch (wrpll & WRPLL_PLL_REF_MASK) {
case SPLL_PLL_SSC: case WRPLL_PLL_SSC:
case SPLL_PLL_NON_SSC: case WRPLL_PLL_NON_SSC:
/* /*
* We could calculate spread here, but our checking * We could calculate spread here, but our checking
* code only cares about 5% accuracy, and spread is a max of * code only cares about 5% accuracy, and spread is a max of
...@@ -602,7 +556,7 @@ static int intel_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv, ...@@ -602,7 +556,7 @@ static int intel_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
*/ */
refclk = 135; refclk = 135;
break; break;
case SPLL_PLL_LCPLL: case WRPLL_PLL_LCPLL:
refclk = LC_FREQ; refclk = LC_FREQ;
break; break;
default: default:
...@@ -622,11 +576,10 @@ static void intel_ddi_clock_get(struct intel_encoder *encoder, ...@@ -622,11 +576,10 @@ static void intel_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config) struct intel_crtc_config *pipe_config)
{ {
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
enum port port = intel_ddi_get_encoder_port(encoder);
int link_clock = 0; int link_clock = 0;
u32 val, pll; u32 val, pll;
val = I915_READ(PORT_CLK_SEL(port)); val = pipe_config->ddi_pll_sel;
switch (val & PORT_CLK_SEL_MASK) { switch (val & PORT_CLK_SEL_MASK) {
case PORT_CLK_SEL_LCPLL_810: case PORT_CLK_SEL_LCPLL_810:
link_clock = 81000; link_clock = 81000;
...@@ -750,173 +703,37 @@ bool intel_ddi_pll_select(struct intel_crtc *intel_crtc) ...@@ -750,173 +703,37 @@ bool intel_ddi_pll_select(struct intel_crtc *intel_crtc)
{ {
struct drm_crtc *crtc = &intel_crtc->base; struct drm_crtc *crtc = &intel_crtc->base;
struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc); struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_i915_private *dev_priv = crtc->dev->dev_private;
struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
int type = intel_encoder->type; int type = intel_encoder->type;
enum pipe pipe = intel_crtc->pipe;
int clock = intel_crtc->config.port_clock; int clock = intel_crtc->config.port_clock;
intel_ddi_put_crtc_pll(crtc); intel_put_shared_dpll(intel_crtc);
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
switch (intel_dp->link_bw) {
case DP_LINK_BW_1_62:
intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
break;
case DP_LINK_BW_2_7:
intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
break;
case DP_LINK_BW_5_4:
intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
break;
default:
DRM_ERROR("Link bandwidth %d unsupported\n",
intel_dp->link_bw);
return false;
}
} else if (type == INTEL_OUTPUT_HDMI) { if (type == INTEL_OUTPUT_HDMI) {
uint32_t reg, val; struct intel_shared_dpll *pll;
uint32_t val;
unsigned p, n2, r2; unsigned p, n2, r2;
intel_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p); intel_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 | val = WRPLL_PLL_ENABLE | WRPLL_PLL_LCPLL |
WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) | WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
WRPLL_DIVIDER_POST(p); WRPLL_DIVIDER_POST(p);
if (val == I915_READ(WRPLL_CTL1)) { intel_crtc->config.dpll_hw_state.wrpll = val;
DRM_DEBUG_KMS("Reusing WRPLL 1 on pipe %c\n",
pipe_name(pipe));
reg = WRPLL_CTL1;
} else if (val == I915_READ(WRPLL_CTL2)) {
DRM_DEBUG_KMS("Reusing WRPLL 2 on pipe %c\n",
pipe_name(pipe));
reg = WRPLL_CTL2;
} else if (plls->wrpll1_refcount == 0) {
DRM_DEBUG_KMS("Using WRPLL 1 on pipe %c\n",
pipe_name(pipe));
reg = WRPLL_CTL1;
} else if (plls->wrpll2_refcount == 0) {
DRM_DEBUG_KMS("Using WRPLL 2 on pipe %c\n",
pipe_name(pipe));
reg = WRPLL_CTL2;
} else {
DRM_ERROR("No WRPLLs available!\n");
return false;
}
DRM_DEBUG_KMS("WRPLL: %dKHz refresh rate with p=%d, n2=%d r2=%d\n",
clock, p, n2, r2);
if (reg == WRPLL_CTL1) {
plls->wrpll1_refcount++;
intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL1;
} else {
plls->wrpll2_refcount++;
intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL2;
}
} else if (type == INTEL_OUTPUT_ANALOG) { pll = intel_get_shared_dpll(intel_crtc);
if (plls->spll_refcount == 0) { if (pll == NULL) {
DRM_DEBUG_KMS("Using SPLL on pipe %c\n", DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
pipe_name(pipe)); pipe_name(intel_crtc->pipe));
plls->spll_refcount++;
intel_crtc->ddi_pll_sel = PORT_CLK_SEL_SPLL;
} else {
DRM_ERROR("SPLL already in use\n");
return false; return false;
} }
} else { intel_crtc->config.ddi_pll_sel = PORT_CLK_SEL_WRPLL(pll->id);
WARN(1, "Invalid DDI encoder type %d\n", type);
return false;
} }
return true; return true;
} }
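With the shared-DPLL framework in charge, the HDMI path now precomputes the full WRPLL_CTL value into dpll_hw_state, lets intel_get_shared_dpll() match or claim a PLL, and derives the port clock select from the PLL id. A quick check of the PORT_CLK_SEL_WRPLL() packing introduced for that last step:

#include <stdint.h>
#include <stdio.h>

#define PORT_CLK_SEL_WRPLL(pll) (((uint32_t)(pll) + 4) << 29)

int main(void)
{
	/* PLL id 0 maps to the old PORT_CLK_SEL_WRPLL1 encoding (4 << 29),
	 * id 1 to WRPLL2 (5 << 29). */
	printf("WRPLL1 sel: 0x%08x\n", PORT_CLK_SEL_WRPLL(0)); /* 0x80000000 */
	printf("WRPLL2 sel: 0x%08x\n", PORT_CLK_SEL_WRPLL(1)); /* 0xa0000000 */
	return 0;
}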
/*
* To be called after intel_ddi_pll_select(). That one selects the PLL to be
* used, this one actually enables the PLL.
*/
void intel_ddi_pll_enable(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
int clock = crtc->config.port_clock;
uint32_t reg, cur_val, new_val;
int refcount;
const char *pll_name;
uint32_t enable_bit = (1 << 31);
unsigned int p, n2, r2;
BUILD_BUG_ON(enable_bit != SPLL_PLL_ENABLE);
BUILD_BUG_ON(enable_bit != WRPLL_PLL_ENABLE);
switch (crtc->ddi_pll_sel) {
case PORT_CLK_SEL_LCPLL_2700:
case PORT_CLK_SEL_LCPLL_1350:
case PORT_CLK_SEL_LCPLL_810:
/*
* LCPLL should always be enabled at this point of the mode set
* sequence, so nothing to do.
*/
return;
case PORT_CLK_SEL_SPLL:
pll_name = "SPLL";
reg = SPLL_CTL;
refcount = plls->spll_refcount;
new_val = SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz |
SPLL_PLL_SSC;
break;
case PORT_CLK_SEL_WRPLL1:
case PORT_CLK_SEL_WRPLL2:
if (crtc->ddi_pll_sel == PORT_CLK_SEL_WRPLL1) {
pll_name = "WRPLL1";
reg = WRPLL_CTL1;
refcount = plls->wrpll1_refcount;
} else {
pll_name = "WRPLL2";
reg = WRPLL_CTL2;
refcount = plls->wrpll2_refcount;
}
intel_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
new_val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 |
WRPLL_DIVIDER_REFERENCE(r2) |
WRPLL_DIVIDER_FEEDBACK(n2) | WRPLL_DIVIDER_POST(p);
break;
case PORT_CLK_SEL_NONE:
WARN(1, "Bad selected pll: PORT_CLK_SEL_NONE\n");
return;
default:
WARN(1, "Bad selected pll: 0x%08x\n", crtc->ddi_pll_sel);
return;
}
cur_val = I915_READ(reg);
WARN(refcount < 1, "Bad %s refcount: %d\n", pll_name, refcount);
if (refcount == 1) {
WARN(cur_val & enable_bit, "%s already enabled\n", pll_name);
I915_WRITE(reg, new_val);
POSTING_READ(reg);
udelay(20);
} else {
WARN((cur_val & enable_bit) == 0, "%s disabled\n", pll_name);
}
}
void intel_ddi_set_pipe_settings(struct drm_crtc *crtc) void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
{ {
struct drm_i915_private *dev_priv = crtc->dev->dev_private; struct drm_i915_private *dev_priv = crtc->dev->dev_private;
...@@ -995,7 +812,9 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc) ...@@ -995,7 +812,9 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
* eDP when not using the panel fitter, and when not * eDP when not using the panel fitter, and when not
* using motion blur mitigation (which we don't * using motion blur mitigation (which we don't
* support). */ * support). */
if (IS_HASWELL(dev) && intel_crtc->config.pch_pfit.enabled) if (IS_HASWELL(dev) &&
(intel_crtc->config.pch_pfit.enabled ||
intel_crtc->config.pch_pfit.force_thru))
temp |= TRANS_DDI_EDP_INPUT_A_ONOFF; temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
else else
temp |= TRANS_DDI_EDP_INPUT_A_ON; temp |= TRANS_DDI_EDP_INPUT_A_ON;
...@@ -1146,76 +965,6 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder, ...@@ -1146,76 +965,6 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
return false; return false;
} }
static uint32_t intel_ddi_get_crtc_pll(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
uint32_t temp, ret;
enum port port = I915_MAX_PORTS;
enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
pipe);
int i;
if (cpu_transcoder == TRANSCODER_EDP) {
port = PORT_A;
} else {
temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
temp &= TRANS_DDI_PORT_MASK;
for (i = PORT_B; i <= PORT_E; i++)
if (temp == TRANS_DDI_SELECT_PORT(i))
port = i;
}
if (port == I915_MAX_PORTS) {
WARN(1, "Pipe %c enabled on an unknown port\n",
pipe_name(pipe));
ret = PORT_CLK_SEL_NONE;
} else {
ret = I915_READ(PORT_CLK_SEL(port));
DRM_DEBUG_KMS("Pipe %c connected to port %c using clock "
"0x%08x\n", pipe_name(pipe), port_name(port),
ret);
}
return ret;
}
void intel_ddi_setup_hw_pll_state(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe;
struct intel_crtc *intel_crtc;
dev_priv->ddi_plls.spll_refcount = 0;
dev_priv->ddi_plls.wrpll1_refcount = 0;
dev_priv->ddi_plls.wrpll2_refcount = 0;
for_each_pipe(pipe) {
intel_crtc =
to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
if (!intel_crtc->active) {
intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE;
continue;
}
intel_crtc->ddi_pll_sel = intel_ddi_get_crtc_pll(dev_priv,
pipe);
switch (intel_crtc->ddi_pll_sel) {
case PORT_CLK_SEL_SPLL:
dev_priv->ddi_plls.spll_refcount++;
break;
case PORT_CLK_SEL_WRPLL1:
dev_priv->ddi_plls.wrpll1_refcount++;
break;
case PORT_CLK_SEL_WRPLL2:
dev_priv->ddi_plls.wrpll2_refcount++;
break;
}
}
}
void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
{
struct drm_crtc *crtc = &intel_crtc->base;
...@@ -1261,8 +1010,8 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
intel_edp_panel_on(intel_dp);
}
WARN_ON(crtc->ddi_pll_sel == PORT_CLK_SEL_NONE);
I915_WRITE(PORT_CLK_SEL(port), crtc->ddi_pll_sel);
WARN_ON(crtc->config.ddi_pll_sel == PORT_CLK_SEL_NONE);
I915_WRITE(PORT_CLK_SEL(port), crtc->config.ddi_pll_sel);
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
...@@ -1418,10 +1167,60 @@ int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
}
}
static void hsw_ddi_pll_enable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
I915_WRITE(WRPLL_CTL(pll->id), pll->hw_state.wrpll);
POSTING_READ(WRPLL_CTL(pll->id));
udelay(20);
}
static void hsw_ddi_pll_disable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
uint32_t val;
val = I915_READ(WRPLL_CTL(pll->id));
I915_WRITE(WRPLL_CTL(pll->id), val & ~WRPLL_PLL_ENABLE);
POSTING_READ(WRPLL_CTL(pll->id));
}
static bool hsw_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
struct intel_dpll_hw_state *hw_state)
{
uint32_t val;
if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_PLLS))
return false;
val = I915_READ(WRPLL_CTL(pll->id));
hw_state->wrpll = val;
return val & WRPLL_PLL_ENABLE;
}
static char *hsw_ddi_pll_names[] = {
"WRPLL 1",
"WRPLL 2",
};
void intel_ddi_pll_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t val = I915_READ(LCPLL_CTL);
int i;
dev_priv->num_shared_dpll = 2;
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
dev_priv->shared_dplls[i].id = i;
dev_priv->shared_dplls[i].name = hsw_ddi_pll_names[i];
dev_priv->shared_dplls[i].disable = hsw_ddi_pll_disable;
dev_priv->shared_dplls[i].enable = hsw_ddi_pll_enable;
dev_priv->shared_dplls[i].get_hw_state =
hsw_ddi_pll_get_hw_state;
}
/* The LCPLL register should be turned on by the BIOS. For now let's
* just check its state and print errors in case something is wrong.
...@@ -1705,6 +1504,9 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
intel_encoder->cloneable = 0;
intel_encoder->hot_plug = intel_ddi_hot_plug;
intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
dev_priv->hpd_irq_port[port] = intel_dig_port;
if (init_dp)
dp_connector = intel_ddi_init_dp_connector(intel_dig_port);
......
...@@ -1094,11 +1094,6 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
bool cur_state;
struct intel_dpll_hw_state hw_state;
if (HAS_PCH_LPT(dev_priv->dev)) {
DRM_DEBUG_DRIVER("LPT detected: skipping PCH PLL test\n");
return;
}
if (WARN (!pll,
"asserting DPLL %s with no DPLL\n", state_string(state)))
return;
...@@ -1514,9 +1509,6 @@ static void intel_reset_dpio(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (!IS_VALLEYVIEW(dev))
return;
if (IS_CHERRYVIEW(dev)) {
enum dpio_phy phy;
u32 val;
...@@ -1538,26 +1530,6 @@ static void intel_reset_dpio(struct drm_device *dev)
I915_WRITE(DISPLAY_PHY_CONTROL,
PHY_COM_LANE_RESET_DEASSERT(phy, val));
}
} else {
/*
* If DPIO has already been reset, e.g. by BIOS, just skip all
* this.
*/
if (I915_READ(DPIO_CTL) & DPIO_CMNRST)
return;
/*
* From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
* Need to assert and de-assert PHY SB reset by gating the
* common lane power, then un-gating it.
* Simply ungating isn't enough to reset the PHY enough to get
* ports and lanes running.
*/
__vlv_set_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC,
false);
__vlv_set_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC,
true);
}
}
...@@ -1837,12 +1809,14 @@ static void intel_enable_shared_dpll(struct intel_crtc *crtc)
}
WARN_ON(pll->on);
intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
DRM_DEBUG_KMS("enabling %s\n", pll->name); DRM_DEBUG_KMS("enabling %s\n", pll->name);
pll->enable(dev_priv, pll); pll->enable(dev_priv, pll);
pll->on = true; pll->on = true;
} }
static void intel_disable_shared_dpll(struct intel_crtc *crtc)
void intel_disable_shared_dpll(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
...@@ -1873,6 +1847,8 @@ static void intel_disable_shared_dpll(struct intel_crtc *crtc)
DRM_DEBUG_KMS("disabling %s\n", pll->name);
pll->disable(dev_priv, pll);
pll->on = false;
intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
}
static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
...@@ -2219,6 +2195,8 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
u32 alignment;
int ret;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
switch (obj->tiling_mode) {
case I915_TILING_NONE:
if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
...@@ -2275,6 +2253,8 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
{
WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
i915_gem_object_unpin_fence(obj);
i915_gem_object_unpin_from_display_plane(obj);
}
...@@ -2379,7 +2359,7 @@ static void intel_find_plane_obj(struct intel_crtc *intel_crtc,
struct drm_device *dev = intel_crtc->base.dev;
struct drm_crtc *c;
struct intel_crtc *i;
struct intel_framebuffer *fb;
struct drm_i915_gem_object *obj;
if (!intel_crtc->base.primary->fb)
return;
...@@ -2400,14 +2380,17 @@ static void intel_find_plane_obj(struct intel_crtc *intel_crtc,
if (c == &intel_crtc->base)
continue;
if (!i->active || !c->primary->fb)
if (!i->active)
continue;
fb = to_intel_framebuffer(c->primary->fb);
if (i915_gem_obj_ggtt_offset(fb->obj) == plane_config->base) {
obj = intel_fb_obj(c->primary->fb);
if (obj == NULL)
continue;
if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
drm_framebuffer_reference(c->primary->fb);
intel_crtc->base.primary->fb = c->primary->fb;
fb->obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
break;
}
}
...@@ -2420,16 +2403,12 @@ static void i9xx_update_primary_plane(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_framebuffer *intel_fb;
struct drm_i915_gem_object *obj;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
int plane = intel_crtc->plane;
unsigned long linear_offset;
u32 dspcntr;
u32 reg;
intel_fb = to_intel_framebuffer(fb);
obj = intel_fb->obj;
reg = DSPCNTR(plane);
dspcntr = I915_READ(reg);
/* Mask out pixel format bits in case we change it */
...@@ -2510,16 +2489,12 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_framebuffer *intel_fb;
struct drm_i915_gem_object *obj;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
int plane = intel_crtc->plane;
unsigned long linear_offset;
u32 dspcntr;
u32 reg;
intel_fb = to_intel_framebuffer(fb);
obj = intel_fb->obj;
reg = DSPCNTR(plane);
dspcntr = I915_READ(reg);
/* Mask out pixel format bits in case we change it */
...@@ -2650,7 +2625,7 @@ void intel_display_handle_reset(struct drm_device *dev)
static int
intel_finish_fb(struct drm_framebuffer *old_fb)
{
struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
struct drm_i915_gem_object *obj = intel_fb_obj(old_fb);
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
bool was_interruptible = dev_priv->mm.interruptible;
int ret;
...@@ -2697,8 +2672,9 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
struct drm_framebuffer *old_fb;
struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj;
struct drm_framebuffer *old_fb = crtc->primary->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct drm_i915_gem_object *old_obj = intel_fb_obj(old_fb);
int ret;
if (intel_crtc_has_pending_flip(crtc)) {
...@@ -2719,12 +2695,10 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
return -EINVAL;
}
old_fb = crtc->primary->fb;
mutex_lock(&dev->struct_mutex);
ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
if (ret == 0)
i915_gem_track_fb(to_intel_framebuffer(old_fb)->obj, obj,
i915_gem_track_fb(old_obj, obj,
INTEL_FRONTBUFFER_PRIMARY(pipe));
mutex_unlock(&dev->struct_mutex);
if (ret != 0) {
...@@ -2776,7 +2750,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
if (intel_crtc->active && old_fb != fb)
intel_wait_for_vblank(dev, intel_crtc->pipe);
mutex_lock(&dev->struct_mutex);
intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
intel_unpin_fb_obj(old_obj);
mutex_unlock(&dev->struct_mutex);
}
...@@ -3642,7 +3616,7 @@ static void lpt_pch_enable(struct drm_crtc *crtc)
lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
static void intel_put_shared_dpll(struct intel_crtc *crtc)
void intel_put_shared_dpll(struct intel_crtc *crtc)
{
struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
...@@ -3662,7 +3636,7 @@ static void intel_put_shared_dpll(struct intel_crtc *crtc)
crtc->config.shared_dpll = DPLL_ID_PRIVATE;
}
static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
...@@ -3915,30 +3889,6 @@ static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
*/
}
/**
* i9xx_fixup_plane - ugly workaround for G45 to fire up the hardware
* cursor plane briefly if not already running after enabling the display
* plane.
* This workaround avoids occasional blank screens when self refresh is
* enabled.
*/
static void
g4x_fixup_plane(struct drm_i915_private *dev_priv, enum pipe pipe)
{
u32 cntl = I915_READ(CURCNTR(pipe));
if ((cntl & CURSOR_MODE) == 0) {
u32 fw_bcl_self = I915_READ(FW_BLC_SELF);
I915_WRITE(FW_BLC_SELF, fw_bcl_self & ~FW_BLC_SELF_EN);
I915_WRITE(CURCNTR(pipe), CURSOR_MODE_64_ARGB_AX);
intel_wait_for_vblank(dev_priv->dev, pipe);
I915_WRITE(CURCNTR(pipe), cntl);
I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
I915_WRITE(FW_BLC_SELF, fw_bcl_self);
}
}
static void intel_crtc_enable_planes(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
...@@ -3951,9 +3901,6 @@ static void intel_crtc_enable_planes(struct drm_crtc *crtc)
intel_enable_primary_hw_plane(dev_priv, plane, pipe);
intel_enable_planes(crtc);
/* The fixup needs to happen before cursor is enabled */
if (IS_G4X(dev))
g4x_fixup_plane(dev_priv, pipe);
intel_crtc_update_cursor(crtc, true);
intel_crtc_dpms_overlay(intel_crtc, true);
...@@ -4128,6 +4075,9 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
if (intel_crtc->active)
return;
if (intel_crtc_to_shared_dpll(intel_crtc))
intel_enable_shared_dpll(intel_crtc);
if (intel_crtc->config.has_dp_encoder)
intel_dp_set_m_n(intel_crtc);
...@@ -4152,16 +4102,15 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
intel_crtc->active = true;
intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
if (intel_crtc->config.has_pch_encoder)
intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
if (intel_crtc->config.has_pch_encoder)
dev_priv->display.fdi_link_train(crtc);
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_enable)
encoder->pre_enable(encoder);
if (intel_crtc->config.has_pch_encoder) {
intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
dev_priv->display.fdi_link_train(crtc);
}
intel_ddi_enable_pipe_clock(intel_crtc);
ironlake_pfit_enable(intel_crtc);
...@@ -4299,22 +4248,25 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
intel_ddi_disable_pipe_clock(intel_crtc);
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->post_disable)
encoder->post_disable(encoder);
if (intel_crtc->config.has_pch_encoder) {
lpt_disable_pch_transcoder(dev_priv);
intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
intel_ddi_fdi_disable(crtc);
}
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->post_disable)
encoder->post_disable(encoder);
intel_crtc->active = false;
intel_update_watermarks(crtc);
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
mutex_unlock(&dev->struct_mutex);
if (intel_crtc_to_shared_dpll(intel_crtc))
intel_disable_shared_dpll(intel_crtc);
}
static void ironlake_crtc_off(struct drm_crtc *crtc)
...@@ -4323,10 +4275,6 @@ static void ironlake_crtc_off(struct drm_crtc *crtc)
intel_put_shared_dpll(intel_crtc);
}
static void haswell_crtc_off(struct drm_crtc *crtc)
{
intel_ddi_put_crtc_pll(crtc);
}
static void i9xx_pfit_enable(struct intel_crtc *crtc)
{
...@@ -4398,7 +4346,6 @@ static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
struct intel_encoder *intel_encoder;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
bool pfit_enabled = intel_crtc->config.pch_pfit.enabled;
unsigned long mask;
enum transcoder transcoder;
...@@ -4406,7 +4353,8 @@ static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
mask = BIT(POWER_DOMAIN_PIPE(pipe));
mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
if (pfit_enabled)
if (intel_crtc->config.pch_pfit.enabled ||
intel_crtc->config.pch_pfit.force_thru)
mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
for_each_encoder_on_crtc(dev, crtc, intel_encoder)
...@@ -4463,7 +4411,8 @@ static void modeset_update_crtc_power_domains(struct drm_device *dev)
intel_display_set_init_power(dev_priv, false);
}
int valleyview_get_vco(struct drm_i915_private *dev_priv)
/* returns HPLL frequency in kHz */
static int valleyview_get_vco(struct drm_i915_private *dev_priv)
{
int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
...@@ -4473,7 +4422,23 @@ int valleyview_get_vco(struct drm_i915_private *dev_priv)
CCK_FUSE_HPLL_FREQ_MASK;
mutex_unlock(&dev_priv->dpio_lock);
return vco_freq[hpll_freq];
return vco_freq[hpll_freq] * 1000;
}
static void vlv_update_cdclk(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
dev_priv->vlv_cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz",
dev_priv->vlv_cdclk_freq);
/*
* Program the gmbus_freq based on the cdclk frequency.
* BSpec erroneously claims we should aim for 4MHz, but
* in fact 1MHz is the correct frequency.
*/
I915_WRITE(GMBUSFREQ_VLV, dev_priv->vlv_cdclk_freq);
}
/* Adjust CDclk dividers to allow high res or save power if possible */
...@@ -4482,12 +4447,11 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val, cmd;
WARN_ON(valleyview_cur_cdclk(dev_priv) != dev_priv->vlv_cdclk_freq);
WARN_ON(dev_priv->display.get_display_clock_speed(dev) != dev_priv->vlv_cdclk_freq);
dev_priv->vlv_cdclk_freq = cdclk;
if (cdclk >= 320) /* jump to highest voltage for 400MHz too */
if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
cmd = 2;
else if (cdclk == 266)
else if (cdclk == 266667)
cmd = 1;
else
cmd = 0;
...@@ -4504,18 +4468,23 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
}
mutex_unlock(&dev_priv->rps.hw_lock);
if (cdclk == 400) {
if (cdclk == 400000) {
u32 divider, vco;
vco = valleyview_get_vco(dev_priv);
divider = ((vco << 1) / cdclk) - 1;
divider = DIV_ROUND_CLOSEST(vco << 1, cdclk) - 1;
mutex_lock(&dev_priv->dpio_lock);
/* adjust cdclk divider */
val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
val &= ~0xf;
val &= ~DISPLAY_FREQUENCY_VALUES;
val |= divider;
vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);
if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
DISPLAY_FREQUENCY_STATUS) == (divider << DISPLAY_FREQUENCY_STATUS_SHIFT),
50))
DRM_ERROR("timed out waiting for CDclk change\n");
mutex_unlock(&dev_priv->dpio_lock);
}
...@@ -4528,54 +4497,43 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
* For high bandwidth configs, we set a higher latency in the bunit
* so that the core display fetch happens in time to avoid underruns.
*/
if (cdclk == 400)
if (cdclk == 400000)
val |= 4500 / 250; /* 4.5 usec */
else
val |= 3000 / 250; /* 3.0 usec */
vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);
mutex_unlock(&dev_priv->dpio_lock);
/* Since we changed the CDclk, we need to update the GMBUSFREQ too */
intel_i2c_reset(dev);
vlv_update_cdclk(dev);
}
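The 400 MHz case above derives the CCK divider field from twice the HPLL rate. A standalone worked example of that arithmetic, with DIV_ROUND_CLOSEST modelled inline and an assumed 1.6 GHz HPLL (one of the fused rates from the vco_freq table):

#include <stdio.h>

static int div_round_closest(long long n, long long d)
{
	return (int)((n + d / 2) / d);	/* positive operands only */
}

int main(void)
{
	int vco = 1600000;		/* 1.6 GHz HPLL, in kHz */
	int cdclk = 400000;		/* target CDclk, in kHz */
	int divider = div_round_closest((long long)vco << 1, cdclk) - 1;

	/* (1600000 << 1) / 400000 = 8, so the field is programmed with 7 */
	printf("divider field = %d\n", divider);
	return 0;
}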
int valleyview_cur_cdclk(struct drm_i915_private *dev_priv)
{
int cur_cdclk, vco;
int divider;
vco = valleyview_get_vco(dev_priv);
mutex_lock(&dev_priv->dpio_lock);
divider = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
mutex_unlock(&dev_priv->dpio_lock);
divider &= 0xf;
cur_cdclk = (vco << 1) / (divider + 1);
return cur_cdclk;
} }
static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
int max_pixclk)
{
int vco = valleyview_get_vco(dev_priv);
int freq_320 = (vco << 1) % 320000 != 0 ? 333333 : 320000;
/*
* Really only a few cases to deal with, as only 4 CDclks are supported:
* 200MHz
* 267MHz
* 320MHz
* 320/333MHz (depends on HPLL freq)
* 400MHz
* So we check to see whether we're above 90% of the lower bin and
* adjust if needed.
*
* We seem to get an unstable or solid color picture at 200MHz.
* Not sure what's wrong. For now use 200MHz only when all pipes
* are off.
*/
if (max_pixclk > 288000) {
return 400;
} else if (max_pixclk > 240000) {
return 320;
} else
return 266;
/* Looks like the 200MHz CDclk freq doesn't work on some configs */
if (max_pixclk > freq_320*9/10)
return 400000;
else if (max_pixclk > 266667*9/10)
return freq_320;
else if (max_pixclk > 0)
return 266667;
else
return 200000;
}
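The bin selection above is easy to check by hand. A standalone model (calc_cdclk is an illustrative stand-in, not the driver function) shows how the middle bin flips between 320 and 333 MHz with the HPLL rate:

#include <stdio.h>

static int calc_cdclk(int vco, int max_pixclk)
{
	/* middle bin depends on whether 2*vco divides evenly by 320 MHz */
	int freq_320 = ((vco << 1) % 320000 != 0) ? 333333 : 320000;

	if (max_pixclk > freq_320 * 9 / 10)
		return 400000;
	else if (max_pixclk > 266667 * 9 / 10)
		return freq_320;
	else if (max_pixclk > 0)
		return 266667;
	else
		return 200000;	/* all pipes off */
}

int main(void)
{
	/* 1.6 GHz HPLL: 3200000 % 320000 == 0, middle bin is 320 MHz */
	printf("%d\n", calc_cdclk(1600000, 250000));	/* -> 320000 */
	/* 2.0 GHz HPLL: 4000000 % 320000 != 0, middle bin becomes 333 MHz */
	printf("%d\n", calc_cdclk(2000000, 250000));	/* -> 333333 */
	return 0;
}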
/* compute the max pixel clock for new configuration */
...@@ -4829,6 +4787,16 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
if (IS_GEN2(dev))
intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
/*
* Vblank time updates from the shadow to live plane control register
* are blocked if the memory self-refresh mode is active at that
* moment. So to make sure the plane gets truly disabled, disable
* first the self-refresh mode. The self-refresh enable bit in turn
* will be checked/applied by the HW only at the next frame start
* event which is after the vblank start event, so we need to have a
* wait-for-vblank between disabling the plane and the pipe.
*/
intel_set_memory_cxsr(dev_priv, false);
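The constraint in the comment above is subtle, so here is a toy model of it: while self-refresh is active the shadow-to-live plane update is blocked entirely, and it only latches at the next frame start, which is why cxsr is dropped first and a vblank wait is needed before the pipe goes down. All names below are illustrative, not driver API:

#include <stdio.h>

struct toy_pipe { int cxsr_on; int plane_live; int plane_shadow; };

static void write_plane(struct toy_pipe *p, int on) { p->plane_shadow = on; }

static void frame_start(struct toy_pipe *p)
{
	if (!p->cxsr_on)	/* active self-refresh blocks the update */
		p->plane_live = p->plane_shadow;
}

int main(void)
{
	struct toy_pipe p = { .cxsr_on = 1, .plane_live = 1, .plane_shadow = 1 };

	p.cxsr_on = 0;		/* 1: disable self-refresh first */
	write_plane(&p, 0);	/* 2: plane disable lands in the shadow reg */
	frame_start(&p);	/* 3: wait-for-vblank lets it latch */
	printf("plane_live=%d\n", p.plane_live);	/* now really off */
	return 0;
}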
intel_crtc_disable_planes(crtc);
for_each_encoder_on_crtc(dev, crtc, encoder)
...@@ -4837,9 +4805,10 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
/*
* On gen2 planes are double buffered but the pipe isn't, so we must
* wait for planes to fully turn off before disabling the pipe.
* We also need to wait on all gmch platforms because of the
* self-refresh mode constraint explained above.
*/
if (IS_GEN2(dev))
intel_wait_for_vblank(dev, pipe);
intel_wait_for_vblank(dev, pipe);
intel_disable_pipe(dev_priv, pipe);
...@@ -4956,7 +4925,7 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_connector *connector;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *old_obj;
struct drm_i915_gem_object *old_obj = intel_fb_obj(crtc->primary->fb);
enum pipe pipe = to_intel_crtc(crtc)->pipe;
/* crtc should still be enabled when we disable it. */
...@@ -4971,7 +4940,6 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
assert_pipe_disabled(dev->dev_private, pipe);
if (crtc->primary->fb) {
old_obj = to_intel_framebuffer(crtc->primary->fb)->obj;
mutex_lock(&dev->struct_mutex);
intel_unpin_fb_obj(old_obj);
i915_gem_track_fb(old_obj, NULL,
...@@ -5253,9 +5221,11 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
if (HAS_IPS(dev))
hsw_compute_ips_config(crtc, pipe_config);
/* XXX: PCH clock sharing is done in ->mode_set, so make sure the old
* clock survives for now. */
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
/*
* XXX: PCH/WRPLL clock sharing is done in ->mode_set, so make sure the
* old clock survives for now.
*/
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev) || HAS_DDI(dev))
pipe_config->shared_dpll = crtc->config.shared_dpll;
if (pipe_config->has_pch_encoder)
...@@ -5266,7 +5236,22 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
static int valleyview_get_display_clock_speed(struct drm_device *dev)
{
return 400000; /* FIXME */
struct drm_i915_private *dev_priv = dev->dev_private;
int vco = valleyview_get_vco(dev_priv);
u32 val;
int divider;
mutex_lock(&dev_priv->dpio_lock);
val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
mutex_unlock(&dev_priv->dpio_lock);
divider = val & DISPLAY_FREQUENCY_VALUES;
WARN((val & DISPLAY_FREQUENCY_STATUS) !=
(divider << DISPLAY_FREQUENCY_STATUS_SHIFT),
"cdclk change in progress\n");
return DIV_ROUND_CLOSEST(vco << 1, divider + 1);
}
static int i945_get_display_clock_speed(struct drm_device *dev)
...@@ -6217,8 +6202,8 @@ static void i9xx_get_plane_config(struct intel_crtc *crtc,
aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
plane_config->tiled);
plane_config->size = ALIGN(crtc->base.primary->fb->pitches[0] *
aligned_height, PAGE_SIZE);
plane_config->size = PAGE_ALIGN(crtc->base.primary->fb->pitches[0] *
aligned_height);
DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
pipe, plane, crtc->base.primary->fb->width,
...@@ -7237,8 +7222,8 @@ static void ironlake_get_plane_config(struct intel_crtc *crtc,
aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
plane_config->tiled);
plane_config->size = ALIGN(crtc->base.primary->fb->pitches[0] *
aligned_height, PAGE_SIZE);
plane_config->size = PAGE_ALIGN(crtc->base.primary->fb->pitches[0] *
aligned_height);
DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
pipe, plane, crtc->base.primary->fb->width,
...@@ -7255,6 +7240,10 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t tmp;
if (!intel_display_power_enabled(dev_priv,
POWER_DOMAIN_PIPE(crtc->pipe)))
return false;
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = DPLL_ID_PRIVATE;
...@@ -7329,7 +7318,6 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
struct intel_crtc *crtc;
for_each_intel_crtc(dev, crtc)
...@@ -7337,9 +7325,9 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
pipe_name(crtc->pipe));
WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
WARN(plls->spll_refcount, "SPLL enabled\n");
WARN(plls->wrpll1_refcount, "WRPLL1 enabled\n");
WARN(plls->wrpll2_refcount, "WRPLL2 enabled\n");
WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
WARN(I915_READ(WRPLL_CTL1) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
WARN(I915_READ(WRPLL_CTL2) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
"CPU PWM1 enabled\n");
...@@ -7360,6 +7348,16 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
WARN(!dev_priv->pm.irqs_disabled, "IRQs enabled\n");
}
static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
if (IS_HASWELL(dev))
return I915_READ(D_COMP_HSW);
else
return I915_READ(D_COMP_BDW);
}
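hsw_read_dcomp() above and its write counterpart below hide a platform split: Haswell reaches D_COMP through the pcode mailbox while Broadwell uses a plain MMIO register, so all access is funneled through one accessor pair. A standalone model of that pattern (all names illustrative, not driver API):

#include <stdio.h>

enum platform { HSW, BDW };

static unsigned d_comp_hsw, d_comp_bdw;	/* stand-ins for the two paths */

static unsigned dcomp_read(enum platform p)
{
	return p == HSW ? d_comp_hsw : d_comp_bdw;
}

static void dcomp_write(enum platform p, unsigned val)
{
	if (p == HSW)
		d_comp_hsw = val;	/* a pcode mailbox write in the driver */
	else
		d_comp_bdw = val;	/* a plain MMIO write in the driver */
}

int main(void)
{
	dcomp_write(BDW, 0x1);
	printf("BDW D_COMP = 0x%x\n", dcomp_read(BDW));
	return 0;
}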
static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
{
struct drm_device *dev = dev_priv->dev;
...@@ -7368,12 +7366,12 @@ static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
mutex_lock(&dev_priv->rps.hw_lock);
if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
val))
DRM_ERROR("Failed to disable D_COMP\n"); DRM_ERROR("Failed to write to D_COMP\n");
mutex_unlock(&dev_priv->rps.hw_lock); mutex_unlock(&dev_priv->rps.hw_lock);
} else { } else {
I915_WRITE(D_COMP, val); I915_WRITE(D_COMP_BDW, val);
POSTING_READ(D_COMP_BDW);
} }
POSTING_READ(D_COMP);
} }
/*
...@@ -7411,12 +7409,13 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1))
DRM_ERROR("LCPLL still locked\n");
val = I915_READ(D_COMP);
val = hsw_read_dcomp(dev_priv);
val |= D_COMP_COMP_DISABLE;
hsw_write_dcomp(dev_priv, val);
ndelay(100);
if (wait_for((I915_READ(D_COMP) & D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
1))
DRM_ERROR("D_COMP RCOMP still in progress\n");
if (allow_power_down) {
...@@ -7465,7 +7464,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
POSTING_READ(LCPLL_CTL);
}
val = I915_READ(D_COMP);
val = hsw_read_dcomp(dev_priv);
val |= D_COMP_COMP_FORCE;
val &= ~D_COMP_COMP_DISABLE;
hsw_write_dcomp(dev_priv, val);
...@@ -7571,13 +7570,59 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc,
if (!intel_ddi_pll_select(intel_crtc))
return -EINVAL;
intel_ddi_pll_enable(intel_crtc);
intel_crtc->lowfreq_avail = false;
return 0;
}
static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_shared_dpll *pll;
enum port port;
uint32_t tmp;
tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
switch (pipe_config->ddi_pll_sel) {
case PORT_CLK_SEL_WRPLL1:
pipe_config->shared_dpll = DPLL_ID_WRPLL1;
break;
case PORT_CLK_SEL_WRPLL2:
pipe_config->shared_dpll = DPLL_ID_WRPLL2;
break;
}
if (pipe_config->shared_dpll >= 0) {
pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];
WARN_ON(!pll->get_hw_state(dev_priv, pll,
&pipe_config->dpll_hw_state));
}
/*
* Haswell has only FDI/PCH transcoder A. It is which is connected to
* DDI E. So just check whether this pipe is wired to DDI E and whether
* the PCH transcoder is on.
*/
if ((port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
pipe_config->has_pch_encoder = true;
tmp = I915_READ(FDI_RX_CTL(PIPE_A));
pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
FDI_DP_PORT_WIDTH_SHIFT) + 1;
ironlake_get_fdi_m_n_config(crtc, pipe_config);
}
}
static bool haswell_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
{
...@@ -7623,22 +7668,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
if (!(tmp & PIPECONF_ENABLE))
return false;
/*
* Haswell has only FDI/PCH transcoder A. It is which is connected to
* DDI E. So just check whether this pipe is wired to DDI E and whether
* the PCH transcoder is on.
*/
tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
if ((tmp & TRANS_DDI_PORT_MASK) == TRANS_DDI_SELECT_PORT(PORT_E) &&
I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
pipe_config->has_pch_encoder = true;
tmp = I915_READ(FDI_RX_CTL(PIPE_A));
pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
FDI_DP_PORT_WIDTH_SHIFT) + 1;
ironlake_get_fdi_m_n_config(crtc, pipe_config);
}
haswell_get_ddi_port_state(crtc, pipe_config);
intel_get_pipe_timings(crtc, pipe_config);
...@@ -8326,7 +8356,7 @@ static u32
intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
{
u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
return ALIGN(pitch * mode->vdisplay, PAGE_SIZE);
return PAGE_ALIGN(pitch * mode->vdisplay);
}
static struct drm_framebuffer *
...@@ -9447,6 +9477,9 @@ static bool use_mmio_flip(struct intel_engine_cs *ring,
* So using MMIO flips there would disrupt this mechanism.
*/
if (ring == NULL)
return true;
if (INTEL_INFO(ring->dev)->gen < 5)
return false;
...@@ -9595,7 +9628,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *old_fb = crtc->primary->fb;
struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
struct intel_unpin_work *work;
...@@ -9603,6 +9636,14 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
unsigned long flags;
int ret;
/*
* drm_mode_page_flip_ioctl() should already catch this, but double
* check to be safe. In the future we may enable pageflipping from
* a disabled primary plane.
*/
if (WARN_ON(intel_fb_obj(old_fb) == NULL))
return -EBUSY;
/* Can't change pixel format via MI display flips. */
if (fb->pixel_format != crtc->primary->fb->pixel_format)
return -EINVAL;
...@@ -9625,7 +9666,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
work->event = event;
work->crtc = crtc;
work->old_fb_obj = to_intel_framebuffer(old_fb)->obj;
work->old_fb_obj = intel_fb_obj(old_fb);
INIT_WORK(&work->work, intel_unpin_work_fn);
ret = drm_crtc_vblank_get(crtc);
...@@ -9670,6 +9711,11 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
if (IS_VALLEYVIEW(dev)) {
ring = &dev_priv->ring[BCS];
if (obj->tiling_mode != work->old_fb_obj->tiling_mode)
/* vlv: DISPLAY_FLIP fails to change tiling */
ring = NULL;
} else if (IS_IVYBRIDGE(dev)) {
ring = &dev_priv->ring[BCS];
} else if (INTEL_INFO(dev)->gen >= 7) {
ring = obj->ring;
if (ring == NULL || ring->id != RCS)
...@@ -10401,11 +10447,14 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_I(double_wide);
PIPE_CONF_CHECK_X(ddi_pll_sel);
PIPE_CONF_CHECK_I(shared_dpll);
PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
PIPE_CONF_CHECK_I(pipe_bpp);
...@@ -10762,10 +10811,9 @@ static int __intel_set_mode(struct drm_crtc *crtc,
* on the DPLL.
*/
for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
struct drm_framebuffer *old_fb;
struct drm_i915_gem_object *old_obj = NULL;
struct drm_i915_gem_object *obj =
to_intel_framebuffer(fb)->obj;
struct drm_framebuffer *old_fb = crtc->primary->fb;
struct drm_i915_gem_object *old_obj = intel_fb_obj(old_fb);
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
mutex_lock(&dev->struct_mutex);
ret = intel_pin_and_fence_fb_obj(dev,
...@@ -10776,11 +10824,8 @@ static int __intel_set_mode(struct drm_crtc *crtc,
mutex_unlock(&dev->struct_mutex);
goto done;
}
old_fb = crtc->primary->fb;
if (old_fb) {
old_obj = to_intel_framebuffer(old_fb)->obj;
if (old_fb)
intel_unpin_fb_obj(old_obj);
}
i915_gem_track_fb(old_obj, obj,
INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
mutex_unlock(&dev->struct_mutex);
...@@ -11266,18 +11311,15 @@ static const struct drm_crtc_funcs intel_crtc_funcs = {
.page_flip = intel_crtc_page_flip,
};
static void intel_cpu_pll_init(struct drm_device *dev)
{
if (HAS_DDI(dev))
intel_ddi_pll_init(dev);
}
static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
struct intel_dpll_hw_state *hw_state)
{
uint32_t val;
if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_PLLS))
return false;
val = I915_READ(PCH_DPLL(pll->id));
hw_state->dpll = val;
hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
...@@ -11359,7 +11401,9 @@ static void intel_shared_dpll_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
if (HAS_DDI(dev))
intel_ddi_pll_init(dev);
else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
ibx_pch_dpll_init(dev);
else
dev_priv->num_shared_dpll = 0;
...@@ -11398,9 +11442,11 @@ intel_primary_plane_disable(struct drm_plane *plane)
intel_disable_primary_hw_plane(dev_priv, intel_plane->plane,
intel_plane->pipe);
disable_unpin:
i915_gem_track_fb(to_intel_framebuffer(plane->fb)->obj, NULL,
INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
intel_unpin_fb_obj(to_intel_framebuffer(plane->fb)->obj);
mutex_lock(&dev->struct_mutex);
i915_gem_track_fb(intel_fb_obj(plane->fb), NULL,
INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
intel_unpin_fb_obj(intel_fb_obj(plane->fb));
mutex_unlock(&dev->struct_mutex);
plane->fb = NULL;
return 0;
...@@ -11417,7 +11463,8 @@ intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_plane *intel_plane = to_intel_plane(plane);
struct drm_i915_gem_object *obj, *old_obj = NULL;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
struct drm_rect dest = {
/* integer pixels */
.x1 = crtc_x,
...@@ -11449,10 +11496,6 @@ intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
if (ret)
return ret;
if (plane->fb)
old_obj = to_intel_framebuffer(plane->fb)->obj;
obj = to_intel_framebuffer(fb)->obj;
/*
* If the CRTC isn't enabled, we're just pinning the framebuffer,
* updating the fb pointer, and returning without touching the
...@@ -11460,6 +11503,8 @@ intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
* turn on the display with all planes setup as desired.
*/
if (!crtc->enabled) {
mutex_lock(&dev->struct_mutex);
/*
* If we already called setplane while the crtc was disabled,
* we may have an fb pinned; unpin it.
...@@ -11471,7 +11516,10 @@ intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
/* Pin and return without programming hardware */
return intel_pin_and_fence_fb_obj(dev, obj, NULL);
ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
mutex_unlock(&dev->struct_mutex);
return ret;
}
intel_crtc_wait_for_pending_flips(crtc);
...@@ -11483,14 +11531,18 @@ intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
* because plane->fb still gets set and pinned.
*/
if (!visible) {
mutex_lock(&dev->struct_mutex);
/*
* Try to pin the new fb first so that we can bail out if we
* fail.
*/
if (plane->fb != fb) {
ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
if (ret)
if (ret) {
mutex_unlock(&dev->struct_mutex);
return ret;
}
}
i915_gem_track_fb(old_obj, obj,
...@@ -11506,6 +11558,8 @@ intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
if (plane->fb)
intel_unpin_fb_obj(old_obj);
mutex_unlock(&dev->struct_mutex);
return 0;
}
...@@ -12159,7 +12213,7 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
dev_priv->display.crtc_enable = haswell_crtc_enable;
dev_priv->display.crtc_disable = haswell_crtc_disable;
dev_priv->display.off = haswell_crtc_off;
dev_priv->display.off = ironlake_crtc_off;
dev_priv->display.update_primary_plane =
ironlake_update_primary_plane;
} else if (HAS_PCH_SPLIT(dev)) {
...@@ -12426,6 +12480,9 @@ void intel_modeset_init_hw(struct drm_device *dev)
{
intel_prepare_ddi(dev);
if (IS_VALLEYVIEW(dev))
vlv_update_cdclk(dev);
intel_init_clock_gating(dev);
intel_reset_dpio(dev);
...@@ -12502,7 +12559,6 @@ void intel_modeset_init(struct drm_device *dev)
intel_init_dpio(dev);
intel_reset_dpio(dev);
intel_cpu_pll_init(dev);
intel_shared_dpll_init(dev);
/* Just disable it once at startup */
...@@ -12811,10 +12867,6 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
crtc->active ? "enabled" : "disabled");
}
/* FIXME: Smash this into the new shared dpll infrastructure. */
if (HAS_DDI(dev))
intel_ddi_setup_hw_pll_state(dev);
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
...@@ -12828,6 +12880,9 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
DRM_DEBUG_KMS("%s hw state readout: refcount %i, on %i\n", DRM_DEBUG_KMS("%s hw state readout: refcount %i, on %i\n",
pll->name, pll->refcount, pll->on); pll->name, pll->refcount, pll->on);
if (pll->refcount)
intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
}
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
...@@ -12945,7 +13000,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
void intel_modeset_gem_init(struct drm_device *dev)
{
struct drm_crtc *c;
struct intel_framebuffer *fb;
struct drm_i915_gem_object *obj;
mutex_lock(&dev->struct_mutex);
intel_init_gt_powersave(dev);
...@@ -12962,11 +13017,11 @@ void intel_modeset_gem_init(struct drm_device *dev)
*/
mutex_lock(&dev->struct_mutex);
for_each_crtc(dev, c) {
if (!c->primary->fb)
obj = intel_fb_obj(c->primary->fb);
if (obj == NULL)
continue;
fb = to_intel_framebuffer(c->primary->fb);
if (intel_pin_and_fence_fb_obj(dev, fb->obj, NULL)) {
if (intel_pin_and_fence_fb_obj(dev, obj, NULL)) {
DRM_ERROR("failed to pin boot fb on pipe %d\n",
to_intel_crtc(c)->pipe);
drm_framebuffer_unreference(c->primary->fb);
......
...@@ -745,6 +745,22 @@ intel_dp_connector_unregister(struct intel_connector *intel_connector)
intel_connector_unregister(intel_connector);
}
static void
hsw_dp_set_ddi_pll_sel(struct intel_crtc_config *pipe_config, int link_bw)
{
switch (link_bw) {
case DP_LINK_BW_1_62:
pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
break;
case DP_LINK_BW_2_7:
pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
break;
case DP_LINK_BW_5_4:
pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
break;
}
}
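The three case labels above are the raw DisplayPort link-bandwidth codes (0x06, 0x0a, 0x14); a code times 0.27 Gbps gives the per-lane bit rate, and each rate pairs with the LCPLL tap running at half that rate, since the link clocks data on both edges. A standalone sketch of the mapping, not driver code:

#include <stdio.h>

int main(void)
{
	/* the DP_LINK_BW_* codes used in the switch above */
	static const int codes[] = { 0x06, 0x0a, 0x14 };

	for (int i = 0; i < 3; i++) {
		int gbps_x100 = codes[i] * 27;	/* code * 0.27 Gbps per lane */
		/* the LCPLL tap runs at half the bit rate (dual-edge clocking) */
		printf("link_bw 0x%02x -> %d.%02d Gbps/lane -> LCPLL %d MHz\n",
		       codes[i], gbps_x100 / 100, gbps_x100 % 100,
		       gbps_x100 * 10 / 2);
	}
	return 0;
}

Running it prints the 810/1350/2700 MHz taps the function selects.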
 static void
 intel_dp_set_clock(struct intel_encoder *encoder,
 		   struct intel_crtc_config *pipe_config, int link_bw)
@@ -756,8 +772,6 @@ intel_dp_set_clock(struct intel_encoder *encoder,
 	if (IS_G4X(dev)) {
 		divisor = gen4_dpll;
 		count = ARRAY_SIZE(gen4_dpll);
-	} else if (IS_HASWELL(dev)) {
-		/* Haswell has special-purpose DP DDI clocks. */
 	} else if (HAS_PCH_SPLIT(dev)) {
 		divisor = pch_dpll;
 		count = ARRAY_SIZE(pch_dpll);
@@ -928,7 +942,10 @@ intel_dp_compute_config(struct intel_encoder *encoder,
 				&pipe_config->dp_m2_n2);
 	}
 
-	intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
+	if (HAS_DDI(dev))
+		hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
+	else
+		intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
 
 	return true;
 }
@@ -1316,8 +1333,6 @@ void intel_edp_panel_off(struct intel_dp *intel_dp)
 
 	DRM_DEBUG_KMS("Turn eDP power off\n");
 
-	edp_wait_backlight_off(intel_dp);
-
 	WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
 
 	pp = ironlake_get_pp_control(intel_dp);
@@ -1353,6 +1368,9 @@ void intel_edp_backlight_on(struct intel_dp *intel_dp)
 		return;
 
 	DRM_DEBUG_KMS("\n");
+
+	intel_panel_enable_backlight(intel_dp->attached_connector);
+
 	/*
 	 * If we enable the backlight right away following a panel power
 	 * on, we may see slight flicker as the panel syncs with the eDP
@@ -1367,8 +1385,6 @@ void intel_edp_backlight_on(struct intel_dp *intel_dp)
 
 	I915_WRITE(pp_ctrl_reg, pp);
 	POSTING_READ(pp_ctrl_reg);
-
-	intel_panel_enable_backlight(intel_dp->attached_connector);
 }
 
 void intel_edp_backlight_off(struct intel_dp *intel_dp)
@@ -1381,8 +1397,6 @@ void intel_edp_backlight_off(struct intel_dp *intel_dp)
 	if (!is_edp(intel_dp))
 		return;
 
-	intel_panel_disable_backlight(intel_dp->attached_connector);
-
 	DRM_DEBUG_KMS("\n");
 
 	pp = ironlake_get_pp_control(intel_dp);
 	pp &= ~EDP_BLC_ENABLE;
@@ -1392,6 +1406,10 @@ void intel_edp_backlight_off(struct intel_dp *intel_dp)
 	I915_WRITE(pp_ctrl_reg, pp);
 	POSTING_READ(pp_ctrl_reg);
 
 	intel_dp->last_backlight_off = jiffies;
+	edp_wait_backlight_off(intel_dp);
+
+	intel_panel_disable_backlight(intel_dp->attached_connector);
 }
 
 static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
@@ -1751,7 +1769,7 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc = dig_port->base.base.crtc;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct drm_i915_gem_object *obj = to_intel_framebuffer(crtc->primary->fb)->obj;
+	struct drm_i915_gem_object *obj = intel_fb_obj(crtc->primary->fb);
 	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
 
 	dev_priv->psr.source_ok = false;
@@ -1784,7 +1802,6 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
 		return false;
 	}
 
-	obj = to_intel_framebuffer(crtc->primary->fb)->obj;
 	if (obj->tiling_mode != I915_TILING_X ||
 	    obj->fence_reg == I915_FENCE_REG_NONE) {
 		DRM_DEBUG_KMS("PSR condition failed: fb not tiled or fenced\n");
@@ -3815,6 +3832,22 @@ intel_dp_hot_plug(struct intel_encoder *intel_encoder)
 	intel_dp_check_link_status(intel_dp);
 }
 
+bool
+intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
+{
+	struct intel_dp *intel_dp = &intel_dig_port->dp;
+
+	if (long_hpd)
+		return true;
+
+	/*
+	 * we'll check the link status via the normal hot plug path later -
+	 * but for short hpds we should check it now
+	 */
+	intel_dp_check_link_status(intel_dp);
+	return false;
+}
+
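The boolean return value is the whole contract here: true tells the caller a long pulse still needs the full hotplug/reprobe path, false means the short pulse was already handled inline by the link-status check. A minimal standalone sketch of that contract, with hypothetical stand-ins for the real port and handler (the actual caller, the digital-port irq work, is added elsewhere in this series and not shown in these hunks):

#include <stdbool.h>
#include <stdio.h>

struct fake_dig_port {
	bool (*hpd_pulse)(struct fake_dig_port *, bool long_hpd);
};

static bool fake_dp_hpd_pulse(struct fake_dig_port *port, bool long_hpd)
{
	if (long_hpd)
		return true;	/* caller must run the full hotplug path */
	printf("short pulse: link status checked inline\n");
	return false;		/* fully handled here */
}

int main(void)
{
	struct fake_dig_port port = { .hpd_pulse = fake_dp_hpd_pulse };

	if (port.hpd_pulse(&port, false))
		printf("would schedule a full connector reprobe\n");
	if (port.hpd_pulse(&port, true))
		printf("would schedule a full connector reprobe\n");
	return 0;
}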
 /* Return which DP Port should be selected for Transcoder DP control */
 int
 intel_trans_dp_port_sel(struct drm_crtc *crtc)
@@ -4387,6 +4420,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
 void
 intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_digital_port *intel_dig_port;
 	struct intel_encoder *intel_encoder;
 	struct drm_encoder *encoder;
@@ -4443,6 +4477,9 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
 	intel_encoder->cloneable = 0;
 	intel_encoder->hot_plug = intel_dp_hot_plug;
 
+	intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
+	dev_priv->hpd_irq_port[port] = intel_dig_port;
+
 	if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
 		drm_encoder_cleanup(encoder);
 		kfree(intel_dig_port);
...
@@ -307,6 +307,9 @@ struct intel_crtc_config {
 	/* Selected dpll when shared or DPLL_ID_PRIVATE. */
 	enum intel_dpll_id shared_dpll;
 
+	/* PORT_CLK_SEL for DDI ports. */
+	uint32_t ddi_pll_sel;
+
 	/* Actual register state of the dpll, for shared dpll cross-checking. */
 	struct intel_dpll_hw_state dpll_hw_state;
@@ -338,6 +341,7 @@ struct intel_crtc_config {
 		u32 pos;
 		u32 size;
 		bool enabled;
+		bool force_thru;
 	} pch_pfit;
 
 	/* FDI configuration, only valid if has_pch_encoder is set. */
@@ -398,8 +402,6 @@ struct intel_crtc {
 	struct intel_crtc_config *new_config;
 	bool new_enabled;
 
-	uint32_t ddi_pll_sel;
-
 	/* reset counter value when the last flip was submitted */
 	unsigned int reset_counter;
@@ -485,6 +487,7 @@ struct cxsr_latency {
 #define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
 #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
 #define to_intel_plane(x) container_of(x, struct intel_plane, base)
+#define intel_fb_obj(x) (x ? to_intel_framebuffer(x)->obj : NULL)
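intel_fb_obj() folds the "is there a framebuffer at all?" test into the accessor by propagating NULL, which is what lets the intel_modeset_gem_init and intel_update_fbc hunks replace two-step checks with a single obj == NULL test. The same pattern as a self-contained sketch with stand-in types:

#include <stddef.h>
#include <stdio.h>

struct gem_object { int id; };
struct framebuffer { struct gem_object *obj; };

/* NULL fb yields NULL obj, so callers test the object only once */
#define fb_obj(fb) ((fb) ? (fb)->obj : NULL)

int main(void)
{
	struct gem_object bo = { .id = 1 };
	struct framebuffer fb = { .obj = &bo };
	struct framebuffer *maybe_fb[] = { &fb, NULL };

	for (int i = 0; i < 2; i++) {
		struct gem_object *obj = fb_obj(maybe_fb[i]);
		if (obj == NULL)	/* covers the "no fb bound" case too */
			continue;
		printf("pinning obj %d\n", obj->id);
	}
	return 0;
}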
 
 struct intel_hdmi {
 	u32 hdmi_reg;
@@ -567,6 +570,7 @@ struct intel_digital_port {
 	u32 saved_port_bits;
 	struct intel_dp dp;
 	struct intel_hdmi hdmi;
+	bool (*hpd_pulse)(struct intel_digital_port *, bool);
 };
 
 static inline int
@@ -706,10 +710,7 @@ void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
 				       enum transcoder cpu_transcoder);
 void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
 void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
-void intel_ddi_setup_hw_pll_state(struct drm_device *dev);
 bool intel_ddi_pll_select(struct intel_crtc *crtc);
-void intel_ddi_pll_enable(struct intel_crtc *crtc);
-void intel_ddi_put_crtc_pll(struct drm_crtc *crtc);
 void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
 void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
 bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
@@ -722,7 +723,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
 const char *intel_output_name(int output);
 bool intel_has_pending_fb_unpin(struct drm_device *dev);
 int intel_pch_rawclk(struct drm_device *dev);
-int valleyview_cur_cdclk(struct drm_i915_private *dev_priv);
 void intel_mark_busy(struct drm_device *dev);
 void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
 			     struct intel_engine_cs *ring);
@@ -793,12 +793,18 @@ __intel_framebuffer_create(struct drm_device *dev,
 void intel_prepare_page_flip(struct drm_device *dev, int plane);
 void intel_finish_page_flip(struct drm_device *dev, int pipe);
 void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
+
+/* shared dpll functions */
 struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
 void assert_shared_dpll(struct drm_i915_private *dev_priv,
 			struct intel_shared_dpll *pll,
 			bool state);
 #define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
 #define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
+struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc);
+void intel_put_shared_dpll(struct intel_crtc *crtc);
+
+/* modesetting asserts */
 void assert_pll(struct drm_i915_private *dev_priv,
 		enum pipe pipe, bool state);
 #define assert_pll_enabled(d, p) assert_pll(d, p, true)
@@ -831,7 +837,6 @@ void hsw_disable_ips(struct intel_crtc *crtc);
 void intel_display_set_init_power(struct drm_i915_private *dev, bool enable);
 enum intel_display_power_domain
 intel_display_port_power_domain(struct intel_encoder *intel_encoder);
-int valleyview_get_vco(struct drm_i915_private *dev_priv);
 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
 				 struct intel_crtc_config *pipe_config);
 int intel_format_to_fourcc(int format);
@@ -852,6 +857,8 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
 bool intel_dp_compute_config(struct intel_encoder *encoder,
 			     struct intel_crtc_config *pipe_config);
 bool intel_dp_is_edp(struct drm_device *dev, enum port port);
+bool intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port,
+			bool long_hpd);
 void intel_edp_backlight_on(struct intel_dp *intel_dp);
 void intel_edp_backlight_off(struct intel_dp *intel_dp);
 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp);
@@ -863,7 +870,6 @@ void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate);
 void intel_edp_psr_exit(struct drm_device *dev);
 void intel_edp_psr_init(struct drm_device *dev);
 
-
 /* intel_dsi.c */
 void intel_dsi_init(struct drm_device *dev);
@@ -1005,8 +1011,7 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
 void intel_init_runtime_pm(struct drm_i915_private *dev_priv);
 void intel_fini_runtime_pm(struct drm_i915_private *dev_priv);
 void ilk_wm_get_hw_state(struct drm_device *dev);
-void __vlv_set_power_well(struct drm_i915_private *dev_priv,
-			  enum punit_power_well power_well_id, bool enable);
 
 /* intel_sdvo.c */
 bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob);
...
@@ -107,7 +107,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
 			  sizes->surface_depth);
 
 	size = mode_cmd.pitches[0] * mode_cmd.height;
-	size = ALIGN(size, PAGE_SIZE);
+	size = PAGE_ALIGN(size);
 	obj = i915_gem_object_create_stolen(dev, size);
 	if (obj == NULL)
 		obj = i915_gem_alloc_object(dev, size);
...
@@ -34,11 +34,6 @@
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 
-enum disp_clk {
-	CDCLK,
-	CZCLK
-};
-
 struct gmbus_port {
 	const char *name;
 	int reg;
@@ -63,60 +58,11 @@ to_intel_gmbus(struct i2c_adapter *i2c)
 	return container_of(i2c, struct intel_gmbus, adapter);
 }
 
-static int get_disp_clk_div(struct drm_i915_private *dev_priv,
-			    enum disp_clk clk)
-{
-	u32 reg_val;
-	int clk_ratio;
-
-	reg_val = I915_READ(CZCLK_CDCLK_FREQ_RATIO);
-
-	if (clk == CDCLK)
-		clk_ratio =
-			((reg_val & CDCLK_FREQ_MASK) >> CDCLK_FREQ_SHIFT) + 1;
-	else
-		clk_ratio = (reg_val & CZCLK_FREQ_MASK) + 1;
-
-	return clk_ratio;
-}
-
-static void gmbus_set_freq(struct drm_i915_private *dev_priv)
-{
-	int vco, gmbus_freq = 0, cdclk_div;
-
-	BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));
-
-	vco = valleyview_get_vco(dev_priv);
-
-	/* Get the CDCLK divide ratio */
-	cdclk_div = get_disp_clk_div(dev_priv, CDCLK);
-
-	/*
-	 * Program the gmbus_freq based on the cdclk frequency.
-	 * BSpec erroneously claims we should aim for 4MHz, but
-	 * in fact 1MHz is the correct frequency.
-	 */
-	if (cdclk_div)
-		gmbus_freq = (vco << 1) / cdclk_div;
-
-	if (WARN_ON(gmbus_freq == 0))
-		return;
-
-	I915_WRITE(GMBUSFREQ_VLV, gmbus_freq);
-}
-
 void
 intel_i2c_reset(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	/*
-	 * In BIOS-less system, program the correct gmbus frequency
-	 * before reading edid.
-	 */
-	if (IS_VALLEYVIEW(dev))
-		gmbus_set_freq(dev_priv);
-
 	I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0);
 	I915_WRITE(dev_priv->gpio_mmio_base + GMBUS4, 0);
 }
...
@@ -51,6 +51,7 @@ struct intel_lvds_encoder {
 
 	bool is_dual_link;
 	u32 reg;
+	u32 a3_power;
 
 	struct intel_lvds_connector *attached_connector;
 };
@@ -71,8 +72,13 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
+	enum intel_display_power_domain power_domain;
 	u32 tmp;
 
+	power_domain = intel_display_port_power_domain(encoder);
+	if (!intel_display_power_enabled(dev_priv, power_domain))
+		return false;
+
 	tmp = I915_READ(lvds_encoder->reg);
 
 	if (!(tmp & LVDS_PORT_EN))
@@ -165,8 +171,11 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
 
 	/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
 	 * appropriately here, but we need to look more thoroughly into how
-	 * panels behave in the two modes.
+	 * panels behave in the two modes. For now, let's just maintain the
+	 * value we got from the BIOS.
 	 */
+	temp &= ~LVDS_A3_POWER_MASK;
+	temp |= lvds_encoder->a3_power;
 
 	/* Set the dithering flag on LVDS as needed, note that there is no
 	 * special lvds dither control bit on pch-split platforms, dithering is
@@ -264,7 +273,6 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
 				      struct intel_crtc_config *pipe_config)
 {
 	struct drm_device *dev = intel_encoder->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_lvds_encoder *lvds_encoder =
 		to_lvds_encoder(&intel_encoder->base);
 	struct intel_connector *intel_connector =
@@ -279,8 +287,7 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
 		return false;
 	}
 
-	if ((I915_READ(lvds_encoder->reg) & LVDS_A3_POWER_MASK) ==
-	    LVDS_A3_POWER_UP)
+	if (lvds_encoder->a3_power == LVDS_A3_POWER_UP)
 		lvds_bpp = 8*3;
 	else
 		lvds_bpp = 6*3;
@@ -1081,6 +1088,9 @@ void intel_lvds_init(struct drm_device *dev)
 	DRM_DEBUG_KMS("detected %s-link lvds configuration\n",
 		      lvds_encoder->is_dual_link ? "dual" : "single");
 
+	lvds_encoder->a3_power = I915_READ(lvds_encoder->reg) &
+				 LVDS_A3_POWER_MASK;
+
 	/*
 	 * Unlock registers and just
 	 * leave them unlocked
...
@@ -93,8 +93,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc)
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_framebuffer *fb = crtc->primary->fb;
-	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-	struct drm_i915_gem_object *obj = intel_fb->obj;
+	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int cfb_pitch;
 	int i;
@@ -150,8 +149,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc)
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_framebuffer *fb = crtc->primary->fb;
-	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-	struct drm_i915_gem_object *obj = intel_fb->obj;
+	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	u32 dpfc_ctl;
 
@@ -222,16 +220,26 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc)
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_framebuffer *fb = crtc->primary->fb;
-	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-	struct drm_i915_gem_object *obj = intel_fb->obj;
+	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	u32 dpfc_ctl;
 
 	dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
 	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
-		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
-	else
-		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
+		dev_priv->fbc.threshold++;
+
+	switch (dev_priv->fbc.threshold) {
+	case 4:
+	case 3:
+		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
+		break;
+	case 2:
+		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
+		break;
+	case 1:
+		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
+		break;
+	}
+
 	dpfc_ctl |= DPFC_CTL_FENCE_EN;
 	if (IS_GEN5(dev))
 		dpfc_ctl |= obj->fence_reg;
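The switch maps dev_priv->fbc.threshold, which the stolen-memory compression setup raises when it has to fall back to a smaller CFB, onto a DPFC compression limit; a 16bpp framebuffer starts one step stricter, preserving the old cpp==2 -> LIMIT_2X behaviour. A worked standalone version of just that mapping (the strings are labels, not register values):

#include <stdio.h>

static const char *dpfc_limit(int threshold, int bytes_per_pixel)
{
	if (bytes_per_pixel == 2)	/* 16bpp starts one step stricter */
		threshold++;

	switch (threshold) {
	case 4:
	case 3:
		return "LIMIT_4X";
	case 2:
		return "LIMIT_2X";
	case 1:
		return "LIMIT_1X";
	default:
		return "invalid";
	}
}

int main(void)
{
	for (int t = 1; t <= 3; t++)
		printf("threshold %d: 32bpp -> %s, 16bpp -> %s\n",
		       t, dpfc_limit(t, 4), dpfc_limit(t, 2));
	return 0;
}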
@@ -278,16 +286,27 @@ static void gen7_enable_fbc(struct drm_crtc *crtc)
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_framebuffer *fb = crtc->primary->fb;
-	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-	struct drm_i915_gem_object *obj = intel_fb->obj;
+	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	u32 dpfc_ctl;
 
 	dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
 	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
-		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
-	else
-		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
+		dev_priv->fbc.threshold++;
+
+	switch (dev_priv->fbc.threshold) {
+	case 4:
+	case 3:
+		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
+		break;
+	case 2:
+		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
+		break;
+	case 1:
+		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
+		break;
+	}
+
 	dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
 
 	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
@@ -462,7 +481,6 @@ void intel_update_fbc(struct drm_device *dev)
 	struct drm_crtc *crtc = NULL, *tmp_crtc;
 	struct intel_crtc *intel_crtc;
 	struct drm_framebuffer *fb;
-	struct intel_framebuffer *intel_fb;
 	struct drm_i915_gem_object *obj;
 	const struct drm_display_mode *adjusted_mode;
 	unsigned int max_width, max_height;
@@ -507,8 +525,7 @@ void intel_update_fbc(struct drm_device *dev)
 
 	intel_crtc = to_intel_crtc(crtc);
 	fb = crtc->primary->fb;
-	intel_fb = to_intel_framebuffer(fb);
-	obj = intel_fb->obj;
+	obj = intel_fb_obj(fb);
 	adjusted_mode = &intel_crtc->config.adjusted_mode;
 
 	if (i915.enable_fbc < 0) {
@@ -566,7 +583,8 @@ void intel_update_fbc(struct drm_device *dev)
 	if (in_dbg_master())
 		goto out_disable;
 
-	if (i915_gem_stolen_setup_compression(dev, intel_fb->obj->base.size)) {
+	if (i915_gem_stolen_setup_compression(dev, obj->base.size,
+					      drm_format_plane_cpp(fb->pixel_format, 0))) {
 		if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL))
 			DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
 		goto out_disable;
@@ -792,12 +810,33 @@ static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
 	return NULL;
 }
 
-static void pineview_disable_cxsr(struct drm_device *dev)
+void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_device *dev = dev_priv->dev;
+	u32 val;
 
-	/* deactivate cxsr */
-	I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
+	if (IS_VALLEYVIEW(dev)) {
+		I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
+	} else if (IS_G4X(dev) || IS_CRESTLINE(dev)) {
+		I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
+	} else if (IS_PINEVIEW(dev)) {
+		val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN;
+		val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0;
+		I915_WRITE(DSPFW3, val);
+	} else if (IS_I945G(dev) || IS_I945GM(dev)) {
+		val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
+			       _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
+		I915_WRITE(FW_BLC_SELF, val);
+	} else if (IS_I915GM(dev)) {
+		val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
+			       _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
+		I915_WRITE(INSTPM, val);
+	} else {
+		return;
+	}
+
+	DRM_DEBUG_KMS("memory self-refresh is %s\n",
+		      enable ? "enabled" : "disabled");
 }
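Every watermark function below uses this helper the same way: if the self-refresh watermark can't be computed, cxsr is forced off before the FW_BLC/DSPFW registers are touched; if it can, cxsr is re-enabled only after the new levels are written. The shared calling pattern, reduced to a runnable sketch (set_memory_cxsr and compute_srwm are stand-ins for the real helpers):

#include <stdbool.h>
#include <stdio.h>

static void set_memory_cxsr(bool enable)
{
	printf("memory self-refresh is %s\n", enable ? "enabled" : "disabled");
}

static bool compute_srwm(int *srwm)
{
	*srwm = 8;	/* pretend the single-pipe SR watermark fits */
	return true;
}

int main(void)
{
	int srwm = 0;
	bool cxsr_enabled = compute_srwm(&srwm);

	if (!cxsr_enabled)
		set_memory_cxsr(false);	/* off before touching the wm registers */

	printf("programming watermark registers (SR %d)\n", srwm);

	if (cxsr_enabled)
		set_memory_cxsr(true);	/* on only after the new levels are latched */
	return 0;
}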
 
 /*
@@ -1036,7 +1075,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
 					 dev_priv->fsb_freq, dev_priv->mem_freq);
 	if (!latency) {
 		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
-		pineview_disable_cxsr(dev);
+		intel_set_memory_cxsr(dev_priv, false);
 		return;
 	}
@@ -1087,13 +1126,9 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
 		I915_WRITE(DSPFW3, reg);
 		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
 
-		/* activate cxsr */
-		I915_WRITE(DSPFW3,
-			   I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
-		DRM_DEBUG_KMS("Self-refresh is enabled\n");
+		intel_set_memory_cxsr(dev_priv, true);
 	} else {
-		pineview_disable_cxsr(dev);
-		DRM_DEBUG_KMS("Self-refresh is disabled\n");
+		intel_set_memory_cxsr(dev_priv, false);
 	}
 }
@@ -1319,6 +1354,7 @@ static void valleyview_update_wm(struct drm_crtc *crtc)
 	int plane_sr, cursor_sr;
 	int ignore_plane_sr, ignore_cursor_sr;
 	unsigned int enabled = 0;
+	bool cxsr_enabled;
 
 	vlv_update_drain_latency(dev);
 
@@ -1345,10 +1381,10 @@ static void valleyview_update_wm(struct drm_crtc *crtc)
 			     &valleyview_wm_info,
 			     &valleyview_cursor_wm_info,
 			     &ignore_plane_sr, &cursor_sr)) {
-		I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN);
+		cxsr_enabled = true;
 	} else {
-		I915_WRITE(FW_BLC_SELF_VLV,
-			   I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN);
+		cxsr_enabled = false;
+		intel_set_memory_cxsr(dev_priv, false);
 		plane_sr = cursor_sr = 0;
 	}
 
@@ -1368,6 +1404,9 @@ static void valleyview_update_wm(struct drm_crtc *crtc)
 	I915_WRITE(DSPFW3,
 		   (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
 		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
+
+	if (cxsr_enabled)
+		intel_set_memory_cxsr(dev_priv, true);
 }
 
 static void g4x_update_wm(struct drm_crtc *crtc)
@@ -1378,6 +1417,7 @@ static void g4x_update_wm(struct drm_crtc *crtc)
 	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
 	int plane_sr, cursor_sr;
 	unsigned int enabled = 0;
+	bool cxsr_enabled;
 
 	if (g4x_compute_wm0(dev, PIPE_A,
 			    &g4x_wm_info, latency_ns,
@@ -1397,10 +1437,10 @@ static void g4x_update_wm(struct drm_crtc *crtc)
 			     &g4x_wm_info,
 			     &g4x_cursor_wm_info,
 			     &plane_sr, &cursor_sr)) {
-		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+		cxsr_enabled = true;
 	} else {
-		I915_WRITE(FW_BLC_SELF,
-			   I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
+		cxsr_enabled = false;
+		intel_set_memory_cxsr(dev_priv, false);
 		plane_sr = cursor_sr = 0;
 	}
 
@@ -1421,6 +1461,9 @@ static void g4x_update_wm(struct drm_crtc *crtc)
 	I915_WRITE(DSPFW3,
 		   (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
 		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
+
+	if (cxsr_enabled)
+		intel_set_memory_cxsr(dev_priv, true);
 }
 
 static void i965_update_wm(struct drm_crtc *unused_crtc)
@@ -1430,6 +1473,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
 	struct drm_crtc *crtc;
 	int srwm = 1;
 	int cursor_sr = 16;
+	bool cxsr_enabled;
 
 	/* Calc sr entries for one plane configs */
 	crtc = single_enabled_crtc(dev);
@@ -1471,13 +1515,11 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
 		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
 			      "cursor %d\n", srwm, cursor_sr);
 
-		if (IS_CRESTLINE(dev))
-			I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+		cxsr_enabled = true;
 	} else {
+		cxsr_enabled = false;
 		/* Turn off self refresh if both pipes are enabled */
-		if (IS_CRESTLINE(dev))
-			I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
-				   & ~FW_BLC_SELF_EN);
+		intel_set_memory_cxsr(dev_priv, false);
 	}
 
 	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
@@ -1489,6 +1531,9 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
 	I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
 	/* update cursor SR watermark */
 	I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
+
+	if (cxsr_enabled)
+		intel_set_memory_cxsr(dev_priv, true);
 }
 
 static void i9xx_update_wm(struct drm_crtc *unused_crtc)
@@ -1548,12 +1593,12 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
 	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
 
 	if (IS_I915GM(dev) && enabled) {
-		struct intel_framebuffer *fb;
+		struct drm_i915_gem_object *obj;
 
-		fb = to_intel_framebuffer(enabled->primary->fb);
+		obj = intel_fb_obj(enabled->primary->fb);
 
 		/* self-refresh seems busted with untiled */
-		if (fb->obj->tiling_mode == I915_TILING_NONE)
+		if (obj->tiling_mode == I915_TILING_NONE)
 			enabled = NULL;
 	}
 
@@ -1563,10 +1608,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
 	cwm = 2;
 
 	/* Play safe and disable self-refresh before adjusting watermarks. */
-	if (IS_I945G(dev) || IS_I945GM(dev))
-		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
-	else if (IS_I915GM(dev))
-		I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_SELF_EN));
+	intel_set_memory_cxsr(dev_priv, false);
 
 	/* Calc sr entries for one plane configs */
 	if (HAS_FW_BLC(dev) && enabled) {
@@ -1612,17 +1654,8 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
 	I915_WRITE(FW_BLC, fwater_lo);
 	I915_WRITE(FW_BLC2, fwater_hi);
 
-	if (HAS_FW_BLC(dev)) {
-		if (enabled) {
-			if (IS_I945G(dev) || IS_I945GM(dev))
-				I915_WRITE(FW_BLC_SELF,
-					   FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
-			else if (IS_I915GM(dev))
-				I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_SELF_EN));
-			DRM_DEBUG_KMS("memory self refresh enabled\n");
-		} else
-			DRM_DEBUG_KMS("memory self refresh disabled\n");
-	}
+	if (enabled)
+		intel_set_memory_cxsr(dev_priv, true);
 }
 
 static void i845_update_wm(struct drm_crtc *unused_crtc)
@@ -3150,6 +3183,9 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
 	if (val < dev_priv->rps.max_freq_softlimit)
 		mask |= GEN6_PM_RP_UP_THRESHOLD;
 
+	mask |= dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED);
+	mask &= dev_priv->pm_rps_events;
+
 	/* IVB and SNB hard hangs on looping batchbuffer
 	 * if GEN6_PM_UP_EI_EXPIRED is masked.
 	 */
@@ -3493,15 +3529,23 @@ static void gen8_enable_rps(struct drm_device *dev)
 	for_each_ring(ring, dev_priv, unused)
 		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
 	I915_WRITE(GEN6_RC_SLEEP, 0);
-	I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
+	if (IS_BROADWELL(dev))
+		I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
+	else
+		I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
 
 	/* 3: Enable RC6 */
 	if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
 		rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
 	intel_print_rc6_info(dev, rc6_mask);
-	I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
-				    GEN6_RC_CTL_EI_MODE(1) |
-				    rc6_mask);
+	if (IS_BROADWELL(dev))
+		I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
+				GEN7_RC_CTL_TO_MODE |
+				rc6_mask);
+	else
+		I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
+				GEN6_RC_CTL_EI_MODE(1) |
+				rc6_mask);
 
 	/* 4 Program defaults and thresholds for RPS*/
 	I915_WRITE(GEN6_RPNSWREQ,
@@ -4078,6 +4122,7 @@ static void valleyview_enable_rps(struct drm_device *dev)
 	I915_WRITE(GEN6_RP_DOWN_EI, 350000);
 
 	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
+	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 0xf4240);
 
 	I915_WRITE(GEN6_RP_CONTROL,
 		   GEN6_RP_MEDIA_TURBO |
@@ -4098,9 +4143,11 @@ static void valleyview_enable_rps(struct drm_device *dev)
 
 	/* allows RC6 residency counter to work */
 	I915_WRITE(VLV_COUNTER_CONTROL,
-		   _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
+		   _MASKED_BIT_ENABLE(VLV_MEDIA_RC0_COUNT_EN |
+				      VLV_RENDER_RC0_COUNT_EN |
 				      VLV_MEDIA_RC6_COUNT_EN |
 				      VLV_RENDER_RC6_COUNT_EN));
+
 	if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
 		rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
@@ -5328,7 +5375,7 @@ static void gen8_init_clock_gating(struct drm_device *dev)
 	I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_BWGTLB_DISABLE));
 
 	I915_WRITE(_3D_CHICKEN3,
-		   _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2));
+		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2)));
 
 	I915_WRITE(COMMON_SLICE_CHICKEN2,
 		   _MASKED_BIT_ENABLE(GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE));
@@ -5563,10 +5610,6 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
 	}
 	DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
 
-	dev_priv->vlv_cdclk_freq = valleyview_cur_cdclk(dev_priv);
-	DRM_DEBUG_DRIVER("Current CD clock rate: %d MHz",
-			 dev_priv->vlv_cdclk_freq);
-
 	I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
 
 	/* WaDisableEarlyCull:vlv */
@@ -5982,34 +6025,13 @@ static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
 	return true;
 }
 
-void __vlv_set_power_well(struct drm_i915_private *dev_priv,
-			  enum punit_power_well power_well_id, bool enable)
+static void vlv_set_power_well(struct drm_i915_private *dev_priv,
+			       struct i915_power_well *power_well, bool enable)
 {
-	struct drm_device *dev = dev_priv->dev;
+	enum punit_power_well power_well_id = power_well->data;
 	u32 mask;
 	u32 state;
 	u32 ctrl;
-	enum pipe pipe;
-
-	if (power_well_id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
-		if (enable) {
-			/*
-			 * Enable the CRI clock source so we can get at the
-			 * display and the reference clock for VGA
-			 * hotplug / manual detection.
-			 */
-			I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
-				   DPLL_REFA_CLK_ENABLE_VLV |
-				   DPLL_INTEGRATED_CRI_CLK_VLV);
-			udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
-		} else {
-			for_each_pipe(pipe)
-				assert_pll_disabled(dev_priv, pipe);
-			/* Assert common reset */
-			I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) &
-				   ~DPIO_CMNRST);
-		}
-	}
 
 	mask = PUNIT_PWRGT_MASK(power_well_id);
 	state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
@@ -6037,28 +6059,6 @@ void __vlv_set_power_well(struct drm_i915_private *dev_priv,
 
 out:
 	mutex_unlock(&dev_priv->rps.hw_lock);
-
-	/*
-	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
-	 * 6.	De-assert cmn_reset/side_reset. Same as VLV X0.
-	 *	a.	GUnit 0x2110 bit[0] set to 1 (def 0)
-	 *	b.	The other bits such as sfr settings / modesel may all
-	 *		be set to 0.
-	 *
-	 * This should only be done on init and resume from S3 with
-	 * both PLLs disabled, or we risk losing DPIO and PLL
-	 * synchronization.
-	 */
-	if (power_well_id == PUNIT_POWER_WELL_DPIO_CMN_BC && enable)
-		I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
-}
-
-static void vlv_set_power_well(struct drm_i915_private *dev_priv,
-			       struct i915_power_well *power_well, bool enable)
-{
-	enum punit_power_well power_well_id = power_well->data;
-
-	__vlv_set_power_well(dev_priv, power_well_id, enable);
 }
 
 static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
@@ -6150,6 +6150,53 @@ static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
 	vlv_set_power_well(dev_priv, power_well, false);
 }
 
+static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
+					   struct i915_power_well *power_well)
+{
+	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
+
+	/*
+	 * Enable the CRI clock source so we can get at the
+	 * display and the reference clock for VGA
+	 * hotplug / manual detection.
+	 */
+	I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
+		   DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
+	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
+
+	vlv_set_power_well(dev_priv, power_well, true);
+
+	/*
+	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
+	 * 6.	De-assert cmn_reset/side_reset. Same as VLV X0.
+	 *	a.	GUnit 0x2110 bit[0] set to 1 (def 0)
+	 *	b.	The other bits such as sfr settings / modesel may all
+	 *		be set to 0.
+	 *
+	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
+	 * synchronization.
+	 */
+	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
+}
+
+static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
+					    struct i915_power_well *power_well)
+{
+	struct drm_device *dev = dev_priv->dev;
+	enum pipe pipe;
+
+	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
+
+	for_each_pipe(pipe)
+		assert_pll_disabled(dev_priv, pipe);
+
+	/* Assert common reset */
+	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);
+
+	vlv_set_power_well(dev_priv, power_well, false);
+}
+
 static void check_power_well_state(struct drm_i915_private *dev_priv,
 				   struct i915_power_well *power_well)
 {
@@ -6299,6 +6346,7 @@ EXPORT_SYMBOL_GPL(i915_get_cdclk_freq);
 	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |	\
 	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |	\
 	BIT(POWER_DOMAIN_PORT_CRT) |		\
+	BIT(POWER_DOMAIN_PLLS) |		\
 	BIT(POWER_DOMAIN_INIT))
 #define HSW_DISPLAY_POWER_DOMAINS (				\
 	(POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) |	\
@@ -6398,6 +6446,13 @@ static const struct i915_power_well_ops vlv_display_power_well_ops = {
 	.is_enabled = vlv_power_well_enabled,
 };
 
+static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
+	.sync_hw = vlv_power_well_sync_hw,
+	.enable = vlv_dpio_cmn_power_well_enable,
+	.disable = vlv_dpio_cmn_power_well_disable,
+	.is_enabled = vlv_power_well_enabled,
+};
+
 static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
 	.sync_hw = vlv_power_well_sync_hw,
 	.enable = vlv_power_well_enable,
@@ -6458,10 +6513,25 @@ static struct i915_power_well vlv_power_wells[] = {
 		.name = "dpio-common",
 		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
 		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
-		.ops = &vlv_dpio_power_well_ops,
+		.ops = &vlv_dpio_cmn_power_well_ops,
 	},
 };
 
+static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
+						 enum punit_power_well power_well_id)
+{
+	struct i915_power_domains *power_domains = &dev_priv->power_domains;
+	struct i915_power_well *power_well;
+	int i;
+
+	for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
+		if (power_well->data == power_well_id)
+			return power_well;
+	}
+
+	return NULL;
+}
+
 #define set_power_wells(power_domains, __power_wells) ({		\
 	(power_domains)->power_wells = (__power_wells);			\
 	(power_domains)->power_well_count = ARRAY_SIZE(__power_wells);	\
@@ -6512,11 +6582,50 @@ static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
 	mutex_unlock(&power_domains->lock);
 }
 
+static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
+{
+	struct i915_power_well *cmn =
+		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
+	struct i915_power_well *disp2d =
+		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);
+
+	/* nothing to do if common lane is already off */
+	if (!cmn->ops->is_enabled(dev_priv, cmn))
+		return;
+
+	/* If the display might be already active skip this */
+	if (disp2d->ops->is_enabled(dev_priv, disp2d) &&
+	    I915_READ(DPIO_CTL) & DPIO_CMNRST)
+		return;
+
+	DRM_DEBUG_KMS("toggling display PHY side reset\n");
+
+	/* cmnlane needs DPLL registers */
+	disp2d->ops->enable(dev_priv, disp2d);
+
+	/*
+	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
+	 * Need to assert and de-assert PHY SB reset by gating the
+	 * common lane power, then un-gating it.
+	 * Simply ungating isn't enough to reset the PHY enough to get
+	 * ports and lanes running.
+	 */
+	cmn->ops->disable(dev_priv, cmn);
+}
+
 void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
 {
+	struct drm_device *dev = dev_priv->dev;
 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
 
 	power_domains->initializing = true;
+
+	if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
+		mutex_lock(&power_domains->lock);
+		vlv_cmnlane_wa(dev_priv);
+		mutex_unlock(&power_domains->lock);
+	}
+
 	/* For now, we need the power well to be always enabled. */
 	intel_display_set_init_power(dev_priv, true);
 	intel_power_domains_resume(dev_priv);
@@ -6689,7 +6798,7 @@ void intel_init_pm(struct drm_device *dev)
 			    (dev_priv->is_ddr3 == 1) ? "3" : "2",
 			    dev_priv->fsb_freq, dev_priv->mem_freq);
 			/* Disable CxSR and never update its watermark again */
-			pineview_disable_cxsr(dev);
+			intel_set_memory_cxsr(dev_priv, false);
 			dev_priv->display.update_wm = NULL;
 		} else
 			dev_priv->display.update_wm = pineview_update_wm;
...
@@ -48,9 +48,8 @@ static inline int __ring_space(int head, int tail, int size)
 	return space;
 }
 
-static inline int ring_space(struct intel_engine_cs *ring)
+static inline int ring_space(struct intel_ringbuffer *ringbuf)
 {
-	struct intel_ringbuffer *ringbuf = ring->buffer;
 	return __ring_space(ringbuf->head & HEAD_ADDR, ringbuf->tail, ringbuf->size);
 }
 
@@ -545,7 +544,7 @@ static int init_ring_common(struct intel_engine_cs *ring)
 	else {
 		ringbuf->head = I915_READ_HEAD(ring);
 		ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
-		ringbuf->space = ring_space(ring);
+		ringbuf->space = ring_space(ringbuf);
 		ringbuf->last_retired_head = -1;
 	}
 
@@ -660,6 +659,13 @@ static int init_render_ring(struct intel_engine_cs *ring)
 static void render_ring_cleanup(struct intel_engine_cs *ring)
 {
 	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->semaphore_obj) {
+		i915_gem_object_ggtt_unpin(dev_priv->semaphore_obj);
+		drm_gem_object_unreference(&dev_priv->semaphore_obj->base);
+		dev_priv->semaphore_obj = NULL;
+	}
 
 	if (ring->scratch.obj == NULL)
 		return;
@@ -673,29 +679,96 @@ static void render_ring_cleanup(struct intel_engine_cs *ring)
 	ring->scratch.obj = NULL;
 }
 
+static int gen8_rcs_signal(struct intel_engine_cs *signaller,
+			   unsigned int num_dwords)
+{
+#define MBOX_UPDATE_DWORDS 8
+	struct drm_device *dev = signaller->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *waiter;
+	int i, ret, num_rings;
+
+	num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+	num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
+#undef MBOX_UPDATE_DWORDS
+
+	ret = intel_ring_begin(signaller, num_dwords);
+	if (ret)
+		return ret;
+
+	for_each_ring(waiter, dev_priv, i) {
+		u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
+		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
+			continue;
+
+		intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6));
+		intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB |
+					   PIPE_CONTROL_QW_WRITE |
+					   PIPE_CONTROL_FLUSH_ENABLE);
+		intel_ring_emit(signaller, lower_32_bits(gtt_offset));
+		intel_ring_emit(signaller, upper_32_bits(gtt_offset));
+		intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
+		intel_ring_emit(signaller, 0);
+		intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
+					   MI_SEMAPHORE_TARGET(waiter->id));
+		intel_ring_emit(signaller, 0);
+	}
+
+	return 0;
+}
+
+static int gen8_xcs_signal(struct intel_engine_cs *signaller,
+			   unsigned int num_dwords)
+{
+#define MBOX_UPDATE_DWORDS 6
+	struct drm_device *dev = signaller->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *waiter;
+	int i, ret, num_rings;
+
+	num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+	num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
+#undef MBOX_UPDATE_DWORDS
+
+	ret = intel_ring_begin(signaller, num_dwords);
+	if (ret)
+		return ret;
+
+	for_each_ring(waiter, dev_priv, i) {
+		u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
+		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
+			continue;
+
+		intel_ring_emit(signaller, (MI_FLUSH_DW + 1) |
+					   MI_FLUSH_DW_OP_STOREDW);
+		intel_ring_emit(signaller, lower_32_bits(gtt_offset) |
+					   MI_FLUSH_DW_USE_GTT);
+		intel_ring_emit(signaller, upper_32_bits(gtt_offset));
+		intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
+		intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
+					   MI_SEMAPHORE_TARGET(waiter->id));
+		intel_ring_emit(signaller, 0);
+	}
+
+	return 0;
+}
+
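Both helpers budget their ring space up front, one mailbox update per other ring: the render ring's PIPE_CONTROL sequence costs 8 dwords, the other engines' MI_FLUSH_DW sequence 6. The arithmetic as a standalone check (num_rings = 4 is just an example configuration, e.g. RCS, VCS, BCS, VECS):

#include <stdio.h>

int main(void)
{
	const int base = 4;		/* the add_request emission itself */
	const int num_rings = 4;	/* example engine count */

	/* one mailbox write per *other* ring */
	printf("rcs signal: %d dwords\n", base + (num_rings - 1) * 8);
	printf("xcs signal: %d dwords\n", base + (num_rings - 1) * 6);
	return 0;
}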
 static int gen6_signal(struct intel_engine_cs *signaller,
 		       unsigned int num_dwords)
 {
 	struct drm_device *dev = signaller->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *useless;
-	int i, ret;
+	int i, ret, num_rings;
 
-	/* NB: In order to be able to do semaphore MBOX updates for varying
-	 * number of rings, it's easiest if we round up each individual update
-	 * to a multiple of 2 (since ring updates must always be a multiple of
-	 * 2) even though the actual update only requires 3 dwords.
-	 */
-#define MBOX_UPDATE_DWORDS 4
-	if (i915_semaphore_is_enabled(dev))
-		num_dwords += ((I915_NUM_RINGS-1) * MBOX_UPDATE_DWORDS);
-	else
-		return intel_ring_begin(signaller, num_dwords);
+#define MBOX_UPDATE_DWORDS 3
+	num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+	num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2);
+#undef MBOX_UPDATE_DWORDS
 
 	ret = intel_ring_begin(signaller, num_dwords);
 	if (ret)
 		return ret;
-#undef MBOX_UPDATE_DWORDS
 
 	for_each_ring(useless, dev_priv, i) {
 		u32 mbox_reg = signaller->semaphore.mbox.signal[i];
@@ -703,15 +776,13 @@ static int gen6_signal(struct intel_engine_cs *signaller,
 			intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
 			intel_ring_emit(signaller, mbox_reg);
 			intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
-			intel_ring_emit(signaller, MI_NOOP);
-		} else {
-			intel_ring_emit(signaller, MI_NOOP);
-			intel_ring_emit(signaller, MI_NOOP);
-			intel_ring_emit(signaller, MI_NOOP);
-			intel_ring_emit(signaller, MI_NOOP);
 		}
 	}
 
+	/* If num_dwords was rounded, make sure the tail pointer is correct */
+	if (num_rings % 2 == 0)
+		intel_ring_emit(signaller, MI_NOOP);
+
 	return 0;
 }
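
gen6_signal now sizes each mailbox update at three dwords and rounds the total up to a multiple of two, since everything written to the ring must stay a multiple of two dwords. The count (num_rings - 1) * 3 is odd exactly when num_rings is even, which is when the trailing MI_NOOP pads the tail. A standalone sketch of the padding rule (userspace, not kernel code):

    #include <stdio.h>

    #define ROUND_UP(x, a) ((((x) + (a) - 1) / (a)) * (a))

    int main(void)
    {
        for (unsigned int num_rings = 2; num_rings <= 5; num_rings++) {
            unsigned int emitted  = (num_rings - 1) * 3; /* MBOX_UPDATE_DWORDS */
            unsigned int reserved = ROUND_UP(emitted, 2);

            printf("%u rings: emit %2u dwords, reserve %2u, pad %u NOOP\n",
                   num_rings, emitted, reserved, reserved - emitted);
        }
        return 0;
    }
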
@@ -729,7 +800,11 @@ gen6_add_request(struct intel_engine_cs *ring)
 {
 	int ret;
 
-	ret = ring->semaphore.signal(ring, 4);
+	if (ring->semaphore.signal)
+		ret = ring->semaphore.signal(ring, 4);
+	else
+		ret = intel_ring_begin(ring, 4);
+
 	if (ret)
 		return ret;
@@ -756,6 +831,32 @@ static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
  * @signaller - ring which has, or will signal
  * @seqno - seqno which the waiter will block on
  */
+
+static int
+gen8_ring_sync(struct intel_engine_cs *waiter,
+	       struct intel_engine_cs *signaller,
+	       u32 seqno)
+{
+	struct drm_i915_private *dev_priv = waiter->dev->dev_private;
+	int ret;
+
+	ret = intel_ring_begin(waiter, 4);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(waiter, MI_SEMAPHORE_WAIT |
+				MI_SEMAPHORE_GLOBAL_GTT |
+				MI_SEMAPHORE_POLL |
+				MI_SEMAPHORE_SAD_GTE_SDD);
+	intel_ring_emit(waiter, seqno);
+	intel_ring_emit(waiter,
+			lower_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
+	intel_ring_emit(waiter,
+			upper_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
+	intel_ring_advance(waiter);
+	return 0;
+}
+
 static int
 gen6_ring_sync(struct intel_engine_cs *waiter,
 	       struct intel_engine_cs *signaller,
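
MI_SEMAPHORE_SAD_GTE_SDD makes the waiter poll its per-pair slot until the value stored there is greater than or equal to the seqno it was given, so a signal posted before the wait even starts is never lost. The contract is the classic store-release/poll-acquire pattern; a standalone C11 sketch (userspace, not kernel code) modelling the two rings as threads:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic unsigned int slot; /* one (signaller, waiter) GGTT slot */

    static void *signaller(void *arg)
    {
        /* kernel side: PIPE_CONTROL/MI_FLUSH_DW flushes, then stores the
         * new seqno; release ordering stands in for that flush */
        atomic_store_explicit(&slot, *(unsigned int *)arg,
                              memory_order_release);
        return NULL;
    }

    int main(void)
    {
        unsigned int seqno = 42;
        pthread_t t;

        pthread_create(&t, NULL, signaller, &seqno);
        /* waiter side: MI_SEMAPHORE_WAIT | POLL | SAD_GTE_SDD */
        while (atomic_load_explicit(&slot, memory_order_acquire) < seqno)
            ;
        printf("seqno %u reached, waiter may proceed\n", seqno);
        pthread_join(t, NULL);
        return 0;
    }
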
@@ -1331,6 +1432,7 @@ static int init_status_page(struct intel_engine_cs *ring)
 	struct drm_i915_gem_object *obj;
 
 	if ((obj = ring->status_page.obj) == NULL) {
+		unsigned flags;
 		int ret;
 
 		obj = i915_gem_alloc_object(ring->dev, 4096);
@@ -1343,7 +1445,20 @@ static int init_status_page(struct intel_engine_cs *ring)
 		if (ret)
 			goto err_unref;
 
-		ret = i915_gem_obj_ggtt_pin(obj, 4096, 0);
+		flags = 0;
+		if (!HAS_LLC(ring->dev))
+			/* On g33, we cannot place HWS above 256MiB, so
+			 * restrict its pinning to the low mappable arena.
+			 * Though this restriction is not documented for
+			 * gen4, gen5, or byt, they also behave similarly
+			 * and hang if the HWS is placed at the top of the
+			 * GTT. To generalise, it appears that all !llc
+			 * platforms have issues with us placing the HWS
+			 * above the mappable region (even though we never
+			 * actually map it).
+			 */
+			flags |= PIN_MAPPABLE;
+		ret = i915_gem_obj_ggtt_pin(obj, 4096, flags);
 		if (ret) {
 err_unref:
 			drm_gem_object_unreference(&obj->base);
@@ -1380,15 +1495,25 @@ static int init_phys_status_page(struct intel_engine_cs *ring)
 	return 0;
 }
 
-static int allocate_ring_buffer(struct intel_engine_cs *ring)
+static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
+{
+	if (!ringbuf->obj)
+		return;
+
+	iounmap(ringbuf->virtual_start);
+	i915_gem_object_ggtt_unpin(ringbuf->obj);
+	drm_gem_object_unreference(&ringbuf->obj->base);
+	ringbuf->obj = NULL;
+}
+
+static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
+				      struct intel_ringbuffer *ringbuf)
 {
-	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_ringbuffer *ringbuf = ring->buffer;
 	struct drm_i915_gem_object *obj;
 	int ret;
 
-	if (intel_ring_initialized(ring))
+	if (ringbuf->obj)
 		return 0;
 
 	obj = NULL;
@@ -1460,7 +1585,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 		goto error;
 	}
 
-	ret = allocate_ring_buffer(ring);
+	ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
 	if (ret) {
 		DRM_ERROR("Failed to allocate ringbuffer %s: %d\n", ring->name, ret);
 		goto error;
@@ -1501,11 +1626,7 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
 	intel_stop_ring_buffer(ring);
 	WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0);
 
-	iounmap(ringbuf->virtual_start);
-
-	i915_gem_object_ggtt_unpin(ringbuf->obj);
-	drm_gem_object_unreference(&ringbuf->obj->base);
-	ringbuf->obj = NULL;
+	intel_destroy_ringbuffer_obj(ringbuf);
 
 	ring->preallocated_lazy_request = NULL;
 	ring->outstanding_lazy_seqno = 0;
@@ -1531,7 +1652,7 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
 			ringbuf->head = ringbuf->last_retired_head;
 			ringbuf->last_retired_head = -1;
 
-			ringbuf->space = ring_space(ring);
+			ringbuf->space = ring_space(ringbuf);
 			if (ringbuf->space >= n)
 				return 0;
 		}
@@ -1554,7 +1675,7 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
 	ringbuf->head = ringbuf->last_retired_head;
 	ringbuf->last_retired_head = -1;
 
-	ringbuf->space = ring_space(ring);
+	ringbuf->space = ring_space(ringbuf);
 
 	return 0;
 }
@@ -1583,7 +1704,7 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
 	trace_i915_ring_wait_begin(ring);
 	do {
 		ringbuf->head = I915_READ_HEAD(ring);
-		ringbuf->space = ring_space(ring);
+		ringbuf->space = ring_space(ringbuf);
 		if (ringbuf->space >= n) {
 			ret = 0;
 			break;
@@ -1635,7 +1756,7 @@ static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
 		iowrite32(MI_NOOP, virt++);
 
 	ringbuf->tail = 0;
-	ringbuf->space = ring_space(ring);
+	ringbuf->space = ring_space(ringbuf);
 
 	return 0;
 }
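
All of these call sites now hand ring_space() the intel_ringbuffer directly instead of the engine, since head, tail and size live in the ringbuffer. The computation itself is ordinary circular-buffer accounting: free space is the distance from tail back around to head, minus a reserve so tail never quite catches head. A standalone sketch (userspace, not kernel code; the 64-byte reserve mirrors I915_RING_FREE_SPACE but is illustrative here):

    #include <stdio.h>

    #define RING_FREE_SPACE 64 /* keep tail from ever touching head */

    static int ring_space(int head, int tail, int size)
    {
        int space = head - (tail + RING_FREE_SPACE);
        if (space < 0)
            space += size; /* wrap around the ring */
        return space;
    }

    int main(void)
    {
        printf("%d\n", ring_space(0, 256, 4096));   /* 3776: mostly empty */
        printf("%d\n", ring_space(512, 384, 4096)); /* 64: nearly full */
        return 0;
    }
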
@@ -1947,45 +2068,74 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+	struct drm_i915_gem_object *obj;
+	int ret;
 
 	ring->name = "render ring";
 	ring->id = RCS;
 	ring->mmio_base = RENDER_RING_BASE;
 
-	if (INTEL_INFO(dev)->gen >= 6) {
+	if (INTEL_INFO(dev)->gen >= 8) {
+		if (i915_semaphore_is_enabled(dev)) {
+			obj = i915_gem_alloc_object(dev, 4096);
+			if (obj == NULL) {
+				DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n");
+				i915.semaphores = 0;
+			} else {
+				i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+				ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_NONBLOCK);
+				if (ret != 0) {
+					drm_gem_object_unreference(&obj->base);
+					DRM_ERROR("Failed to pin semaphore bo. Disabling semaphores\n");
+					i915.semaphores = 0;
+				} else
+					dev_priv->semaphore_obj = obj;
+			}
+		}
+		ring->add_request = gen6_add_request;
+		ring->flush = gen8_render_ring_flush;
+		ring->irq_get = gen8_ring_get_irq;
+		ring->irq_put = gen8_ring_put_irq;
+		ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
+		ring->get_seqno = gen6_ring_get_seqno;
+		ring->set_seqno = ring_set_seqno;
+		if (i915_semaphore_is_enabled(dev)) {
+			WARN_ON(!dev_priv->semaphore_obj);
+			ring->semaphore.sync_to = gen8_ring_sync;
+			ring->semaphore.signal = gen8_rcs_signal;
+			GEN8_RING_SEMAPHORE_INIT;
+		}
+	} else if (INTEL_INFO(dev)->gen >= 6) {
 		ring->add_request = gen6_add_request;
 		ring->flush = gen7_render_ring_flush;
 		if (INTEL_INFO(dev)->gen == 6)
 			ring->flush = gen6_render_ring_flush;
-		if (INTEL_INFO(dev)->gen >= 8) {
-			ring->flush = gen8_render_ring_flush;
-			ring->irq_get = gen8_ring_get_irq;
-			ring->irq_put = gen8_ring_put_irq;
-		} else {
-			ring->irq_get = gen6_ring_get_irq;
-			ring->irq_put = gen6_ring_put_irq;
-		}
+		ring->irq_get = gen6_ring_get_irq;
+		ring->irq_put = gen6_ring_put_irq;
 		ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
 		ring->get_seqno = gen6_ring_get_seqno;
 		ring->set_seqno = ring_set_seqno;
-		ring->semaphore.sync_to = gen6_ring_sync;
-		ring->semaphore.signal = gen6_signal;
-		/*
-		 * The current semaphore is only applied on pre-gen8 platform.
-		 * And there is no VCS2 ring on the pre-gen8 platform. So the
-		 * semaphore between RCS and VCS2 is initialized as INVALID.
-		 * Gen8 will initialize the sema between VCS2 and RCS later.
-		 */
-		ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
-		ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV;
-		ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB;
-		ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE;
-		ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
-		ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
-		ring->semaphore.mbox.signal[VCS] = GEN6_VRSYNC;
-		ring->semaphore.mbox.signal[BCS] = GEN6_BRSYNC;
-		ring->semaphore.mbox.signal[VECS] = GEN6_VERSYNC;
-		ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+		if (i915_semaphore_is_enabled(dev)) {
+			ring->semaphore.sync_to = gen6_ring_sync;
+			ring->semaphore.signal = gen6_signal;
+			/*
+			 * The current semaphore is only applied on pre-gen8
+			 * platform. And there is no VCS2 ring on the pre-gen8
+			 * platform. So the semaphore between RCS and VCS2 is
+			 * initialized as INVALID. Gen8 will initialize the
+			 * sema between VCS2 and RCS later.
+			 */
+			ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
+			ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV;
+			ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB;
+			ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE;
+			ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+			ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
+			ring->semaphore.mbox.signal[VCS] = GEN6_VRSYNC;
+			ring->semaphore.mbox.signal[BCS] = GEN6_BRSYNC;
+			ring->semaphore.mbox.signal[VECS] = GEN6_VERSYNC;
+			ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+		}
 	} else if (IS_GEN5(dev)) {
 		ring->add_request = pc_render_add_request;
 		ring->flush = gen4_render_ring_flush;
@@ -2013,6 +2163,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 		ring->irq_enable_mask = I915_USER_INTERRUPT;
 	}
 	ring->write_tail = ring_write_tail;
+
 	if (IS_HASWELL(dev))
 		ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
 	else if (IS_GEN8(dev))
@@ -2030,9 +2181,6 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 	/* Workaround batchbuffer to combat CS tlb bug. */
 	if (HAS_BROKEN_CS_TLB(dev)) {
-		struct drm_i915_gem_object *obj;
-		int ret;
-
 		obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT);
 		if (obj == NULL) {
 			DRM_ERROR("Failed to allocate batch bo\n");
@@ -2163,31 +2311,32 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
 			ring->irq_put = gen8_ring_put_irq;
 			ring->dispatch_execbuffer =
 				gen8_ring_dispatch_execbuffer;
+			if (i915_semaphore_is_enabled(dev)) {
+				ring->semaphore.sync_to = gen8_ring_sync;
+				ring->semaphore.signal = gen8_xcs_signal;
+				GEN8_RING_SEMAPHORE_INIT;
+			}
 		} else {
 			ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
 			ring->irq_get = gen6_ring_get_irq;
 			ring->irq_put = gen6_ring_put_irq;
 			ring->dispatch_execbuffer =
 				gen6_ring_dispatch_execbuffer;
+			if (i915_semaphore_is_enabled(dev)) {
+				ring->semaphore.sync_to = gen6_ring_sync;
+				ring->semaphore.signal = gen6_signal;
+				ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR;
+				ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
+				ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB;
+				ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE;
+				ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+				ring->semaphore.mbox.signal[RCS] = GEN6_RVSYNC;
+				ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
+				ring->semaphore.mbox.signal[BCS] = GEN6_BVSYNC;
+				ring->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC;
+				ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+			}
 		}
-		ring->semaphore.sync_to = gen6_ring_sync;
-		ring->semaphore.signal = gen6_signal;
-		/*
-		 * The current semaphore is only applied on pre-gen8 platform.
-		 * And there is no VCS2 ring on the pre-gen8 platform. So the
-		 * semaphore between VCS and VCS2 is initialized as INVALID.
-		 * Gen8 will initialize the sema between VCS2 and VCS later.
-		 */
-		ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR;
-		ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
-		ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB;
-		ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE;
-		ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
-		ring->semaphore.mbox.signal[RCS] = GEN6_RVSYNC;
-		ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
-		ring->semaphore.mbox.signal[BCS] = GEN6_BVSYNC;
-		ring->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC;
-		ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
 	} else {
 		ring->mmio_base = BSD_RING_BASE;
 		ring->flush = bsd_ring_flush;
@@ -2224,7 +2373,7 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev)
 		return -EINVAL;
 	}
 
-	ring->name = "bds2_ring";
+	ring->name = "bsd2 ring";
 	ring->id = VCS2;
 
 	ring->write_tail = ring_write_tail;
@@ -2239,25 +2388,11 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev)
 	ring->irq_put = gen8_ring_put_irq;
 	ring->dispatch_execbuffer =
 			gen8_ring_dispatch_execbuffer;
-	ring->semaphore.sync_to = gen6_ring_sync;
-	ring->semaphore.signal = gen6_signal;
-	/*
-	 * The current semaphore is only applied on the pre-gen8. And there
-	 * is no bsd2 ring on the pre-gen8. So now the semaphore_register
-	 * between VCS2 and other ring is initialized as invalid.
-	 * Gen8 will initialize the sema between VCS2 and other ring later.
-	 */
-	ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
-	ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
-	ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
-	ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
-	ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
-	ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
-	ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
-	ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
-	ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
-	ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+	if (i915_semaphore_is_enabled(dev)) {
+		ring->semaphore.sync_to = gen8_ring_sync;
+		ring->semaphore.signal = gen8_xcs_signal;
+		GEN8_RING_SEMAPHORE_INIT;
+	}
 	ring->init = init_ring_common;
 
 	return intel_init_ring_buffer(dev, ring);
@@ -2283,30 +2418,38 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
 		ring->irq_get = gen8_ring_get_irq;
 		ring->irq_put = gen8_ring_put_irq;
 		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
+		if (i915_semaphore_is_enabled(dev)) {
+			ring->semaphore.sync_to = gen8_ring_sync;
+			ring->semaphore.signal = gen8_xcs_signal;
+			GEN8_RING_SEMAPHORE_INIT;
+		}
 	} else {
 		ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
 		ring->irq_get = gen6_ring_get_irq;
 		ring->irq_put = gen6_ring_put_irq;
 		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+		if (i915_semaphore_is_enabled(dev)) {
+			ring->semaphore.signal = gen6_signal;
+			ring->semaphore.sync_to = gen6_ring_sync;
+			/*
+			 * The current semaphore is only applied on pre-gen8
+			 * platform. And there is no VCS2 ring on the pre-gen8
+			 * platform. So the semaphore between BCS and VCS2 is
+			 * initialized as INVALID. Gen8 will initialize the
+			 * sema between BCS and VCS2 later.
+			 */
+			ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR;
+			ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV;
+			ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
+			ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE;
+			ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+			ring->semaphore.mbox.signal[RCS] = GEN6_RBSYNC;
+			ring->semaphore.mbox.signal[VCS] = GEN6_VBSYNC;
+			ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
+			ring->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC;
+			ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+		}
 	}
-	ring->semaphore.sync_to = gen6_ring_sync;
-	ring->semaphore.signal = gen6_signal;
-	/*
-	 * The current semaphore is only applied on pre-gen8 platform. And
-	 * there is no VCS2 ring on the pre-gen8 platform. So the semaphore
-	 * between BCS and VCS2 is initialized as INVALID.
-	 * Gen8 will initialize the sema between BCS and VCS2 later.
-	 */
-	ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR;
-	ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV;
-	ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
-	ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE;
-	ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
-	ring->semaphore.mbox.signal[RCS] = GEN6_RBSYNC;
-	ring->semaphore.mbox.signal[VCS] = GEN6_VBSYNC;
-	ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
-	ring->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC;
-	ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
 	ring->init = init_ring_common;
 
 	return intel_init_ring_buffer(dev, ring);
@@ -2333,24 +2476,31 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
 		ring->irq_get = gen8_ring_get_irq;
 		ring->irq_put = gen8_ring_put_irq;
 		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
+		if (i915_semaphore_is_enabled(dev)) {
+			ring->semaphore.sync_to = gen8_ring_sync;
+			ring->semaphore.signal = gen8_xcs_signal;
+			GEN8_RING_SEMAPHORE_INIT;
+		}
 	} else {
 		ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
 		ring->irq_get = hsw_vebox_get_irq;
 		ring->irq_put = hsw_vebox_put_irq;
 		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+		if (i915_semaphore_is_enabled(dev)) {
+			ring->semaphore.sync_to = gen6_ring_sync;
+			ring->semaphore.signal = gen6_signal;
+			ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER;
+			ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV;
+			ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB;
+			ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
+			ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+			ring->semaphore.mbox.signal[RCS] = GEN6_RVESYNC;
+			ring->semaphore.mbox.signal[VCS] = GEN6_VVESYNC;
+			ring->semaphore.mbox.signal[BCS] = GEN6_BVESYNC;
+			ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
+			ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+		}
 	}
-	ring->semaphore.sync_to = gen6_ring_sync;
-	ring->semaphore.signal = gen6_signal;
-	ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER;
-	ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV;
-	ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB;
-	ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
-	ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
-	ring->semaphore.mbox.signal[RCS] = GEN6_RVESYNC;
-	ring->semaphore.mbox.signal[VCS] = GEN6_VVESYNC;
-	ring->semaphore.mbox.signal[BCS] = GEN6_BVESYNC;
-	ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
-	ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
 	ring->init = init_ring_common;
 
 	return intel_init_ring_buffer(dev, ring);
...
@@ -40,6 +40,32 @@ struct intel_hw_status_page {
 #define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base))
 #define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val)
 
+/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
+ * do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
+ */
+#define i915_semaphore_seqno_size sizeof(uint64_t)
+#define GEN8_SIGNAL_OFFSET(__ring, to) \
+	(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
+	((__ring)->id * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
+	(i915_semaphore_seqno_size * (to)))
+
+#define GEN8_WAIT_OFFSET(__ring, from) \
+	(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
+	((from) * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
+	(i915_semaphore_seqno_size * (__ring)->id))
+
+#define GEN8_RING_SEMAPHORE_INIT do { \
+	if (!dev_priv->semaphore_obj) { \
+		break; \
+	} \
+	ring->semaphore.signal_ggtt[RCS] = GEN8_SIGNAL_OFFSET(ring, RCS); \
+	ring->semaphore.signal_ggtt[VCS] = GEN8_SIGNAL_OFFSET(ring, VCS); \
+	ring->semaphore.signal_ggtt[BCS] = GEN8_SIGNAL_OFFSET(ring, BCS); \
+	ring->semaphore.signal_ggtt[VECS] = GEN8_SIGNAL_OFFSET(ring, VECS); \
+	ring->semaphore.signal_ggtt[VCS2] = GEN8_SIGNAL_OFFSET(ring, VCS2); \
+	ring->semaphore.signal_ggtt[ring->id] = MI_SEMAPHORE_SYNC_INVALID; \
+	} while(0)
+
 enum intel_ring_hangcheck_action {
 	HANGCHECK_IDLE = 0,
 	HANGCHECK_WAIT,
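
With the semaphore page at GGTT offset zero, GEN8_SIGNAL_OFFSET(x, y) reduces to (x * I915_NUM_RINGS + y) * 8 and GEN8_WAIT_OFFSET is its transpose, which is exactly the 0x00..0xc0 layout tabulated in the struct intel_engine_cs comment below. A standalone sketch that reproduces the table (userspace, not kernel code; base offset assumed zero):

    #include <stdio.h>

    enum ring_id { RCS, VCS, BCS, VECS, VCS2, NUM_RINGS };

    #define SEQNO_SIZE 8 /* pretend-u64 slots, qword aligned for MI_FLUSH_DW */

    static unsigned int signal_offset(int x, int to)
    {
        return (x * NUM_RINGS + to) * SEQNO_SIZE;
    }

    static unsigned int wait_offset(int x, int from)
    {
        return (from * NUM_RINGS + x) * SEQNO_SIZE;
    }

    int main(void)
    {
        for (int x = 0; x < NUM_RINGS; x++)
            for (int y = 0; y < NUM_RINGS; y++)
                printf("0x%02x%c", signal_offset(x, y),
                       y == NUM_RINGS - 1 ? '\n' : ' ');
        /* the wait table is the transpose of the signal table */
        for (int x = 0; x < NUM_RINGS; x++)
            for (int y = 0; y < NUM_RINGS; y++)
                if (wait_offset(x, y) != signal_offset(y, x))
                    return 1;
        return 0;
    }
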
@@ -127,15 +153,55 @@ struct intel_engine_cs {
 #define I915_DISPATCH_PINNED 0x2
 	void		(*cleanup)(struct intel_engine_cs *ring);
 
+	/* GEN8 signal/wait table - never trust comments!
+	 *        signal to    signal to    signal to    signal to    signal to
+	 *          RCS           VCS          BCS          VECS         VCS2
+	 *      --------------------------------------------------------------------
+	 *  RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
+	 *      |-------------------------------------------------------------------
+	 *  VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
+	 *      |-------------------------------------------------------------------
+	 *  BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
+	 *      |-------------------------------------------------------------------
+	 * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) |  NOP (0x90) | VCS2 (0x98) |
+	 *      |-------------------------------------------------------------------
+	 * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) |  NOP (0xc0) |
+	 *      |-------------------------------------------------------------------
+	 *
+	 * Generalization:
+	 *  f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
+	 *  ie. transpose of g(x, y)
+	 *
+	 *       sync from    sync from    sync from    sync from    sync from
+	 *          RCS           VCS          BCS          VECS         VCS2
+	 *      --------------------------------------------------------------------
+	 *  RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
+	 *      |-------------------------------------------------------------------
+	 *  VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
+	 *      |-------------------------------------------------------------------
+	 *  BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
+	 *      |-------------------------------------------------------------------
+	 * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) |  NOP (0x90) | VCS2 (0xb8) |
+	 *      |-------------------------------------------------------------------
+	 * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) |  NOP (0xc0) |
+	 *      |-------------------------------------------------------------------
+	 *
+	 * Generalization:
+	 *  g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
+	 *  ie. transpose of f(x, y)
+	 */
+
 	struct {
 		u32	sync_seqno[I915_NUM_RINGS-1];
 
-		struct {
-			/* our mbox written by others */
-			u32		wait[I915_NUM_RINGS];
-			/* mboxes this ring signals to */
-			u32		signal[I915_NUM_RINGS];
-		} mbox;
+		union {
+			struct {
+				/* our mbox written by others */
+				u32		wait[I915_NUM_RINGS];
+				/* mboxes this ring signals to */
+				u32		signal[I915_NUM_RINGS];
+			} mbox;
+			u64		signal_ggtt[I915_NUM_RINGS];
+		};
 
 		/* AKA wait() */
 		int	(*sync_to)(struct intel_engine_cs *ring,
@@ -238,9 +304,11 @@ intel_ring_sync_index(struct intel_engine_cs *ring,
 	int idx;
 
 	/*
-	 * cs -> 0 = vcs, 1 = bcs
-	 * vcs -> 0 = bcs, 1 = cs,
-	 * bcs -> 0 = cs, 1 = vcs.
+	 * rcs -> 0 = vcs, 1 = bcs, 2 = vecs, 3 = vcs2;
+	 * vcs -> 0 = bcs, 1 = vecs, 2 = vcs2, 3 = rcs;
+	 * bcs -> 0 = vecs, 1 = vcs2, 2 = rcs, 3 = vcs;
+	 * vecs -> 0 = vcs2, 1 = rcs, 2 = vcs, 3 = bcs;
+	 * vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs;
 	 */
 
 	idx = (other - ring) - 1;
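
The rewritten comment enumerates all five rings; the index itself is still (other - ring) - 1, wrapped into [0, I915_NUM_RINGS), so each ring gets a dense sync_seqno[] slot for every ring but itself. A standalone sketch that regenerates the mapping from ring ids (userspace, not kernel code; the kernel subtracts engine pointers, which is equivalent because the rings sit in one array):

    #include <stdio.h>

    enum ring_id { RCS, VCS, BCS, VECS, VCS2, I915_NUM_RINGS };
    static const char *name[] = { "rcs", "vcs", "bcs", "vecs", "vcs2" };

    static int sync_index(int ring, int other)
    {
        int idx = (other - ring) - 1;
        if (idx < 0)
            idx += I915_NUM_RINGS;
        return idx;
    }

    int main(void)
    {
        for (int ring = 0; ring < I915_NUM_RINGS; ring++) {
            printf("%4s ->", name[ring]);
            for (int other = 0; other < I915_NUM_RINGS; other++)
                if (other != ring)
                    printf(" %d = %s,", sync_index(ring, other), name[other]);
            printf("\n");
        }
        return 0;
    }
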
@@ -318,9 +386,9 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev);
 u64 intel_ring_get_active_head(struct intel_engine_cs *ring);
 void intel_ring_setup_status_page(struct intel_engine_cs *ring);
 
-static inline u32 intel_ring_get_tail(struct intel_engine_cs *ring)
+static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
 {
-	return ring->buffer->tail;
+	return ringbuf->tail;
 }
 
 static inline u32 intel_ring_get_seqno(struct intel_engine_cs *ring)
...
@@ -1010,7 +1010,7 @@ int i915_get_reset_stats_ioctl(struct drm_device *dev,
 	if (args->flags || args->pad)
 		return -EINVAL;
 
-	if (args->ctx_id == DEFAULT_CONTEXT_ID && !capable(CAP_SYS_ADMIN))
+	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
 		return -EPERM;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
...