Commit c99d1530 authored by Dave Airlie

Merge tag 'drm-intel-next-2015-05-22' of git://anongit.freedesktop.org/drm-intel into drm-next

- cpt modeset sequence fixes from Ville
- more rps boosting tuning from Chris
- S3 support for skl (Damien)
- a pile of w/a for bxt from various people
- cleanup of primary plane pixel formats (Damien)
- a big pile of small patches with fixes and cleanups all over

* tag 'drm-intel-next-2015-05-22' of git://anongit.freedesktop.org/drm-intel: (90 commits)
  drm/i915: Update DRIVER_DATE to 20150522
  drm/i915: Introduce DRM_I915_THROTTLE_JIFFIES
  drm/i915: Use the correct destructor for freeing requests on error
  drm/i915/skl: don't fail colorkey + scaler request
  drm/i915: Enable GTT caching on gen8
  drm/i915: Move WaProgramL3SqcReg1Default:bdw to init_clock_gating()
  drm/i915: Use ilk_init_lp_watermarks() on BDW
  drm/i915: Disable FDI RX/TX before the ports
  drm/i915: Disable CRT port after pipe on PCH platforms
  drm/i915: Disable SDVO port after the pipe on PCH platforms
  drm/i915: Disable HDMI port after the pipe on PCH platforms
  drm/i915: Fix the IBX transcoder B workarounds
  drm/i915: Write the SDVO reg twice on IBX
  drm/i915: Fix DP enhanced framing for CPT
  drm/i915: Clean up the CPT DP .get_hw_state() port readout
  drm/i915: Clarify the DP code platform checks
  drm/i915: Remove the double register write from intel_disable_hdmi()
  drm/i915: Remove a bogus 12bpc "toggle" from intel_disable_hdmi()
  drm/i915/skl: Deinit/init the display at suspend/resume
  drm/i915: Free RPS boosts for all laggards
  ...
@@ -4153,6 +4153,12 @@ int num_ioctls;</synopsis>
       </tgroup>
     </table>
     </sect2>
+    <sect2>
+      <title>CSR firmware support for DMC</title>
+!Pdrivers/gpu/drm/i915/intel_csr.c csr support for dmc
+!Idrivers/gpu/drm/i915/intel_csr.c
+    </sect2>
   </sect1>
   <sect1>
@@ -4204,7 +4210,6 @@ int num_ioctls;</synopsis>
 !Idrivers/gpu/drm/i915/i915_gem_shrinker.c
     </sect2>
   </sect1>
   <sect1>
     <title> Tracing </title>
     <para>
......
@@ -71,3 +71,11 @@ config DRM_I915_PRELIMINARY_HW_SUPPORT
 	  option changes the default for that module option.
 	  If in doubt, say "N".
+
+menu "DRM i915 Debugging"
+depends on DRM_I915
+source drivers/gpu/drm/i915/Kconfig.debug
+endmenu
+
+config DRM_I915_WERROR
+	bool "Force GCC to throw an error instead of a warning when compiling"
+	default n
+	---help---
+	  Add -Werror to the build flags for (and only for) i915.ko
@@ -2,6 +2,8 @@
 # Makefile for the drm device driver.  This driver provides support for the
 # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+subdir-ccflags-$(CONFIG_DRM_I915_WERROR) := -Werror
+
 # Please keep these build lists sorted!
 # core driver code
......
@@ -120,10 +120,13 @@ static inline const char *get_global_flag(struct drm_i915_gem_object *obj)
 static void
 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 {
+	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+	struct intel_engine_cs *ring;
 	struct i915_vma *vma;
 	int pin_count = 0;
+	int i;
-	seq_printf(m, "%pK: %s%s%s%s %8zdKiB %02x %02x %x %x %x%s%s%s",
+	seq_printf(m, "%pK: %s%s%s%s %8zdKiB %02x %02x [ ",
 		   &obj->base,
 		   obj->active ? "*" : " ",
 		   get_pin_flag(obj),
@@ -131,8 +134,11 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 		   get_global_flag(obj),
 		   obj->base.size / 1024,
 		   obj->base.read_domains,
-		   obj->base.write_domain,
-		   i915_gem_request_get_seqno(obj->last_read_req),
+		   obj->base.write_domain);
+	for_each_ring(ring, dev_priv, i)
+		seq_printf(m, "%x ",
+			   i915_gem_request_get_seqno(obj->last_read_req[i]));
+	seq_printf(m, "] %x %x%s%s%s",
 		   i915_gem_request_get_seqno(obj->last_write_req),
 		   i915_gem_request_get_seqno(obj->last_fenced_req),
 		   i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level),
@@ -169,9 +175,9 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 		*t = '\0';
 		seq_printf(m, " (%s mappable)", s);
 	}
-	if (obj->last_read_req != NULL)
+	if (obj->last_write_req != NULL)
 		seq_printf(m, " (%s)",
-			   i915_gem_request_get_ring(obj->last_read_req)->name);
+			   i915_gem_request_get_ring(obj->last_write_req)->name);
 	if (obj->frontbuffer_bits)
 		seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
 }
@@ -665,7 +671,7 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *ring;
-	struct drm_i915_gem_request *rq;
+	struct drm_i915_gem_request *req;
 	int ret, any, i;
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -677,22 +683,22 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
 		int count;
 		count = 0;
-		list_for_each_entry(rq, &ring->request_list, list)
+		list_for_each_entry(req, &ring->request_list, list)
 			count++;
 		if (count == 0)
 			continue;
 		seq_printf(m, "%s requests: %d\n", ring->name, count);
-		list_for_each_entry(rq, &ring->request_list, list) {
+		list_for_each_entry(req, &ring->request_list, list) {
 			struct task_struct *task;
 			rcu_read_lock();
 			task = NULL;
-			if (rq->pid)
-				task = pid_task(rq->pid, PIDTYPE_PID);
+			if (req->pid)
+				task = pid_task(req->pid, PIDTYPE_PID);
 			seq_printf(m, "    %x @ %d: %s [%d]\n",
-				   rq->seqno,
-				   (int) (jiffies - rq->emitted_jiffies),
+				   req->seqno,
+				   (int) (jiffies - req->emitted_jiffies),
 				   task ? task->comm : "<unknown>",
 				   task ? task->pid : -1);
 			rcu_read_unlock();
@@ -2276,6 +2282,18 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
 	return 0;
 }
+static int count_irq_waiters(struct drm_i915_private *i915)
+{
+	struct intel_engine_cs *ring;
+	int count = 0;
+	int i;
+
+	for_each_ring(ring, i915, i)
+		count += ring->irq_refcount;
+
+	return count;
+}
+
 static int i915_rps_boost_info(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = m->private;
@@ -2292,6 +2310,15 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
 	if (ret)
 		goto unlock;
+	seq_printf(m, "RPS enabled? %d\n", dev_priv->rps.enabled);
+	seq_printf(m, "GPU busy? %d\n", dev_priv->mm.busy);
+	seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
+	seq_printf(m, "Frequency requested %d; min hard:%d, soft:%d; max soft:%d, hard:%d\n",
+		   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
+		   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
+		   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit),
+		   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit),
+		   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
 	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
 		struct drm_i915_file_private *file_priv = file->driver_priv;
 		struct task_struct *task;
@@ -2301,10 +2328,16 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
 		seq_printf(m, "%s [%d]: %d boosts%s\n",
 			   task ? task->comm : "<unknown>",
 			   task ? task->pid : -1,
-			   file_priv->rps_boosts,
-			   list_empty(&file_priv->rps_boost) ? "" : ", active");
+			   file_priv->rps.boosts,
+			   list_empty(&file_priv->rps.link) ? "" : ", active");
 		rcu_read_unlock();
 	}
+	seq_printf(m, "Semaphore boosts: %d%s\n",
+		   dev_priv->rps.semaphores.boosts,
+		   list_empty(&dev_priv->rps.semaphores.link) ? "" : ", active");
+	seq_printf(m, "MMIO flip boosts: %d%s\n",
+		   dev_priv->rps.mmioflips.boosts,
+		   list_empty(&dev_priv->rps.mmioflips.link) ? "" : ", active");
 	seq_printf(m, "Kernel boosts: %d\n", dev_priv->rps.boosts);
 	mutex_unlock(&dev_priv->rps.hw_lock);
@@ -5154,6 +5187,9 @@ static int i915_dpcd_show(struct seq_file *m, void *data)
 	ssize_t err;
 	int i;
+	if (connector->status != connector_status_connected)
+		return -ENODEV;
+
 	for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
 		const struct dpcd_block *b = &i915_dpcd_debug[i];
 		size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);
......
@@ -595,6 +595,7 @@ static int intel_suspend_complete(struct drm_i915_private *dev_priv);
 static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
 			      bool rpm_resume);
 static int skl_resume_prepare(struct drm_i915_private *dev_priv);
+static int bxt_resume_prepare(struct drm_i915_private *dev_priv);
 static int i915_drm_suspend(struct drm_device *dev)
@@ -811,14 +812,17 @@ static int i915_drm_resume_early(struct drm_device *dev)
 	if (IS_VALLEYVIEW(dev_priv))
 		ret = vlv_resume_prepare(dev_priv, false);
 	if (ret)
-		DRM_ERROR("Resume prepare failed: %d,Continuing resume\n", ret);
+		DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
+			  ret);
 	intel_uncore_early_sanitize(dev, true);
-	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
-		hsw_disable_pc8(dev_priv);
+	if (IS_BROXTON(dev))
+		ret = bxt_resume_prepare(dev_priv);
 	else if (IS_SKYLAKE(dev_priv))
 		ret = skl_resume_prepare(dev_priv);
+	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+		hsw_disable_pc8(dev_priv);
 	intel_uncore_sanitize(dev);
 	intel_power_domains_init_hw(dev_priv);
@@ -989,7 +993,7 @@ static int i915_pm_suspend_late(struct device *dev)
 	struct drm_device *drm_dev = dev_to_i915(dev)->dev;
 	/*
-	 * We have a suspedn ordering issue with the snd-hda driver also
+	 * We have a suspend ordering issue with the snd-hda driver also
 	 * requiring our device to be power up. Due to the lack of a
 	 * parent/child relationship we currently solve this with an late
 	 * suspend hook.
@@ -1043,6 +1047,8 @@ static int skl_suspend_complete(struct drm_i915_private *dev_priv)
 	 */
 	intel_csr_load_status_set(dev_priv, FW_UNINITIALIZED);
+	skl_uninit_cdclk(dev_priv);
+
 	return 0;
 }
@@ -1089,6 +1095,7 @@ static int skl_resume_prepare(struct drm_i915_private *dev_priv)
 {
 	struct drm_device *dev = dev_priv->dev;
+	skl_init_cdclk(dev_priv);
 	intel_csr_load_program(dev);
 	return 0;
@@ -1586,16 +1593,15 @@ static int intel_runtime_resume(struct device *device)
  */
 static int intel_suspend_complete(struct drm_i915_private *dev_priv)
 {
-	struct drm_device *dev = dev_priv->dev;
 	int ret;
-	if (IS_BROXTON(dev))
+	if (IS_BROXTON(dev_priv))
 		ret = bxt_suspend_complete(dev_priv);
-	else if (IS_SKYLAKE(dev))
+	else if (IS_SKYLAKE(dev_priv))
 		ret = skl_suspend_complete(dev_priv);
-	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
 		ret = hsw_suspend_complete(dev_priv);
-	else if (IS_VALLEYVIEW(dev))
+	else if (IS_VALLEYVIEW(dev_priv))
 		ret = vlv_suspend_complete(dev_priv);
 	else
 		ret = 0;
......
@@ -56,7 +56,7 @@
 #define DRIVER_NAME		"i915"
 #define DRIVER_DESC		"Intel Graphics"
-#define DRIVER_DATE		"20150508"
+#define DRIVER_DATE		"20150522"
 #undef WARN_ON
 /* Many gcc seem to no see through this and fall over :( */
@@ -272,6 +272,30 @@ struct drm_i915_private;
 struct i915_mm_struct;
 struct i915_mmu_object;
+struct drm_i915_file_private {
+	struct drm_i915_private *dev_priv;
+	struct drm_file *file;
+
+	struct {
+		spinlock_t lock;
+		struct list_head request_list;
+/* 20ms is a fairly arbitrary limit (greater than the average frame time)
+ * chosen to prevent the CPU getting more than a frame ahead of the GPU
+ * (when using lax throttling for the frontbuffer). We also use it to
+ * offer free GPU waitboosts for severely congested workloads.
+ */
+#define DRM_I915_THROTTLE_JIFFIES msecs_to_jiffies(20)
+	} mm;
+	struct idr context_idr;
+
+	struct intel_rps_client {
+		struct list_head link;
+		unsigned boosts;
+	} rps;
+
+	struct intel_engine_cs *bsd_ring;
+};
 enum intel_dpll_id {
 	DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
 	/* real shared dpll ids must be >= 0 */
@@ -309,7 +333,7 @@ struct intel_dpll_hw_state {
 	uint32_t cfgcr1, cfgcr2;
 	/* bxt */
-	uint32_t ebb0, pll0, pll1, pll2, pll3, pll6, pll8, pcsdw12;
+	uint32_t ebb0, pll0, pll1, pll2, pll3, pll6, pll8, pll10, pcsdw12;
 };
 struct intel_shared_dpll_config {
@@ -508,7 +532,7 @@ struct drm_i915_error_state {
 	struct drm_i915_error_buffer {
 		u32 size;
 		u32 name;
-		u32 rseqno, wseqno;
+		u32 rseqno[I915_NUM_RINGS], wseqno;
 		u32 gtt_offset;
 		u32 read_domains;
 		u32 write_domain;
@@ -1070,6 +1094,8 @@ struct intel_gen6_power_mgmt {
 	struct list_head clients;
 	unsigned boosts;
+	struct intel_rps_client semaphores, mmioflips;
+
 	/* manual wa residency calculations */
 	struct intel_rps_ei up_ei, down_ei;
@@ -1468,7 +1494,8 @@ static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
 struct skl_ddb_allocation {
 	struct skl_ddb_entry pipe[I915_MAX_PIPES];
-	struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES];
+	struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES]; /* packed/uv */
+	struct skl_ddb_entry y_plane[I915_MAX_PIPES][I915_MAX_PLANES]; /* y-plane */
 	struct skl_ddb_entry cursor[I915_MAX_PIPES];
 };
@@ -1684,6 +1711,7 @@ struct drm_i915_private {
 	int num_fence_regs; /* 8 on pre-965, 16 otherwise */
 	unsigned int fsb_freq, mem_freq, is_ddr3;
+	unsigned int skl_boot_cdclk;
 	unsigned int cdclk_freq;
 	unsigned int hpll_freq;
@@ -1938,7 +1966,7 @@ struct drm_i915_gem_object {
 	struct drm_mm_node *stolen;
 	struct list_head global_list;
-	struct list_head ring_list;
+	struct list_head ring_list[I915_NUM_RINGS];
 	/** Used in execbuf to temporarily hold a ref */
 	struct list_head obj_exec_link;
@@ -1949,7 +1977,7 @@ struct drm_i915_gem_object {
 	 * rendering and so a non-zero seqno), and is not set if it is on
 	 * inactive (ready to be unbound) list.
 	 */
-	unsigned int active:1;
+	unsigned int active:I915_NUM_RINGS;
 	/**
 	 * This is set if the object has been written to since last bound
@@ -2020,8 +2048,17 @@ struct drm_i915_gem_object {
 	void *dma_buf_vmapping;
 	int vmapping_count;
-	/** Breadcrumb of last rendering to the buffer. */
-	struct drm_i915_gem_request *last_read_req;
+	/** Breadcrumb of last rendering to the buffer.
+	 * There can only be one writer, but we allow for multiple readers.
+	 * If there is a writer that necessarily implies that all other
+	 * read requests are complete - but we may only be lazily clearing
+	 * the read requests. A read request is naturally the most recent
+	 * request on a ring, so we may have two different write and read
+	 * requests on one ring where the write request is older than the
+	 * read request. This allows for the CPU to read from an active
+	 * buffer by only waiting for the write to complete.
+	 * */
+	struct drm_i915_gem_request *last_read_req[I915_NUM_RINGS];
 	struct drm_i915_gem_request *last_write_req;
 	/** Breadcrumb of last fenced GPU access to the buffer. */
 	struct drm_i915_gem_request *last_fenced_req;
@@ -2160,10 +2197,12 @@ i915_gem_request_get_ring(struct drm_i915_gem_request *req)
 	return req ? req->ring : NULL;
 }
-static inline void
+static inline struct drm_i915_gem_request *
 i915_gem_request_reference(struct drm_i915_gem_request *req)
 {
-	kref_get(&req->ref);
+	if (req)
+		kref_get(&req->ref);
+	return req;
 }
 static inline void
@@ -2204,22 +2243,6 @@ static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
  * a later patch when the call to i915_seqno_passed() is obsoleted...
  */
-struct drm_i915_file_private {
-	struct drm_i915_private *dev_priv;
-	struct drm_file *file;
-
-	struct {
-		spinlock_t lock;
-		struct list_head request_list;
-	} mm;
-	struct idr context_idr;
-
-	struct list_head rps_boost;
-	struct intel_engine_cs *bsd_ring;
-
-	unsigned rps_boosts;
-};
 /*
  * A command that requires special handling by the command parser.
  */
@@ -2375,6 +2398,7 @@ struct drm_i915_cmd_table {
 #define SKL_REVID_C0		(0x2)
 #define SKL_REVID_D0		(0x3)
 #define SKL_REVID_E0		(0x4)
+#define SKL_REVID_F0		(0x5)
 #define BXT_REVID_A0		(0x0)
 #define BXT_REVID_B0		(0x3)
@@ -2445,6 +2469,9 @@ struct drm_i915_cmd_table {
 #define HAS_IPS(dev)		(IS_HSW_ULT(dev) || IS_BROADWELL(dev))
+#define HAS_DP_MST(dev)		(IS_HASWELL(dev) || IS_BROADWELL(dev) || \
+				 INTEL_INFO(dev)->gen >= 9)
+
 #define HAS_DDI(dev)		(INTEL_INFO(dev)->has_ddi)
 #define HAS_FPGA_DBG_UNCLAIMED(dev)	(INTEL_INFO(dev)->has_fpga_dbg)
 #define HAS_PSR(dev)		(IS_HASWELL(dev) || IS_BROADWELL(dev) || \
@@ -2820,7 +2847,6 @@ static inline bool i915_stop_ring_allow_warn(struct drm_i915_private *dev_priv)
 void i915_gem_reset(struct drm_device *dev);
 bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
-int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
 int __must_check i915_gem_init(struct drm_device *dev);
 int i915_gem_init_rings(struct drm_device *dev);
 int __must_check i915_gem_init_hw(struct drm_device *dev);
@@ -2838,10 +2864,13 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
 			unsigned reset_counter,
 			bool interruptible,
 			s64 *timeout,
-			struct drm_i915_file_private *file_priv);
+			struct intel_rps_client *rps);
 int __must_check i915_wait_request(struct drm_i915_gem_request *req);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 int __must_check
+i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
+			       bool readonly);
+int __must_check
 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
 				  bool write);
 int __must_check
......
This diff is collapsed.
@@ -753,8 +753,6 @@ static int do_switch(struct intel_engine_cs *ring,
 		 * swapped, but there is no way to do that yet.
 		 */
 		from->legacy_hw_ctx.rcs_state->dirty = 1;
-		BUG_ON(i915_gem_request_get_ring(
-			from->legacy_hw_ctx.rcs_state->last_read_req) != ring);
 		/* obj is kept alive until the next request by its active ref */
 		i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
......
@@ -34,82 +34,34 @@ int
 i915_verify_lists(struct drm_device *dev)
 {
 	static int warned;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_i915_gem_object *obj;
+	struct intel_engine_cs *ring;
 	int err = 0;
+	int i;
 	if (warned)
 		return 0;
-	list_for_each_entry(obj, &dev_priv->render_ring.active_list, list) {
-		if (obj->base.dev != dev ||
-		    !atomic_read(&obj->base.refcount.refcount)) {
-			DRM_ERROR("freed render active %p\n", obj);
-			err++;
-			break;
-		} else if (!obj->active ||
-			   (obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0) {
-			DRM_ERROR("invalid render active %p (a %d r %x)\n",
-				  obj,
-				  obj->active,
-				  obj->base.read_domains);
-			err++;
-		} else if (obj->base.write_domain && list_empty(&obj->gpu_write_list)) {
-			DRM_ERROR("invalid render active %p (w %x, gwl %d)\n",
-				  obj,
-				  obj->base.write_domain,
-				  !list_empty(&obj->gpu_write_list));
-			err++;
-		}
-	}
-
-	list_for_each_entry(obj, &dev_priv->mm.flushing_list, list) {
-		if (obj->base.dev != dev ||
-		    !atomic_read(&obj->base.refcount.refcount)) {
-			DRM_ERROR("freed flushing %p\n", obj);
-			err++;
-			break;
-		} else if (!obj->active ||
-			   (obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0 ||
-			   list_empty(&obj->gpu_write_list)) {
-			DRM_ERROR("invalid flushing %p (a %d w %x gwl %d)\n",
-				  obj,
-				  obj->active,
-				  obj->base.write_domain,
-				  !list_empty(&obj->gpu_write_list));
-			err++;
-		}
-	}
-
-	list_for_each_entry(obj, &dev_priv->mm.gpu_write_list, gpu_write_list) {
-		if (obj->base.dev != dev ||
-		    !atomic_read(&obj->base.refcount.refcount)) {
-			DRM_ERROR("freed gpu write %p\n", obj);
-			err++;
-			break;
-		} else if (!obj->active ||
-			   (obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0) {
-			DRM_ERROR("invalid gpu write %p (a %d w %x)\n",
-				  obj,
-				  obj->active,
-				  obj->base.write_domain);
-			err++;
-		}
-	}
-
-	list_for_each_entry(obj, &i915_gtt_vm->inactive_list, list) {
-		if (obj->base.dev != dev ||
-		    !atomic_read(&obj->base.refcount.refcount)) {
-			DRM_ERROR("freed inactive %p\n", obj);
-			err++;
-			break;
-		} else if (obj->pin_count || obj->active ||
-			   (obj->base.write_domain & I915_GEM_GPU_DOMAINS)) {
-			DRM_ERROR("invalid inactive %p (p %d a %d w %x)\n",
-				  obj,
-				  obj->pin_count, obj->active,
-				  obj->base.write_domain);
-			err++;
-		}
-	}
+	for_each_ring(ring, dev_priv, i) {
+		list_for_each_entry(obj, &ring->active_list, ring_list[ring->id]) {
+			if (obj->base.dev != dev ||
+			    !atomic_read(&obj->base.refcount.refcount)) {
+				DRM_ERROR("%s: freed active obj %p\n",
+					  ring->name, obj);
+				err++;
+				break;
+			} else if (!obj->active ||
+				   obj->last_read_req[ring->id] == NULL) {
+				DRM_ERROR("%s: invalid active obj %p\n",
+					  ring->name, obj);
+				err++;
+			} else if (obj->base.write_domain) {
+				DRM_ERROR("%s: invalid write obj %p (w %x)\n",
+					  ring->name,
+					  obj, obj->base.write_domain);
+				err++;
+			}
+		}
+	}
......
@@ -889,6 +889,7 @@ static int
 i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
 				struct list_head *vmas)
 {
+	const unsigned other_rings = ~intel_ring_flag(ring);
 	struct i915_vma *vma;
 	uint32_t flush_domains = 0;
 	bool flush_chipset = false;
@@ -896,9 +897,12 @@ i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
 	list_for_each_entry(vma, vmas, exec_list) {
 		struct drm_i915_gem_object *obj = vma->obj;
-		ret = i915_gem_object_sync(obj, ring);
-		if (ret)
-			return ret;
+		if (obj->active & other_rings) {
+			ret = i915_gem_object_sync(obj, ring);
+			if (ret)
+				return ret;
+		}
 		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
 			flush_chipset |= i915_gem_clflush_object(obj, false);
......
@@ -757,7 +757,7 @@ static int gen8_ppgtt_alloc_page_directories(struct i915_hw_ppgtt *ppgtt,
 	WARN_ON(!bitmap_empty(new_pds, GEN8_LEGACY_PDPES));
 	/* FIXME: upper bound must not overflow 32 bits  */
-	WARN_ON((start + length) >= (1ULL << 32));
+	WARN_ON((start + length) > (1ULL << 32));
 	gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) {
 		if (pd)
......
@@ -219,11 +219,14 @@ i915_mmu_notifier_add(struct drm_device *dev,
 		      struct i915_mmu_object *mo)
 {
 	struct interval_tree_node *it;
-	int ret;
+	int ret = 0;
-	ret = i915_mutex_lock_interruptible(dev);
-	if (ret)
-		return ret;
+	/* By this point we have already done a lot of expensive setup that
+	 * we do not want to repeat just because the caller (e.g. X) has a
+	 * signal pending (and partly because of that expensive setup, X
+	 * using an interrupt timer is likely to get stuck in an EINTR loop).
+	 */
+	mutex_lock(&dev->struct_mutex);
 	/* Make sure we drop the final active reference (and thereby
 	 * remove the objects from the interval tree) before we do
......
@@ -192,15 +192,20 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
 				struct drm_i915_error_buffer *err,
 				int count)
 {
+	int i;
+
 	err_printf(m, "  %s [%d]:\n", name, count);
 	while (count--) {
-		err_printf(m, "    %08x %8u %02x %02x %x %x",
+		err_printf(m, "    %08x %8u %02x %02x [ ",
 			   err->gtt_offset,
 			   err->size,
 			   err->read_domains,
-			   err->write_domain,
-			   err->rseqno, err->wseqno);
+			   err->write_domain);
+		for (i = 0; i < I915_NUM_RINGS; i++)
+			err_printf(m, "%02x ", err->rseqno[i]);
+
+		err_printf(m, "] %02x", err->wseqno);
 		err_puts(m, pin_flag(err->pinned));
 		err_puts(m, tiling_flag(err->tiling));
 		err_puts(m, dirty_flag(err->dirty));
@@ -681,10 +686,12 @@ static void capture_bo(struct drm_i915_error_buffer *err,
 		       struct i915_vma *vma)
 {
 	struct drm_i915_gem_object *obj = vma->obj;
+	int i;
 	err->size = obj->base.size;
 	err->name = obj->base.name;
-	err->rseqno = i915_gem_request_get_seqno(obj->last_read_req);
+	for (i = 0; i < I915_NUM_RINGS; i++)
+		err->rseqno[i] = i915_gem_request_get_seqno(obj->last_read_req[i]);
 	err->wseqno = i915_gem_request_get_seqno(obj->last_write_req);
 	err->gtt_offset = vma->node.start;
 	err->read_domains = obj->base.read_domains;
@@ -697,8 +704,8 @@ static void capture_bo(struct drm_i915_error_buffer *err,
 	err->dirty = obj->dirty;
 	err->purgeable = obj->madv != I915_MADV_WILLNEED;
 	err->userptr = obj->userptr.mm != NULL;
-	err->ring = obj->last_read_req ?
-			i915_gem_request_get_ring(obj->last_read_req)->id : -1;
+	err->ring = obj->last_write_req ?
+			i915_gem_request_get_ring(obj->last_write_req)->id : -1;
 	err->cache_level = obj->cache_level;
 }
......
@@ -79,7 +79,7 @@ static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
 };
-static const u32 hpd_status_i915[HPD_NUM_PINS] = { /* i915 and valleyview are the same */
+static const u32 hpd_status_i915[HPD_NUM_PINS] = {
 	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
@@ -1070,6 +1070,18 @@ static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
 	return events;
 }
+static bool any_waiters(struct drm_i915_private *dev_priv)
+{
+	struct intel_engine_cs *ring;
+	int i;
+
+	for_each_ring(ring, dev_priv, i)
+		if (ring->irq_refcount)
+			return true;
+
+	return false;
+}
+
 static void gen6_pm_rps_work(struct work_struct *work)
 {
 	struct drm_i915_private *dev_priv =
@@ -1114,6 +1126,8 @@ static void gen6_pm_rps_work(struct work_struct *work)
 			new_delay = dev_priv->rps.efficient_freq;
 			adj = 0;
 		}
+	} else if (any_waiters(dev_priv)) {
+		adj = 0;
 	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
 		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
 			new_delay = dev_priv->rps.efficient_freq;
@@ -1386,7 +1400,7 @@ static int i915_port_to_hotplug_shift(enum port port)
 	}
 }
-static inline enum port get_port_from_pin(enum hpd_pin pin)
+static enum port get_port_from_pin(enum hpd_pin pin)
 {
 	switch (pin) {
 	case HPD_PORT_B:
@@ -1400,10 +1414,10 @@ static enum port get_port_from_pin(enum hpd_pin pin)
 	}
 }
-static inline void intel_hpd_irq_handler(struct drm_device *dev,
-					 u32 hotplug_trigger,
-					 u32 dig_hotplug_reg,
-					 const u32 hpd[HPD_NUM_PINS])
+static void intel_hpd_irq_handler(struct drm_device *dev,
+				  u32 hotplug_trigger,
+				  u32 dig_hotplug_reg,
+				  const u32 hpd[HPD_NUM_PINS])
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int i;
@@ -1743,7 +1757,7 @@ static void i9xx_hpd_irq_handler(struct drm_device *dev)
 	 */
 	POSTING_READ(PORT_HOTPLUG_STAT);
-	if (IS_G4X(dev)) {
+	if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
 		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
 		intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x);
......
@@ -1204,6 +1204,12 @@ enum skl_disp_power_wells {
 #define   PORT_PLL_GAIN_CTL(x)		((x) << 16)
 /* PORT_PLL_8_A */
 #define   PORT_PLL_TARGET_CNT_MASK	0x3FF
+/* PORT_PLL_9_A */
+#define  PORT_PLL_LOCK_THRESHOLD_MASK	0xe
+/* PORT_PLL_10_A */
+#define  PORT_PLL_DCO_AMP_OVR_EN_H	(1<<27)
+#define  PORT_PLL_DCO_AMP_MASK		0x3c00
+#define  PORT_PLL_DCO_AMP(x)		(x<<10)
 #define _PORT_PLL_BASE(port)		_PORT3(port, _PORT_PLL_0_A, \
 						_PORT_PLL_0_B, \
 						_PORT_PLL_0_C)
@@ -1455,6 +1461,8 @@ enum skl_disp_power_wells {
 #define RING_HWS_PGA(base)	((base)+0x80)
 #define RING_HWS_PGA_GEN6(base)	((base)+0x2080)
+#define HSW_GTT_CACHE_EN	0x4024
+#define   GTT_CACHE_EN_ALL	0xF0007FFF
 #define GEN7_WR_WATERMARK	0x4028
 #define GEN7_GFX_PRIO_CTRL	0x402C
 #define ARB_MODE		0x4030
@@ -5167,6 +5175,8 @@ enum skl_disp_power_wells {
 #define _PLANE_KEYMAX_2_A			0x702a0
 #define _PLANE_BUF_CFG_1_A			0x7027c
 #define _PLANE_BUF_CFG_2_A			0x7037c
+#define _PLANE_NV12_BUF_CFG_1_A		0x70278
+#define _PLANE_NV12_BUF_CFG_2_A		0x70378
 #define _PLANE_CTL_1_B				0x71180
 #define _PLANE_CTL_2_B				0x71280
@@ -5253,6 +5263,15 @@ enum skl_disp_power_wells {
 #define PLANE_BUF_CFG(pipe, plane)	\
 	_PLANE(plane, _PLANE_BUF_CFG_1(pipe), _PLANE_BUF_CFG_2(pipe))
+#define _PLANE_NV12_BUF_CFG_1_B		0x71278
+#define _PLANE_NV12_BUF_CFG_2_B		0x71378
+#define _PLANE_NV12_BUF_CFG_1(pipe)	\
+	_PIPE(pipe, _PLANE_NV12_BUF_CFG_1_A, _PLANE_NV12_BUF_CFG_1_B)
+#define _PLANE_NV12_BUF_CFG_2(pipe)	\
+	_PIPE(pipe, _PLANE_NV12_BUF_CFG_2_A, _PLANE_NV12_BUF_CFG_2_B)
+#define PLANE_NV12_BUF_CFG(pipe, plane)	\
+	_PLANE(plane, _PLANE_NV12_BUF_CFG_1(pipe), _PLANE_NV12_BUF_CFG_2(pipe))
+
 /* SKL new cursor registers */
 #define _CUR_BUF_CFG_A				0x7017c
 #define _CUR_BUF_CFG_B				0x7117c
@@ -5774,6 +5793,7 @@ enum skl_disp_power_wells {
 /* GEN8 chicken */
 #define HDC_CHICKEN0				0x7300
+#define  HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE	(1<<15)
 #define  HDC_FENCE_DEST_SLM_DISABLE		(1<<14)
 #define  HDC_DONOT_FETCH_MEM_WHEN_MASKED	(1<<11)
 #define  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT	(1<<5)
@@ -6422,6 +6442,7 @@ enum skl_disp_power_wells {
 #define  TRANS_DP_PORT_SEL_D	(2<<29)
 #define  TRANS_DP_PORT_SEL_NONE	(3<<29)
 #define  TRANS_DP_PORT_SEL_MASK	(3<<29)
+#define  TRANS_DP_PIPE_TO_PORT(val)	((((val) & TRANS_DP_PORT_SEL_MASK) >> 29) + PORT_B)
 #define  TRANS_DP_AUDIO_ONLY	(1<<26)
 #define  TRANS_DP_ENH_FRAMING	(1<<18)
 #define  TRANS_DP_8BPC		(0<<9)
@@ -6681,6 +6702,9 @@ enum skl_disp_power_wells {
 #define   GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT	8
 #define   GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT	16
 #define   GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT	24
+#define   SKL_PCODE_CDCLK_CONTROL	0x7
+#define     SKL_CDCLK_PREPARE_FOR_CHANGE	0x3
+#define     SKL_CDCLK_READY_FOR_CHANGE	0x1
 #define   GEN6_PCODE_WRITE_MIN_FREQ_TABLE	0x8
 #define   GEN6_PCODE_READ_MIN_FREQ_TABLE	0x9
 #define   GEN6_READ_OC_PARAMS			0xc
......
@@ -36,10 +36,11 @@
 static int panel_type;
-static void *
-find_section(struct bdb_header *bdb, int section_id)
+static const void *
+find_section(const void *_bdb, int section_id)
 {
-	u8 *base = (u8 *)bdb;
+	const struct bdb_header *bdb = _bdb;
+	const u8 *base = _bdb;
 	int index = 0;
 	u16 total, current_size;
 	u8 current_id;
@@ -53,7 +54,7 @@ find_section(const void *_bdb, int section_id)
 		current_id = *(base + index);
 		index++;
-		current_size = *((u16 *)(base + index));
+		current_size = *((const u16 *)(base + index));
 		index += 2;
 		if (index + current_size > total)
@@ -69,7 +70,7 @@ find_section(const void *_bdb, int section_id)
 }
 static u16
-get_blocksize(void *p)
+get_blocksize(const void *p)
 {
 	u16 *block_ptr, block_size;
@@ -204,7 +205,7 @@ get_lvds_fp_timing(const struct bdb_header *bdb,
 /* Try to find integrated panel data */
 static void
 parse_lfp_panel_data(struct drm_i915_private *dev_priv,
-		     struct bdb_header *bdb)
+		     const struct bdb_header *bdb)
 {
 	const struct bdb_lvds_options *lvds_options;
 	const struct bdb_lvds_lfp_data *lvds_lfp_data;
@@ -310,7 +311,8 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
 }
 static void
-parse_lfp_backlight(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
+parse_lfp_backlight(struct drm_i915_private *dev_priv,
+		    const struct bdb_header *bdb)
 {
 	const struct bdb_lfp_backlight_data *backlight_data;
 	const struct bdb_lfp_backlight_data_entry *entry;
@@ -348,9 +350,9 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv,
 /* Try to find sdvo panel data */
 static void
 parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
-		      struct bdb_header *bdb)
+		      const struct bdb_header *bdb)
 {
-	struct lvds_dvo_timing *dvo_timing;
+	const struct lvds_dvo_timing *dvo_timing;
 	struct drm_display_mode *panel_fixed_mode;
 	int index;
@@ -361,7 +363,7 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
 	}
 	if (index == -1) {
-		struct bdb_sdvo_lvds_options *sdvo_lvds_options;
+		const struct bdb_sdvo_lvds_options *sdvo_lvds_options;
 		sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS);
 		if (!sdvo_lvds_options)
@@ -402,10 +404,10 @@ static int intel_bios_ssc_frequency(struct drm_device *dev,
 static void
 parse_general_features(struct drm_i915_private *dev_priv,
-		       struct bdb_header *bdb)
+		       const struct bdb_header *bdb)
 {
 	struct drm_device *dev = dev_priv->dev;
-	struct bdb_general_features *general;
+	const struct bdb_general_features *general;
 	general = find_section(bdb, BDB_GENERAL_FEATURES);
 	if (general) {
@@ -428,9 +430,9 @@ parse_general_features(struct drm_i915_private *dev_priv,
 static void
 parse_general_definitions(struct drm_i915_private *dev_priv,
-			  struct bdb_header *bdb)
+			  const struct bdb_header *bdb)
 {
-	struct bdb_general_definitions *general;
+	const struct bdb_general_definitions *general;
 	general = find_section(bdb, BDB_GENERAL_DEFINITIONS);
 	if (general) {
@@ -447,19 +449,19 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
 	}
 }
-static union child_device_config *
-child_device_ptr(struct bdb_general_definitions *p_defs, int i)
+static const union child_device_config *
+child_device_ptr(const struct bdb_general_definitions *p_defs, int i)
 {
-	return (void *) &p_defs->devices[i * p_defs->child_dev_size];
+	return (const void *) &p_defs->devices[i * p_defs->child_dev_size];
 }
 static void
 parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
-			  struct bdb_header *bdb)
+			  const struct bdb_header *bdb)
 {
 	struct sdvo_device_mapping *p_mapping;
-	struct bdb_general_definitions *p_defs;
-	union child_device_config *p_child;
+	const struct bdb_general_definitions *p_defs;
+	const union child_device_config *p_child;
 	int i, child_device_num, count;
 	u16 block_size;
@@ -545,9 +547,9 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
 static void
 parse_driver_features(struct drm_i915_private *dev_priv,
-		      struct bdb_header *bdb)
+		      const struct bdb_header *bdb)
 {
-	struct bdb_driver_features *driver;
+	const struct bdb_driver_features *driver;
 	driver = find_section(bdb, BDB_DRIVER_FEATURES);
 	if (!driver)
@@ -571,11 +573,11 @@ parse_driver_features(struct drm_i915_private *dev_priv,
 }
 static void
-parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
+parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
 {
-	struct bdb_edp *edp;
-	struct edp_power_seq *edp_pps;
-	struct edp_link_params *edp_link_params;
+	const struct bdb_edp *edp;
+	const struct edp_power_seq *edp_pps;
+	const struct edp_link_params *edp_link_params;
 	edp = find_section(bdb, BDB_EDP);
 	if (!edp) {
@@ -683,10 +685,10 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
 }
 static void
-parse_psr(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
+parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
 {
-	struct bdb_psr *psr;
-	struct psr_table *psr_table;
+	const struct bdb_psr *psr;
+	const struct psr_table *psr_table;
 	psr = find_section(bdb, BDB_PSR);
 	if (!psr) {
@@ -794,13 +796,14 @@ static u8 *goto_next_sequence(u8 *data, int *size)
 }
 static void
-parse_mipi(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
+parse_mipi(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
 {
-	struct bdb_mipi_config *start;
-	struct bdb_mipi_sequence *sequence;
-	struct mipi_config *config;
-	struct mipi_pps_data *pps;
-	u8 *data, *seq_data;
+	const struct bdb_mipi_config *start;
+	const struct bdb_mipi_sequence *sequence;
+	const struct mipi_config *config;
+	const struct mipi_pps_data *pps;
+	u8 *data;
+	const u8 *seq_data;
 	int i, panel_id, seq_size;
 	u16 block_size;
@@ -944,7 +947,7 @@ parse_mipi(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
 }
 static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
-			   struct bdb_header *bdb)
+			   const struct bdb_header *bdb)
 {
 	union child_device_config *it, *child = NULL;
 	struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
@@ -1046,7 +1049,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
 }
 static void parse_ddi_ports(struct drm_i915_private *dev_priv,
-			    struct bdb_header *bdb)
+			    const struct bdb_header *bdb)
 {
 	struct drm_device *dev = dev_priv->dev;
 	enum port port;
@@ -1066,10 +1069,11 @@ static void parse_ddi_ports(struct drm_i915_private *dev_priv,
 static void
 parse_device_mapping(struct drm_i915_private *dev_priv,
-		     struct bdb_header *bdb)
+		     const struct bdb_header *bdb)
 {
-	struct bdb_general_definitions *p_defs;
-	union child_device_config *p_child, *child_dev_ptr;
+	const struct bdb_general_definitions *p_defs;
+	const union child_device_config *p_child;
+	union child_device_config *child_dev_ptr;
 	int i, child_device_num, count;
 	u16 block_size;
@@ -1126,8 +1130,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
 		child_dev_ptr = dev_priv->vbt.child_dev + count;
 		count++;
-		memcpy((void *)child_dev_ptr, (void *)p_child,
-		       sizeof(*p_child));
+		memcpy(child_dev_ptr, p_child, sizeof(*p_child));
 	}
 	return;
 }
@@ -1196,19 +1199,22 @@ static const struct dmi_system_id intel_no_opregion_vbt[] = {
 	{ }
 };
-static struct bdb_header *validate_vbt(char *base, size_t size,
-				       struct vbt_header *vbt,
-				       const char *source)
+static const struct bdb_header *validate_vbt(const void __iomem *_base,
+					     size_t size,
+					     const void __iomem *_vbt,
+					     const char *source)
 {
-	size_t offset;
-	struct bdb_header *bdb;
-
-	if (vbt == NULL) {
-		DRM_DEBUG_DRIVER("VBT signature missing\n");
-		return NULL;
-	}
-
-	offset = (char *)vbt - base;
+	/*
+	 * This is the one place where we explicitly discard the address space
+	 * (__iomem) of the BIOS/VBT. (And this will cause a sparse complaint.)
+	 * From now on everything is based on 'base', and treated as regular
+	 * memory.
+	 */
+	const void *base = (const void *) _base;
+	size_t offset = _vbt - _base;
+	const struct vbt_header *vbt = base + offset;
+	const struct bdb_header *bdb;
 	if (offset + sizeof(struct vbt_header) > size) {
 		DRM_DEBUG_DRIVER("VBT header incomplete\n");
 		return NULL;
@@ -1225,7 +1231,7 @@ static const struct bdb_header *validate_vbt(const void __iomem *_base,
 		return NULL;
 	}
-	bdb = (struct bdb_header *)(base + offset);
+	bdb = base + offset;
 	if (offset + bdb->bdb_size > size) {
 		DRM_DEBUG_DRIVER("BDB incomplete\n");
 		return NULL;
@@ -1236,6 +1242,22 @@ static const struct bdb_header *validate_vbt(const void __iomem *_base,
 	return bdb;
 }
+static const struct bdb_header *find_vbt(void __iomem *bios, size_t size)
+{
+	const struct bdb_header *bdb = NULL;
+	size_t i;
+
+	/* Scour memory looking for the VBT signature. */
+	for (i = 0; i + 4 < size; i++) {
+		if (ioread32(bios + i) == *((const u32 *) "$VBT")) {
+			bdb = validate_vbt(bios, size, bios + i, "PCI ROM");
+			break;
+		}
+	}
+
+	return bdb;
+}
 /**
  * intel_parse_bios - find VBT and initialize settings from the BIOS
  * @dev: DRM device
@@ -1250,7 +1272,7 @@ intel_parse_bios(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct pci_dev *pdev = dev->pdev;
-	struct bdb_header *bdb = NULL;
+	const struct bdb_header *bdb = NULL;
 	u8 __iomem *bios = NULL;
 	if (HAS_PCH_NOP(dev))
@@ -1260,27 +1282,17 @@ intel_parse_bios(struct drm_device *dev)
 	/* XXX Should this validation be moved to intel_opregion.c? */
 	if (!dmi_check_system(intel_no_opregion_vbt) && dev_priv->opregion.vbt)
-		bdb = validate_vbt((char *)dev_priv->opregion.header, OPREGION_SIZE,
-				   (struct vbt_header *)dev_priv->opregion.vbt,
-				   "OpRegion");
+		bdb = validate_vbt(dev_priv->opregion.header, OPREGION_SIZE,
+				   dev_priv->opregion.vbt, "OpRegion");
 	if (bdb == NULL) {
-		size_t i, size;
+		size_t size;
 		bios = pci_map_rom(pdev, &size);
 		if (!bios)
 			return -1;
-		/* Scour memory looking for the VBT signature */
-		for (i = 0; i + 4 < size; i++) {
-			if (memcmp(bios + i, "$VBT", 4) == 0) {
-				bdb = validate_vbt(bios, size,
-						   (struct vbt_header *)(bios + i),
-						   "PCI ROM");
-				break;
-			}
-		}
+		bdb = find_vbt(bios, size);
 		if (!bdb) {
 			pci_unmap_rom(pdev, bios);
 			return -1;
......
@@ -207,6 +207,14 @@ static void intel_disable_crt(struct intel_encoder *encoder)
 	intel_crt_set_dpms(encoder, DRM_MODE_DPMS_OFF);
 }
+static void pch_disable_crt(struct intel_encoder *encoder)
+{
+}
+
+static void pch_post_disable_crt(struct intel_encoder *encoder)
+{
+	intel_disable_crt(encoder);
+}
+
 static void hsw_crt_post_disable(struct intel_encoder *encoder)
 {
@@ -888,7 +896,12 @@ void intel_crt_init(struct drm_device *dev)
 	crt->adpa_reg = ADPA;
 	crt->base.compute_config = intel_crt_compute_config;
-	crt->base.disable = intel_disable_crt;
+	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev)) {
+		crt->base.disable = pch_disable_crt;
+		crt->base.post_disable = pch_post_disable_crt;
+	} else {
+		crt->base.disable = intel_disable_crt;
+	}
 	crt->base.enable = intel_enable_crt;
 	if (I915_HAS_HOTPLUG(dev))
 		crt->base.hpd_pin = HPD_CRT;
......
...@@ -25,6 +25,22 @@ ...@@ -25,6 +25,22 @@
#include "i915_drv.h" #include "i915_drv.h"
#include "i915_reg.h" #include "i915_reg.h"
/**
* DOC: csr support for dmc
*
 * Display Context Save and Restore (CSR) firmware support, added from gen9
 * onwards, drives the newly added DMC (Display Microcontroller) in the
 * display engine, which saves and restores the display engine state when it
 * enters a low-power state and comes back to normal.
 *
 * The firmware loading status is one of the following states:
 * FW_UNINITIALIZED, FW_LOADED, FW_FAILED.
 *
 * Once the firmware has been written into the registers, the status moves
 * from FW_UNINITIALIZED to FW_LOADED; on any erroneous condition it moves
 * to FW_FAILED.
*/
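/*
 * As a sketch, the state flow described above is:
 *
 *	FW_UNINITIALIZED --(firmware written to CSR registers)--> FW_LOADED
 *	FW_UNINITIALIZED --(any erroneous condition)------------> FW_FAILED
 */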
#define I915_CSR_SKL "i915/skl_dmc_ver4.bin" #define I915_CSR_SKL "i915/skl_dmc_ver4.bin"
MODULE_FIRMWARE(I915_CSR_SKL); MODULE_FIRMWARE(I915_CSR_SKL);
...@@ -183,6 +199,14 @@ static char intel_get_substepping(struct drm_device *dev) ...@@ -183,6 +199,14 @@ static char intel_get_substepping(struct drm_device *dev)
return -ENODATA; return -ENODATA;
} }
/**
 * intel_csr_load_status_get() - get the firmware loading status.
 * @dev_priv: i915 device.
 *
 * Report the current firmware loading status.
*
* Return: Firmware loading status.
*/
enum csr_state intel_csr_load_status_get(struct drm_i915_private *dev_priv) enum csr_state intel_csr_load_status_get(struct drm_i915_private *dev_priv)
{ {
enum csr_state state; enum csr_state state;
...@@ -194,6 +218,13 @@ enum csr_state intel_csr_load_status_get(struct drm_i915_private *dev_priv) ...@@ -194,6 +218,13 @@ enum csr_state intel_csr_load_status_get(struct drm_i915_private *dev_priv)
return state; return state;
} }
/**
 * intel_csr_load_status_set() - set the firmware loading status.
* @dev_priv: i915 device.
* @state: enumeration of firmware loading status.
*
* Set the firmware loading status.
*/
void intel_csr_load_status_set(struct drm_i915_private *dev_priv, void intel_csr_load_status_set(struct drm_i915_private *dev_priv,
enum csr_state state) enum csr_state state)
{ {
...@@ -202,6 +233,14 @@ void intel_csr_load_status_set(struct drm_i915_private *dev_priv, ...@@ -202,6 +233,14 @@ void intel_csr_load_status_set(struct drm_i915_private *dev_priv,
mutex_unlock(&dev_priv->csr_lock); mutex_unlock(&dev_priv->csr_lock);
} }
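/*
 * A minimal usage sketch of the two accessors above; the wrapper name is
 * hypothetical and not part of this patch.
 */
static void csr_mark_loaded_example(struct drm_i915_private *dev_priv)
{
	/* Skip if a previous attempt already loaded or failed the firmware. */
	if (intel_csr_load_status_get(dev_priv) != FW_UNINITIALIZED)
		return;

	/* ... parse and program the firmware payload here ... */

	intel_csr_load_status_set(dev_priv, FW_LOADED);
}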
/**
 * intel_csr_load_program() - write the firmware from memory to the registers.
 * @dev: drm device.
 *
 * CSR firmware is read from a .bin file once and kept in internal memory.
 * Every time the display comes back from a low-power state this function is
 * called to copy the firmware from internal memory to the registers.
*/
void intel_csr_load_program(struct drm_device *dev) void intel_csr_load_program(struct drm_device *dev)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
...@@ -359,6 +398,13 @@ static void finish_csr_load(const struct firmware *fw, void *context) ...@@ -359,6 +398,13 @@ static void finish_csr_load(const struct firmware *fw, void *context)
release_firmware(fw); release_firmware(fw);
} }
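/*
 * The kernel-doc for intel_csr_load_program() implies a caller on the
 * resume path; a hedged sketch (the hook name below is an assumption):
 *
 *	skl_display_resume(dev)
 *		-> intel_csr_load_program(dev)	re-arm DMC from cached image
 */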
/**
* intel_csr_ucode_init() - initialize the firmware loading.
* @dev: drm device.
*
 * This function is called at display driver load time to read the firmware
 * from a .bin file and copy it into internal memory.
*/
void intel_csr_ucode_init(struct drm_device *dev) void intel_csr_ucode_init(struct drm_device *dev)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
...@@ -393,6 +439,13 @@ void intel_csr_ucode_init(struct drm_device *dev) ...@@ -393,6 +439,13 @@ void intel_csr_ucode_init(struct drm_device *dev)
} }
} }
/**
* intel_csr_ucode_fini() - unload the CSR firmware.
* @dev: drm device.
*
 * Firmware unloading includes freeing the internal memory and resetting the
 * firmware loading status.
*/
void intel_csr_ucode_fini(struct drm_device *dev) void intel_csr_ucode_fini(struct drm_device *dev)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
......
...@@ -1087,6 +1087,9 @@ hsw_ddi_pll_select(struct intel_crtc *intel_crtc, ...@@ -1087,6 +1087,9 @@ hsw_ddi_pll_select(struct intel_crtc *intel_crtc,
WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) | WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
WRPLL_DIVIDER_POST(p); WRPLL_DIVIDER_POST(p);
memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
crtc_state->dpll_hw_state.wrpll = val; crtc_state->dpll_hw_state.wrpll = val;
pll = intel_get_shared_dpll(intel_crtc, crtc_state); pll = intel_get_shared_dpll(intel_crtc, crtc_state);
...@@ -1309,6 +1312,9 @@ skl_ddi_pll_select(struct intel_crtc *intel_crtc, ...@@ -1309,6 +1312,9 @@ skl_ddi_pll_select(struct intel_crtc *intel_crtc,
} else /* eDP */ } else /* eDP */
return true; return true;
memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
crtc_state->dpll_hw_state.ctrl1 = ctrl1; crtc_state->dpll_hw_state.ctrl1 = ctrl1;
crtc_state->dpll_hw_state.cfgcr1 = cfgcr1; crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
crtc_state->dpll_hw_state.cfgcr2 = cfgcr2; crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
...@@ -1334,22 +1340,17 @@ struct bxt_clk_div { ...@@ -1334,22 +1340,17 @@ struct bxt_clk_div {
uint32_t m2_frac; uint32_t m2_frac;
bool m2_frac_en; bool m2_frac_en;
uint32_t n; uint32_t n;
uint32_t prop_coef;
uint32_t int_coef;
uint32_t gain_ctl;
uint32_t targ_cnt;
uint32_t lanestagger;
}; };
/* pre-calculated values for DP linkrates */ /* pre-calculated values for DP linkrates */
static struct bxt_clk_div bxt_dp_clk_val[7] = { static struct bxt_clk_div bxt_dp_clk_val[7] = {
/* 162 */ {4, 2, 32, 1677722, 1, 1, 5, 11, 2, 9, 0xd}, /* 162 */ {4, 2, 32, 1677722, 1, 1},
/* 270 */ {4, 1, 27, 0, 0, 1, 3, 8, 1, 9, 0xd}, /* 270 */ {4, 1, 27, 0, 0, 1},
/* 540 */ {2, 1, 27, 0, 0, 1, 3, 8, 1, 9, 0x18}, /* 540 */ {2, 1, 27, 0, 0, 1},
/* 216 */ {3, 2, 32, 1677722, 1, 1, 5, 11, 2, 9, 0xd}, /* 216 */ {3, 2, 32, 1677722, 1, 1},
/* 243 */ {4, 1, 24, 1258291, 1, 1, 5, 11, 2, 9, 0xd}, /* 243 */ {4, 1, 24, 1258291, 1, 1},
/* 324 */ {4, 1, 32, 1677722, 1, 1, 5, 11, 2, 9, 0xd}, /* 324 */ {4, 1, 32, 1677722, 1, 1},
/* 432 */ {3, 1, 32, 1677722, 1, 1, 5, 11, 2, 9, 0x18} /* 432 */ {3, 1, 32, 1677722, 1, 1}
}; };
static bool static bool
...@@ -1360,6 +1361,9 @@ bxt_ddi_pll_select(struct intel_crtc *intel_crtc, ...@@ -1360,6 +1361,9 @@ bxt_ddi_pll_select(struct intel_crtc *intel_crtc,
{ {
struct intel_shared_dpll *pll; struct intel_shared_dpll *pll;
struct bxt_clk_div clk_div = {0}; struct bxt_clk_div clk_div = {0};
int vco = 0;
uint32_t prop_coef, int_coef, gain_ctl, targ_cnt;
uint32_t dcoampovr_en_h, dco_amp, lanestagger;
if (intel_encoder->type == INTEL_OUTPUT_HDMI) { if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
intel_clock_t best_clock; intel_clock_t best_clock;
...@@ -1383,21 +1387,7 @@ bxt_ddi_pll_select(struct intel_crtc *intel_crtc, ...@@ -1383,21 +1387,7 @@ bxt_ddi_pll_select(struct intel_crtc *intel_crtc,
clk_div.m2_frac = best_clock.m2 & ((1 << 22) - 1); clk_div.m2_frac = best_clock.m2 & ((1 << 22) - 1);
clk_div.m2_frac_en = clk_div.m2_frac != 0; clk_div.m2_frac_en = clk_div.m2_frac != 0;
/* FIXME: set coef, gain, targcnt based on freq band */ vco = best_clock.vco;
clk_div.prop_coef = 5;
clk_div.int_coef = 11;
clk_div.gain_ctl = 2;
clk_div.targ_cnt = 9;
if (clock > 270000)
clk_div.lanestagger = 0x18;
else if (clock > 135000)
clk_div.lanestagger = 0x0d;
else if (clock > 67000)
clk_div.lanestagger = 0x07;
else if (clock > 33000)
clk_div.lanestagger = 0x04;
else
clk_div.lanestagger = 0x02;
} else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT || } else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
intel_encoder->type == INTEL_OUTPUT_EDP) { intel_encoder->type == INTEL_OUTPUT_EDP) {
struct drm_encoder *encoder = &intel_encoder->base; struct drm_encoder *encoder = &intel_encoder->base;
...@@ -1417,8 +1407,48 @@ bxt_ddi_pll_select(struct intel_crtc *intel_crtc, ...@@ -1417,8 +1407,48 @@ bxt_ddi_pll_select(struct intel_crtc *intel_crtc,
clk_div = bxt_dp_clk_val[0]; clk_div = bxt_dp_clk_val[0];
DRM_ERROR("Unknown link rate\n"); DRM_ERROR("Unknown link rate\n");
} }
vco = clock * 10 / 2 * clk_div.p1 * clk_div.p2;
}
dco_amp = 15;
dcoampovr_en_h = 0;
if (vco >= 6200000 && vco <= 6480000) {
prop_coef = 4;
int_coef = 9;
gain_ctl = 3;
targ_cnt = 8;
} else if ((vco > 5400000 && vco < 6200000) ||
(vco >= 4800000 && vco < 5400000)) {
prop_coef = 5;
int_coef = 11;
gain_ctl = 3;
targ_cnt = 9;
if (vco >= 4800000 && vco < 5400000)
dcoampovr_en_h = 1;
} else if (vco == 5400000) {
prop_coef = 3;
int_coef = 8;
gain_ctl = 1;
targ_cnt = 9;
} else {
DRM_ERROR("Invalid VCO\n");
return false;
} }
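/*
 * Worked example: for the 270 MHz DP link rate the table above gives
 * p1 = 4, p2 = 1, so vco = 270000 * 10 / 2 * 4 * 1 = 5400000 kHz, which
 * selects the vco == 5400000 branch: prop_coef = 3, int_coef = 8,
 * gain_ctl = 1, targ_cnt = 9.
 */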
memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
if (clock > 270000)
lanestagger = 0x18;
else if (clock > 135000)
lanestagger = 0x0d;
else if (clock > 67000)
lanestagger = 0x07;
else if (clock > 33000)
lanestagger = 0x04;
else
lanestagger = 0x02;
crtc_state->dpll_hw_state.ebb0 = crtc_state->dpll_hw_state.ebb0 =
PORT_PLL_P1(clk_div.p1) | PORT_PLL_P2(clk_div.p2); PORT_PLL_P1(clk_div.p1) | PORT_PLL_P2(clk_div.p2);
crtc_state->dpll_hw_state.pll0 = clk_div.m2_int; crtc_state->dpll_hw_state.pll0 = clk_div.m2_int;
...@@ -1430,14 +1460,19 @@ bxt_ddi_pll_select(struct intel_crtc *intel_crtc, ...@@ -1430,14 +1460,19 @@ bxt_ddi_pll_select(struct intel_crtc *intel_crtc,
PORT_PLL_M2_FRAC_ENABLE; PORT_PLL_M2_FRAC_ENABLE;
crtc_state->dpll_hw_state.pll6 = crtc_state->dpll_hw_state.pll6 =
clk_div.prop_coef | PORT_PLL_INT_COEFF(clk_div.int_coef); prop_coef | PORT_PLL_INT_COEFF(int_coef);
crtc_state->dpll_hw_state.pll6 |= crtc_state->dpll_hw_state.pll6 |=
PORT_PLL_GAIN_CTL(clk_div.gain_ctl); PORT_PLL_GAIN_CTL(gain_ctl);
crtc_state->dpll_hw_state.pll8 = targ_cnt;
crtc_state->dpll_hw_state.pll8 = clk_div.targ_cnt; if (dcoampovr_en_h)
crtc_state->dpll_hw_state.pll10 = PORT_PLL_DCO_AMP_OVR_EN_H;
crtc_state->dpll_hw_state.pll10 |= PORT_PLL_DCO_AMP(dco_amp);
crtc_state->dpll_hw_state.pcsdw12 = crtc_state->dpll_hw_state.pcsdw12 =
LANESTAGGER_STRAP_OVRD | clk_div.lanestagger; LANESTAGGER_STRAP_OVRD | lanestagger;
pll = intel_get_shared_dpll(intel_crtc, crtc_state); pll = intel_get_shared_dpll(intel_crtc, crtc_state);
if (pll == NULL) { if (pll == NULL) {
...@@ -2367,10 +2402,16 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv, ...@@ -2367,10 +2402,16 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
temp |= pll->config.hw_state.pll8; temp |= pll->config.hw_state.pll8;
I915_WRITE(BXT_PORT_PLL(port, 8), temp); I915_WRITE(BXT_PORT_PLL(port, 8), temp);
/* temp = I915_READ(BXT_PORT_PLL(port, 9));
* FIXME: program PORT_PLL_9/i_lockthresh according to the latest temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
* specification update. temp |= (5 << 1);
*/ I915_WRITE(BXT_PORT_PLL(port, 9), temp);
temp = I915_READ(BXT_PORT_PLL(port, 10));
temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
temp &= ~PORT_PLL_DCO_AMP_MASK;
temp |= pll->config.hw_state.pll10;
I915_WRITE(BXT_PORT_PLL(port, 10), temp);
/* Recalibrate with new settings */ /* Recalibrate with new settings */
temp = I915_READ(BXT_PORT_PLL_EBB_4(port)); temp = I915_READ(BXT_PORT_PLL_EBB_4(port));
...@@ -2434,6 +2475,7 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv, ...@@ -2434,6 +2475,7 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
hw_state->pll3 = I915_READ(BXT_PORT_PLL(port, 3)); hw_state->pll3 = I915_READ(BXT_PORT_PLL(port, 3));
hw_state->pll6 = I915_READ(BXT_PORT_PLL(port, 6)); hw_state->pll6 = I915_READ(BXT_PORT_PLL(port, 6));
hw_state->pll8 = I915_READ(BXT_PORT_PLL(port, 8)); hw_state->pll8 = I915_READ(BXT_PORT_PLL(port, 8));
hw_state->pll10 = I915_READ(BXT_PORT_PLL(port, 10));
/* /*
* While we write to the group register to program all lanes at once we * While we write to the group register to program all lanes at once we
* can read only lane registers. We configure all lanes the same way, so * can read only lane registers. We configure all lanes the same way, so
...@@ -2468,6 +2510,7 @@ void intel_ddi_pll_init(struct drm_device *dev) ...@@ -2468,6 +2510,7 @@ void intel_ddi_pll_init(struct drm_device *dev)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t val = I915_READ(LCPLL_CTL); uint32_t val = I915_READ(LCPLL_CTL);
int cdclk_freq;
if (IS_SKYLAKE(dev)) if (IS_SKYLAKE(dev))
skl_shared_dplls_init(dev_priv); skl_shared_dplls_init(dev_priv);
...@@ -2476,12 +2519,15 @@ void intel_ddi_pll_init(struct drm_device *dev) ...@@ -2476,12 +2519,15 @@ void intel_ddi_pll_init(struct drm_device *dev)
else else
hsw_shared_dplls_init(dev_priv); hsw_shared_dplls_init(dev_priv);
DRM_DEBUG_KMS("CDCLK running at %dKHz\n", cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
dev_priv->display.get_display_clock_speed(dev)); DRM_DEBUG_KMS("CDCLK running at %dKHz\n", cdclk_freq);
if (IS_SKYLAKE(dev)) { if (IS_SKYLAKE(dev)) {
dev_priv->skl_boot_cdclk = cdclk_freq;
if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE))
DRM_ERROR("LCPLL1 is disabled\n"); DRM_ERROR("LCPLL1 is disabled\n");
else
intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
} else if (IS_BROXTON(dev)) { } else if (IS_BROXTON(dev)) {
broxton_init_cdclk(dev); broxton_init_cdclk(dev);
broxton_ddi_phy_init(dev); broxton_ddi_phy_init(dev);
......
...@@ -1097,6 +1097,9 @@ skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock) ...@@ -1097,6 +1097,9 @@ skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
{ {
u32 ctrl1; u32 ctrl1;
memset(&pipe_config->dpll_hw_state, 0,
sizeof(pipe_config->dpll_hw_state));
pipe_config->ddi_pll_sel = SKL_DPLL0; pipe_config->ddi_pll_sel = SKL_DPLL0;
pipe_config->dpll_hw_state.cfgcr1 = 0; pipe_config->dpll_hw_state.cfgcr1 = 0;
pipe_config->dpll_hw_state.cfgcr2 = 0; pipe_config->dpll_hw_state.cfgcr2 = 0;
...@@ -1266,7 +1269,7 @@ static void snprintf_int_array(char *str, size_t len, ...@@ -1266,7 +1269,7 @@ static void snprintf_int_array(char *str, size_t len,
str[0] = '\0'; str[0] = '\0';
for (i = 0; i < nelem; i++) { for (i = 0; i < nelem; i++) {
int r = snprintf(str, len, "%d,", array[i]); int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
if (r >= len) if (r >= len)
return; return;
str += r; str += r;
...@@ -1567,7 +1570,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder) ...@@ -1567,7 +1570,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder)
/* Split out the IBX/CPU vs CPT settings */ /* Split out the IBX/CPU vs CPT settings */
if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) { if (IS_GEN7(dev) && port == PORT_A) {
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
intel_dp->DP |= DP_SYNC_HS_HIGH; intel_dp->DP |= DP_SYNC_HS_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
...@@ -1578,7 +1581,18 @@ static void intel_dp_prepare(struct intel_encoder *encoder) ...@@ -1578,7 +1581,18 @@ static void intel_dp_prepare(struct intel_encoder *encoder)
intel_dp->DP |= DP_ENHANCED_FRAMING; intel_dp->DP |= DP_ENHANCED_FRAMING;
intel_dp->DP |= crtc->pipe << 29; intel_dp->DP |= crtc->pipe << 29;
} else if (!HAS_PCH_CPT(dev) || port == PORT_A) { } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
u32 trans_dp;
intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
trans_dp |= TRANS_DP_ENH_FRAMING;
else
trans_dp &= ~TRANS_DP_ENH_FRAMING;
I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
} else {
if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev)) if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
intel_dp->DP |= intel_dp->color_range; intel_dp->DP |= intel_dp->color_range;
...@@ -1591,14 +1605,10 @@ static void intel_dp_prepare(struct intel_encoder *encoder) ...@@ -1591,14 +1605,10 @@ static void intel_dp_prepare(struct intel_encoder *encoder)
if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
intel_dp->DP |= DP_ENHANCED_FRAMING; intel_dp->DP |= DP_ENHANCED_FRAMING;
if (!IS_CHERRYVIEW(dev)) { if (IS_CHERRYVIEW(dev))
if (crtc->pipe == 1)
intel_dp->DP |= DP_PIPEB_SELECT;
} else {
intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe); intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
} else if (crtc->pipe == PIPE_B)
} else { intel_dp->DP |= DP_PIPEB_SELECT;
intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
} }
} }
...@@ -2182,41 +2192,25 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder, ...@@ -2182,41 +2192,25 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
if (!(tmp & DP_PORT_EN)) if (!(tmp & DP_PORT_EN))
return false; return false;
if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) { if (IS_GEN7(dev) && port == PORT_A) {
*pipe = PORT_TO_PIPE_CPT(tmp); *pipe = PORT_TO_PIPE_CPT(tmp);
} else if (IS_CHERRYVIEW(dev)) { } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
*pipe = DP_PORT_TO_PIPE_CHV(tmp); enum pipe p;
} else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
*pipe = PORT_TO_PIPE(tmp);
} else {
u32 trans_sel;
u32 trans_dp;
int i;
switch (intel_dp->output_reg) {
case PCH_DP_B:
trans_sel = TRANS_DP_PORT_SEL_B;
break;
case PCH_DP_C:
trans_sel = TRANS_DP_PORT_SEL_C;
break;
case PCH_DP_D:
trans_sel = TRANS_DP_PORT_SEL_D;
break;
default:
return true;
}
for_each_pipe(dev_priv, i) { for_each_pipe(dev_priv, p) {
trans_dp = I915_READ(TRANS_DP_CTL(i)); u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) { if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
*pipe = i; *pipe = p;
return true; return true;
} }
} }
DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n", DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
intel_dp->output_reg); intel_dp->output_reg);
} else if (IS_CHERRYVIEW(dev)) {
*pipe = DP_PORT_TO_PIPE_CHV(tmp);
} else {
*pipe = PORT_TO_PIPE(tmp);
} }
return true; return true;
...@@ -2237,24 +2231,24 @@ static void intel_dp_get_config(struct intel_encoder *encoder, ...@@ -2237,24 +2231,24 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A; pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
if ((port == PORT_A) || !HAS_PCH_CPT(dev)) { if (HAS_PCH_CPT(dev) && port != PORT_A) {
if (tmp & DP_SYNC_HS_HIGH) tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
flags |= DRM_MODE_FLAG_PHSYNC; flags |= DRM_MODE_FLAG_PHSYNC;
else else
flags |= DRM_MODE_FLAG_NHSYNC; flags |= DRM_MODE_FLAG_NHSYNC;
if (tmp & DP_SYNC_VS_HIGH) if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
flags |= DRM_MODE_FLAG_PVSYNC; flags |= DRM_MODE_FLAG_PVSYNC;
else else
flags |= DRM_MODE_FLAG_NVSYNC; flags |= DRM_MODE_FLAG_NVSYNC;
} else { } else {
tmp = I915_READ(TRANS_DP_CTL(crtc->pipe)); if (tmp & DP_SYNC_HS_HIGH)
if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
flags |= DRM_MODE_FLAG_PHSYNC; flags |= DRM_MODE_FLAG_PHSYNC;
else else
flags |= DRM_MODE_FLAG_NHSYNC; flags |= DRM_MODE_FLAG_NHSYNC;
if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH) if (tmp & DP_SYNC_VS_HIGH)
flags |= DRM_MODE_FLAG_PVSYNC; flags |= DRM_MODE_FLAG_PVSYNC;
else else
flags |= DRM_MODE_FLAG_NVSYNC; flags |= DRM_MODE_FLAG_NVSYNC;
...@@ -2419,7 +2413,8 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp, ...@@ -2419,7 +2413,8 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
} }
I915_WRITE(DP_TP_CTL(port), temp); I915_WRITE(DP_TP_CTL(port), temp);
} else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) { } else if ((IS_GEN7(dev) && port == PORT_A) ||
(HAS_PCH_CPT(dev) && port != PORT_A)) {
*DP &= ~DP_LINK_TRAIN_MASK_CPT; *DP &= ~DP_LINK_TRAIN_MASK_CPT;
switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
...@@ -3848,6 +3843,7 @@ static void ...@@ -3848,6 +3843,7 @@ static void
intel_dp_link_down(struct intel_dp *intel_dp) intel_dp_link_down(struct intel_dp *intel_dp)
{ {
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
enum port port = intel_dig_port->port; enum port port = intel_dig_port->port;
struct drm_device *dev = intel_dig_port->base.base.dev; struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
...@@ -3861,36 +3857,41 @@ intel_dp_link_down(struct intel_dp *intel_dp) ...@@ -3861,36 +3857,41 @@ intel_dp_link_down(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("\n"); DRM_DEBUG_KMS("\n");
if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) { if ((IS_GEN7(dev) && port == PORT_A) ||
(HAS_PCH_CPT(dev) && port != PORT_A)) {
DP &= ~DP_LINK_TRAIN_MASK_CPT; DP &= ~DP_LINK_TRAIN_MASK_CPT;
I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT); DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
} else { } else {
if (IS_CHERRYVIEW(dev)) if (IS_CHERRYVIEW(dev))
DP &= ~DP_LINK_TRAIN_MASK_CHV; DP &= ~DP_LINK_TRAIN_MASK_CHV;
else else
DP &= ~DP_LINK_TRAIN_MASK; DP &= ~DP_LINK_TRAIN_MASK;
I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE); DP |= DP_LINK_TRAIN_PAT_IDLE;
} }
I915_WRITE(intel_dp->output_reg, DP);
POSTING_READ(intel_dp->output_reg); POSTING_READ(intel_dp->output_reg);
if (HAS_PCH_IBX(dev) && DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) { I915_WRITE(intel_dp->output_reg, DP);
/* Hardware workaround: leaving our transcoder select POSTING_READ(intel_dp->output_reg);
* set to transcoder B while it's off will prevent the
* corresponding HDMI output on transcoder A. /*
* * HW workaround for IBX, we need to move the port
* Combine this with another hardware workaround: * to transcoder A after disabling it to allow the
* transcoder select bit can only be cleared while the * matching HDMI port to be enabled on transcoder A.
* port is enabled. */
*/ if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
DP &= ~DP_PIPEB_SELECT; /* always enable with pattern 1 (as per spec) */
DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
I915_WRITE(intel_dp->output_reg, DP);
POSTING_READ(intel_dp->output_reg);
DP &= ~DP_PORT_EN;
I915_WRITE(intel_dp->output_reg, DP); I915_WRITE(intel_dp->output_reg, DP);
POSTING_READ(intel_dp->output_reg); POSTING_READ(intel_dp->output_reg);
} }
DP &= ~DP_AUDIO_OUTPUT_ENABLE;
I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
POSTING_READ(intel_dp->output_reg);
msleep(intel_dp->panel_power_down_delay); msleep(intel_dp->panel_power_down_delay);
} }
...@@ -4142,7 +4143,7 @@ static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp) ...@@ -4142,7 +4143,7 @@ static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
if (!drm_dp_dpcd_write(&intel_dp->aux, if (!drm_dp_dpcd_write(&intel_dp->aux,
DP_TEST_EDID_CHECKSUM, DP_TEST_EDID_CHECKSUM,
&intel_connector->detect_edid->checksum, &intel_connector->detect_edid->checksum,
1)); 1))
DRM_DEBUG_KMS("Failed to write EDID checksum\n"); DRM_DEBUG_KMS("Failed to write EDID checksum\n");
test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE; test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
...@@ -5814,12 +5815,10 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, ...@@ -5814,12 +5815,10 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_dp_aux_init(intel_dp, intel_connector); intel_dp_aux_init(intel_dp, intel_connector);
/* init MST on ports that can support it */ /* init MST on ports that can support it */
if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) { if (HAS_DP_MST(dev) &&
if (port == PORT_B || port == PORT_C || port == PORT_D) { (port == PORT_B || port == PORT_C || port == PORT_D))
intel_dp_mst_encoder_init(intel_dig_port, intel_dp_mst_encoder_init(intel_dig_port,
intel_connector->base.base.id); intel_connector->base.base.id);
}
}
if (!intel_edp_init_connector(intel_dp, intel_connector)) { if (!intel_edp_init_connector(intel_dp, intel_connector)) {
drm_dp_aux_unregister(&intel_dp->aux); drm_dp_aux_unregister(&intel_dp->aux);
......
...@@ -459,8 +459,10 @@ struct intel_pipe_wm { ...@@ -459,8 +459,10 @@ struct intel_pipe_wm {
}; };
struct intel_mmio_flip { struct intel_mmio_flip {
struct drm_i915_gem_request *req;
struct work_struct work; struct work_struct work;
struct drm_i915_private *i915;
struct drm_i915_gem_request *req;
struct intel_crtc *crtc;
}; };
struct skl_pipe_wm { struct skl_pipe_wm {
...@@ -544,7 +546,6 @@ struct intel_crtc { ...@@ -544,7 +546,6 @@ struct intel_crtc {
} wm; } wm;
int scanline_offset; int scanline_offset;
struct intel_mmio_flip mmio_flip;
struct intel_crtc_atomic_commit atomic; struct intel_crtc_atomic_commit atomic;
...@@ -555,7 +556,15 @@ struct intel_crtc { ...@@ -555,7 +556,15 @@ struct intel_crtc {
struct intel_plane_wm_parameters { struct intel_plane_wm_parameters {
uint32_t horiz_pixels; uint32_t horiz_pixels;
uint32_t vert_pixels; uint32_t vert_pixels;
/*
* For packed pixel formats:
* bytes_per_pixel - holds bytes per pixel
* For planar pixel formats:
* bytes_per_pixel - holds bytes per pixel for uv-plane
* y_bytes_per_pixel - holds bytes per pixel for y-plane
*/
uint8_t bytes_per_pixel; uint8_t bytes_per_pixel;
uint8_t y_bytes_per_pixel;
bool enabled; bool enabled;
bool scaled; bool scaled;
u64 tiling; u64 tiling;
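/*
 * Example: for DRM_FORMAT_NV12, drm_format_plane_cpp() reports 1 byte per
 * pixel for the Y plane and 2 bytes per subsampled sample for the
 * interleaved CbCr plane, so y_bytes_per_pixel = 1 and bytes_per_pixel = 2.
 */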
...@@ -1059,9 +1068,6 @@ intel_rotation_90_or_270(unsigned int rotation) ...@@ -1059,9 +1068,6 @@ intel_rotation_90_or_270(unsigned int rotation)
return rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270)); return rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270));
} }
unsigned int
intel_tile_height(struct drm_device *dev, uint32_t bits_per_pixel,
uint64_t fb_modifier);
void intel_create_rotation_property(struct drm_device *dev, void intel_create_rotation_property(struct drm_device *dev,
struct intel_plane *plane); struct intel_plane *plane);
...@@ -1112,6 +1118,8 @@ void broxton_ddi_phy_init(struct drm_device *dev); ...@@ -1112,6 +1118,8 @@ void broxton_ddi_phy_init(struct drm_device *dev);
void broxton_ddi_phy_uninit(struct drm_device *dev); void broxton_ddi_phy_uninit(struct drm_device *dev);
void bxt_enable_dc9(struct drm_i915_private *dev_priv); void bxt_enable_dc9(struct drm_i915_private *dev_priv);
void bxt_disable_dc9(struct drm_i915_private *dev_priv); void bxt_disable_dc9(struct drm_i915_private *dev_priv);
void skl_init_cdclk(struct drm_i915_private *dev_priv);
void skl_uninit_cdclk(struct drm_i915_private *dev_priv);
void intel_dp_get_m_n(struct intel_crtc *crtc, void intel_dp_get_m_n(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config); struct intel_crtc_state *pipe_config);
void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n); void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n);
...@@ -1359,9 +1367,10 @@ void gen6_rps_busy(struct drm_i915_private *dev_priv); ...@@ -1359,9 +1367,10 @@ void gen6_rps_busy(struct drm_i915_private *dev_priv);
void gen6_rps_reset_ei(struct drm_i915_private *dev_priv); void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
void gen6_rps_idle(struct drm_i915_private *dev_priv); void gen6_rps_idle(struct drm_i915_private *dev_priv);
void gen6_rps_boost(struct drm_i915_private *dev_priv, void gen6_rps_boost(struct drm_i915_private *dev_priv,
struct drm_i915_file_private *file_priv); struct intel_rps_client *rps,
unsigned long submitted);
void intel_queue_rps_boost_for_request(struct drm_device *dev, void intel_queue_rps_boost_for_request(struct drm_device *dev,
struct drm_i915_gem_request *rq); struct drm_i915_gem_request *req);
void ilk_wm_get_hw_state(struct drm_device *dev); void ilk_wm_get_hw_state(struct drm_device *dev);
void skl_wm_get_hw_state(struct drm_device *dev); void skl_wm_get_hw_state(struct drm_device *dev);
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv, void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
......
...@@ -162,59 +162,41 @@ static u32 dsi_clk_from_pclk(u32 pclk, int pixel_format, int lane_count) ...@@ -162,59 +162,41 @@ static u32 dsi_clk_from_pclk(u32 pclk, int pixel_format, int lane_count)
#endif #endif
static int dsi_calc_mnp(u32 dsi_clk, struct dsi_mnp *dsi_mnp) static int dsi_calc_mnp(int target_dsi_clk, struct dsi_mnp *dsi_mnp)
{ {
u32 m, n, p; unsigned int calc_m = 0, calc_p = 0;
u32 ref_clk; unsigned int m, n = 1, p;
u32 error; int ref_clk = 25000;
u32 tmp_error; int delta = target_dsi_clk;
int target_dsi_clk;
int calc_dsi_clk;
u32 calc_m;
u32 calc_p;
u32 m_seed; u32 m_seed;
/* dsi_clk is expected in KHZ */ /* target_dsi_clk is expected in kHz */
if (dsi_clk < 300000 || dsi_clk > 1150000) { if (target_dsi_clk < 300000 || target_dsi_clk > 1150000) {
DRM_ERROR("DSI CLK Out of Range\n"); DRM_ERROR("DSI CLK Out of Range\n");
return -ECHRNG; return -ECHRNG;
} }
ref_clk = 25000; for (m = 62; m <= 92 && delta; m++) {
target_dsi_clk = dsi_clk; for (p = 2; p <= 6 && delta; p++) {
error = 0xFFFFFFFF; /*
tmp_error = 0xFFFFFFFF; * Find the optimal m and p divisors with minimal delta
calc_m = 0; * +/- the required clock
calc_p = 0; */
int calc_dsi_clk = (m * ref_clk) / (p * n);
for (m = 62; m <= 92; m++) { int d = abs(target_dsi_clk - calc_dsi_clk);
for (p = 2; p <= 6; p++) { if (d < delta) {
/* Find the optimal m and p divisors delta = d;
with minimal error +/- the required clock */
calc_dsi_clk = (m * ref_clk) / p;
if (calc_dsi_clk == target_dsi_clk) {
calc_m = m;
calc_p = p;
error = 0;
break;
} else
tmp_error = abs(target_dsi_clk - calc_dsi_clk);
if (tmp_error < error) {
error = tmp_error;
calc_m = m; calc_m = m;
calc_p = p; calc_p = p;
} }
} }
if (error == 0)
break;
} }
/* register has log2(N1), this works fine for powers of two */
n = ffs(n) - 1;
m_seed = lfsr_converts[calc_m - 62]; m_seed = lfsr_converts[calc_m - 62];
n = 1;
dsi_mnp->dsi_pll_ctrl = 1 << (DSI_PLL_P1_POST_DIV_SHIFT + calc_p - 2); dsi_mnp->dsi_pll_ctrl = 1 << (DSI_PLL_P1_POST_DIV_SHIFT + calc_p - 2);
dsi_mnp->dsi_pll_div = (n - 1) << DSI_PLL_N1_DIV_SHIFT | dsi_mnp->dsi_pll_div = n << DSI_PLL_N1_DIV_SHIFT |
m_seed << DSI_PLL_M1_DIV_SHIFT; m_seed << DSI_PLL_M1_DIV_SHIFT;
return 0; return 0;
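/*
 * Worked example: with ref_clk = 25000 kHz and target_dsi_clk = 600000,
 * the search settles on m = 72, p = 3 (n = 1), since
 * 72 * 25000 / (3 * 1) = 600000 exactly (delta == 0); the register then
 * stores ffs(1) - 1 = 0 as log2(N1).
 */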
...@@ -331,7 +313,7 @@ u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp) ...@@ -331,7 +313,7 @@ u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp)
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
u32 dsi_clock, pclk; u32 dsi_clock, pclk;
u32 pll_ctl, pll_div; u32 pll_ctl, pll_div;
u32 m = 0, p = 0; u32 m = 0, p = 0, n;
int refclk = 25000; int refclk = 25000;
int i; int i;
...@@ -346,6 +328,10 @@ u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp) ...@@ -346,6 +328,10 @@ u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp)
pll_ctl &= DSI_PLL_P1_POST_DIV_MASK; pll_ctl &= DSI_PLL_P1_POST_DIV_MASK;
pll_ctl = pll_ctl >> (DSI_PLL_P1_POST_DIV_SHIFT - 2); pll_ctl = pll_ctl >> (DSI_PLL_P1_POST_DIV_SHIFT - 2);
/* N1 divisor */
n = (pll_div & DSI_PLL_N1_DIV_MASK) >> DSI_PLL_N1_DIV_SHIFT;
n = 1 << n; /* register has log2(N1) */
/* mask out the other bits and extract the M1 divisor */ /* mask out the other bits and extract the M1 divisor */
pll_div &= DSI_PLL_M1_DIV_MASK; pll_div &= DSI_PLL_M1_DIV_MASK;
pll_div = pll_div >> DSI_PLL_M1_DIV_SHIFT; pll_div = pll_div >> DSI_PLL_M1_DIV_SHIFT;
...@@ -373,7 +359,7 @@ u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp) ...@@ -373,7 +359,7 @@ u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp)
m = i + 62; m = i + 62;
dsi_clock = (m * refclk) / p; dsi_clock = (m * refclk) / (p * n);
/* pixel_format and pipe_bpp should agree */ /* pixel_format and pipe_bpp should agree */
assert_bpp_mismatch(intel_dsi->pixel_format, pipe_bpp); assert_bpp_mismatch(intel_dsi->pixel_format, pipe_bpp);
......
...@@ -873,59 +873,59 @@ static void intel_disable_hdmi(struct intel_encoder *encoder) ...@@ -873,59 +873,59 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
u32 temp; u32 temp;
u32 enable_bits = SDVO_ENABLE | SDVO_AUDIO_ENABLE;
if (crtc->config->has_audio)
intel_audio_codec_disable(encoder);
temp = I915_READ(intel_hdmi->hdmi_reg); temp = I915_READ(intel_hdmi->hdmi_reg);
/* HW workaround for IBX, we need to move the port to transcoder A temp &= ~(SDVO_ENABLE | SDVO_AUDIO_ENABLE);
* before disabling it. */
if (HAS_PCH_IBX(dev)) {
struct drm_crtc *crtc = encoder->base.crtc;
int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
if (temp & SDVO_PIPE_B_SELECT) {
temp &= ~SDVO_PIPE_B_SELECT;
I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->hdmi_reg);
/* Again we need to write this twice. */
I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->hdmi_reg);
/* Transcoder selection bits only update
* effectively on vblank. */
if (crtc)
intel_wait_for_vblank(dev, pipe);
else
msleep(50);
}
}
/* HW workaround, need to toggle enable bit off and on for 12bpc, but
* we do this anyway which shows more stable in testing.
*/
if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(intel_hdmi->hdmi_reg, temp & ~SDVO_ENABLE);
POSTING_READ(intel_hdmi->hdmi_reg);
}
temp &= ~enable_bits;
I915_WRITE(intel_hdmi->hdmi_reg, temp); I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->hdmi_reg); POSTING_READ(intel_hdmi->hdmi_reg);
/* HW workaround, need to write this twice for issue that may result /*
* in first write getting masked. * HW workaround for IBX, we need to move the port
* to transcoder A after disabling it to allow the
* matching DP port to be enabled on transcoder A.
*/ */
if (HAS_PCH_SPLIT(dev)) { if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B) {
temp &= ~SDVO_PIPE_B_SELECT;
temp |= SDVO_ENABLE;
/*
* HW workaround, need to write this twice for issue
* that may result in first write getting masked.
*/
I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->hdmi_reg);
I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->hdmi_reg);
temp &= ~SDVO_ENABLE;
I915_WRITE(intel_hdmi->hdmi_reg, temp); I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->hdmi_reg); POSTING_READ(intel_hdmi->hdmi_reg);
} }
} }
static void g4x_disable_hdmi(struct intel_encoder *encoder)
{
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
if (crtc->config->has_audio)
intel_audio_codec_disable(encoder);
intel_disable_hdmi(encoder);
}
static void pch_disable_hdmi(struct intel_encoder *encoder)
{
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
if (crtc->config->has_audio)
intel_audio_codec_disable(encoder);
}
static void pch_post_disable_hdmi(struct intel_encoder *encoder)
{
intel_disable_hdmi(encoder);
}
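/*
 * As with CRT, on PCH platforms the HDMI port must go down after the pipe:
 * .disable only cuts the audio codec while the pipe is still running, and
 * intel_disable_hdmi() is deferred to .post_disable.
 */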
static int hdmi_portclock_limit(struct intel_hdmi *hdmi, bool respect_dvi_limit) static int hdmi_portclock_limit(struct intel_hdmi *hdmi, bool respect_dvi_limit)
{ {
struct drm_device *dev = intel_hdmi_to_dev(hdmi); struct drm_device *dev = intel_hdmi_to_dev(hdmi);
...@@ -1806,7 +1806,12 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port) ...@@ -1806,7 +1806,12 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
DRM_MODE_ENCODER_TMDS); DRM_MODE_ENCODER_TMDS);
intel_encoder->compute_config = intel_hdmi_compute_config; intel_encoder->compute_config = intel_hdmi_compute_config;
intel_encoder->disable = intel_disable_hdmi; if (HAS_PCH_SPLIT(dev)) {
intel_encoder->disable = pch_disable_hdmi;
intel_encoder->post_disable = pch_post_disable_hdmi;
} else {
intel_encoder->disable = g4x_disable_hdmi;
}
intel_encoder->get_hw_state = intel_hdmi_get_hw_state; intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
intel_encoder->get_config = intel_hdmi_get_config; intel_encoder->get_config = intel_hdmi_get_config;
if (IS_CHERRYVIEW(dev)) { if (IS_CHERRYVIEW(dev)) {
......
...@@ -49,6 +49,19 @@ static const struct gmbus_pin gmbus_pins[] = { ...@@ -49,6 +49,19 @@ static const struct gmbus_pin gmbus_pins[] = {
[GMBUS_PIN_DPD] = { "dpd", GPIOF }, [GMBUS_PIN_DPD] = { "dpd", GPIOF },
}; };
static const struct gmbus_pin gmbus_pins_bdw[] = {
[GMBUS_PIN_VGADDC] = { "vga", GPIOA },
[GMBUS_PIN_DPC] = { "dpc", GPIOD },
[GMBUS_PIN_DPB] = { "dpb", GPIOE },
[GMBUS_PIN_DPD] = { "dpd", GPIOF },
};
static const struct gmbus_pin gmbus_pins_skl[] = {
[GMBUS_PIN_DPC] = { "dpc", GPIOD },
[GMBUS_PIN_DPB] = { "dpb", GPIOE },
[GMBUS_PIN_DPD] = { "dpd", GPIOF },
};
static const struct gmbus_pin gmbus_pins_bxt[] = { static const struct gmbus_pin gmbus_pins_bxt[] = {
[GMBUS_PIN_1_BXT] = { "dpb", PCH_GPIOB }, [GMBUS_PIN_1_BXT] = { "dpb", PCH_GPIOB },
[GMBUS_PIN_2_BXT] = { "dpc", PCH_GPIOC }, [GMBUS_PIN_2_BXT] = { "dpc", PCH_GPIOC },
...@@ -61,6 +74,10 @@ static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *dev_priv, ...@@ -61,6 +74,10 @@ static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *dev_priv,
{ {
if (IS_BROXTON(dev_priv)) if (IS_BROXTON(dev_priv))
return &gmbus_pins_bxt[pin]; return &gmbus_pins_bxt[pin];
else if (IS_SKYLAKE(dev_priv))
return &gmbus_pins_skl[pin];
else if (IS_BROADWELL(dev_priv))
return &gmbus_pins_bdw[pin];
else else
return &gmbus_pins[pin]; return &gmbus_pins[pin];
} }
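/*
 * The per-platform tables are sparse (designated initializers), so a pin
 * is only usable when it is within the selected table's bounds and its
 * entry is populated; the bounds half of that check follows below, the
 * populated-entry half falls outside this hunk.
 */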
...@@ -72,6 +89,10 @@ bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv, ...@@ -72,6 +89,10 @@ bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
if (IS_BROXTON(dev_priv)) if (IS_BROXTON(dev_priv))
size = ARRAY_SIZE(gmbus_pins_bxt); size = ARRAY_SIZE(gmbus_pins_bxt);
else if (IS_SKYLAKE(dev_priv))
size = ARRAY_SIZE(gmbus_pins_skl);
else if (IS_BROADWELL(dev_priv))
size = ARRAY_SIZE(gmbus_pins_bdw);
else else
size = ARRAY_SIZE(gmbus_pins); size = ARRAY_SIZE(gmbus_pins);
......
...@@ -394,6 +394,12 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring) ...@@ -394,6 +394,12 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring)
assert_spin_locked(&ring->execlist_lock); assert_spin_locked(&ring->execlist_lock);
/*
 * If irqs are not enabled, generate a warning, as batches that finish
 * without them may get lost and a GPU hang may occur.
*/
WARN_ON(!intel_irqs_enabled(ring->dev->dev_private));
if (list_empty(&ring->execlist_queue)) if (list_empty(&ring->execlist_queue))
return; return;
...@@ -622,6 +628,7 @@ static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf, ...@@ -622,6 +628,7 @@ static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
struct list_head *vmas) struct list_head *vmas)
{ {
struct intel_engine_cs *ring = ringbuf->ring; struct intel_engine_cs *ring = ringbuf->ring;
const unsigned other_rings = ~intel_ring_flag(ring);
struct i915_vma *vma; struct i915_vma *vma;
uint32_t flush_domains = 0; uint32_t flush_domains = 0;
bool flush_chipset = false; bool flush_chipset = false;
...@@ -630,9 +637,11 @@ static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf, ...@@ -630,9 +637,11 @@ static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
list_for_each_entry(vma, vmas, exec_list) { list_for_each_entry(vma, vmas, exec_list) {
struct drm_i915_gem_object *obj = vma->obj; struct drm_i915_gem_object *obj = vma->obj;
ret = i915_gem_object_sync(obj, ring); if (obj->active & other_rings) {
if (ret) ret = i915_gem_object_sync(obj, ring);
return ret; if (ret)
return ret;
}
if (obj->base.write_domain & I915_GEM_DOMAIN_CPU) if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
flush_chipset |= i915_gem_clflush_object(obj, false); flush_chipset |= i915_gem_clflush_object(obj, false);
...@@ -673,7 +682,8 @@ static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf, ...@@ -673,7 +682,8 @@ static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
{ {
struct intel_engine_cs *ring = ringbuf->ring; struct intel_engine_cs *ring = ringbuf->ring;
struct drm_i915_gem_request *request; struct drm_i915_gem_request *request;
int ret, new_space; unsigned space;
int ret;
if (intel_ring_space(ringbuf) >= bytes) if (intel_ring_space(ringbuf) >= bytes)
return 0; return 0;
...@@ -684,14 +694,13 @@ static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf, ...@@ -684,14 +694,13 @@ static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
* from multiple ringbuffers. Here, we must ignore any that * from multiple ringbuffers. Here, we must ignore any that
* aren't from the ringbuffer we're considering. * aren't from the ringbuffer we're considering.
*/ */
struct intel_context *ctx = request->ctx; if (request->ringbuf != ringbuf)
if (ctx->engine[ring->id].ringbuf != ringbuf)
continue; continue;
/* Would completion of this request free enough space? */ /* Would completion of this request free enough space? */
new_space = __intel_ring_space(request->postfix, ringbuf->tail, space = __intel_ring_space(request->postfix, ringbuf->tail,
ringbuf->size); ringbuf->size);
if (new_space >= bytes) if (space >= bytes)
break; break;
} }
...@@ -702,11 +711,8 @@ static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf, ...@@ -702,11 +711,8 @@ static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
if (ret) if (ret)
return ret; return ret;
i915_gem_retire_requests_ring(ring); ringbuf->space = space;
return 0;
WARN_ON(intel_ring_space(ringbuf) < new_space);
return intel_ring_space(ringbuf) >= bytes ? 0 : -ENOSPC;
} }
/* /*
......
...@@ -228,7 +228,6 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay, ...@@ -228,7 +228,6 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
ret = i915_wait_request(overlay->last_flip_req); ret = i915_wait_request(overlay->last_flip_req);
if (ret) if (ret)
return ret; return ret;
i915_gem_retire_requests(dev);
i915_gem_request_assign(&overlay->last_flip_req, NULL); i915_gem_request_assign(&overlay->last_flip_req, NULL);
return 0; return 0;
...@@ -376,7 +375,6 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay) ...@@ -376,7 +375,6 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
ret = i915_wait_request(overlay->last_flip_req); ret = i915_wait_request(overlay->last_flip_req);
if (ret) if (ret)
return ret; return ret;
i915_gem_retire_requests(overlay->dev);
if (overlay->flip_tail) if (overlay->flip_tail)
overlay->flip_tail(overlay); overlay->flip_tail(overlay);
......
...@@ -1946,7 +1946,7 @@ static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5]) ...@@ -1946,7 +1946,7 @@ static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
int ilk_wm_max_level(const struct drm_device *dev) int ilk_wm_max_level(const struct drm_device *dev)
{ {
/* how many WM levels are we expecting */ /* how many WM levels are we expecting */
if (IS_GEN9(dev)) if (INTEL_INFO(dev)->gen >= 9)
return 7; return 7;
else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
return 4; return 4;
...@@ -2639,8 +2639,18 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv, ...@@ -2639,8 +2639,18 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
} }
static unsigned int static unsigned int
skl_plane_relative_data_rate(const struct intel_plane_wm_parameters *p) skl_plane_relative_data_rate(const struct intel_plane_wm_parameters *p, int y)
{ {
/* for planar format */
if (p->y_bytes_per_pixel) {
if (y) /* y-plane data rate */
return p->horiz_pixels * p->vert_pixels * p->y_bytes_per_pixel;
else /* uv-plane data rate */
return (p->horiz_pixels/2) * (p->vert_pixels/2) * p->bytes_per_pixel;
}
/* for packed formats */
return p->horiz_pixels * p->vert_pixels * p->bytes_per_pixel; return p->horiz_pixels * p->vert_pixels * p->bytes_per_pixel;
} }
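/*
 * Worked example: a 1920x1080 NV12 plane contributes
 *	y:  1920 * 1080 * 1         = 2073600
 *	uv: (1920/2) * (1080/2) * 2 = 1036800
 * so the Y plane accounts for two thirds of the plane's total data rate.
 */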
...@@ -2663,7 +2673,10 @@ skl_get_total_relative_data_rate(struct intel_crtc *intel_crtc, ...@@ -2663,7 +2673,10 @@ skl_get_total_relative_data_rate(struct intel_crtc *intel_crtc,
if (!p->enabled) if (!p->enabled)
continue; continue;
total_data_rate += skl_plane_relative_data_rate(p); total_data_rate += skl_plane_relative_data_rate(p, 0); /* packed/uv */
if (p->y_bytes_per_pixel) {
total_data_rate += skl_plane_relative_data_rate(p, 1); /* y-plane */
}
} }
return total_data_rate; return total_data_rate;
...@@ -2682,6 +2695,7 @@ skl_allocate_pipe_ddb(struct drm_crtc *crtc, ...@@ -2682,6 +2695,7 @@ skl_allocate_pipe_ddb(struct drm_crtc *crtc,
struct skl_ddb_entry *alloc = &ddb->pipe[pipe]; struct skl_ddb_entry *alloc = &ddb->pipe[pipe];
uint16_t alloc_size, start, cursor_blocks; uint16_t alloc_size, start, cursor_blocks;
uint16_t minimum[I915_MAX_PLANES]; uint16_t minimum[I915_MAX_PLANES];
uint16_t y_minimum[I915_MAX_PLANES];
unsigned int total_data_rate; unsigned int total_data_rate;
int plane; int plane;
...@@ -2710,6 +2724,8 @@ skl_allocate_pipe_ddb(struct drm_crtc *crtc, ...@@ -2710,6 +2724,8 @@ skl_allocate_pipe_ddb(struct drm_crtc *crtc,
minimum[plane] = 8; minimum[plane] = 8;
alloc_size -= minimum[plane]; alloc_size -= minimum[plane];
y_minimum[plane] = p->y_bytes_per_pixel ? 8 : 0;
alloc_size -= y_minimum[plane];
} }
/* /*
...@@ -2723,16 +2739,17 @@ skl_allocate_pipe_ddb(struct drm_crtc *crtc, ...@@ -2723,16 +2739,17 @@ skl_allocate_pipe_ddb(struct drm_crtc *crtc,
start = alloc->start; start = alloc->start;
for (plane = 0; plane < intel_num_planes(intel_crtc); plane++) { for (plane = 0; plane < intel_num_planes(intel_crtc); plane++) {
const struct intel_plane_wm_parameters *p; const struct intel_plane_wm_parameters *p;
unsigned int data_rate; unsigned int data_rate, y_data_rate;
uint16_t plane_blocks; uint16_t plane_blocks, y_plane_blocks = 0;
p = &params->plane[plane]; p = &params->plane[plane];
if (!p->enabled) if (!p->enabled)
continue; continue;
data_rate = skl_plane_relative_data_rate(p); data_rate = skl_plane_relative_data_rate(p, 0);
/* /*
* allocation for (packed formats) or (uv-plane part of planar format):
* promote the expression to 64 bits to avoid overflowing, the * promote the expression to 64 bits to avoid overflowing, the
* result is < available as data_rate / total_data_rate < 1 * result is < available as data_rate / total_data_rate < 1
*/ */
...@@ -2744,6 +2761,22 @@ skl_allocate_pipe_ddb(struct drm_crtc *crtc, ...@@ -2744,6 +2761,22 @@ skl_allocate_pipe_ddb(struct drm_crtc *crtc,
ddb->plane[pipe][plane].end = start + plane_blocks; ddb->plane[pipe][plane].end = start + plane_blocks;
start += plane_blocks; start += plane_blocks;
/*
* allocation for y_plane part of planar format:
*/
if (p->y_bytes_per_pixel) {
y_data_rate = skl_plane_relative_data_rate(p, 1);
y_plane_blocks = y_minimum[plane];
y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate,
total_data_rate);
ddb->y_plane[pipe][plane].start = start;
ddb->y_plane[pipe][plane].end = start + y_plane_blocks;
start += y_plane_blocks;
}
} }
} }
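/*
 * Sketch of the split: after the fixed minimums are carved out, each
 * (sub)plane receives
 *	blocks = minimum + alloc_size * data_rate / total_data_rate
 * so in the NV12 example above the Y plane gets twice the proportional
 * share of the CbCr plane.
 */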
...@@ -2856,13 +2889,18 @@ static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc, ...@@ -2856,13 +2889,18 @@ static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc,
p->pixel_rate = skl_pipe_pixel_rate(intel_crtc->config); p->pixel_rate = skl_pipe_pixel_rate(intel_crtc->config);
fb = crtc->primary->state->fb; fb = crtc->primary->state->fb;
/* For planar: Bpp is for uv plane, y_Bpp is for y plane */
if (fb) { if (fb) {
p->plane[0].enabled = true; p->plane[0].enabled = true;
p->plane[0].bytes_per_pixel = fb->bits_per_pixel / 8; p->plane[0].bytes_per_pixel = fb->pixel_format == DRM_FORMAT_NV12 ?
drm_format_plane_cpp(fb->pixel_format, 1) : fb->bits_per_pixel / 8;
p->plane[0].y_bytes_per_pixel = fb->pixel_format == DRM_FORMAT_NV12 ?
drm_format_plane_cpp(fb->pixel_format, 0) : 0;
p->plane[0].tiling = fb->modifier[0]; p->plane[0].tiling = fb->modifier[0];
} else { } else {
p->plane[0].enabled = false; p->plane[0].enabled = false;
p->plane[0].bytes_per_pixel = 0; p->plane[0].bytes_per_pixel = 0;
p->plane[0].y_bytes_per_pixel = 0;
p->plane[0].tiling = DRM_FORMAT_MOD_NONE; p->plane[0].tiling = DRM_FORMAT_MOD_NONE;
} }
p->plane[0].horiz_pixels = intel_crtc->config->pipe_src_w; p->plane[0].horiz_pixels = intel_crtc->config->pipe_src_w;
...@@ -2870,6 +2908,7 @@ static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc, ...@@ -2870,6 +2908,7 @@ static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc,
p->plane[0].rotation = crtc->primary->state->rotation; p->plane[0].rotation = crtc->primary->state->rotation;
fb = crtc->cursor->state->fb; fb = crtc->cursor->state->fb;
p->cursor.y_bytes_per_pixel = 0;
if (fb) { if (fb) {
p->cursor.enabled = true; p->cursor.enabled = true;
p->cursor.bytes_per_pixel = fb->bits_per_pixel / 8; p->cursor.bytes_per_pixel = fb->bits_per_pixel / 8;
...@@ -2905,22 +2944,25 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv, ...@@ -2905,22 +2944,25 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
uint32_t plane_bytes_per_line, plane_blocks_per_line; uint32_t plane_bytes_per_line, plane_blocks_per_line;
uint32_t res_blocks, res_lines; uint32_t res_blocks, res_lines;
uint32_t selected_result; uint32_t selected_result;
uint8_t bytes_per_pixel;
if (latency == 0 || !p->active || !p_params->enabled) if (latency == 0 || !p->active || !p_params->enabled)
return false; return false;
bytes_per_pixel = p_params->y_bytes_per_pixel ?
p_params->y_bytes_per_pixel :
p_params->bytes_per_pixel;
method1 = skl_wm_method1(p->pixel_rate, method1 = skl_wm_method1(p->pixel_rate,
p_params->bytes_per_pixel, bytes_per_pixel,
latency); latency);
method2 = skl_wm_method2(p->pixel_rate, method2 = skl_wm_method2(p->pixel_rate,
p->pipe_htotal, p->pipe_htotal,
p_params->horiz_pixels, p_params->horiz_pixels,
p_params->bytes_per_pixel, bytes_per_pixel,
p_params->tiling, p_params->tiling,
latency); latency);
plane_bytes_per_line = p_params->horiz_pixels * plane_bytes_per_line = p_params->horiz_pixels * bytes_per_pixel;
p_params->bytes_per_pixel;
plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512); plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
if (p_params->tiling == I915_FORMAT_MOD_Y_TILED || if (p_params->tiling == I915_FORMAT_MOD_Y_TILED ||
...@@ -3137,10 +3179,14 @@ static void skl_write_wm_values(struct drm_i915_private *dev_priv, ...@@ -3137,10 +3179,14 @@ static void skl_write_wm_values(struct drm_i915_private *dev_priv,
new->plane_trans[pipe][i]); new->plane_trans[pipe][i]);
I915_WRITE(CUR_WM_TRANS(pipe), new->cursor_trans[pipe]); I915_WRITE(CUR_WM_TRANS(pipe), new->cursor_trans[pipe]);
for (i = 0; i < intel_num_planes(crtc); i++) for (i = 0; i < intel_num_planes(crtc); i++) {
skl_ddb_entry_write(dev_priv, skl_ddb_entry_write(dev_priv,
PLANE_BUF_CFG(pipe, i), PLANE_BUF_CFG(pipe, i),
&new->ddb.plane[pipe][i]); &new->ddb.plane[pipe][i]);
skl_ddb_entry_write(dev_priv,
PLANE_NV12_BUF_CFG(pipe, i),
&new->ddb.y_plane[pipe][i]);
}
skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe), skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
&new->ddb.cursor[pipe]); &new->ddb.cursor[pipe]);
...@@ -3298,6 +3344,7 @@ static bool skl_update_pipe_wm(struct drm_crtc *crtc, ...@@ -3298,6 +3344,7 @@ static bool skl_update_pipe_wm(struct drm_crtc *crtc,
return false; return false;
intel_crtc->wm.skl_active = *pipe_wm; intel_crtc->wm.skl_active = *pipe_wm;
return true; return true;
} }
...@@ -3391,8 +3438,16 @@ skl_update_sprite_wm(struct drm_plane *plane, struct drm_crtc *crtc, ...@@ -3391,8 +3438,16 @@ skl_update_sprite_wm(struct drm_plane *plane, struct drm_crtc *crtc,
intel_plane->wm.scaled = scaled; intel_plane->wm.scaled = scaled;
intel_plane->wm.horiz_pixels = sprite_width; intel_plane->wm.horiz_pixels = sprite_width;
intel_plane->wm.vert_pixels = sprite_height; intel_plane->wm.vert_pixels = sprite_height;
intel_plane->wm.bytes_per_pixel = pixel_size;
intel_plane->wm.tiling = DRM_FORMAT_MOD_NONE; intel_plane->wm.tiling = DRM_FORMAT_MOD_NONE;
/* For planar: Bpp is for UV plane, y_Bpp is for Y plane */
intel_plane->wm.bytes_per_pixel =
(fb && fb->pixel_format == DRM_FORMAT_NV12) ?
drm_format_plane_cpp(plane->state->fb->pixel_format, 1) : pixel_size;
intel_plane->wm.y_bytes_per_pixel =
(fb && fb->pixel_format == DRM_FORMAT_NV12) ?
drm_format_plane_cpp(plane->state->fb->pixel_format, 0) : 0;
/* /*
* Framebuffer can be NULL on plane disable, but it does not * Framebuffer can be NULL on plane disable, but it does not
* matter for watermarks if we assume no tiling in that case. * matter for watermarks if we assume no tiling in that case.
...@@ -4042,51 +4097,25 @@ static void valleyview_set_rps(struct drm_device *dev, u8 val) ...@@ -4042,51 +4097,25 @@ static void valleyview_set_rps(struct drm_device *dev, u8 val)
trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val)); trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
} }
/* vlv_set_rps_idle: Set the frequency to Rpn if Gfx clocks are down /* vlv_set_rps_idle: Set the frequency to idle, if Gfx clocks are down
* *
* * If Gfx is Idle, then * * If Gfx is Idle, then
* 1. Mask Turbo interrupts * 1. Forcewake Media well.
* 2. Bring up Gfx clock * 2. Request idle freq.
* 3. Change the freq to Rpn and wait till P-Unit updates freq * 3. Release Forcewake of Media well.
* 4. Clear the Force GFX CLK ON bit so that Gfx can down
* 5. Unmask Turbo interrupts
*/ */
static void vlv_set_rps_idle(struct drm_i915_private *dev_priv) static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
{ {
struct drm_device *dev = dev_priv->dev;
u32 val = dev_priv->rps.idle_freq; u32 val = dev_priv->rps.idle_freq;
/* CHV and latest VLV don't need to force the gfx clock */
if (IS_CHERRYVIEW(dev) || dev->pdev->revision >= 0xd) {
valleyview_set_rps(dev_priv->dev, val);
return;
}
/*
* When we are idle. Drop to min voltage state.
*/
if (dev_priv->rps.cur_freq <= val) if (dev_priv->rps.cur_freq <= val)
return; return;
/* Mask turbo interrupt so that they will not come in between */ /* Wake up the media well, as that takes a lot less
I915_WRITE(GEN6_PMINTRMSK, * power than the Render well. */
gen6_sanitize_rps_pm_mask(dev_priv, ~0)); intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA);
valleyview_set_rps(dev_priv->dev, val);
vlv_force_gfx_clock(dev_priv, true); intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA);
dev_priv->rps.cur_freq = val;
vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
if (wait_for(((vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS))
& GENFREQSTATUS) == 0, 100))
DRM_ERROR("timed out waiting for Punit\n");
gen6_set_rps_thresholds(dev_priv, val);
vlv_force_gfx_clock(dev_priv, false);
I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
} }
void gen6_rps_busy(struct drm_i915_private *dev_priv) void gen6_rps_busy(struct drm_i915_private *dev_priv)
...@@ -4121,22 +4150,29 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv) ...@@ -4121,22 +4150,29 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
} }
void gen6_rps_boost(struct drm_i915_private *dev_priv, void gen6_rps_boost(struct drm_i915_private *dev_priv,
struct drm_i915_file_private *file_priv) struct intel_rps_client *rps,
unsigned long submitted)
{ {
u32 val; u32 val;
/* Force a RPS boost (and don't count it against the client) if
* the GPU is severely congested.
*/
if (rps && time_after(jiffies, submitted + DRM_I915_THROTTLE_JIFFIES))
rps = NULL;
mutex_lock(&dev_priv->rps.hw_lock); mutex_lock(&dev_priv->rps.hw_lock);
val = dev_priv->rps.max_freq_softlimit; val = dev_priv->rps.max_freq_softlimit;
if (dev_priv->rps.enabled && if (dev_priv->rps.enabled &&
dev_priv->mm.busy && dev_priv->mm.busy &&
dev_priv->rps.cur_freq < val && dev_priv->rps.cur_freq < val &&
(file_priv == NULL || list_empty(&file_priv->rps_boost))) { (rps == NULL || list_empty(&rps->link))) {
intel_set_rps(dev_priv->dev, val); intel_set_rps(dev_priv->dev, val);
dev_priv->rps.last_adj = 0; dev_priv->rps.last_adj = 0;
if (file_priv != NULL) { if (rps != NULL) {
list_add(&file_priv->rps_boost, &dev_priv->rps.clients); list_add(&rps->link, &dev_priv->rps.clients);
file_priv->rps_boosts++; rps->boosts++;
} else } else
dev_priv->rps.boosts++; dev_priv->rps.boosts++;
} }
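/*
 * Note the throttle check above: a request older than
 * DRM_I915_THROTTLE_JIFFIES boosts anonymously (rps == NULL), so the boost
 * is charged to the global dev_priv->rps.boosts counter rather than the
 * client, and is not gated on the client's pending-boost link.
 */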
@@ -4714,24 +4750,6 @@ static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
         return rp1;
 }

-static int cherryview_rps_min_freq(struct drm_i915_private *dev_priv)
-{
-        struct drm_device *dev = dev_priv->dev;
-        u32 val, rpn;
-
-        if (dev->pdev->revision >= 0x20) {
-                val = vlv_punit_read(dev_priv, FB_GFX_FMIN_AT_VMIN_FUSE);
-                rpn = ((val >> FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT) &
-                       FB_GFX_FREQ_FUSE_MASK);
-        } else { /* For pre-production hardware */
-                val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
-                rpn = ((val >> PUNIT_GPU_STATIS_GFX_MIN_FREQ_SHIFT) &
-                       PUNIT_GPU_STATUS_GFX_MIN_FREQ_MASK);
-        }
-
-        return rpn;
-}
-
 static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
 {
         u32 val, rp1;
@@ -4983,7 +5001,8 @@ static void cherryview_init_gt_powersave(struct drm_device *dev)
                          intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
                          dev_priv->rps.rp1_freq);

-        dev_priv->rps.min_freq = cherryview_rps_min_freq(dev_priv);
+        /* PUnit validated range is only [RPe, RP0] */
+        dev_priv->rps.min_freq = dev_priv->rps.efficient_freq;
         DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
                          intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
                          dev_priv->rps.min_freq);
@@ -6155,10 +6174,9 @@ static void broadwell_init_clock_gating(struct drm_device *dev)
 {
         struct drm_i915_private *dev_priv = dev->dev_private;
         enum pipe pipe;
+        uint32_t misccpctl;

-        I915_WRITE(WM3_LP_ILK, 0);
-        I915_WRITE(WM2_LP_ILK, 0);
-        I915_WRITE(WM1_LP_ILK, 0);
+        ilk_init_lp_watermarks(dev);

         /* WaSwitchSolVfFArbitrationPriority:bdw */
         I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
@@ -6187,6 +6205,22 @@ static void broadwell_init_clock_gating(struct drm_device *dev)
         I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
                    GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

+        /*
+         * WaProgramL3SqcReg1Default:bdw
+         * WaTempDisableDOPClkGating:bdw
+         */
+        misccpctl = I915_READ(GEN7_MISCCPCTL);
+        I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
+        I915_WRITE(GEN8_L3SQCREG1, BDW_WA_L3SQCREG1_DEFAULT);
+        I915_WRITE(GEN7_MISCCPCTL, misccpctl);
+
+        /*
+         * WaGttCachingOffByDefault:bdw
+         * GTT cache may not work with big pages, so if those
+         * are ever enabled GTT cache may need to be disabled.
+         */
+        I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);
+
         lpt_init_clock_gating(dev);
 }
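Note: moving WaProgramL3SqcReg1Default into init_clock_gating pairs it with WaTempDisableDOPClkGating: DOP clock gating is switched off in MISCCPCTL, the L3SQCREG1 default is written, then the saved MISCCPCTL value is restored. A sketch of that save/disable/write/restore bracket against a mocked register file (bit position and default value are placeholders, not the hardware encoding):

#include <stdint.h>
#include <stdio.h>

enum { MISCCPCTL, L3SQCREG1, NREGS };
#define DOP_CLOCK_GATE_ENABLE   (1u << 0)       /* illustrative bit */
#define L3SQC_DEFAULT           0x610000u       /* placeholder value */

static uint32_t regs[NREGS];                    /* mocked MMIO space */

static void apply_l3sqc_workaround(void)
{
        uint32_t misccpctl = regs[MISCCPCTL];            /* save */

        regs[MISCCPCTL] = misccpctl & ~DOP_CLOCK_GATE_ENABLE;
        regs[L3SQCREG1] = L3SQC_DEFAULT;                 /* w/a write */
        regs[MISCCPCTL] = misccpctl;                     /* restore */
}

int main(void)
{
        regs[MISCCPCTL] = DOP_CLOCK_GATE_ENABLE;
        apply_l3sqc_workaround();
        printf("MISCCPCTL=%#x L3SQCREG1=%#x\n",
               (unsigned)regs[MISCCPCTL], (unsigned)regs[L3SQCREG1]);
        return 0;
}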
@@ -6462,6 +6496,12 @@ static void cherryview_init_clock_gating(struct drm_device *dev)
         /* WaDisableSDEUnitClockGating:chv */
         I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
                    GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
+
+        /*
+         * GTT cache may not work with big pages, so if those
+         * are ever enabled GTT cache may need to be disabled.
+         */
+        I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);
 }

 static void g4x_init_clock_gating(struct drm_device *dev)
@@ -6830,34 +6870,39 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)

 struct request_boost {
         struct work_struct work;
-        struct drm_i915_gem_request *rq;
+        struct drm_i915_gem_request *req;
 };

 static void __intel_rps_boost_work(struct work_struct *work)
 {
         struct request_boost *boost = container_of(work, struct request_boost, work);
+        struct drm_i915_gem_request *req = boost->req;

-        if (!i915_gem_request_completed(boost->rq, true))
-                gen6_rps_boost(to_i915(boost->rq->ring->dev), NULL);
+        if (!i915_gem_request_completed(req, true))
+                gen6_rps_boost(to_i915(req->ring->dev), NULL,
+                               req->emitted_jiffies);

-        i915_gem_request_unreference__unlocked(boost->rq);
+        i915_gem_request_unreference__unlocked(req);
         kfree(boost);
 }

 void intel_queue_rps_boost_for_request(struct drm_device *dev,
-                                       struct drm_i915_gem_request *rq)
+                                       struct drm_i915_gem_request *req)
 {
         struct request_boost *boost;

-        if (rq == NULL || INTEL_INFO(dev)->gen < 6)
+        if (req == NULL || INTEL_INFO(dev)->gen < 6)
+                return;
+
+        if (i915_gem_request_completed(req, true))
                 return;

         boost = kmalloc(sizeof(*boost), GFP_ATOMIC);
         if (boost == NULL)
                 return;

-        i915_gem_request_reference(rq);
-        boost->rq = rq;
+        i915_gem_request_reference(req);
+        boost->req = req;

         INIT_WORK(&boost->work, __intel_rps_boost_work);
         queue_work(to_i915(dev)->wq, &boost->work);
@@ -6872,6 +6917,8 @@ void intel_pm_setup(struct drm_device *dev)
         INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
                           intel_gen6_powersave_work);
         INIT_LIST_HEAD(&dev_priv->rps.clients);
+        INIT_LIST_HEAD(&dev_priv->rps.semaphores.link);
+        INIT_LIST_HEAD(&dev_priv->rps.mmioflips.link);

         dev_priv->pm.suspended = false;
 }
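Note: the request_boost path defers the actual RPS bump to a workqueue: the caller (possibly in atomic context, hence GFP_ATOMIC) takes a reference on the request, packages it into a heap-allocated work item, and the worker re-checks completion before boosting and always drops the reference. A userspace-style sketch of that ownership handoff, with refcounting simplified and the work queue reduced to a direct call (all names here are illustrative):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct request {
        int refcount;
        bool completed;
};

struct boost_work {
        struct request *req;    /* reference held while queued */
};

static void request_put(struct request *req)
{
        if (--req->refcount == 0)
                printf("request freed\n");
}

/* Worker: runs later, in process context. */
static void boost_work_fn(struct boost_work *work)
{
        if (!work->req->completed)
                printf("boosting for outstanding request\n");

        request_put(work->req);         /* always drop our reference */
        free(work);
}

/* Submitter: may be in atomic context, so only alloc + queue here. */
static void queue_boost(struct request *req)
{
        struct boost_work *work;

        if (req == NULL || req->completed)
                return;                 /* nothing to boost for */

        work = malloc(sizeof(*work));   /* kmalloc(..., GFP_ATOMIC) */
        if (work == NULL)
                return;

        req->refcount++;                /* i915_gem_request_reference() */
        work->req = req;
        boost_work_fn(work);            /* stands in for queue_work() */
}

int main(void)
{
        struct request req = { .refcount = 1, .completed = false };

        queue_boost(&req);
        request_put(&req);              /* drops the last reference */
        return 0;
}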
@@ -853,9 +853,6 @@ static int bdw_init_workarounds(struct intel_engine_cs *ring)
                             GEN6_WIZ_HASHING_MASK,
                             GEN6_WIZ_HASHING_16x4);

-        /* WaProgramL3SqcReg1Default:bdw */
-        WA_WRITE(GEN8_L3SQCREG1, BDW_WA_L3SQCREG1_DEFAULT);
-
         return 0;
 }
@@ -918,6 +915,7 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
 {
         struct drm_device *dev = ring->dev;
         struct drm_i915_private *dev_priv = dev->dev_private;
+        uint32_t tmp;

         /* WaDisablePartialInstShootdown:skl,bxt */
         WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
@@ -961,15 +959,19 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
         WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
                           GEN9_CCS_TLB_PREFETCH_ENABLE);

-        /*
-         * FIXME: don't apply the following on BXT for stepping C. On BXT A0
-         * the flag reads back as 0.
-         */
-        /* WaDisableMaskBasedCammingInRCC:sklC,bxtA */
-        if (INTEL_REVID(dev) == SKL_REVID_C0 || IS_BROXTON(dev))
+        /* WaDisableMaskBasedCammingInRCC:skl,bxt */
+        if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) == SKL_REVID_C0) ||
+            (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0))
                 WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
                                   PIXEL_MASK_CAMMING_DISABLE);

+        /* WaForceContextSaveRestoreNonCoherent:skl,bxt */
+        tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT;
+        if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) == SKL_REVID_F0) ||
+            (IS_BROXTON(dev) && INTEL_REVID(dev) >= BXT_REVID_B0))
+                tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE;
+        WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp);
+
         return 0;
 }
@@ -1060,10 +1062,6 @@ static int bxt_init_workarounds(struct intel_engine_cs *ring)
                         GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
         }

-        /* WaForceContextSaveRestoreNonCoherent:bxt */
-        WA_SET_BIT_MASKED(HDC_CHICKEN0,
-                          HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT);
-
         return 0;
 }
@@ -2102,15 +2100,16 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
 {
         struct intel_ringbuffer *ringbuf = ring->buffer;
         struct drm_i915_gem_request *request;
-        int ret, new_space;
+        unsigned space;
+        int ret;

         if (intel_ring_space(ringbuf) >= n)
                 return 0;

         list_for_each_entry(request, &ring->request_list, list) {
-                new_space = __intel_ring_space(request->postfix, ringbuf->tail,
-                                               ringbuf->size);
-                if (new_space >= n)
+                space = __intel_ring_space(request->postfix, ringbuf->tail,
+                                           ringbuf->size);
+                if (space >= n)
                         break;
         }
@@ -2121,10 +2120,7 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
         if (ret)
                 return ret;

-        i915_gem_retire_requests_ring(ring);
-
-        WARN_ON(intel_ring_space(ringbuf) < new_space);
+        ringbuf->space = space;

         return 0;
 }
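Note: ring_wait_for_space() now records the space computed against the chosen request and assigns it directly once the wait returns, rather than re-deriving it through request retirement. The helper it leans on is plain circular-buffer arithmetic: free space is the wrapped distance from the consumer position back to the producer's tail, minus a reserved margin. A sketch of that computation (the 64-byte margin mirrors what the driver reserves, but treat it as an assumption):

#include <stdio.h>

#define RING_FREE_SPACE 64      /* assumed reserved slack */

static int ring_space(int head, int tail, int size)
{
        int space = head - tail;

        if (space <= 0)
                space += size;  /* tail has wrapped past head */
        return space - RING_FREE_SPACE;
}

int main(void)
{
        printf("%d\n", ring_space(1024, 256, 4096));    /* 704 */
        printf("%d\n", ring_space(256, 1024, 4096));    /* 3264 */
        return 0;
}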
@@ -2168,10 +2164,14 @@ int intel_ring_idle(struct intel_engine_cs *ring)
                 return 0;

         req = list_entry(ring->request_list.prev,
                          struct drm_i915_gem_request,
                          list);

-        return i915_wait_request(req);
+        /* Make sure we do not trigger any retires */
+        return __i915_wait_request(req,
+                                   atomic_read(&to_i915(ring->dev)->gpu_error.reset_counter),
+                                   to_i915(ring->dev)->mm.interruptible,
+                                   NULL, NULL);
 }

 int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
......
@@ -771,7 +771,7 @@ static void vlv_set_power_well(struct drm_i915_private *dev_priv,
         vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

         if (wait_for(COND, 100))
-                DRM_ERROR("timout setting power well state %08x (%08x)\n",
+                DRM_ERROR("timeout setting power well state %08x (%08x)\n",
                           state,
                           vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
@@ -1029,7 +1029,7 @@ static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
         vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);

         if (wait_for(COND, 100))
-                DRM_ERROR("timout setting power well state %08x (%08x)\n",
+                DRM_ERROR("timeout setting power well state %08x (%08x)\n",
                           state,
                           vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));
......
@@ -243,6 +243,14 @@ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
         if (intel_sdvo->sdvo_reg == PCH_SDVOB) {
                 I915_WRITE(intel_sdvo->sdvo_reg, val);
                 POSTING_READ(intel_sdvo->sdvo_reg);
+                /*
+                 * HW workaround, need to write this twice for issue
+                 * that may result in first write getting masked.
+                 */
+                if (HAS_PCH_IBX(dev)) {
+                        I915_WRITE(intel_sdvo->sdvo_reg, val);
+                        POSTING_READ(intel_sdvo->sdvo_reg);
+                }
                 return;
         }
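Note: the IBX double write exists because the first write to the SDVO register may be masked by the hardware; repeating the write, each followed by a posting read to flush it over the bus, guarantees the value lands. A mocked sketch of the pattern (register storage and the PCH check are stand-ins, not the driver's MMIO accessors):

#include <stdbool.h>
#include <stdio.h>

static unsigned int sdvo_reg;           /* mocked MMIO register */

static void reg_write(unsigned int val)
{
        sdvo_reg = val;
}

static unsigned int posting_read(void)
{
        return sdvo_reg;                /* read back to flush the write */
}

static void write_sdvox(unsigned int val, bool is_ibx)
{
        reg_write(val);
        posting_read();

        /* HW w/a: the first write may be masked, so repeat it on IBX. */
        if (is_ibx) {
                reg_write(val);
                posting_read();
        }
}

int main(void)
{
        write_sdvox(0x80000000u, true);
        printf("sdvo_reg=%#x\n", sdvo_reg);
        return 0;
}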
@@ -1429,6 +1437,7 @@ static void intel_disable_sdvo(struct intel_encoder *encoder)
 {
         struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
         struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
+        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
         u32 temp;

         intel_sdvo_set_active_outputs(intel_sdvo, 0);
@@ -1437,35 +1446,34 @@ static void intel_disable_sdvo(struct intel_encoder *encoder)
                              DRM_MODE_DPMS_OFF);

         temp = I915_READ(intel_sdvo->sdvo_reg);
-        if ((temp & SDVO_ENABLE) != 0) {
-                /* HW workaround for IBX, we need to move the port to
-                 * transcoder A before disabling it. */
-                if (HAS_PCH_IBX(encoder->base.dev)) {
-                        struct drm_crtc *crtc = encoder->base.crtc;
-                        int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;

-                        if (temp & SDVO_PIPE_B_SELECT) {
-                                temp &= ~SDVO_PIPE_B_SELECT;
-                                I915_WRITE(intel_sdvo->sdvo_reg, temp);
-                                POSTING_READ(intel_sdvo->sdvo_reg);
+        temp &= ~SDVO_ENABLE;
+        intel_sdvo_write_sdvox(intel_sdvo, temp);

-                                /* Again we need to write this twice. */
-                                I915_WRITE(intel_sdvo->sdvo_reg, temp);
-                                POSTING_READ(intel_sdvo->sdvo_reg);
+        /*
+         * HW workaround for IBX, we need to move the port
+         * to transcoder A after disabling it to allow the
+         * matching DP port to be enabled on transcoder A.
+         */
+        if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B) {
+                temp &= ~SDVO_PIPE_B_SELECT;
+                temp |= SDVO_ENABLE;
+                intel_sdvo_write_sdvox(intel_sdvo, temp);

-                                /* Transcoder selection bits only update
-                                 * effectively on vblank. */
-                                if (crtc)
-                                        intel_wait_for_vblank(encoder->base.dev, pipe);
-                                else
-                                        msleep(50);
-                        }
-                }
-
-                intel_sdvo_write_sdvox(intel_sdvo, temp & ~SDVO_ENABLE);
+                temp &= ~SDVO_ENABLE;
+                intel_sdvo_write_sdvox(intel_sdvo, temp);
         }
 }

+static void pch_disable_sdvo(struct intel_encoder *encoder)
+{
+}
+
+static void pch_post_disable_sdvo(struct intel_encoder *encoder)
+{
+        intel_disable_sdvo(encoder);
+}
+
 static void intel_enable_sdvo(struct intel_encoder *encoder)
 {
         struct drm_device *dev = encoder->base.dev;
@@ -1478,14 +1486,9 @@ static void intel_enable_sdvo(struct intel_encoder *encoder)
         bool success;

         temp = I915_READ(intel_sdvo->sdvo_reg);
-        if ((temp & SDVO_ENABLE) == 0) {
-                /* HW workaround for IBX, we need to move the port
-                 * to transcoder A before disabling it, so restore it here. */
-                if (HAS_PCH_IBX(dev))
-                        temp |= SDVO_PIPE_SEL(intel_crtc->pipe);
+        temp |= SDVO_ENABLE;
+        intel_sdvo_write_sdvox(intel_sdvo, temp);

-                intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE);
-        }
-
         for (i = 0; i < 2; i++)
                 intel_wait_for_vblank(dev, intel_crtc->pipe);
@@ -2988,7 +2991,12 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
         }

         intel_encoder->compute_config = intel_sdvo_compute_config;
-        intel_encoder->disable = intel_disable_sdvo;
+        if (HAS_PCH_SPLIT(dev)) {
+                intel_encoder->disable = pch_disable_sdvo;
+                intel_encoder->post_disable = pch_post_disable_sdvo;
+        } else {
+                intel_encoder->disable = intel_disable_sdvo;
+        }
         intel_encoder->pre_enable = intel_sdvo_pre_enable;
         intel_encoder->enable = intel_enable_sdvo;
         intel_encoder->get_hw_state = intel_sdvo_get_hw_state;
......
@@ -229,8 +229,8 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
         if (intel_rotation_90_or_270(rotation)) {
                 /* stride: Surface height in tiles */
-                tile_height = intel_tile_height(dev, fb->bits_per_pixel,
+                tile_height = intel_tile_height(dev, fb->pixel_format,
                                                 fb->modifier[0]);
                 stride = DIV_ROUND_UP(fb->height, tile_height);
                 plane_size = (src_w << 16) | src_h;
                 x_offset = stride * tile_height - y - (src_h + 1);
@@ -770,6 +770,7 @@ intel_check_sprite_plane(struct drm_plane *plane,
         const struct drm_rect *clip = &state->clip;
         int hscale, vscale;
         int max_scale, min_scale;
+        bool can_scale;
         int pixel_size;
         int ret;
@@ -794,18 +795,29 @@ intel_check_sprite_plane(struct drm_plane *plane,
                 return -EINVAL;
         }

+        /* setup can_scale, min_scale, max_scale */
+        if (INTEL_INFO(dev)->gen >= 9) {
+                /* use scaler when colorkey is not required */
+                if (intel_plane->ckey.flags == I915_SET_COLORKEY_NONE) {
+                        can_scale = 1;
+                        min_scale = 1;
+                        max_scale = skl_max_scale(intel_crtc, crtc_state);
+                } else {
+                        can_scale = 0;
+                        min_scale = DRM_PLANE_HELPER_NO_SCALING;
+                        max_scale = DRM_PLANE_HELPER_NO_SCALING;
+                }
+        } else {
+                can_scale = intel_plane->can_scale;
+                max_scale = intel_plane->max_downscale << 16;
+                min_scale = intel_plane->can_scale ? 1 : (1 << 16);
+        }
+
         /*
          * FIXME the following code does a bunch of fuzzy adjustments to the
          * coordinates and sizes. We probably need some way to decide whether
          * more strict checking should be done instead.
          */
-        max_scale = intel_plane->max_downscale << 16;
-        min_scale = intel_plane->can_scale ? 1 : (1 << 16);
-
-        if (INTEL_INFO(dev)->gen >= 9) {
-                min_scale = 1;
-                max_scale = skl_max_scale(intel_crtc, crtc_state);
-        }
-
         drm_rect_rotate(src, fb->width << 16, fb->height << 16,
                         state->base.rotation);
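Note: the new gen >= 9 branch settles can_scale/min_scale/max_scale in one place: with no colorkey the pipe scaler is available (min 1, max from skl_max_scale()), otherwise both bounds collapse to DRM_PLANE_HELPER_NO_SCALING, i.e. 1:1 only. Scale factors here are src/dst ratios in 16.16 fixed point. A self-contained sketch of that check (the 2:1 limit below is an example, not the hardware bound):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NO_SCALING (1 << 16)    /* 1:1 in 16.16 fixed point */

static int calc_scale(int src, int dst)
{
        return (int)(((int64_t)src << 16) / dst);  /* src/dst, 16.16 */
}

static bool scale_ok(int scale, int min_scale, int max_scale)
{
        return scale >= min_scale && scale <= max_scale;
}

int main(void)
{
        /* scaler usable: e.g. up to 2:1 downscale allowed */
        int hscale = calc_scale(1920, 960);     /* 0x20000 == 2.0 */

        printf("hscale=%#x ok=%d\n", hscale,
               scale_ok(hscale, 1, 2 << 16));

        /* colorkey active: bounds pinned to NO_SCALING, only 1:1 passes */
        printf("1:1 ok=%d  2:1 ok=%d\n",
               scale_ok(NO_SCALING, NO_SCALING, NO_SCALING),
               scale_ok(hscale, NO_SCALING, NO_SCALING));
        return 0;
}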
@@ -876,7 +888,7 @@ intel_check_sprite_plane(struct drm_plane *plane,
                          * Must keep src and dst the
                          * same if we can't scale.
                          */
-                        if (!intel_plane->can_scale)
+                        if (!can_scale)
                                 crtc_w &= ~1;

                         if (crtc_w == 0)
@@ -888,7 +900,7 @@ intel_check_sprite_plane(struct drm_plane *plane,
         if (state->visible && (src_w != crtc_w || src_h != crtc_h)) {
                 unsigned int width_bytes;

-                WARN_ON(!intel_plane->can_scale);
+                WARN_ON(!can_scale);

                 /* FIXME interlacing min height is 6 */
@@ -1052,7 +1064,7 @@ int intel_plane_restore(struct drm_plane *plane)
                             plane->state->src_w, plane->state->src_h);
 }

-static uint32_t ilk_plane_formats[] = {
+static const uint32_t ilk_plane_formats[] = {
         DRM_FORMAT_XRGB8888,
         DRM_FORMAT_YUYV,
         DRM_FORMAT_YVYU,
@@ -1060,7 +1072,7 @@ static uint32_t ilk_plane_formats[] = {
         DRM_FORMAT_VYUY,
 };

-static uint32_t snb_plane_formats[] = {
+static const uint32_t snb_plane_formats[] = {
         DRM_FORMAT_XBGR8888,
         DRM_FORMAT_XRGB8888,
         DRM_FORMAT_YUYV,
@@ -1069,7 +1081,7 @@ static uint32_t snb_plane_formats[] = {
         DRM_FORMAT_VYUY,
 };

-static uint32_t vlv_plane_formats[] = {
+static const uint32_t vlv_plane_formats[] = {
         DRM_FORMAT_RGB565,
         DRM_FORMAT_ABGR8888,
         DRM_FORMAT_ARGB8888,
......
@@ -286,11 +286,9 @@
         INTEL_SKL_GT2_IDS(info), \
         INTEL_SKL_GT3_IDS(info)

 #define INTEL_BXT_IDS(info) \
         INTEL_VGA_DEVICE(0x0A84, info), \
-        INTEL_VGA_DEVICE(0x0A85, info), \
-        INTEL_VGA_DEVICE(0x0A86, info), \
-        INTEL_VGA_DEVICE(0x0A87, info)
+        INTEL_VGA_DEVICE(0x1A84, info), \
+        INTEL_VGA_DEVICE(0x5A84, info)

 #endif /* _I915_PCIIDS_H */