Commit 36a5fdf7 authored by Dave Airlie

Merge tag 'drm-intel-next-2017-10-23' of git://anongit.freedesktop.org/drm/drm-intel into drm-next

This time really the last i915 batch for v4.15:

- PSR state tracking in crtc state (Ville)
- Fix eviction when the GGTT is idle but full (Chris)
- BDW DP aux channel timeout fix (James)
- LSPCON detection fixes (Shashank)
- Use for_each_pipe to iterate over pipes (Mika Kahola)
- Replace *_reference/unreference() or *_ref/unref with _get/put() (Harsha)
- Refactoring and preparation for DDI encoder type cleanup (Ville)
- Broadwell DDI FDI buf translation fix (Chris)
- Read CSB and CSB write pointer from HWSP in GVT-g VM if available (Weinan)
- GuC/HuC firmware loader refactoring (Michal)
- Make shrinking more effective and not stall so much (Chris)
- Cannonlake PLL fixes (Rodrigo)
- DP MST connector error propagation fixes (James)
- Convert timers to use timer_setup (Kees Cook)
- Skylake plane enable/disable unification (Juha-Pekka)
- Fix to actually free driver internal objects when requested (Chris)
- DDI buf trans refactoring (Ville)
- Skip waking the device to service pwrite (Chris)
- Improve DSI VBT backlight parsing abstraction (Madhav)
- Cannonlake VBT DDC pin mapping fix (Rodrigo)

* tag 'drm-intel-next-2017-10-23' of git://anongit.freedesktop.org/drm/drm-intel: (87 commits)
  drm/i915: Update DRIVER_DATE to 20171023
  drm/i915/cnl: Map VBT DDC Pin to BSpec DDC Pin.
  drm/i915: Let's use more enum intel_dpll_id pll_id.
  drm/i915: Use existing DSI backlight ports info
  drm/i915: Parse DSI backlight/cabc ports.
  drm/i915: Skip waking the device to service pwrite
  drm/i915/crt: split compute_config hook by platforms
  drm/i915: remove g4x lowfreq_avail and has_pipe_cxsr
  drm/i915: Drop the redundant hdmi prefix/suffix from a lot of variables
  drm/i915: Unify error handling for missing DDI buf trans tables
  drm/i915: Centralize the SKL DDI A/E vs. B/C/D buf trans handling
  drm/i915: Kill off the BXT buf_trans default_index
  drm/i915: Pass encoder type to cnl_ddi_vswing_sequence() explicitly
  drm/i915: Integrate BXT into intel_ddi_dp_voltage_max()
  drm/i915: Pass the level to intel_prepare_hdmi_ddi_buffers()
  drm/i915: Pass the encoder type explicitly to skl_set_iboost()
  drm/i915: Extract intel_ddi_get_buf_trans_hdmi()
  drm/i915: Relocate intel_ddi_get_buf_trans_*() functions
  drm/i915: Flush the idle-worker for debugfs/i915_drop_caches
  drm/i915: adjust get_crtc_fence_y_offset() to use base.y instead of crtc.y
  ...
...@@ -410,6 +410,7 @@ int drm_lspcon_get_mode(struct i2c_adapter *adapter, ...@@ -410,6 +410,7 @@ int drm_lspcon_get_mode(struct i2c_adapter *adapter,
{ {
u8 data; u8 data;
int ret = 0; int ret = 0;
int retry;
if (!mode) { if (!mode) {
DRM_ERROR("NULL input\n"); DRM_ERROR("NULL input\n");
...@@ -417,10 +418,19 @@ int drm_lspcon_get_mode(struct i2c_adapter *adapter, ...@@ -417,10 +418,19 @@ int drm_lspcon_get_mode(struct i2c_adapter *adapter,
} }
/* Read Status: i2c over aux */ /* Read Status: i2c over aux */
ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_LSPCON_CURRENT_MODE, for (retry = 0; retry < 6; retry++) {
&data, sizeof(data)); if (retry)
usleep_range(500, 1000);
ret = drm_dp_dual_mode_read(adapter,
DP_DUAL_MODE_LSPCON_CURRENT_MODE,
&data, sizeof(data));
if (!ret)
break;
}
if (ret < 0) { if (ret < 0) {
DRM_ERROR("LSPCON read(0x80, 0x41) failed\n"); DRM_DEBUG_KMS("LSPCON read(0x80, 0x41) failed\n");
return -EFAULT; return -EFAULT;
} }
......
...@@ -64,7 +64,7 @@ i915-y += intel_uc.o \ ...@@ -64,7 +64,7 @@ i915-y += intel_uc.o \
intel_guc.o \ intel_guc.o \
intel_guc_ct.o \ intel_guc_ct.o \
intel_guc_log.o \ intel_guc_log.o \
intel_guc_loader.o \ intel_guc_fw.o \
intel_huc.o \ intel_huc.o \
i915_guc_submission.o i915_guc_submission.o
......
...@@ -83,7 +83,7 @@ static char get_active_flag(struct drm_i915_gem_object *obj) ...@@ -83,7 +83,7 @@ static char get_active_flag(struct drm_i915_gem_object *obj)
static char get_pin_flag(struct drm_i915_gem_object *obj) static char get_pin_flag(struct drm_i915_gem_object *obj)
{ {
return obj->pin_display ? 'p' : ' '; return obj->pin_global ? 'p' : ' ';
} }
static char get_tiling_flag(struct drm_i915_gem_object *obj) static char get_tiling_flag(struct drm_i915_gem_object *obj)
...@@ -180,8 +180,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) ...@@ -180,8 +180,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
pin_count++; pin_count++;
} }
seq_printf(m, " (pinned x %d)", pin_count); seq_printf(m, " (pinned x %d)", pin_count);
if (obj->pin_display) if (obj->pin_global)
seq_printf(m, " (display)"); seq_printf(m, " (global)");
list_for_each_entry(vma, &obj->vma_list, obj_link) { list_for_each_entry(vma, &obj->vma_list, obj_link) {
if (!drm_mm_node_allocated(&vma->node)) if (!drm_mm_node_allocated(&vma->node))
continue; continue;
...@@ -271,7 +271,9 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data) ...@@ -271,7 +271,9 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
goto out; goto out;
total_obj_size = total_gtt_size = count = 0; total_obj_size = total_gtt_size = count = 0;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
spin_lock(&dev_priv->mm.obj_lock);
list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
if (count == total) if (count == total)
break; break;
...@@ -283,7 +285,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data) ...@@ -283,7 +285,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
total_gtt_size += i915_gem_obj_total_ggtt_size(obj); total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
} }
list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) { list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
if (count == total) if (count == total)
break; break;
...@@ -293,6 +295,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data) ...@@ -293,6 +295,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
objects[count++] = obj; objects[count++] = obj;
total_obj_size += obj->base.size; total_obj_size += obj->base.size;
} }
spin_unlock(&dev_priv->mm.obj_lock);
sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL); sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL);
...@@ -454,7 +457,9 @@ static int i915_gem_object_info(struct seq_file *m, void *data) ...@@ -454,7 +457,9 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
mapped_size = mapped_count = 0; mapped_size = mapped_count = 0;
purgeable_size = purgeable_count = 0; purgeable_size = purgeable_count = 0;
huge_size = huge_count = 0; huge_size = huge_count = 0;
list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
spin_lock(&dev_priv->mm.obj_lock);
list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
size += obj->base.size; size += obj->base.size;
++count; ++count;
...@@ -477,11 +482,11 @@ static int i915_gem_object_info(struct seq_file *m, void *data) ...@@ -477,11 +482,11 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
seq_printf(m, "%u unbound objects, %llu bytes\n", count, size); seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);
size = count = dpy_size = dpy_count = 0; size = count = dpy_size = dpy_count = 0;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) { list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
size += obj->base.size; size += obj->base.size;
++count; ++count;
if (obj->pin_display) { if (obj->pin_global) {
dpy_size += obj->base.size; dpy_size += obj->base.size;
++dpy_count; ++dpy_count;
} }
...@@ -502,6 +507,8 @@ static int i915_gem_object_info(struct seq_file *m, void *data) ...@@ -502,6 +507,8 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
page_sizes |= obj->mm.page_sizes.sg; page_sizes |= obj->mm.page_sizes.sg;
} }
} }
spin_unlock(&dev_priv->mm.obj_lock);
seq_printf(m, "%u bound objects, %llu bytes\n", seq_printf(m, "%u bound objects, %llu bytes\n",
count, size); count, size);
seq_printf(m, "%u purgeable objects, %llu bytes\n", seq_printf(m, "%u purgeable objects, %llu bytes\n",
...@@ -512,7 +519,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data) ...@@ -512,7 +519,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
huge_count, huge_count,
stringify_page_sizes(page_sizes, buf, sizeof(buf)), stringify_page_sizes(page_sizes, buf, sizeof(buf)),
huge_size); huge_size);
seq_printf(m, "%u display objects (pinned), %llu bytes\n", seq_printf(m, "%u display objects (globally pinned), %llu bytes\n",
dpy_count, dpy_size); dpy_count, dpy_size);
seq_printf(m, "%llu [%llu] gtt total\n", seq_printf(m, "%llu [%llu] gtt total\n",
...@@ -568,32 +575,46 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data) ...@@ -568,32 +575,46 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data)
struct drm_info_node *node = m->private; struct drm_info_node *node = m->private;
struct drm_i915_private *dev_priv = node_to_i915(node); struct drm_i915_private *dev_priv = node_to_i915(node);
struct drm_device *dev = &dev_priv->drm; struct drm_device *dev = &dev_priv->drm;
bool show_pin_display_only = !!node->info_ent->data; struct drm_i915_gem_object **objects;
struct drm_i915_gem_object *obj; struct drm_i915_gem_object *obj;
u64 total_obj_size, total_gtt_size; u64 total_obj_size, total_gtt_size;
unsigned long nobject, n;
int count, ret; int count, ret;
nobject = READ_ONCE(dev_priv->mm.object_count);
objects = kvmalloc_array(nobject, sizeof(*objects), GFP_KERNEL);
if (!objects)
return -ENOMEM;
ret = mutex_lock_interruptible(&dev->struct_mutex); ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret) if (ret)
return ret; return ret;
total_obj_size = total_gtt_size = count = 0; count = 0;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) { spin_lock(&dev_priv->mm.obj_lock);
if (show_pin_display_only && !obj->pin_display) list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
continue; objects[count++] = obj;
if (count == nobject)
break;
}
spin_unlock(&dev_priv->mm.obj_lock);
total_obj_size = total_gtt_size = 0;
for (n = 0; n < count; n++) {
obj = objects[n];
seq_puts(m, " "); seq_puts(m, " ");
describe_obj(m, obj); describe_obj(m, obj);
seq_putc(m, '\n'); seq_putc(m, '\n');
total_obj_size += obj->base.size; total_obj_size += obj->base.size;
total_gtt_size += i915_gem_obj_total_ggtt_size(obj); total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
count++;
} }
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n", seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
count, total_obj_size, total_gtt_size); count, total_obj_size, total_gtt_size);
kvfree(objects);
return 0; return 0;
} }
...@@ -643,54 +664,6 @@ static int i915_gem_batch_pool_info(struct seq_file *m, void *data) ...@@ -643,54 +664,6 @@ static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
return 0; return 0;
} }
static void print_request(struct seq_file *m,
struct drm_i915_gem_request *rq,
const char *prefix)
{
seq_printf(m, "%s%x [%x:%x] prio=%d @ %dms: %s\n", prefix,
rq->global_seqno, rq->ctx->hw_id, rq->fence.seqno,
rq->priotree.priority,
jiffies_to_msecs(jiffies - rq->emitted_jiffies),
rq->timeline->common->name);
}
static int i915_gem_request_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct drm_device *dev = &dev_priv->drm;
struct drm_i915_gem_request *req;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int ret, any;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
any = 0;
for_each_engine(engine, dev_priv, id) {
int count;
count = 0;
list_for_each_entry(req, &engine->timeline->requests, link)
count++;
if (count == 0)
continue;
seq_printf(m, "%s requests: %d\n", engine->name, count);
list_for_each_entry(req, &engine->timeline->requests, link)
print_request(m, req, " ");
any++;
}
mutex_unlock(&dev->struct_mutex);
if (any == 0)
seq_puts(m, "No requests\n");
return 0;
}
static void i915_ring_seqno_info(struct seq_file *m, static void i915_ring_seqno_info(struct seq_file *m,
struct intel_engine_cs *engine) struct intel_engine_cs *engine)
{ {
...@@ -2386,27 +2359,13 @@ static int i915_llc(struct seq_file *m, void *data) ...@@ -2386,27 +2359,13 @@ static int i915_llc(struct seq_file *m, void *data)
static int i915_huc_load_status_info(struct seq_file *m, void *data) static int i915_huc_load_status_info(struct seq_file *m, void *data)
{ {
struct drm_i915_private *dev_priv = node_to_i915(m->private); struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_uc_fw *huc_fw = &dev_priv->huc.fw; struct drm_printer p;
if (!HAS_HUC_UCODE(dev_priv)) if (!HAS_HUC_UCODE(dev_priv))
return 0; return 0;
seq_puts(m, "HuC firmware status:\n"); p = drm_seq_file_printer(m);
seq_printf(m, "\tpath: %s\n", huc_fw->path); intel_uc_fw_dump(&dev_priv->huc.fw, &p);
seq_printf(m, "\tfetch: %s\n",
intel_uc_fw_status_repr(huc_fw->fetch_status));
seq_printf(m, "\tload: %s\n",
intel_uc_fw_status_repr(huc_fw->load_status));
seq_printf(m, "\tversion wanted: %d.%d\n",
huc_fw->major_ver_wanted, huc_fw->minor_ver_wanted);
seq_printf(m, "\tversion found: %d.%d\n",
huc_fw->major_ver_found, huc_fw->minor_ver_found);
seq_printf(m, "\theader: offset is %d; size = %d\n",
huc_fw->header_offset, huc_fw->header_size);
seq_printf(m, "\tuCode: offset is %d; size = %d\n",
huc_fw->ucode_offset, huc_fw->ucode_size);
seq_printf(m, "\tRSA: offset is %d; size = %d\n",
huc_fw->rsa_offset, huc_fw->rsa_size);
intel_runtime_pm_get(dev_priv); intel_runtime_pm_get(dev_priv);
seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2)); seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
...@@ -2418,29 +2377,14 @@ static int i915_huc_load_status_info(struct seq_file *m, void *data) ...@@ -2418,29 +2377,14 @@ static int i915_huc_load_status_info(struct seq_file *m, void *data)
static int i915_guc_load_status_info(struct seq_file *m, void *data) static int i915_guc_load_status_info(struct seq_file *m, void *data)
{ {
struct drm_i915_private *dev_priv = node_to_i915(m->private); struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_uc_fw *guc_fw = &dev_priv->guc.fw; struct drm_printer p;
u32 tmp, i; u32 tmp, i;
if (!HAS_GUC_UCODE(dev_priv)) if (!HAS_GUC_UCODE(dev_priv))
return 0; return 0;
seq_printf(m, "GuC firmware status:\n"); p = drm_seq_file_printer(m);
seq_printf(m, "\tpath: %s\n", intel_uc_fw_dump(&dev_priv->guc.fw, &p);
guc_fw->path);
seq_printf(m, "\tfetch: %s\n",
intel_uc_fw_status_repr(guc_fw->fetch_status));
seq_printf(m, "\tload: %s\n",
intel_uc_fw_status_repr(guc_fw->load_status));
seq_printf(m, "\tversion wanted: %d.%d\n",
guc_fw->major_ver_wanted, guc_fw->minor_ver_wanted);
seq_printf(m, "\tversion found: %d.%d\n",
guc_fw->major_ver_found, guc_fw->minor_ver_found);
seq_printf(m, "\theader: offset is %d; size = %d\n",
guc_fw->header_offset, guc_fw->header_size);
seq_printf(m, "\tuCode: offset is %d; size = %d\n",
guc_fw->ucode_offset, guc_fw->ucode_size);
seq_printf(m, "\tRSA: offset is %d; size = %d\n",
guc_fw->rsa_offset, guc_fw->rsa_size);
intel_runtime_pm_get(dev_priv); intel_runtime_pm_get(dev_priv);
...@@ -3310,6 +3254,16 @@ static int i915_engine_info(struct seq_file *m, void *unused) ...@@ -3310,6 +3254,16 @@ static int i915_engine_info(struct seq_file *m, void *unused)
return 0; return 0;
} }
static int i915_shrinker_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
return 0;
}
static int i915_semaphore_status(struct seq_file *m, void *unused) static int i915_semaphore_status(struct seq_file *m, void *unused)
{ {
struct drm_i915_private *dev_priv = node_to_i915(m->private); struct drm_i915_private *dev_priv = node_to_i915(m->private);
...@@ -4225,18 +4179,20 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops, ...@@ -4225,18 +4179,20 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
i915_ring_test_irq_get, i915_ring_test_irq_set, i915_ring_test_irq_get, i915_ring_test_irq_set,
"0x%08llx\n"); "0x%08llx\n");
#define DROP_UNBOUND 0x1 #define DROP_UNBOUND BIT(0)
#define DROP_BOUND 0x2 #define DROP_BOUND BIT(1)
#define DROP_RETIRE 0x4 #define DROP_RETIRE BIT(2)
#define DROP_ACTIVE 0x8 #define DROP_ACTIVE BIT(3)
#define DROP_FREED 0x10 #define DROP_FREED BIT(4)
#define DROP_SHRINK_ALL 0x20 #define DROP_SHRINK_ALL BIT(5)
#define DROP_IDLE BIT(6)
#define DROP_ALL (DROP_UNBOUND | \ #define DROP_ALL (DROP_UNBOUND | \
DROP_BOUND | \ DROP_BOUND | \
DROP_RETIRE | \ DROP_RETIRE | \
DROP_ACTIVE | \ DROP_ACTIVE | \
DROP_FREED | \ DROP_FREED | \
DROP_SHRINK_ALL) DROP_SHRINK_ALL |\
DROP_IDLE)
static int static int
i915_drop_caches_get(void *data, u64 *val) i915_drop_caches_get(void *data, u64 *val)
{ {
...@@ -4252,7 +4208,8 @@ i915_drop_caches_set(void *data, u64 val) ...@@ -4252,7 +4208,8 @@ i915_drop_caches_set(void *data, u64 val)
struct drm_device *dev = &dev_priv->drm; struct drm_device *dev = &dev_priv->drm;
int ret = 0; int ret = 0;
DRM_DEBUG("Dropping caches: 0x%08llx\n", val); DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
val, val & DROP_ALL);
/* No need to check and wait for gpu resets, only libdrm auto-restarts /* No need to check and wait for gpu resets, only libdrm auto-restarts
* on ioctls on -EAGAIN. */ * on ioctls on -EAGAIN. */
...@@ -4283,6 +4240,9 @@ i915_drop_caches_set(void *data, u64 val) ...@@ -4283,6 +4240,9 @@ i915_drop_caches_set(void *data, u64 val)
i915_gem_shrink_all(dev_priv); i915_gem_shrink_all(dev_priv);
fs_reclaim_release(GFP_KERNEL); fs_reclaim_release(GFP_KERNEL);
if (val & DROP_IDLE)
drain_delayed_work(&dev_priv->gt.idle_work);
if (val & DROP_FREED) { if (val & DROP_FREED) {
synchronize_rcu(); synchronize_rcu();
i915_gem_drain_freed_objects(dev_priv); i915_gem_drain_freed_objects(dev_priv);
...@@ -4751,9 +4711,7 @@ static const struct drm_info_list i915_debugfs_list[] = { ...@@ -4751,9 +4711,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_capabilities", i915_capabilities, 0}, {"i915_capabilities", i915_capabilities, 0},
{"i915_gem_objects", i915_gem_object_info, 0}, {"i915_gem_objects", i915_gem_object_info, 0},
{"i915_gem_gtt", i915_gem_gtt_info, 0}, {"i915_gem_gtt", i915_gem_gtt_info, 0},
{"i915_gem_pin_display", i915_gem_gtt_info, 0, (void *)1},
{"i915_gem_stolen", i915_gem_stolen_list_info }, {"i915_gem_stolen", i915_gem_stolen_list_info },
{"i915_gem_request", i915_gem_request_info, 0},
{"i915_gem_seqno", i915_gem_seqno_info, 0}, {"i915_gem_seqno", i915_gem_seqno_info, 0},
{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0}, {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
{"i915_gem_interrupt", i915_interrupt_info, 0}, {"i915_gem_interrupt", i915_interrupt_info, 0},
...@@ -4791,6 +4749,7 @@ static const struct drm_info_list i915_debugfs_list[] = { ...@@ -4791,6 +4749,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_dmc_info", i915_dmc_info, 0}, {"i915_dmc_info", i915_dmc_info, 0},
{"i915_display_info", i915_display_info, 0}, {"i915_display_info", i915_display_info, 0},
{"i915_engine_info", i915_engine_info, 0}, {"i915_engine_info", i915_engine_info, 0},
{"i915_shrinker_info", i915_shrinker_info, 0},
{"i915_semaphore_status", i915_semaphore_status, 0}, {"i915_semaphore_status", i915_semaphore_status, 0},
{"i915_shared_dplls_info", i915_shared_dplls_info, 0}, {"i915_shared_dplls_info", i915_shared_dplls_info, 0},
{"i915_dp_mst_info", i915_dp_mst_info, 0}, {"i915_dp_mst_info", i915_dp_mst_info, 0},
......
...@@ -80,8 +80,8 @@ ...@@ -80,8 +80,8 @@
#define DRIVER_NAME "i915" #define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics" #define DRIVER_DESC "Intel Graphics"
#define DRIVER_DATE "20171012" #define DRIVER_DATE "20171023"
#define DRIVER_TIMESTAMP 1507831511 #define DRIVER_TIMESTAMP 1508748913
/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
* WARN_ON()) for hw state sanity checks to check for unexpected conditions * WARN_ON()) for hw state sanity checks to check for unexpected conditions
...@@ -785,7 +785,6 @@ struct intel_csr { ...@@ -785,7 +785,6 @@ struct intel_csr {
func(has_logical_ring_contexts); \ func(has_logical_ring_contexts); \
func(has_logical_ring_preemption); \ func(has_logical_ring_preemption); \
func(has_overlay); \ func(has_overlay); \
func(has_pipe_cxsr); \
func(has_pooled_eu); \ func(has_pooled_eu); \
func(has_psr); \ func(has_psr); \
func(has_rc6); \ func(has_rc6); \
...@@ -1108,6 +1107,16 @@ struct intel_fbc { ...@@ -1108,6 +1107,16 @@ struct intel_fbc {
int src_w; int src_w;
int src_h; int src_h;
bool visible; bool visible;
/*
* Display surface base address adjustement for
* pageflips. Note that on gen4+ this only adjusts up
* to a tile, offsets within a tile are handled in
* the hw itself (with the TILEOFF register).
*/
int adjusted_x;
int adjusted_y;
int y;
} plane; } plane;
struct { struct {
...@@ -1490,6 +1499,9 @@ struct i915_gem_mm { ...@@ -1490,6 +1499,9 @@ struct i915_gem_mm {
* always the inner lock when overlapping with struct_mutex. */ * always the inner lock when overlapping with struct_mutex. */
struct mutex stolen_lock; struct mutex stolen_lock;
/* Protects bound_list/unbound_list and #drm_i915_gem_object.mm.link */
spinlock_t obj_lock;
/** List of all objects in gtt_space. Used to restore gtt /** List of all objects in gtt_space. Used to restore gtt
* mappings on resume */ * mappings on resume */
struct list_head bound_list; struct list_head bound_list;
...@@ -1510,6 +1522,7 @@ struct i915_gem_mm { ...@@ -1510,6 +1522,7 @@ struct i915_gem_mm {
*/ */
struct llist_head free_list; struct llist_head free_list;
struct work_struct free_work; struct work_struct free_work;
spinlock_t free_lock;
/** /**
* Small stash of WC pages * Small stash of WC pages
...@@ -1765,6 +1778,8 @@ struct intel_vbt_data { ...@@ -1765,6 +1778,8 @@ struct intel_vbt_data {
u16 panel_id; u16 panel_id;
struct mipi_config *config; struct mipi_config *config;
struct mipi_pps_data *pps; struct mipi_pps_data *pps;
u16 bl_ports;
u16 cabc_ports;
u8 seq_version; u8 seq_version;
u32 size; u32 size;
u8 *data; u8 *data;
...@@ -1960,13 +1975,7 @@ struct i915_wa_reg { ...@@ -1960,13 +1975,7 @@ struct i915_wa_reg {
u32 mask; u32 mask;
}; };
/* #define I915_MAX_WA_REGS 16
* RING_MAX_NONPRIV_SLOTS is per-engine but at this point we are only
* allowing it for RCS as we don't foresee any requirement of having
* a whitelist for other engines. When it is really required for
* other engines then the limit need to be increased.
*/
#define I915_MAX_WA_REGS (16 + RING_MAX_NONPRIV_SLOTS)
struct i915_workarounds { struct i915_workarounds {
struct i915_wa_reg reg[I915_MAX_WA_REGS]; struct i915_wa_reg reg[I915_MAX_WA_REGS];
...@@ -3077,6 +3086,7 @@ intel_info(const struct drm_i915_private *dev_priv) ...@@ -3077,6 +3086,7 @@ intel_info(const struct drm_i915_private *dev_priv)
#define CNL_REVID_A0 0x0 #define CNL_REVID_A0 0x0
#define CNL_REVID_B0 0x1 #define CNL_REVID_B0 0x1
#define CNL_REVID_C0 0x2
#define IS_CNL_REVID(p, since, until) \ #define IS_CNL_REVID(p, since, until) \
(IS_CANNONLAKE(p) && IS_REVID(p, since, until)) (IS_CANNONLAKE(p) && IS_REVID(p, since, until))
...@@ -3168,7 +3178,6 @@ intel_info(const struct drm_i915_private *dev_priv) ...@@ -3168,7 +3178,6 @@ intel_info(const struct drm_i915_private *dev_priv)
#define I915_HAS_HOTPLUG(dev_priv) ((dev_priv)->info.has_hotplug) #define I915_HAS_HOTPLUG(dev_priv) ((dev_priv)->info.has_hotplug)
#define HAS_FW_BLC(dev_priv) (INTEL_GEN(dev_priv) > 2) #define HAS_FW_BLC(dev_priv) (INTEL_GEN(dev_priv) > 2)
#define HAS_PIPE_CXSR(dev_priv) ((dev_priv)->info.has_pipe_cxsr)
#define HAS_FBC(dev_priv) ((dev_priv)->info.has_fbc) #define HAS_FBC(dev_priv) ((dev_priv)->info.has_fbc)
#define HAS_CUR_FBC(dev_priv) (!HAS_GMCH_DISPLAY(dev_priv) && INTEL_INFO(dev_priv)->gen >= 7) #define HAS_CUR_FBC(dev_priv) (!HAS_GMCH_DISPLAY(dev_priv) && INTEL_INFO(dev_priv)->gen >= 7)
...@@ -3565,10 +3574,16 @@ i915_gem_object_pin_pages(struct drm_i915_gem_object *obj) ...@@ -3565,10 +3574,16 @@ i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
return __i915_gem_object_get_pages(obj); return __i915_gem_object_get_pages(obj);
} }
static inline bool
i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
{
return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages));
}
static inline void static inline void
__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj) __i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{ {
GEM_BUG_ON(!obj->mm.pages); GEM_BUG_ON(!i915_gem_object_has_pages(obj));
atomic_inc(&obj->mm.pages_pin_count); atomic_inc(&obj->mm.pages_pin_count);
} }
...@@ -3582,8 +3597,8 @@ i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj) ...@@ -3582,8 +3597,8 @@ i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
static inline void static inline void
__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj) __i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{ {
GEM_BUG_ON(!i915_gem_object_has_pages(obj));
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
GEM_BUG_ON(!obj->mm.pages);
atomic_dec(&obj->mm.pages_pin_count); atomic_dec(&obj->mm.pages_pin_count);
} }
......
...@@ -56,7 +56,7 @@ static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj) ...@@ -56,7 +56,7 @@ static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)) if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
return true; return true;
return obj->pin_display; return obj->pin_global; /* currently in use by HW, keep flushed */
} }
static int static int
...@@ -1240,7 +1240,23 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj, ...@@ -1240,7 +1240,23 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
if (ret) if (ret)
return ret; return ret;
intel_runtime_pm_get(i915); if (i915_gem_object_has_struct_page(obj)) {
/*
* Avoid waking the device up if we can fallback, as
* waking/resuming is very slow (worst-case 10-100 ms
* depending on PCI sleeps and our own resume time).
* This easily dwarfs any performance advantage from
* using the cache bypass of indirect GGTT access.
*/
if (!intel_runtime_pm_get_if_in_use(i915)) {
ret = -EFAULT;
goto out_unlock;
}
} else {
/* No backing pages, no fallback, we must force GGTT access */
intel_runtime_pm_get(i915);
}
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
PIN_MAPPABLE | PIN_MAPPABLE |
PIN_NONFAULT | PIN_NONFAULT |
...@@ -1257,7 +1273,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj, ...@@ -1257,7 +1273,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
if (IS_ERR(vma)) { if (IS_ERR(vma)) {
ret = insert_mappable_node(ggtt, &node, PAGE_SIZE); ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
if (ret) if (ret)
goto out_unlock; goto out_rpm;
GEM_BUG_ON(!node.allocated); GEM_BUG_ON(!node.allocated);
} }
...@@ -1320,8 +1336,9 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj, ...@@ -1320,8 +1336,9 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
} else { } else {
i915_vma_unpin(vma); i915_vma_unpin(vma);
} }
out_unlock: out_rpm:
intel_runtime_pm_put(i915); intel_runtime_pm_put(i915);
out_unlock:
mutex_unlock(&i915->drm.struct_mutex); mutex_unlock(&i915->drm.struct_mutex);
return ret; return ret;
} }
...@@ -1537,6 +1554,8 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj) ...@@ -1537,6 +1554,8 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
struct list_head *list; struct list_head *list;
struct i915_vma *vma; struct i915_vma *vma;
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
list_for_each_entry(vma, &obj->vma_list, obj_link) { list_for_each_entry(vma, &obj->vma_list, obj_link) {
if (!i915_vma_is_ggtt(vma)) if (!i915_vma_is_ggtt(vma))
break; break;
...@@ -1551,8 +1570,10 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj) ...@@ -1551,8 +1570,10 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
} }
i915 = to_i915(obj->base.dev); i915 = to_i915(obj->base.dev);
spin_lock(&i915->mm.obj_lock);
list = obj->bind_count ? &i915->mm.bound_list : &i915->mm.unbound_list; list = obj->bind_count ? &i915->mm.bound_list : &i915->mm.unbound_list;
list_move_tail(&obj->global_link, list); list_move_tail(&obj->mm.link, list);
spin_unlock(&i915->mm.obj_lock);
} }
/** /**
...@@ -2196,7 +2217,7 @@ void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj) ...@@ -2196,7 +2217,7 @@ void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
struct address_space *mapping; struct address_space *mapping;
lockdep_assert_held(&obj->mm.lock); lockdep_assert_held(&obj->mm.lock);
GEM_BUG_ON(obj->mm.pages); GEM_BUG_ON(i915_gem_object_has_pages(obj));
switch (obj->mm.madv) { switch (obj->mm.madv) {
case I915_MADV_DONTNEED: case I915_MADV_DONTNEED:
...@@ -2253,13 +2274,14 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj) ...@@ -2253,13 +2274,14 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj, void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
enum i915_mm_subclass subclass) enum i915_mm_subclass subclass)
{ {
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct sg_table *pages; struct sg_table *pages;
if (i915_gem_object_has_pinned_pages(obj)) if (i915_gem_object_has_pinned_pages(obj))
return; return;
GEM_BUG_ON(obj->bind_count); GEM_BUG_ON(obj->bind_count);
if (!READ_ONCE(obj->mm.pages)) if (!i915_gem_object_has_pages(obj))
return; return;
/* May be called by shrinker from within get_pages() (on another bo) */ /* May be called by shrinker from within get_pages() (on another bo) */
...@@ -2273,6 +2295,10 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj, ...@@ -2273,6 +2295,10 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
pages = fetch_and_zero(&obj->mm.pages); pages = fetch_and_zero(&obj->mm.pages);
GEM_BUG_ON(!pages); GEM_BUG_ON(!pages);
spin_lock(&i915->mm.obj_lock);
list_del(&obj->mm.link);
spin_unlock(&i915->mm.obj_lock);
if (obj->mm.mapping) { if (obj->mm.mapping) {
void *ptr; void *ptr;
...@@ -2507,7 +2533,7 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, ...@@ -2507,7 +2533,7 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
obj->mm.pages = pages; obj->mm.pages = pages;
if (i915_gem_object_is_tiled(obj) && if (i915_gem_object_is_tiled(obj) &&
to_i915(obj->base.dev)->quirks & QUIRK_PIN_SWIZZLED_PAGES) { i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
GEM_BUG_ON(obj->mm.quirked); GEM_BUG_ON(obj->mm.quirked);
__i915_gem_object_pin_pages(obj); __i915_gem_object_pin_pages(obj);
obj->mm.quirked = true; obj->mm.quirked = true;
...@@ -2529,8 +2555,11 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, ...@@ -2529,8 +2555,11 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
if (obj->mm.page_sizes.phys & ~0u << i) if (obj->mm.page_sizes.phys & ~0u << i)
obj->mm.page_sizes.sg |= BIT(i); obj->mm.page_sizes.sg |= BIT(i);
} }
GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg)); GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));
spin_lock(&i915->mm.obj_lock);
list_add(&obj->mm.link, &i915->mm.unbound_list);
spin_unlock(&i915->mm.obj_lock);
} }
static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj) static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
...@@ -2563,7 +2592,7 @@ int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj) ...@@ -2563,7 +2592,7 @@ int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
if (err) if (err)
return err; return err;
if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) { if (unlikely(!i915_gem_object_has_pages(obj))) {
GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj)); GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
err = ____i915_gem_object_get_pages(obj); err = ____i915_gem_object_get_pages(obj);
...@@ -2648,7 +2677,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj, ...@@ -2648,7 +2677,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
type &= ~I915_MAP_OVERRIDE; type &= ~I915_MAP_OVERRIDE;
if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) { if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) { if (unlikely(!i915_gem_object_has_pages(obj))) {
GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj)); GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
ret = ____i915_gem_object_get_pages(obj); ret = ____i915_gem_object_get_pages(obj);
...@@ -2660,7 +2689,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj, ...@@ -2660,7 +2689,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
atomic_inc(&obj->mm.pages_pin_count); atomic_inc(&obj->mm.pages_pin_count);
pinned = false; pinned = false;
} }
GEM_BUG_ON(!obj->mm.pages); GEM_BUG_ON(!i915_gem_object_has_pages(obj));
ptr = page_unpack_bits(obj->mm.mapping, &has_type); ptr = page_unpack_bits(obj->mm.mapping, &has_type);
if (ptr && has_type != type) { if (ptr && has_type != type) {
...@@ -2715,7 +2744,7 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj, ...@@ -2715,7 +2744,7 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
* allows it to avoid the cost of retrieving a page (either swapin * allows it to avoid the cost of retrieving a page (either swapin
* or clearing-before-use) before it is overwritten. * or clearing-before-use) before it is overwritten.
*/ */
if (READ_ONCE(obj->mm.pages)) if (i915_gem_object_has_pages(obj))
return -ENODEV; return -ENODEV;
if (obj->mm.madv != I915_MADV_WILLNEED) if (obj->mm.madv != I915_MADV_WILLNEED)
...@@ -3090,7 +3119,6 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv) ...@@ -3090,7 +3119,6 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
static void nop_submit_request(struct drm_i915_gem_request *request) static void nop_submit_request(struct drm_i915_gem_request *request)
{ {
GEM_BUG_ON(!i915_terminally_wedged(&request->i915->gpu_error));
dma_fence_set_error(&request->fence, -EIO); dma_fence_set_error(&request->fence, -EIO);
i915_gem_request_submit(request); i915_gem_request_submit(request);
...@@ -3100,7 +3128,6 @@ static void nop_complete_submit_request(struct drm_i915_gem_request *request) ...@@ -3100,7 +3128,6 @@ static void nop_complete_submit_request(struct drm_i915_gem_request *request)
{ {
unsigned long flags; unsigned long flags;
GEM_BUG_ON(!i915_terminally_wedged(&request->i915->gpu_error));
dma_fence_set_error(&request->fence, -EIO); dma_fence_set_error(&request->fence, -EIO);
spin_lock_irqsave(&request->engine->timeline->lock, flags); spin_lock_irqsave(&request->engine->timeline->lock, flags);
...@@ -3498,7 +3525,7 @@ static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj) ...@@ -3498,7 +3525,7 @@ static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj) void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj)
{ {
if (!READ_ONCE(obj->pin_display)) if (!READ_ONCE(obj->pin_global))
return; return;
mutex_lock(&obj->base.dev->struct_mutex); mutex_lock(&obj->base.dev->struct_mutex);
...@@ -3865,10 +3892,10 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, ...@@ -3865,10 +3892,10 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
lockdep_assert_held(&obj->base.dev->struct_mutex); lockdep_assert_held(&obj->base.dev->struct_mutex);
/* Mark the pin_display early so that we account for the /* Mark the global pin early so that we account for the
* display coherency whilst setting up the cache domains. * display coherency whilst setting up the cache domains.
*/ */
obj->pin_display++; obj->pin_global++;
/* The display engine is not coherent with the LLC cache on gen6. As /* The display engine is not coherent with the LLC cache on gen6. As
* a result, we make sure that the pinning that is about to occur is * a result, we make sure that the pinning that is about to occur is
...@@ -3884,7 +3911,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, ...@@ -3884,7 +3911,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
I915_CACHE_WT : I915_CACHE_NONE); I915_CACHE_WT : I915_CACHE_NONE);
if (ret) { if (ret) {
vma = ERR_PTR(ret); vma = ERR_PTR(ret);
goto err_unpin_display; goto err_unpin_global;
} }
/* As the user may map the buffer once pinned in the display plane /* As the user may map the buffer once pinned in the display plane
...@@ -3915,7 +3942,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, ...@@ -3915,7 +3942,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags); vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags);
} }
if (IS_ERR(vma)) if (IS_ERR(vma))
goto err_unpin_display; goto err_unpin_global;
vma->display_alignment = max_t(u64, vma->display_alignment, alignment); vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
...@@ -3930,8 +3957,8 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, ...@@ -3930,8 +3957,8 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
return vma; return vma;
err_unpin_display: err_unpin_global:
obj->pin_display--; obj->pin_global--;
return vma; return vma;
} }
...@@ -3940,10 +3967,10 @@ i915_gem_object_unpin_from_display_plane(struct i915_vma *vma) ...@@ -3940,10 +3967,10 @@ i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
{ {
lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
if (WARN_ON(vma->obj->pin_display == 0)) if (WARN_ON(vma->obj->pin_global == 0))
return; return;
if (--vma->obj->pin_display == 0) if (--vma->obj->pin_global == 0)
vma->display_alignment = I915_GTT_MIN_ALIGNMENT; vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
/* Bump the LRU to try and avoid premature eviction whilst flipping */ /* Bump the LRU to try and avoid premature eviction whilst flipping */
...@@ -4283,7 +4310,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, ...@@ -4283,7 +4310,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
if (err) if (err)
goto out; goto out;
if (obj->mm.pages && if (i915_gem_object_has_pages(obj) &&
i915_gem_object_is_tiled(obj) && i915_gem_object_is_tiled(obj) &&
dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) { dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
if (obj->mm.madv == I915_MADV_WILLNEED) { if (obj->mm.madv == I915_MADV_WILLNEED) {
...@@ -4302,7 +4329,8 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, ...@@ -4302,7 +4329,8 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
obj->mm.madv = args->madv; obj->mm.madv = args->madv;
/* if the object is no longer attached, discard its backing storage */ /* if the object is no longer attached, discard its backing storage */
if (obj->mm.madv == I915_MADV_DONTNEED && !obj->mm.pages) if (obj->mm.madv == I915_MADV_DONTNEED &&
!i915_gem_object_has_pages(obj))
i915_gem_object_truncate(obj); i915_gem_object_truncate(obj);
args->retained = obj->mm.madv != __I915_MADV_PURGED; args->retained = obj->mm.madv != __I915_MADV_PURGED;
...@@ -4328,7 +4356,6 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj, ...@@ -4328,7 +4356,6 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
{ {
mutex_init(&obj->mm.lock); mutex_init(&obj->mm.lock);
INIT_LIST_HEAD(&obj->global_link);
INIT_LIST_HEAD(&obj->vma_list); INIT_LIST_HEAD(&obj->vma_list);
INIT_LIST_HEAD(&obj->lut_list); INIT_LIST_HEAD(&obj->lut_list);
INIT_LIST_HEAD(&obj->batch_pool_link); INIT_LIST_HEAD(&obj->batch_pool_link);
...@@ -4483,13 +4510,14 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915, ...@@ -4483,13 +4510,14 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
{ {
struct drm_i915_gem_object *obj, *on; struct drm_i915_gem_object *obj, *on;
mutex_lock(&i915->drm.struct_mutex);
intel_runtime_pm_get(i915); intel_runtime_pm_get(i915);
llist_for_each_entry(obj, freed, freed) { llist_for_each_entry_safe(obj, on, freed, freed) {
struct i915_vma *vma, *vn; struct i915_vma *vma, *vn;
trace_i915_gem_object_destroy(obj); trace_i915_gem_object_destroy(obj);
mutex_lock(&i915->drm.struct_mutex);
GEM_BUG_ON(i915_gem_object_is_active(obj)); GEM_BUG_ON(i915_gem_object_is_active(obj));
list_for_each_entry_safe(vma, vn, list_for_each_entry_safe(vma, vn,
&obj->vma_list, obj_link) { &obj->vma_list, obj_link) {
...@@ -4500,14 +4528,20 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915, ...@@ -4500,14 +4528,20 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
GEM_BUG_ON(!list_empty(&obj->vma_list)); GEM_BUG_ON(!list_empty(&obj->vma_list));
GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma_tree)); GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma_tree));
list_del(&obj->global_link); /* This serializes freeing with the shrinker. Since the free
} * is delayed, first by RCU then by the workqueue, we want the
intel_runtime_pm_put(i915); * shrinker to be able to free pages of unreferenced objects,
mutex_unlock(&i915->drm.struct_mutex); * or else we may oom whilst there are plenty of deferred
* freed objects.
*/
if (i915_gem_object_has_pages(obj)) {
spin_lock(&i915->mm.obj_lock);
list_del_init(&obj->mm.link);
spin_unlock(&i915->mm.obj_lock);
}
cond_resched(); mutex_unlock(&i915->drm.struct_mutex);
llist_for_each_entry_safe(obj, on, freed, freed) {
GEM_BUG_ON(obj->bind_count); GEM_BUG_ON(obj->bind_count);
GEM_BUG_ON(obj->userfault_count); GEM_BUG_ON(obj->userfault_count);
GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits)); GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));
...@@ -4519,7 +4553,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915, ...@@ -4519,7 +4553,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
if (WARN_ON(i915_gem_object_has_pinned_pages(obj))) if (WARN_ON(i915_gem_object_has_pinned_pages(obj)))
atomic_set(&obj->mm.pages_pin_count, 0); atomic_set(&obj->mm.pages_pin_count, 0);
__i915_gem_object_put_pages(obj, I915_MM_NORMAL); __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
GEM_BUG_ON(obj->mm.pages); GEM_BUG_ON(i915_gem_object_has_pages(obj));
if (obj->base.import_attach) if (obj->base.import_attach)
drm_prime_gem_destroy(&obj->base, NULL); drm_prime_gem_destroy(&obj->base, NULL);
...@@ -4530,16 +4564,29 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915, ...@@ -4530,16 +4564,29 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
kfree(obj->bit_17); kfree(obj->bit_17);
i915_gem_object_free(obj); i915_gem_object_free(obj);
if (on)
cond_resched();
} }
intel_runtime_pm_put(i915);
} }
static void i915_gem_flush_free_objects(struct drm_i915_private *i915) static void i915_gem_flush_free_objects(struct drm_i915_private *i915)
{ {
struct llist_node *freed; struct llist_node *freed;
freed = llist_del_all(&i915->mm.free_list); /* Free the oldest, most stale object to keep the free_list short */
if (unlikely(freed)) freed = NULL;
if (!llist_empty(&i915->mm.free_list)) { /* quick test for hotpath */
/* Only one consumer of llist_del_first() allowed */
spin_lock(&i915->mm.free_lock);
freed = llist_del_first(&i915->mm.free_list);
spin_unlock(&i915->mm.free_lock);
}
if (unlikely(freed)) {
freed->next = NULL;
__i915_gem_free_objects(i915, freed); __i915_gem_free_objects(i915, freed);
}
} }
static void __i915_gem_free_work(struct work_struct *work) static void __i915_gem_free_work(struct work_struct *work)
...@@ -4840,6 +4887,10 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv) ...@@ -4840,6 +4887,10 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
init_unused_rings(dev_priv); init_unused_rings(dev_priv);
BUG_ON(!dev_priv->kernel_context); BUG_ON(!dev_priv->kernel_context);
if (i915_terminally_wedged(&dev_priv->gpu_error)) {
ret = -EIO;
goto out;
}
ret = i915_ppgtt_init_hw(dev_priv); ret = i915_ppgtt_init_hw(dev_priv);
if (ret) { if (ret) {
...@@ -4938,8 +4989,10 @@ int i915_gem_init(struct drm_i915_private *dev_priv) ...@@ -4938,8 +4989,10 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
* wedged. But we only want to do this where the GPU is angry, * wedged. But we only want to do this where the GPU is angry,
* for all other failure, such as an allocation failure, bail. * for all other failure, such as an allocation failure, bail.
*/ */
DRM_ERROR("Failed to initialize GPU, declaring it wedged\n"); if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
i915_gem_set_wedged(dev_priv); DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
i915_gem_set_wedged(dev_priv);
}
ret = 0; ret = 0;
} }
...@@ -5039,11 +5092,15 @@ i915_gem_load_init(struct drm_i915_private *dev_priv) ...@@ -5039,11 +5092,15 @@ i915_gem_load_init(struct drm_i915_private *dev_priv)
goto err_priorities; goto err_priorities;
INIT_WORK(&dev_priv->mm.free_work, __i915_gem_free_work); INIT_WORK(&dev_priv->mm.free_work, __i915_gem_free_work);
spin_lock_init(&dev_priv->mm.obj_lock);
spin_lock_init(&dev_priv->mm.free_lock);
init_llist_head(&dev_priv->mm.free_list); init_llist_head(&dev_priv->mm.free_list);
INIT_LIST_HEAD(&dev_priv->mm.unbound_list); INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
INIT_LIST_HEAD(&dev_priv->mm.bound_list); INIT_LIST_HEAD(&dev_priv->mm.bound_list);
INIT_LIST_HEAD(&dev_priv->mm.fence_list); INIT_LIST_HEAD(&dev_priv->mm.fence_list);
INIT_LIST_HEAD(&dev_priv->mm.userfault_list); INIT_LIST_HEAD(&dev_priv->mm.userfault_list);
INIT_DELAYED_WORK(&dev_priv->gt.retire_work, INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
i915_gem_retire_work_handler); i915_gem_retire_work_handler);
INIT_DELAYED_WORK(&dev_priv->gt.idle_work, INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
...@@ -5137,12 +5194,12 @@ int i915_gem_freeze_late(struct drm_i915_private *dev_priv) ...@@ -5137,12 +5194,12 @@ int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
i915_gem_shrink(dev_priv, -1UL, NULL, I915_SHRINK_UNBOUND); i915_gem_shrink(dev_priv, -1UL, NULL, I915_SHRINK_UNBOUND);
i915_gem_drain_freed_objects(dev_priv); i915_gem_drain_freed_objects(dev_priv);
mutex_lock(&dev_priv->drm.struct_mutex); spin_lock(&dev_priv->mm.obj_lock);
for (p = phases; *p; p++) { for (p = phases; *p; p++) {
list_for_each_entry(obj, *p, global_link) list_for_each_entry(obj, *p, mm.link)
__start_cpu_write(obj); __start_cpu_write(obj);
} }
mutex_unlock(&dev_priv->drm.struct_mutex); spin_unlock(&dev_priv->mm.obj_lock);
return 0; return 0;
} }
...@@ -5461,7 +5518,17 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align) ...@@ -5461,7 +5518,17 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
goto err_unlock; goto err_unlock;
} }
pages = obj->mm.pages; pages = fetch_and_zero(&obj->mm.pages);
if (pages) {
struct drm_i915_private *i915 = to_i915(obj->base.dev);
__i915_gem_object_reset_page_iter(obj);
spin_lock(&i915->mm.obj_lock);
list_del(&obj->mm.link);
spin_unlock(&i915->mm.obj_lock);
}
obj->ops = &i915_gem_phys_ops; obj->ops = &i915_gem_phys_ops;
err = ____i915_gem_object_get_pages(obj); err = ____i915_gem_object_get_pages(obj);
......
...@@ -70,6 +70,7 @@ static const struct dma_fence_ops i915_clflush_ops = { ...@@ -70,6 +70,7 @@ static const struct dma_fence_ops i915_clflush_ops = {
static void __i915_do_clflush(struct drm_i915_gem_object *obj) static void __i915_do_clflush(struct drm_i915_gem_object *obj)
{ {
GEM_BUG_ON(!i915_gem_object_has_pages(obj));
drm_clflush_sg(obj->mm.pages); drm_clflush_sg(obj->mm.pages);
intel_fb_obj_flush(obj, ORIGIN_CPU); intel_fb_obj_flush(obj, ORIGIN_CPU);
} }
......
...@@ -33,6 +33,10 @@ ...@@ -33,6 +33,10 @@
#include "intel_drv.h" #include "intel_drv.h"
#include "i915_trace.h" #include "i915_trace.h"
I915_SELFTEST_DECLARE(static struct igt_evict_ctl {
bool fail_if_busy:1;
} igt_evict_ctl;)
static bool ggtt_is_idle(struct drm_i915_private *i915) static bool ggtt_is_idle(struct drm_i915_private *i915)
{ {
struct intel_engine_cs *engine; struct intel_engine_cs *engine;
...@@ -205,6 +209,9 @@ i915_gem_evict_something(struct i915_address_space *vm, ...@@ -205,6 +209,9 @@ i915_gem_evict_something(struct i915_address_space *vm,
* the kernel's there is no more we can evict. * the kernel's there is no more we can evict.
*/ */
if (!ggtt_is_idle(dev_priv)) { if (!ggtt_is_idle(dev_priv)) {
if (I915_SELFTEST_ONLY(igt_evict_ctl.fail_if_busy))
return -EBUSY;
ret = ggtt_flush(dev_priv); ret = ggtt_flush(dev_priv);
if (ret) if (ret)
return ret; return ret;
......
...@@ -3594,8 +3594,7 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv) ...@@ -3594,8 +3594,7 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
ggtt->base.closed = true; /* skip rewriting PTE on VMA unbind */ ggtt->base.closed = true; /* skip rewriting PTE on VMA unbind */
/* clflush objects bound into the GGTT and rebind them. */ /* clflush objects bound into the GGTT and rebind them. */
list_for_each_entry_safe(obj, on, list_for_each_entry_safe(obj, on, &dev_priv->mm.bound_list, mm.link) {
&dev_priv->mm.bound_list, global_link) {
bool ggtt_bound = false; bool ggtt_bound = false;
struct i915_vma *vma; struct i915_vma *vma;
......
...@@ -114,7 +114,6 @@ struct drm_i915_gem_object { ...@@ -114,7 +114,6 @@ struct drm_i915_gem_object {
/** Stolen memory for this object, instead of being backed by shmem. */ /** Stolen memory for this object, instead of being backed by shmem. */
struct drm_mm_node *stolen; struct drm_mm_node *stolen;
struct list_head global_link;
union { union {
struct rcu_head rcu; struct rcu_head rcu;
struct llist_node freed; struct llist_node freed;
...@@ -161,7 +160,8 @@ struct drm_i915_gem_object { ...@@ -161,7 +160,8 @@ struct drm_i915_gem_object {
/** Count of VMA actually bound by this object */ /** Count of VMA actually bound by this object */
unsigned int bind_count; unsigned int bind_count;
unsigned int active_count; unsigned int active_count;
unsigned int pin_display; /** Count of how many global VMA are currently pinned for use by HW */
unsigned int pin_global;
struct { struct {
struct mutex lock; /* protects the pages and their use */ struct mutex lock; /* protects the pages and their use */
...@@ -207,6 +207,12 @@ struct drm_i915_gem_object { ...@@ -207,6 +207,12 @@ struct drm_i915_gem_object {
struct mutex lock; /* protects this cache */ struct mutex lock; /* protects this cache */
} get_page; } get_page;
/**
* Element within i915->mm.unbound_list or i915->mm.bound_list,
* locked by i915->mm.obj_lock.
*/
struct list_head link;
/** /**
* Advice: are the backing pages purgeable? * Advice: are the backing pages purgeable?
*/ */
......
...@@ -229,7 +229,7 @@ int i915_gem_render_state_emit(struct drm_i915_gem_request *req) ...@@ -229,7 +229,7 @@ int i915_gem_render_state_emit(struct drm_i915_gem_request *req)
return 0; return 0;
/* Recreate the page after shrinking */ /* Recreate the page after shrinking */
if (!so->vma->obj->mm.pages) if (!i915_gem_object_has_pages(so->vma->obj))
so->batch_offset = -1; so->batch_offset = -1;
ret = i915_vma_pin(so->vma, 0, 0, PIN_GLOBAL | PIN_HIGH); ret = i915_vma_pin(so->vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
......
...@@ -71,25 +71,6 @@ static void shrinker_unlock(struct drm_i915_private *dev_priv, bool unlock) ...@@ -71,25 +71,6 @@ static void shrinker_unlock(struct drm_i915_private *dev_priv, bool unlock)
mutex_unlock(&dev_priv->drm.struct_mutex); mutex_unlock(&dev_priv->drm.struct_mutex);
} }
static bool any_vma_pinned(struct drm_i915_gem_object *obj)
{
struct i915_vma *vma;
list_for_each_entry(vma, &obj->vma_list, obj_link) {
/* Only GGTT vma may be permanently pinned, and are always
* at the start of the list. We can stop hunting as soon
* as we see a ppGTT vma.
*/
if (!i915_vma_is_ggtt(vma))
break;
if (i915_vma_is_pinned(vma))
return true;
}
return false;
}
static bool swap_available(void) static bool swap_available(void)
{ {
return get_nr_swap_pages() > 0; return get_nr_swap_pages() > 0;
...@@ -97,9 +78,6 @@ static bool swap_available(void) ...@@ -97,9 +78,6 @@ static bool swap_available(void)
static bool can_release_pages(struct drm_i915_gem_object *obj) static bool can_release_pages(struct drm_i915_gem_object *obj)
{ {
if (!obj->mm.pages)
return false;
/* Consider only shrinkable ojects. */ /* Consider only shrinkable ojects. */
if (!i915_gem_object_is_shrinkable(obj)) if (!i915_gem_object_is_shrinkable(obj))
return false; return false;
...@@ -115,7 +93,13 @@ static bool can_release_pages(struct drm_i915_gem_object *obj) ...@@ -115,7 +93,13 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
if (atomic_read(&obj->mm.pages_pin_count) > obj->bind_count) if (atomic_read(&obj->mm.pages_pin_count) > obj->bind_count)
return false; return false;
if (any_vma_pinned(obj)) /* If any vma are "permanently" pinned, it will prevent us from
* reclaiming the obj->mm.pages. We only allow scanout objects to claim
* a permanent pin, along with a few others like the context objects.
* To simplify the scan, and to avoid walking the list of vma under the
* object, we just check the count of its permanently pinned.
*/
if (READ_ONCE(obj->pin_global))
return false; return false;
/* We can only return physical pages to the system if we can either /* We can only return physical pages to the system if we can either
...@@ -129,7 +113,7 @@ static bool unsafe_drop_pages(struct drm_i915_gem_object *obj) ...@@ -129,7 +113,7 @@ static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
{ {
if (i915_gem_object_unbind(obj) == 0) if (i915_gem_object_unbind(obj) == 0)
__i915_gem_object_put_pages(obj, I915_MM_SHRINKER); __i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
return !READ_ONCE(obj->mm.pages); return !i915_gem_object_has_pages(obj);
} }
/** /**
...@@ -217,15 +201,20 @@ i915_gem_shrink(struct drm_i915_private *dev_priv, ...@@ -217,15 +201,20 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
continue; continue;
INIT_LIST_HEAD(&still_in_list); INIT_LIST_HEAD(&still_in_list);
/*
* We serialize our access to unreferenced objects through
* the use of the struct_mutex. While the objects are not
* yet freed (due to RCU then a workqueue) we still want
* to be able to shrink their pages, so they remain on
* the unbound/bound list until actually freed.
*/
spin_lock(&dev_priv->mm.obj_lock);
while (count < target && while (count < target &&
(obj = list_first_entry_or_null(phase->list, (obj = list_first_entry_or_null(phase->list,
typeof(*obj), typeof(*obj),
global_link))) { mm.link))) {
list_move_tail(&obj->global_link, &still_in_list); list_move_tail(&obj->mm.link, &still_in_list);
if (!obj->mm.pages) {
list_del_init(&obj->global_link);
continue;
}
if (flags & I915_SHRINK_PURGEABLE && if (flags & I915_SHRINK_PURGEABLE &&
obj->mm.madv != I915_MADV_DONTNEED) obj->mm.madv != I915_MADV_DONTNEED)
...@@ -243,20 +232,24 @@ i915_gem_shrink(struct drm_i915_private *dev_priv, ...@@ -243,20 +232,24 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
if (!can_release_pages(obj)) if (!can_release_pages(obj))
continue; continue;
spin_unlock(&dev_priv->mm.obj_lock);
if (unsafe_drop_pages(obj)) { if (unsafe_drop_pages(obj)) {
/* May arrive from get_pages on another bo */ /* May arrive from get_pages on another bo */
mutex_lock_nested(&obj->mm.lock, mutex_lock_nested(&obj->mm.lock,
I915_MM_SHRINKER); I915_MM_SHRINKER);
if (!obj->mm.pages) { if (!i915_gem_object_has_pages(obj)) {
__i915_gem_object_invalidate(obj); __i915_gem_object_invalidate(obj);
list_del_init(&obj->global_link);
count += obj->base.size >> PAGE_SHIFT; count += obj->base.size >> PAGE_SHIFT;
} }
mutex_unlock(&obj->mm.lock); mutex_unlock(&obj->mm.lock);
scanned += obj->base.size >> PAGE_SHIFT;
} }
scanned += obj->base.size >> PAGE_SHIFT;
spin_lock(&dev_priv->mm.obj_lock);
} }
list_splice_tail(&still_in_list, phase->list); list_splice_tail(&still_in_list, phase->list);
spin_unlock(&dev_priv->mm.obj_lock);
} }
if (flags & I915_SHRINK_BOUND) if (flags & I915_SHRINK_BOUND)
...@@ -302,28 +295,39 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv) ...@@ -302,28 +295,39 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
static unsigned long static unsigned long
i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc) i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{ {
struct drm_i915_private *dev_priv = struct drm_i915_private *i915 =
container_of(shrinker, struct drm_i915_private, mm.shrinker); container_of(shrinker, struct drm_i915_private, mm.shrinker);
struct drm_i915_gem_object *obj; struct drm_i915_gem_object *obj;
unsigned long count; unsigned long num_objects = 0;
bool unlock; unsigned long count = 0;
if (!shrinker_lock(dev_priv, &unlock))
return 0;
i915_gem_retire_requests(dev_priv);
count = 0; spin_lock(&i915->mm.obj_lock);
list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) list_for_each_entry(obj, &i915->mm.unbound_list, mm.link)
if (can_release_pages(obj)) if (can_release_pages(obj)) {
count += obj->base.size >> PAGE_SHIFT; count += obj->base.size >> PAGE_SHIFT;
num_objects++;
}
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) { list_for_each_entry(obj, &i915->mm.bound_list, mm.link)
if (!i915_gem_object_is_active(obj) && can_release_pages(obj)) if (!i915_gem_object_is_active(obj) && can_release_pages(obj)) {
count += obj->base.size >> PAGE_SHIFT; count += obj->base.size >> PAGE_SHIFT;
} num_objects++;
}
spin_unlock(&i915->mm.obj_lock);
shrinker_unlock(dev_priv, unlock); /* Update our preferred vmscan batch size for the next pass.
* Our rough guess for an effective batch size is roughly 2
* available GEM objects worth of pages. That is we don't want
* the shrinker to fire, until it is worth the cost of freeing an
* entire GEM object.
*/
if (num_objects) {
unsigned long avg = 2 * count / num_objects;
i915->mm.shrinker.batch =
max((i915->mm.shrinker.batch + avg) >> 1,
128ul /* default SHRINK_BATCH */);
}
return count; return count;
} }
...@@ -400,10 +404,6 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr) ...@@ -400,10 +404,6 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
container_of(nb, struct drm_i915_private, mm.oom_notifier); container_of(nb, struct drm_i915_private, mm.oom_notifier);
struct drm_i915_gem_object *obj; struct drm_i915_gem_object *obj;
unsigned long unevictable, bound, unbound, freed_pages; unsigned long unevictable, bound, unbound, freed_pages;
bool unlock;
if (!shrinker_lock_uninterruptible(dev_priv, &unlock, 5000))
return NOTIFY_DONE;
freed_pages = i915_gem_shrink_all(dev_priv); freed_pages = i915_gem_shrink_all(dev_priv);
...@@ -412,26 +412,20 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr) ...@@ -412,26 +412,20 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
* being pointed to by hardware. * being pointed to by hardware.
*/ */
unbound = bound = unevictable = 0; unbound = bound = unevictable = 0;
list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) { spin_lock(&dev_priv->mm.obj_lock);
if (!obj->mm.pages) list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
continue;
if (!can_release_pages(obj)) if (!can_release_pages(obj))
unevictable += obj->base.size >> PAGE_SHIFT; unevictable += obj->base.size >> PAGE_SHIFT;
else else
unbound += obj->base.size >> PAGE_SHIFT; unbound += obj->base.size >> PAGE_SHIFT;
} }
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) { list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
if (!obj->mm.pages)
continue;
if (!can_release_pages(obj)) if (!can_release_pages(obj))
unevictable += obj->base.size >> PAGE_SHIFT; unevictable += obj->base.size >> PAGE_SHIFT;
else else
bound += obj->base.size >> PAGE_SHIFT; bound += obj->base.size >> PAGE_SHIFT;
} }
spin_unlock(&dev_priv->mm.obj_lock);
shrinker_unlock(dev_priv, unlock);
if (freed_pages || unbound || bound) if (freed_pages || unbound || bound)
pr_info("Purging GPU memory, %lu pages freed, " pr_info("Purging GPU memory, %lu pages freed, "
...@@ -498,6 +492,7 @@ void i915_gem_shrinker_init(struct drm_i915_private *dev_priv) ...@@ -498,6 +492,7 @@ void i915_gem_shrinker_init(struct drm_i915_private *dev_priv)
dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan; dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count; dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS; dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
dev_priv->mm.shrinker.batch = 4096;
WARN_ON(register_shrinker(&dev_priv->mm.shrinker)); WARN_ON(register_shrinker(&dev_priv->mm.shrinker));
dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom; dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
......
...@@ -724,8 +724,11 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv ...@@ -724,8 +724,11 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
vma->flags |= I915_VMA_GLOBAL_BIND; vma->flags |= I915_VMA_GLOBAL_BIND;
__i915_vma_set_map_and_fenceable(vma); __i915_vma_set_map_and_fenceable(vma);
list_move_tail(&vma->vm_link, &ggtt->base.inactive_list); list_move_tail(&vma->vm_link, &ggtt->base.inactive_list);
list_move_tail(&obj->global_link, &dev_priv->mm.bound_list);
spin_lock(&dev_priv->mm.obj_lock);
list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
obj->bind_count++; obj->bind_count++;
spin_unlock(&dev_priv->mm.obj_lock);
return obj; return obj;
......
...@@ -269,7 +269,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj, ...@@ -269,7 +269,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
* due to the change in swizzling. * due to the change in swizzling.
*/ */
mutex_lock(&obj->mm.lock); mutex_lock(&obj->mm.lock);
if (obj->mm.pages && if (i915_gem_object_has_pages(obj) &&
obj->mm.madv == I915_MADV_WILLNEED && obj->mm.madv == I915_MADV_WILLNEED &&
i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) { i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
if (tiling == I915_TILING_NONE) { if (tiling == I915_TILING_NONE) {
......
...@@ -82,11 +82,11 @@ static void cancel_userptr(struct work_struct *work) ...@@ -82,11 +82,11 @@ static void cancel_userptr(struct work_struct *work)
/* We are inside a kthread context and can't be interrupted */ /* We are inside a kthread context and can't be interrupted */
if (i915_gem_object_unbind(obj) == 0) if (i915_gem_object_unbind(obj) == 0)
__i915_gem_object_put_pages(obj, I915_MM_NORMAL); __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
WARN_ONCE(obj->mm.pages, WARN_ONCE(i915_gem_object_has_pages(obj),
"Failed to release pages: bind_count=%d, pages_pin_count=%d, pin_display=%d\n", "Failed to release pages: bind_count=%d, pages_pin_count=%d, pin_global=%d\n",
obj->bind_count, obj->bind_count,
atomic_read(&obj->mm.pages_pin_count), atomic_read(&obj->mm.pages_pin_count),
obj->pin_display); obj->pin_global);
mutex_unlock(&obj->base.dev->struct_mutex); mutex_unlock(&obj->base.dev->struct_mutex);
...@@ -221,15 +221,17 @@ i915_mmu_notifier_find(struct i915_mm_struct *mm) ...@@ -221,15 +221,17 @@ i915_mmu_notifier_find(struct i915_mm_struct *mm)
/* Protected by mm_lock */ /* Protected by mm_lock */
mm->mn = fetch_and_zero(&mn); mm->mn = fetch_and_zero(&mn);
} }
} else { } else if (mm->mn) {
/* someone else raced and successfully installed the mmu /*
* notifier, we can cancel our own errors */ * Someone else raced and successfully installed the mmu
* notifier, we can cancel our own errors.
*/
err = 0; err = 0;
} }
mutex_unlock(&mm->i915->mm_lock); mutex_unlock(&mm->i915->mm_lock);
up_write(&mm->mm->mmap_sem); up_write(&mm->mm->mmap_sem);
if (mn) { if (mn && !IS_ERR(mn)) {
destroy_workqueue(mn->wq); destroy_workqueue(mn->wq);
kfree(mn); kfree(mn);
} }
......
...@@ -193,7 +193,6 @@ static const struct intel_device_info intel_i965gm_info __initconst = { ...@@ -193,7 +193,6 @@ static const struct intel_device_info intel_i965gm_info __initconst = {
static const struct intel_device_info intel_g45_info __initconst = { static const struct intel_device_info intel_g45_info __initconst = {
GEN4_FEATURES, GEN4_FEATURES,
.platform = INTEL_G45, .platform = INTEL_G45,
.has_pipe_cxsr = 1,
.ring_mask = RENDER_RING | BSD_RING, .ring_mask = RENDER_RING | BSD_RING,
}; };
...@@ -201,7 +200,6 @@ static const struct intel_device_info intel_gm45_info __initconst = { ...@@ -201,7 +200,6 @@ static const struct intel_device_info intel_gm45_info __initconst = {
GEN4_FEATURES, GEN4_FEATURES,
.platform = INTEL_GM45, .platform = INTEL_GM45,
.is_mobile = 1, .has_fbc = 1, .is_mobile = 1, .has_fbc = 1,
.has_pipe_cxsr = 1,
.supports_tv = 1, .supports_tv = 1,
.ring_mask = RENDER_RING | BSD_RING, .ring_mask = RENDER_RING | BSD_RING,
}; };
...@@ -645,7 +643,7 @@ static void i915_pci_remove(struct pci_dev *pdev) ...@@ -645,7 +643,7 @@ static void i915_pci_remove(struct pci_dev *pdev)
struct drm_device *dev = pci_get_drvdata(pdev); struct drm_device *dev = pci_get_drvdata(pdev);
i915_driver_unload(dev); i915_driver_unload(dev);
drm_dev_unref(dev); drm_dev_put(dev);
} }
static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
......
...@@ -53,6 +53,7 @@ enum vgt_g2v_type { ...@@ -53,6 +53,7 @@ enum vgt_g2v_type {
* VGT capabilities type * VGT capabilities type
*/ */
#define VGT_CAPS_FULL_48BIT_PPGTT BIT(2) #define VGT_CAPS_FULL_48BIT_PPGTT BIT(2)
#define VGT_CAPS_HWSP_EMULATION BIT(3)
struct vgt_if { struct vgt_if {
u64 magic; /* VGT_MAGIC */ u64 magic; /* VGT_MAGIC */
......
...@@ -5242,7 +5242,7 @@ enum { ...@@ -5242,7 +5242,7 @@ enum {
#define DP_AUX_CH_CTL_TIME_OUT_400us (0 << 26) #define DP_AUX_CH_CTL_TIME_OUT_400us (0 << 26)
#define DP_AUX_CH_CTL_TIME_OUT_600us (1 << 26) #define DP_AUX_CH_CTL_TIME_OUT_600us (1 << 26)
#define DP_AUX_CH_CTL_TIME_OUT_800us (2 << 26) #define DP_AUX_CH_CTL_TIME_OUT_800us (2 << 26)
#define DP_AUX_CH_CTL_TIME_OUT_1600us (3 << 26) #define DP_AUX_CH_CTL_TIME_OUT_MAX (3 << 26) /* Varies per platform */
#define DP_AUX_CH_CTL_TIME_OUT_MASK (3 << 26) #define DP_AUX_CH_CTL_TIME_OUT_MASK (3 << 26)
#define DP_AUX_CH_CTL_RECEIVE_ERROR (1 << 25) #define DP_AUX_CH_CTL_RECEIVE_ERROR (1 << 25)
#define DP_AUX_CH_CTL_MESSAGE_SIZE_MASK (0x1f << 20) #define DP_AUX_CH_CTL_MESSAGE_SIZE_MASK (0x1f << 20)
......
...@@ -41,6 +41,11 @@ static inline void debug_fence_init(struct i915_sw_fence *fence) ...@@ -41,6 +41,11 @@ static inline void debug_fence_init(struct i915_sw_fence *fence)
debug_object_init(fence, &i915_sw_fence_debug_descr); debug_object_init(fence, &i915_sw_fence_debug_descr);
} }
static inline void debug_fence_init_onstack(struct i915_sw_fence *fence)
{
debug_object_init_on_stack(fence, &i915_sw_fence_debug_descr);
}
static inline void debug_fence_activate(struct i915_sw_fence *fence) static inline void debug_fence_activate(struct i915_sw_fence *fence)
{ {
debug_object_activate(fence, &i915_sw_fence_debug_descr); debug_object_activate(fence, &i915_sw_fence_debug_descr);
...@@ -79,6 +84,10 @@ static inline void debug_fence_init(struct i915_sw_fence *fence) ...@@ -79,6 +84,10 @@ static inline void debug_fence_init(struct i915_sw_fence *fence)
{ {
} }
static inline void debug_fence_init_onstack(struct i915_sw_fence *fence)
{
}
static inline void debug_fence_activate(struct i915_sw_fence *fence) static inline void debug_fence_activate(struct i915_sw_fence *fence)
{ {
} }
...@@ -360,9 +369,9 @@ struct i915_sw_dma_fence_cb { ...@@ -360,9 +369,9 @@ struct i915_sw_dma_fence_cb {
struct irq_work work; struct irq_work work;
}; };
static void timer_i915_sw_fence_wake(unsigned long data) static void timer_i915_sw_fence_wake(struct timer_list *t)
{ {
struct i915_sw_dma_fence_cb *cb = (struct i915_sw_dma_fence_cb *)data; struct i915_sw_dma_fence_cb *cb = from_timer(cb, t, timer);
struct i915_sw_fence *fence; struct i915_sw_fence *fence;
fence = xchg(&cb->fence, NULL); fence = xchg(&cb->fence, NULL);
...@@ -425,9 +434,7 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence, ...@@ -425,9 +434,7 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
i915_sw_fence_await(fence); i915_sw_fence_await(fence);
cb->dma = NULL; cb->dma = NULL;
__setup_timer(&cb->timer, timer_setup(&cb->timer, timer_i915_sw_fence_wake, TIMER_IRQSAFE);
timer_i915_sw_fence_wake, (unsigned long)cb,
TIMER_IRQSAFE);
init_irq_work(&cb->work, irq_i915_sw_fence_work); init_irq_work(&cb->work, irq_i915_sw_fence_work);
if (timeout) { if (timeout) {
cb->dma = dma_fence_get(dma); cb->dma = dma_fence_get(dma);
...@@ -507,5 +514,6 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence, ...@@ -507,5 +514,6 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
} }
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/lib_sw_fence.c"
#include "selftests/i915_sw_fence.c" #include "selftests/i915_sw_fence.c"
#endif #endif
...@@ -30,6 +30,12 @@ void i915_check_vgpu(struct drm_i915_private *dev_priv); ...@@ -30,6 +30,12 @@ void i915_check_vgpu(struct drm_i915_private *dev_priv);
bool intel_vgpu_has_full_48bit_ppgtt(struct drm_i915_private *dev_priv); bool intel_vgpu_has_full_48bit_ppgtt(struct drm_i915_private *dev_priv);
static inline bool
intel_vgpu_has_hwsp_emulation(struct drm_i915_private *dev_priv)
{
return dev_priv->vgpu.caps & VGT_CAPS_HWSP_EMULATION;
}
int intel_vgt_balloon(struct drm_i915_private *dev_priv); int intel_vgt_balloon(struct drm_i915_private *dev_priv);
void intel_vgt_deballoon(struct drm_i915_private *dev_priv); void intel_vgt_deballoon(struct drm_i915_private *dev_priv);
......
...@@ -58,8 +58,10 @@ i915_vma_retire(struct i915_gem_active *active, ...@@ -58,8 +58,10 @@ i915_vma_retire(struct i915_gem_active *active,
* so that we don't steal from recently used but inactive objects * so that we don't steal from recently used but inactive objects
* (unless we are forced to ofc!) * (unless we are forced to ofc!)
*/ */
spin_lock(&rq->i915->mm.obj_lock);
if (obj->bind_count) if (obj->bind_count)
list_move_tail(&obj->global_link, &rq->i915->mm.bound_list); list_move_tail(&obj->mm.link, &rq->i915->mm.bound_list);
spin_unlock(&rq->i915->mm.obj_lock);
obj->mm.dirty = true; /* be paranoid */ obj->mm.dirty = true; /* be paranoid */
...@@ -563,9 +565,13 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) ...@@ -563,9 +565,13 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level)); GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));
list_move_tail(&obj->global_link, &dev_priv->mm.bound_list);
list_move_tail(&vma->vm_link, &vma->vm->inactive_list); list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
spin_lock(&dev_priv->mm.obj_lock);
list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
obj->bind_count++; obj->bind_count++;
spin_unlock(&dev_priv->mm.obj_lock);
GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count); GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
return 0; return 0;
...@@ -580,6 +586,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) ...@@ -580,6 +586,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
static void static void
i915_vma_remove(struct i915_vma *vma) i915_vma_remove(struct i915_vma *vma)
{ {
struct drm_i915_private *i915 = vma->vm->i915;
struct drm_i915_gem_object *obj = vma->obj; struct drm_i915_gem_object *obj = vma->obj;
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
...@@ -593,9 +600,10 @@ i915_vma_remove(struct i915_vma *vma) ...@@ -593,9 +600,10 @@ i915_vma_remove(struct i915_vma *vma)
/* Since the unbound list is global, only move to that list if /* Since the unbound list is global, only move to that list if
* no more VMAs exist. * no more VMAs exist.
*/ */
spin_lock(&i915->mm.obj_lock);
if (--obj->bind_count == 0) if (--obj->bind_count == 0)
list_move_tail(&obj->global_link, list_move_tail(&obj->mm.link, &i915->mm.unbound_list);
&to_i915(obj->base.dev)->mm.unbound_list); spin_unlock(&i915->mm.obj_lock);
/* And finally now the object is completely decoupled from this vma, /* And finally now the object is completely decoupled from this vma,
* we can drop its hold on the backing storage and allow it to be * we can drop its hold on the backing storage and allow it to be
......
...@@ -691,6 +691,48 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb) ...@@ -691,6 +691,48 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
dev_priv->vbt.psr.tp2_tp3_wakeup_time = psr_table->tp2_tp3_wakeup_time; dev_priv->vbt.psr.tp2_tp3_wakeup_time = psr_table->tp2_tp3_wakeup_time;
} }
static void parse_dsi_backlight_ports(struct drm_i915_private *dev_priv,
u16 version, enum port port)
{
if (!dev_priv->vbt.dsi.config->dual_link || version < 197) {
dev_priv->vbt.dsi.bl_ports = BIT(port);
if (dev_priv->vbt.dsi.config->cabc_supported)
dev_priv->vbt.dsi.cabc_ports = BIT(port);
return;
}
switch (dev_priv->vbt.dsi.config->dl_dcs_backlight_ports) {
case DL_DCS_PORT_A:
dev_priv->vbt.dsi.bl_ports = BIT(PORT_A);
break;
case DL_DCS_PORT_C:
dev_priv->vbt.dsi.bl_ports = BIT(PORT_C);
break;
default:
case DL_DCS_PORT_A_AND_C:
dev_priv->vbt.dsi.bl_ports = BIT(PORT_A) | BIT(PORT_C);
break;
}
if (!dev_priv->vbt.dsi.config->cabc_supported)
return;
switch (dev_priv->vbt.dsi.config->dl_dcs_cabc_ports) {
case DL_DCS_PORT_A:
dev_priv->vbt.dsi.cabc_ports = BIT(PORT_A);
break;
case DL_DCS_PORT_C:
dev_priv->vbt.dsi.cabc_ports = BIT(PORT_C);
break;
default:
case DL_DCS_PORT_A_AND_C:
dev_priv->vbt.dsi.cabc_ports =
BIT(PORT_A) | BIT(PORT_C);
break;
}
}
static void static void
parse_mipi_config(struct drm_i915_private *dev_priv, parse_mipi_config(struct drm_i915_private *dev_priv,
const struct bdb_header *bdb) const struct bdb_header *bdb)
...@@ -699,9 +741,10 @@ parse_mipi_config(struct drm_i915_private *dev_priv, ...@@ -699,9 +741,10 @@ parse_mipi_config(struct drm_i915_private *dev_priv,
const struct mipi_config *config; const struct mipi_config *config;
const struct mipi_pps_data *pps; const struct mipi_pps_data *pps;
int panel_type = dev_priv->vbt.panel_type; int panel_type = dev_priv->vbt.panel_type;
enum port port;
/* parse MIPI blocks only if LFP type is MIPI */ /* parse MIPI blocks only if LFP type is MIPI */
if (!intel_bios_is_dsi_present(dev_priv, NULL)) if (!intel_bios_is_dsi_present(dev_priv, &port))
return; return;
/* Initialize this to undefined indicating no generic MIPI support */ /* Initialize this to undefined indicating no generic MIPI support */
...@@ -742,15 +785,7 @@ parse_mipi_config(struct drm_i915_private *dev_priv, ...@@ -742,15 +785,7 @@ parse_mipi_config(struct drm_i915_private *dev_priv,
return; return;
} }
/* parse_dsi_backlight_ports(dev_priv, bdb->version, port);
* These fields are introduced from the VBT version 197 onwards,
* so making sure that these bits are set zero in the previous
* versions.
*/
if (dev_priv->vbt.dsi.config->dual_link && bdb->version < 197) {
dev_priv->vbt.dsi.config->dl_dcs_cabc_ports = 0;
dev_priv->vbt.dsi.config->dl_dcs_backlight_ports = 0;
}
/* We have mandatory mipi config blocks. Initialize as generic panel */ /* We have mandatory mipi config blocks. Initialize as generic panel */
dev_priv->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID; dev_priv->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID;
...@@ -1071,6 +1106,22 @@ static void sanitize_aux_ch(struct drm_i915_private *dev_priv, ...@@ -1071,6 +1106,22 @@ static void sanitize_aux_ch(struct drm_i915_private *dev_priv,
} }
} }
static const u8 cnp_ddc_pin_map[] = {
[DDC_BUS_DDI_B] = GMBUS_PIN_1_BXT,
[DDC_BUS_DDI_C] = GMBUS_PIN_2_BXT,
[DDC_BUS_DDI_D] = GMBUS_PIN_4_CNP, /* sic */
[DDC_BUS_DDI_F] = GMBUS_PIN_3_BXT, /* sic */
};
static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin)
{
if (HAS_PCH_CNP(dev_priv) &&
vbt_pin > 0 && vbt_pin < ARRAY_SIZE(cnp_ddc_pin_map))
return cnp_ddc_pin_map[vbt_pin];
return vbt_pin;
}
static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port, static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
u8 bdb_version) u8 bdb_version)
{ {
...@@ -1163,16 +1214,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port, ...@@ -1163,16 +1214,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port)); DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port));
if (is_dvi) { if (is_dvi) {
info->alternate_ddc_pin = ddc_pin; info->alternate_ddc_pin = map_ddc_pin(dev_priv, ddc_pin);
/*
* All VBTs that we got so far for B Stepping has this
* information wrong for Port D. So, let's just ignore for now.
*/
if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0) &&
port == PORT_D) {
info->alternate_ddc_pin = 0;
}
sanitize_ddc_pin(dev_priv, port); sanitize_ddc_pin(dev_priv, port);
} }
......
...@@ -74,9 +74,10 @@ static noinline void missed_breadcrumb(struct intel_engine_cs *engine) ...@@ -74,9 +74,10 @@ static noinline void missed_breadcrumb(struct intel_engine_cs *engine)
set_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings); set_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
} }
static void intel_breadcrumbs_hangcheck(unsigned long data) static void intel_breadcrumbs_hangcheck(struct timer_list *t)
{ {
struct intel_engine_cs *engine = (struct intel_engine_cs *)data; struct intel_engine_cs *engine = from_timer(engine, t,
breadcrumbs.hangcheck);
struct intel_breadcrumbs *b = &engine->breadcrumbs; struct intel_breadcrumbs *b = &engine->breadcrumbs;
if (!b->irq_armed) if (!b->irq_armed)
...@@ -108,9 +109,10 @@ static void intel_breadcrumbs_hangcheck(unsigned long data) ...@@ -108,9 +109,10 @@ static void intel_breadcrumbs_hangcheck(unsigned long data)
} }
} }
static void intel_breadcrumbs_fake_irq(unsigned long data) static void intel_breadcrumbs_fake_irq(struct timer_list *t)
{ {
struct intel_engine_cs *engine = (struct intel_engine_cs *)data; struct intel_engine_cs *engine = from_timer(engine, t,
breadcrumbs.fake_irq);
struct intel_breadcrumbs *b = &engine->breadcrumbs; struct intel_breadcrumbs *b = &engine->breadcrumbs;
/* The timer persists in case we cannot enable interrupts, /* The timer persists in case we cannot enable interrupts,
...@@ -787,12 +789,8 @@ int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine) ...@@ -787,12 +789,8 @@ int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
spin_lock_init(&b->rb_lock); spin_lock_init(&b->rb_lock);
spin_lock_init(&b->irq_lock); spin_lock_init(&b->irq_lock);
setup_timer(&b->fake_irq, timer_setup(&b->fake_irq, intel_breadcrumbs_fake_irq, 0);
intel_breadcrumbs_fake_irq, timer_setup(&b->hangcheck, intel_breadcrumbs_hangcheck, 0);
(unsigned long)engine);
setup_timer(&b->hangcheck,
intel_breadcrumbs_hangcheck,
(unsigned long)engine);
/* Spawn a thread to provide a common bottom-half for all signals. /* Spawn a thread to provide a common bottom-half for all signals.
* As this is an asynchronous interface we cannot steal the current * As this is an asynchronous interface we cannot steal the current
......
...@@ -343,11 +343,26 @@ intel_crt_mode_valid(struct drm_connector *connector, ...@@ -343,11 +343,26 @@ intel_crt_mode_valid(struct drm_connector *connector,
static bool intel_crt_compute_config(struct intel_encoder *encoder, static bool intel_crt_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config, struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state) struct drm_connector_state *conn_state)
{
return true;
}
static bool pch_crt_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
pipe_config->has_pch_encoder = true;
return true;
}
static bool hsw_crt_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{ {
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
if (HAS_PCH_SPLIT(dev_priv)) pipe_config->has_pch_encoder = true;
pipe_config->has_pch_encoder = true;
/* LPT FDI RX only supports 8bpc. */ /* LPT FDI RX only supports 8bpc. */
if (HAS_PCH_LPT(dev_priv)) { if (HAS_PCH_LPT(dev_priv)) {
...@@ -360,8 +375,7 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder, ...@@ -360,8 +375,7 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
} }
/* FDI must always be 2.7 GHz */ /* FDI must always be 2.7 GHz */
if (HAS_DDI(dev_priv)) pipe_config->port_clock = 135000 * 2;
pipe_config->port_clock = 135000 * 2;
return true; return true;
} }
...@@ -959,11 +973,11 @@ void intel_crt_init(struct drm_i915_private *dev_priv) ...@@ -959,11 +973,11 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
!dmi_check_system(intel_spurious_crt_detect)) !dmi_check_system(intel_spurious_crt_detect))
crt->base.hpd_pin = HPD_CRT; crt->base.hpd_pin = HPD_CRT;
crt->base.compute_config = intel_crt_compute_config;
if (HAS_DDI(dev_priv)) { if (HAS_DDI(dev_priv)) {
crt->base.port = PORT_E; crt->base.port = PORT_E;
crt->base.get_config = hsw_crt_get_config; crt->base.get_config = hsw_crt_get_config;
crt->base.get_hw_state = intel_ddi_get_hw_state; crt->base.get_hw_state = intel_ddi_get_hw_state;
crt->base.compute_config = hsw_crt_compute_config;
crt->base.pre_pll_enable = hsw_pre_pll_enable_crt; crt->base.pre_pll_enable = hsw_pre_pll_enable_crt;
crt->base.pre_enable = hsw_pre_enable_crt; crt->base.pre_enable = hsw_pre_enable_crt;
crt->base.enable = hsw_enable_crt; crt->base.enable = hsw_enable_crt;
...@@ -971,9 +985,11 @@ void intel_crt_init(struct drm_i915_private *dev_priv) ...@@ -971,9 +985,11 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
crt->base.post_disable = hsw_post_disable_crt; crt->base.post_disable = hsw_post_disable_crt;
} else { } else {
if (HAS_PCH_SPLIT(dev_priv)) { if (HAS_PCH_SPLIT(dev_priv)) {
crt->base.compute_config = pch_crt_compute_config;
crt->base.disable = pch_disable_crt; crt->base.disable = pch_disable_crt;
crt->base.post_disable = pch_post_disable_crt; crt->base.post_disable = pch_post_disable_crt;
} else { } else {
crt->base.compute_config = intel_crt_compute_config;
crt->base.disable = intel_disable_crt; crt->base.disable = intel_disable_crt;
} }
crt->base.port = PORT_NONE; crt->base.port = PORT_NONE;
......
...@@ -52,10 +52,6 @@ MODULE_FIRMWARE(I915_CSR_SKL); ...@@ -52,10 +52,6 @@ MODULE_FIRMWARE(I915_CSR_SKL);
MODULE_FIRMWARE(I915_CSR_BXT); MODULE_FIRMWARE(I915_CSR_BXT);
#define BXT_CSR_VERSION_REQUIRED CSR_VERSION(1, 7) #define BXT_CSR_VERSION_REQUIRED CSR_VERSION(1, 7)
#define FIRMWARE_URL "https://01.org/linuxgraphics/downloads/firmware"
#define CSR_MAX_FW_SIZE 0x2FFF #define CSR_MAX_FW_SIZE 0x2FFF
#define CSR_DEFAULT_FW_OFFSET 0xFFFFFFFF #define CSR_DEFAULT_FW_OFFSET 0xFFFFFFFF
...@@ -291,7 +287,8 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv, ...@@ -291,7 +287,8 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
css_header = (struct intel_css_header *)fw->data; css_header = (struct intel_css_header *)fw->data;
if (sizeof(struct intel_css_header) != if (sizeof(struct intel_css_header) !=
(css_header->header_len * 4)) { (css_header->header_len * 4)) {
DRM_ERROR("Firmware has wrong CSS header length %u bytes\n", DRM_ERROR("DMC firmware has wrong CSS header length "
"(%u bytes)\n",
(css_header->header_len * 4)); (css_header->header_len * 4));
return NULL; return NULL;
} }
...@@ -315,7 +312,7 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv, ...@@ -315,7 +312,7 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
if (csr->version != required_version) { if (csr->version != required_version) {
DRM_INFO("Refusing to load DMC firmware v%u.%u," DRM_INFO("Refusing to load DMC firmware v%u.%u,"
" please use v%u.%u [" FIRMWARE_URL "].\n", " please use v%u.%u\n",
CSR_VERSION_MAJOR(csr->version), CSR_VERSION_MAJOR(csr->version),
CSR_VERSION_MINOR(csr->version), CSR_VERSION_MINOR(csr->version),
CSR_VERSION_MAJOR(required_version), CSR_VERSION_MAJOR(required_version),
...@@ -330,7 +327,8 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv, ...@@ -330,7 +327,8 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
&fw->data[readcount]; &fw->data[readcount];
if (sizeof(struct intel_package_header) != if (sizeof(struct intel_package_header) !=
(package_header->header_len * 4)) { (package_header->header_len * 4)) {
DRM_ERROR("Firmware has wrong package header length %u bytes\n", DRM_ERROR("DMC firmware has wrong package header length "
"(%u bytes)\n",
(package_header->header_len * 4)); (package_header->header_len * 4));
return NULL; return NULL;
} }
...@@ -351,7 +349,7 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv, ...@@ -351,7 +349,7 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
dmc_offset = package_header->fw_info[i].offset; dmc_offset = package_header->fw_info[i].offset;
} }
if (dmc_offset == CSR_DEFAULT_FW_OFFSET) { if (dmc_offset == CSR_DEFAULT_FW_OFFSET) {
DRM_ERROR("Firmware not supported for %c stepping\n", DRM_ERROR("DMC firmware not supported for %c stepping\n",
si->stepping); si->stepping);
return NULL; return NULL;
} }
...@@ -360,7 +358,8 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv, ...@@ -360,7 +358,8 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
/* Extract dmc_header information. */ /* Extract dmc_header information. */
dmc_header = (struct intel_dmc_header *)&fw->data[readcount]; dmc_header = (struct intel_dmc_header *)&fw->data[readcount];
if (sizeof(struct intel_dmc_header) != (dmc_header->header_len)) { if (sizeof(struct intel_dmc_header) != (dmc_header->header_len)) {
DRM_ERROR("Firmware has wrong dmc header length %u bytes\n", DRM_ERROR("DMC firmware has wrong dmc header length "
"(%u bytes)\n",
(dmc_header->header_len)); (dmc_header->header_len));
return NULL; return NULL;
} }
...@@ -368,7 +367,7 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv, ...@@ -368,7 +367,7 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
/* Cache the dmc header info. */ /* Cache the dmc header info. */
if (dmc_header->mmio_count > ARRAY_SIZE(csr->mmioaddr)) { if (dmc_header->mmio_count > ARRAY_SIZE(csr->mmioaddr)) {
DRM_ERROR("Firmware has wrong mmio count %u\n", DRM_ERROR("DMC firmware has wrong mmio count %u\n",
dmc_header->mmio_count); dmc_header->mmio_count);
return NULL; return NULL;
} }
...@@ -376,7 +375,7 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv, ...@@ -376,7 +375,7 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
for (i = 0; i < dmc_header->mmio_count; i++) { for (i = 0; i < dmc_header->mmio_count; i++) {
if (dmc_header->mmioaddr[i] < CSR_MMIO_START_RANGE || if (dmc_header->mmioaddr[i] < CSR_MMIO_START_RANGE ||
dmc_header->mmioaddr[i] > CSR_MMIO_END_RANGE) { dmc_header->mmioaddr[i] > CSR_MMIO_END_RANGE) {
DRM_ERROR(" Firmware has wrong mmio address 0x%x\n", DRM_ERROR("DMC firmware has wrong mmio address 0x%x\n",
dmc_header->mmioaddr[i]); dmc_header->mmioaddr[i]);
return NULL; return NULL;
} }
...@@ -387,7 +386,7 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv, ...@@ -387,7 +386,7 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
/* fw_size is in dwords, so multiplied by 4 to convert into bytes. */ /* fw_size is in dwords, so multiplied by 4 to convert into bytes. */
nbytes = dmc_header->fw_size * 4; nbytes = dmc_header->fw_size * 4;
if (nbytes > CSR_MAX_FW_SIZE) { if (nbytes > CSR_MAX_FW_SIZE) {
DRM_ERROR("CSR firmware too big (%u) bytes\n", nbytes); DRM_ERROR("DMC firmware too big (%u bytes)\n", nbytes);
return NULL; return NULL;
} }
csr->dmc_fw_size = dmc_header->fw_size; csr->dmc_fw_size = dmc_header->fw_size;
...@@ -425,9 +424,11 @@ static void csr_load_work_fn(struct work_struct *work) ...@@ -425,9 +424,11 @@ static void csr_load_work_fn(struct work_struct *work)
CSR_VERSION_MINOR(csr->version)); CSR_VERSION_MINOR(csr->version));
} else { } else {
dev_notice(dev_priv->drm.dev, dev_notice(dev_priv->drm.dev,
"Failed to load DMC firmware" "Failed to load DMC firmware %s."
" [" FIRMWARE_URL "]," " Disabling runtime power management.\n",
" disabling runtime power management.\n"); csr->fw_path);
dev_notice(dev_priv->drm.dev, "DMC firmware homepage: %s",
INTEL_UC_FIRMWARE_URL);
} }
release_firmware(fw); release_firmware(fw);
......
此差异已折叠。
...@@ -2847,7 +2847,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, ...@@ -2847,7 +2847,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
if (intel_plane_ggtt_offset(state) == plane_config->base) { if (intel_plane_ggtt_offset(state) == plane_config->base) {
fb = c->primary->fb; fb = c->primary->fb;
drm_framebuffer_reference(fb); drm_framebuffer_get(fb);
goto valid_fb; goto valid_fb;
} }
} }
...@@ -2878,7 +2878,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, ...@@ -2878,7 +2878,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
intel_crtc->pipe, PTR_ERR(intel_state->vma)); intel_crtc->pipe, PTR_ERR(intel_state->vma));
intel_state->vma = NULL; intel_state->vma = NULL;
drm_framebuffer_unreference(fb); drm_framebuffer_put(fb);
return; return;
} }
...@@ -2899,7 +2899,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, ...@@ -2899,7 +2899,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
if (i915_gem_object_is_tiled(obj)) if (i915_gem_object_is_tiled(obj))
dev_priv->preserve_bios_swizzle = true; dev_priv->preserve_bios_swizzle = true;
drm_framebuffer_reference(fb); drm_framebuffer_get(fb);
primary->fb = primary->state->fb = fb; primary->fb = primary->state->fb = fb;
primary->crtc = primary->state->crtc = &intel_crtc->base; primary->crtc = primary->state->crtc = &intel_crtc->base;
...@@ -3289,7 +3289,6 @@ static void i9xx_update_primary_plane(struct intel_plane *primary, ...@@ -3289,7 +3289,6 @@ static void i9xx_update_primary_plane(struct intel_plane *primary,
const struct intel_plane_state *plane_state) const struct intel_plane_state *plane_state)
{ {
struct drm_i915_private *dev_priv = to_i915(primary->base.dev); struct drm_i915_private *dev_priv = to_i915(primary->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
const struct drm_framebuffer *fb = plane_state->base.fb; const struct drm_framebuffer *fb = plane_state->base.fb;
enum plane plane = primary->plane; enum plane plane = primary->plane;
u32 linear_offset; u32 linear_offset;
...@@ -3298,16 +3297,14 @@ static void i9xx_update_primary_plane(struct intel_plane *primary, ...@@ -3298,16 +3297,14 @@ static void i9xx_update_primary_plane(struct intel_plane *primary,
int x = plane_state->main.x; int x = plane_state->main.x;
int y = plane_state->main.y; int y = plane_state->main.y;
unsigned long irqflags; unsigned long irqflags;
u32 dspaddr_offset;
linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
if (INTEL_GEN(dev_priv) >= 4) if (INTEL_GEN(dev_priv) >= 4)
crtc->dspaddr_offset = plane_state->main.offset; dspaddr_offset = plane_state->main.offset;
else else
crtc->dspaddr_offset = linear_offset; dspaddr_offset = linear_offset;
crtc->adjusted_x = x;
crtc->adjusted_y = y;
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
...@@ -3333,18 +3330,18 @@ static void i9xx_update_primary_plane(struct intel_plane *primary, ...@@ -3333,18 +3330,18 @@ static void i9xx_update_primary_plane(struct intel_plane *primary,
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
I915_WRITE_FW(DSPSURF(plane), I915_WRITE_FW(DSPSURF(plane),
intel_plane_ggtt_offset(plane_state) + intel_plane_ggtt_offset(plane_state) +
crtc->dspaddr_offset); dspaddr_offset);
I915_WRITE_FW(DSPOFFSET(plane), (y << 16) | x); I915_WRITE_FW(DSPOFFSET(plane), (y << 16) | x);
} else if (INTEL_GEN(dev_priv) >= 4) { } else if (INTEL_GEN(dev_priv) >= 4) {
I915_WRITE_FW(DSPSURF(plane), I915_WRITE_FW(DSPSURF(plane),
intel_plane_ggtt_offset(plane_state) + intel_plane_ggtt_offset(plane_state) +
crtc->dspaddr_offset); dspaddr_offset);
I915_WRITE_FW(DSPTILEOFF(plane), (y << 16) | x); I915_WRITE_FW(DSPTILEOFF(plane), (y << 16) | x);
I915_WRITE_FW(DSPLINOFF(plane), linear_offset); I915_WRITE_FW(DSPLINOFF(plane), linear_offset);
} else { } else {
I915_WRITE_FW(DSPADDR(plane), I915_WRITE_FW(DSPADDR(plane),
intel_plane_ggtt_offset(plane_state) + intel_plane_ggtt_offset(plane_state) +
crtc->dspaddr_offset); dspaddr_offset);
} }
POSTING_READ_FW(reg); POSTING_READ_FW(reg);
...@@ -3544,100 +3541,6 @@ u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state, ...@@ -3544,100 +3541,6 @@ u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
return plane_ctl; return plane_ctl;
} }
static void skylake_update_primary_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
const struct drm_framebuffer *fb = plane_state->base.fb;
enum plane_id plane_id = plane->id;
enum pipe pipe = plane->pipe;
u32 plane_ctl = plane_state->ctl;
unsigned int rotation = plane_state->base.rotation;
u32 stride = skl_plane_stride(fb, 0, rotation);
u32 aux_stride = skl_plane_stride(fb, 1, rotation);
u32 surf_addr = plane_state->main.offset;
int scaler_id = plane_state->scaler_id;
int src_x = plane_state->main.x;
int src_y = plane_state->main.y;
int src_w = drm_rect_width(&plane_state->base.src) >> 16;
int src_h = drm_rect_height(&plane_state->base.src) >> 16;
int dst_x = plane_state->base.dst.x1;
int dst_y = plane_state->base.dst.y1;
int dst_w = drm_rect_width(&plane_state->base.dst);
int dst_h = drm_rect_height(&plane_state->base.dst);
unsigned long irqflags;
/* Sizes are 0 based */
src_w--;
src_h--;
dst_w--;
dst_h--;
crtc->dspaddr_offset = surf_addr;
crtc->adjusted_x = src_x;
crtc->adjusted_y = src_y;
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
if (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) {
I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id),
PLANE_COLOR_PIPE_GAMMA_ENABLE |
PLANE_COLOR_PIPE_CSC_ENABLE |
PLANE_COLOR_PLANE_GAMMA_DISABLE);
}
I915_WRITE_FW(PLANE_CTL(pipe, plane_id), plane_ctl);
I915_WRITE_FW(PLANE_OFFSET(pipe, plane_id), (src_y << 16) | src_x);
I915_WRITE_FW(PLANE_STRIDE(pipe, plane_id), stride);
I915_WRITE_FW(PLANE_SIZE(pipe, plane_id), (src_h << 16) | src_w);
I915_WRITE_FW(PLANE_AUX_DIST(pipe, plane_id),
(plane_state->aux.offset - surf_addr) | aux_stride);
I915_WRITE_FW(PLANE_AUX_OFFSET(pipe, plane_id),
(plane_state->aux.y << 16) | plane_state->aux.x);
if (scaler_id >= 0) {
uint32_t ps_ctrl = 0;
WARN_ON(!dst_w || !dst_h);
ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(plane_id) |
crtc_state->scaler_state.scalers[scaler_id].mode;
I915_WRITE_FW(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
I915_WRITE_FW(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
I915_WRITE_FW(SKL_PS_WIN_POS(pipe, scaler_id), (dst_x << 16) | dst_y);
I915_WRITE_FW(SKL_PS_WIN_SZ(pipe, scaler_id), (dst_w << 16) | dst_h);
I915_WRITE_FW(PLANE_POS(pipe, plane_id), 0);
} else {
I915_WRITE_FW(PLANE_POS(pipe, plane_id), (dst_y << 16) | dst_x);
}
I915_WRITE_FW(PLANE_SURF(pipe, plane_id),
intel_plane_ggtt_offset(plane_state) + surf_addr);
POSTING_READ_FW(PLANE_SURF(pipe, plane_id));
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
/*
 * skylake_disable_primary_plane - turn off a SKL+ primary plane
 * @primary: the plane to disable
 * @crtc: CRTC the plane is attached to (not referenced in this function)
 *
 * Clears the plane's control register and its surface-address register,
 * then issues a posting read on PLANE_SURF to flush the MMIO writes to
 * the hardware.  All register accesses use the raw _FW accessors and are
 * serialized by the uncore spinlock, so the caller is presumably expected
 * to already hold any forcewake/power references needed for these
 * registers -- TODO(review): confirm against the matching update_plane
 * path.
 */
static void skylake_disable_primary_plane(struct intel_plane *primary,
					  struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(primary->base.dev);
	enum plane_id plane_id = primary->id;
	enum pipe pipe = primary->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* Clear the control register first, then the surface address. */
	I915_WRITE_FW(PLANE_CTL(pipe, plane_id), 0);
	I915_WRITE_FW(PLANE_SURF(pipe, plane_id), 0);

	/* Posting read to make sure the writes have reached the hardware. */
	POSTING_READ_FW(PLANE_SURF(pipe, plane_id));

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static int static int
__intel_display_resume(struct drm_device *dev, __intel_display_resume(struct drm_device *dev,
struct drm_atomic_state *state, struct drm_atomic_state *state,
...@@ -6139,6 +6042,19 @@ struct intel_connector *intel_connector_alloc(void) ...@@ -6139,6 +6042,19 @@ struct intel_connector *intel_connector_alloc(void)
return connector; return connector;
} }
/*
* Free the bits allocated by intel_connector_alloc.
* This should only be used after intel_connector_alloc has returned
* successfully, and before drm_connector_init returns successfully.
* Otherwise the destroy callbacks for the connector and the state should
* take care of proper cleanup/free
*/
void intel_connector_free(struct intel_connector *connector)
{
kfree(to_intel_digital_connector_state(connector->base.state));
kfree(connector);
}
/* Simple connector->get_hw_state implementation for encoders that support only /* Simple connector->get_hw_state implementation for encoders that support only
* one connector and no cloning and hence the encoder state determines the state * one connector and no cloning and hence the encoder state determines the state
* of the connector. */ * of the connector. */
...@@ -6522,11 +6438,9 @@ static void i9xx_update_pll_dividers(struct intel_crtc *crtc, ...@@ -6522,11 +6438,9 @@ static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
crtc_state->dpll_hw_state.fp0 = fp; crtc_state->dpll_hw_state.fp0 = fp;
crtc->lowfreq_avail = false;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
reduced_clock) { reduced_clock) {
crtc_state->dpll_hw_state.fp1 = fp2; crtc_state->dpll_hw_state.fp1 = fp2;
crtc->lowfreq_avail = true;
} else { } else {
crtc_state->dpll_hw_state.fp1 = fp; crtc_state->dpll_hw_state.fp1 = fp;
} }
...@@ -7221,15 +7135,6 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc) ...@@ -7221,15 +7135,6 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
} }
} }
if (HAS_PIPE_CXSR(dev_priv)) {
if (intel_crtc->lowfreq_avail) {
DRM_DEBUG_KMS("enabling CxSR downclocking\n");
pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
} else {
DRM_DEBUG_KMS("disabling CxSR downclocking\n");
}
}
if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) { if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
if (INTEL_GEN(dev_priv) < 4 || if (INTEL_GEN(dev_priv) < 4 ||
intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO)) intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
...@@ -8365,8 +8270,6 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc, ...@@ -8365,8 +8270,6 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
memset(&crtc_state->dpll_hw_state, 0, memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state)); sizeof(crtc_state->dpll_hw_state));
crtc->lowfreq_avail = false;
/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */ /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
if (!crtc_state->has_pch_encoder) if (!crtc_state->has_pch_encoder)
return 0; return 0;
...@@ -9025,8 +8928,6 @@ static int haswell_crtc_compute_clock(struct intel_crtc *crtc, ...@@ -9025,8 +8928,6 @@ static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
} }
} }
crtc->lowfreq_avail = false;
return 0; return 0;
} }
...@@ -9846,7 +9747,7 @@ mode_fits_in_fbdev(struct drm_device *dev, ...@@ -9846,7 +9747,7 @@ mode_fits_in_fbdev(struct drm_device *dev,
if (obj->base.size < mode->vdisplay * fb->pitches[0]) if (obj->base.size < mode->vdisplay * fb->pitches[0])
return NULL; return NULL;
drm_framebuffer_reference(fb); drm_framebuffer_get(fb);
return fb; return fb;
#else #else
return NULL; return NULL;
...@@ -10027,7 +9928,7 @@ int intel_get_load_detect_pipe(struct drm_connector *connector, ...@@ -10027,7 +9928,7 @@ int intel_get_load_detect_pipe(struct drm_connector *connector,
if (ret) if (ret)
goto fail; goto fail;
drm_framebuffer_unreference(fb); drm_framebuffer_put(fb);
ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode); ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
if (ret) if (ret)
...@@ -10662,6 +10563,52 @@ intel_dump_m_n_config(struct intel_crtc_state *pipe_config, char *id, ...@@ -10662,6 +10563,52 @@ intel_dump_m_n_config(struct intel_crtc_state *pipe_config, char *id,
m_n->link_m, m_n->link_n, m_n->tu); m_n->link_m, m_n->link_n, m_n->tu);
} }
#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x
static const char * const output_type_str[] = {
OUTPUT_TYPE(UNUSED),
OUTPUT_TYPE(ANALOG),
OUTPUT_TYPE(DVO),
OUTPUT_TYPE(SDVO),
OUTPUT_TYPE(LVDS),
OUTPUT_TYPE(TVOUT),
OUTPUT_TYPE(HDMI),
OUTPUT_TYPE(DP),
OUTPUT_TYPE(EDP),
OUTPUT_TYPE(DSI),
OUTPUT_TYPE(UNKNOWN),
OUTPUT_TYPE(DP_MST),
};
#undef OUTPUT_TYPE
static void snprintf_output_types(char *buf, size_t len,
unsigned int output_types)
{
char *str = buf;
int i;
str[0] = '\0';
for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
int r;
if ((output_types & BIT(i)) == 0)
continue;
r = snprintf(str, len, "%s%s",
str != buf ? "," : "", output_type_str[i]);
if (r >= len)
break;
str += r;
len -= r;
output_types &= ~BIT(i);
}
WARN_ON_ONCE(output_types != 0);
}
static void intel_dump_pipe_config(struct intel_crtc *crtc, static void intel_dump_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config, struct intel_crtc_state *pipe_config,
const char *context) const char *context)
...@@ -10672,10 +10619,15 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc, ...@@ -10672,10 +10619,15 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
struct intel_plane *intel_plane; struct intel_plane *intel_plane;
struct intel_plane_state *state; struct intel_plane_state *state;
struct drm_framebuffer *fb; struct drm_framebuffer *fb;
char buf[64];
DRM_DEBUG_KMS("[CRTC:%d:%s]%s\n", DRM_DEBUG_KMS("[CRTC:%d:%s]%s\n",
crtc->base.base.id, crtc->base.name, context); crtc->base.base.id, crtc->base.name, context);
snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
DRM_DEBUG_KMS("output_types: %s (0x%x)\n",
buf, pipe_config->output_types);
DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n", DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
transcoder_name(pipe_config->cpu_transcoder), transcoder_name(pipe_config->cpu_transcoder),
pipe_config->pipe_bpp, pipe_config->dither); pipe_config->pipe_bpp, pipe_config->dither);
...@@ -13229,8 +13181,8 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) ...@@ -13229,8 +13181,8 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
num_formats = ARRAY_SIZE(skl_primary_formats); num_formats = ARRAY_SIZE(skl_primary_formats);
modifiers = skl_format_modifiers_ccs; modifiers = skl_format_modifiers_ccs;
primary->update_plane = skylake_update_primary_plane; primary->update_plane = skl_update_plane;
primary->disable_plane = skylake_disable_primary_plane; primary->disable_plane = skl_disable_plane;
} else if (INTEL_GEN(dev_priv) >= 9) { } else if (INTEL_GEN(dev_priv) >= 9) {
intel_primary_formats = skl_primary_formats; intel_primary_formats = skl_primary_formats;
num_formats = ARRAY_SIZE(skl_primary_formats); num_formats = ARRAY_SIZE(skl_primary_formats);
...@@ -13239,8 +13191,8 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) ...@@ -13239,8 +13191,8 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
else else
modifiers = skl_format_modifiers_noccs; modifiers = skl_format_modifiers_noccs;
primary->update_plane = skylake_update_primary_plane; primary->update_plane = skl_update_plane;
primary->disable_plane = skylake_disable_primary_plane; primary->disable_plane = skl_disable_plane;
} else if (INTEL_GEN(dev_priv) >= 4) { } else if (INTEL_GEN(dev_priv) >= 4) {
intel_primary_formats = i965_primary_formats; intel_primary_formats = i965_primary_formats;
num_formats = ARRAY_SIZE(i965_primary_formats); num_formats = ARRAY_SIZE(i965_primary_formats);
......
...@@ -1007,7 +1007,7 @@ static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp, ...@@ -1007,7 +1007,7 @@ static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
else else
precharge = 5; precharge = 5;
if (IS_BROADWELL(dev_priv) && intel_dig_port->port == PORT_A) if (IS_BROADWELL(dev_priv))
timeout = DP_AUX_CH_CTL_TIME_OUT_600us; timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
else else
timeout = DP_AUX_CH_CTL_TIME_OUT_400us; timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
...@@ -1032,7 +1032,7 @@ static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp, ...@@ -1032,7 +1032,7 @@ static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
DP_AUX_CH_CTL_DONE | DP_AUX_CH_CTL_DONE |
(has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) | (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
DP_AUX_CH_CTL_TIME_OUT_ERROR | DP_AUX_CH_CTL_TIME_OUT_ERROR |
DP_AUX_CH_CTL_TIME_OUT_1600us | DP_AUX_CH_CTL_TIME_OUT_MAX |
DP_AUX_CH_CTL_RECEIVE_ERROR | DP_AUX_CH_CTL_RECEIVE_ERROR |
(send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) | DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
...@@ -1832,6 +1832,8 @@ intel_dp_compute_config(struct intel_encoder *encoder, ...@@ -1832,6 +1832,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
if (!HAS_DDI(dev_priv)) if (!HAS_DDI(dev_priv))
intel_dp_set_clock(encoder, pipe_config); intel_dp_set_clock(encoder, pipe_config);
intel_psr_compute_config(intel_dp, pipe_config);
return true; return true;
} }
...@@ -3153,9 +3155,7 @@ intel_dp_voltage_max(struct intel_dp *intel_dp) ...@@ -3153,9 +3155,7 @@ intel_dp_voltage_max(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
enum port port = dp_to_dig_port(intel_dp)->port; enum port port = dp_to_dig_port(intel_dp)->port;
if (IS_GEN9_LP(dev_priv)) if (INTEL_GEN(dev_priv) >= 9) {
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
else if (INTEL_GEN(dev_priv) >= 9) {
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
return intel_ddi_dp_voltage_max(encoder); return intel_ddi_dp_voltage_max(encoder);
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
......
...@@ -454,32 +454,52 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo ...@@ -454,32 +454,52 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr); struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev; struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_connector *intel_connector; struct intel_connector *intel_connector;
struct drm_connector *connector; struct drm_connector *connector;
int i; enum pipe pipe;
int ret;
intel_connector = intel_connector_alloc(); intel_connector = intel_connector_alloc();
if (!intel_connector) if (!intel_connector)
return NULL; return NULL;
connector = &intel_connector->base; connector = &intel_connector->base;
drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs, DRM_MODE_CONNECTOR_DisplayPort); ret = drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs,
DRM_MODE_CONNECTOR_DisplayPort);
if (ret) {
intel_connector_free(intel_connector);
return NULL;
}
drm_connector_helper_add(connector, &intel_dp_mst_connector_helper_funcs); drm_connector_helper_add(connector, &intel_dp_mst_connector_helper_funcs);
intel_connector->get_hw_state = intel_dp_mst_get_hw_state; intel_connector->get_hw_state = intel_dp_mst_get_hw_state;
intel_connector->mst_port = intel_dp; intel_connector->mst_port = intel_dp;
intel_connector->port = port; intel_connector->port = port;
for (i = PIPE_A; i <= PIPE_C; i++) { for_each_pipe(dev_priv, pipe) {
drm_mode_connector_attach_encoder(&intel_connector->base, struct drm_encoder *enc =
&intel_dp->mst_encoders[i]->base.base); &intel_dp->mst_encoders[pipe]->base.base;
ret = drm_mode_connector_attach_encoder(&intel_connector->base,
enc);
if (ret)
goto err;
} }
drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0); drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0);
drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0); drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0);
drm_mode_connector_set_path_property(connector, pathprop); ret = drm_mode_connector_set_path_property(connector, pathprop);
if (ret)
goto err;
return connector; return connector;
err:
drm_connector_cleanup(connector);
return NULL;
} }
static void intel_dp_register_mst_connector(struct drm_connector *connector) static void intel_dp_register_mst_connector(struct drm_connector *connector)
...@@ -569,11 +589,12 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum ...@@ -569,11 +589,12 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum
static bool static bool
intel_dp_create_fake_mst_encoders(struct intel_digital_port *intel_dig_port) intel_dp_create_fake_mst_encoders(struct intel_digital_port *intel_dig_port)
{ {
int i;
struct intel_dp *intel_dp = &intel_dig_port->dp; struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
enum pipe pipe;
for (i = PIPE_A; i <= PIPE_C; i++) for_each_pipe(dev_priv, pipe)
intel_dp->mst_encoders[i] = intel_dp_create_fake_mst_encoder(intel_dig_port, i); intel_dp->mst_encoders[pipe] = intel_dp_create_fake_mst_encoder(intel_dig_port, pipe);
return true; return true;
} }
......
...@@ -718,6 +718,9 @@ struct intel_crtc_state { ...@@ -718,6 +718,9 @@ struct intel_crtc_state {
struct intel_link_m_n dp_m2_n2; struct intel_link_m_n dp_m2_n2;
bool has_drrs; bool has_drrs;
bool has_psr;
bool has_psr2;
/* /*
* Frequence the dpll for the port should run at. Differs from the * Frequence the dpll for the port should run at. Differs from the
* adjusted dotclock e.g. for DP or 12bpc hdmi mode. This is also * adjusted dotclock e.g. for DP or 12bpc hdmi mode. This is also
...@@ -800,18 +803,10 @@ struct intel_crtc { ...@@ -800,18 +803,10 @@ struct intel_crtc {
* some outputs connected to this crtc. * some outputs connected to this crtc.
*/ */
bool active; bool active;
bool lowfreq_avail;
u8 plane_ids_mask; u8 plane_ids_mask;
unsigned long long enabled_power_domains; unsigned long long enabled_power_domains;
struct intel_overlay *overlay; struct intel_overlay *overlay;
/* Display surface base address adjustement for pageflips. Note that on
* gen4+ this only adjusts up to a tile, offsets within a tile are
* handled in the hw itself (with the TILEOFF register). */
u32 dspaddr_offset;
int adjusted_x;
int adjusted_y;
struct intel_crtc_state *config; struct intel_crtc_state *config;
/* global reset count when the last flip was submitted */ /* global reset count when the last flip was submitted */
...@@ -1066,7 +1061,7 @@ struct intel_digital_port { ...@@ -1066,7 +1061,7 @@ struct intel_digital_port {
void (*write_infoframe)(struct drm_encoder *encoder, void (*write_infoframe)(struct drm_encoder *encoder,
const struct intel_crtc_state *crtc_state, const struct intel_crtc_state *crtc_state,
enum hdmi_infoframe_type type, unsigned int type,
const void *frame, ssize_t len); const void *frame, ssize_t len);
void (*set_infoframes)(struct drm_encoder *encoder, void (*set_infoframes)(struct drm_encoder *encoder,
bool enable, bool enable,
...@@ -1360,6 +1355,7 @@ void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv); ...@@ -1360,6 +1355,7 @@ void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv);
void intel_encoder_destroy(struct drm_encoder *encoder); void intel_encoder_destroy(struct drm_encoder *encoder);
int intel_connector_init(struct intel_connector *); int intel_connector_init(struct intel_connector *);
struct intel_connector *intel_connector_alloc(void); struct intel_connector *intel_connector_alloc(void);
void intel_connector_free(struct intel_connector *connector);
bool intel_connector_get_hw_state(struct intel_connector *connector); bool intel_connector_get_hw_state(struct intel_connector *connector);
void intel_connector_attach_encoder(struct intel_connector *connector, void intel_connector_attach_encoder(struct intel_connector *connector,
struct intel_encoder *encoder); struct intel_encoder *encoder);
...@@ -1764,6 +1760,8 @@ void intel_psr_flush(struct drm_i915_private *dev_priv, ...@@ -1764,6 +1760,8 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
void intel_psr_init(struct drm_i915_private *dev_priv); void intel_psr_init(struct drm_i915_private *dev_priv);
void intel_psr_single_frame_update(struct drm_i915_private *dev_priv, void intel_psr_single_frame_update(struct drm_i915_private *dev_priv,
unsigned frontbuffer_bits); unsigned frontbuffer_bits);
void intel_psr_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state);
/* intel_runtime_pm.c */ /* intel_runtime_pm.c */
int intel_power_domains_init(struct drm_i915_private *); int intel_power_domains_init(struct drm_i915_private *);
...@@ -1923,6 +1921,10 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data, ...@@ -1923,6 +1921,10 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv); struct drm_file *file_priv);
void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state); void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state);
void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state); void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state);
void skl_update_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
void skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc);
/* intel_tv.c */ /* intel_tv.c */
void intel_tv_init(struct drm_i915_private *dev_priv); void intel_tv_init(struct drm_i915_private *dev_priv);
......
...@@ -1751,42 +1751,13 @@ void intel_dsi_init(struct drm_i915_private *dev_priv) ...@@ -1751,42 +1751,13 @@ void intel_dsi_init(struct drm_i915_private *dev_priv)
else else
intel_encoder->crtc_mask = BIT(PIPE_B); intel_encoder->crtc_mask = BIT(PIPE_B);
if (dev_priv->vbt.dsi.config->dual_link) { if (dev_priv->vbt.dsi.config->dual_link)
intel_dsi->ports = BIT(PORT_A) | BIT(PORT_C); intel_dsi->ports = BIT(PORT_A) | BIT(PORT_C);
else
switch (dev_priv->vbt.dsi.config->dl_dcs_backlight_ports) {
case DL_DCS_PORT_A:
intel_dsi->dcs_backlight_ports = BIT(PORT_A);
break;
case DL_DCS_PORT_C:
intel_dsi->dcs_backlight_ports = BIT(PORT_C);
break;
default:
case DL_DCS_PORT_A_AND_C:
intel_dsi->dcs_backlight_ports = BIT(PORT_A) | BIT(PORT_C);
break;
}
switch (dev_priv->vbt.dsi.config->dl_dcs_cabc_ports) {
case DL_DCS_PORT_A:
intel_dsi->dcs_cabc_ports = BIT(PORT_A);
break;
case DL_DCS_PORT_C:
intel_dsi->dcs_cabc_ports = BIT(PORT_C);
break;
default:
case DL_DCS_PORT_A_AND_C:
intel_dsi->dcs_cabc_ports = BIT(PORT_A) | BIT(PORT_C);
break;
}
} else {
intel_dsi->ports = BIT(port); intel_dsi->ports = BIT(port);
intel_dsi->dcs_backlight_ports = BIT(port);
intel_dsi->dcs_cabc_ports = BIT(port);
}
if (!dev_priv->vbt.dsi.config->cabc_supported) intel_dsi->dcs_backlight_ports = dev_priv->vbt.dsi.bl_ports;
intel_dsi->dcs_cabc_ports = 0; intel_dsi->dcs_cabc_ports = dev_priv->vbt.dsi.cabc_ports;
/* Create a DSI host (and a device) for each port. */ /* Create a DSI host (and a device) for each port. */
for_each_dsi_port(port, intel_dsi->ports) { for_each_dsi_port(port, intel_dsi->ports) {
......
...@@ -25,6 +25,7 @@ ...@@ -25,6 +25,7 @@
#include <drm/drm_print.h> #include <drm/drm_print.h>
#include "i915_drv.h" #include "i915_drv.h"
#include "i915_vgpu.h"
#include "intel_ringbuffer.h" #include "intel_ringbuffer.h"
#include "intel_lrc.h" #include "intel_lrc.h"
...@@ -386,10 +387,6 @@ static void intel_engine_init_timeline(struct intel_engine_cs *engine) ...@@ -386,10 +387,6 @@ static void intel_engine_init_timeline(struct intel_engine_cs *engine)
static bool csb_force_mmio(struct drm_i915_private *i915) static bool csb_force_mmio(struct drm_i915_private *i915)
{ {
/* GVT emulation depends upon intercepting CSB mmio */
if (intel_vgpu_active(i915))
return true;
/* /*
* IOMMU adds unpredictable latency causing the CSB write (from the * IOMMU adds unpredictable latency causing the CSB write (from the
* GPU into the HWSP) to only be visible some time after the interrupt * GPU into the HWSP) to only be visible some time after the interrupt
...@@ -398,6 +395,10 @@ static bool csb_force_mmio(struct drm_i915_private *i915) ...@@ -398,6 +395,10 @@ static bool csb_force_mmio(struct drm_i915_private *i915)
if (intel_vtd_active()) if (intel_vtd_active())
return true; return true;
/* Older GVT emulation depends upon intercepting CSB mmio */
if (intel_vgpu_active(i915) && !intel_vgpu_has_hwsp_emulation(i915))
return true;
return false; return false;
} }
...@@ -1625,8 +1626,10 @@ static void print_request(struct drm_printer *m, ...@@ -1625,8 +1626,10 @@ static void print_request(struct drm_printer *m,
struct drm_i915_gem_request *rq, struct drm_i915_gem_request *rq,
const char *prefix) const char *prefix)
{ {
drm_printf(m, "%s%x [%x:%x] prio=%d @ %dms: %s\n", prefix, drm_printf(m, "%s%x%s [%x:%x] prio=%d @ %dms: %s\n", prefix,
rq->global_seqno, rq->ctx->hw_id, rq->fence.seqno, rq->global_seqno,
i915_gem_request_completed(rq) ? "!" : "",
rq->ctx->hw_id, rq->fence.seqno,
rq->priotree.priority, rq->priotree.priority,
jiffies_to_msecs(jiffies - rq->emitted_jiffies), jiffies_to_msecs(jiffies - rq->emitted_jiffies),
rq->timeline->common->name); rq->timeline->common->name);
...@@ -1634,8 +1637,9 @@ static void print_request(struct drm_printer *m, ...@@ -1634,8 +1637,9 @@ static void print_request(struct drm_printer *m,
void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m) void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m)
{ {
struct intel_breadcrumbs *b = &engine->breadcrumbs; struct intel_breadcrumbs * const b = &engine->breadcrumbs;
struct i915_gpu_error *error = &engine->i915->gpu_error; const struct intel_engine_execlists * const execlists = &engine->execlists;
struct i915_gpu_error * const error = &engine->i915->gpu_error;
struct drm_i915_private *dev_priv = engine->i915; struct drm_i915_private *dev_priv = engine->i915;
struct drm_i915_gem_request *rq; struct drm_i915_gem_request *rq;
struct rb_node *rb; struct rb_node *rb;
...@@ -1699,7 +1703,6 @@ void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m) ...@@ -1699,7 +1703,6 @@ void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m)
if (i915_modparams.enable_execlists) { if (i915_modparams.enable_execlists) {
const u32 *hws = &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX]; const u32 *hws = &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
struct intel_engine_execlists * const execlists = &engine->execlists;
u32 ptr, read, write; u32 ptr, read, write;
unsigned int idx; unsigned int idx;
...@@ -1747,17 +1750,6 @@ void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m) ...@@ -1747,17 +1750,6 @@ void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m)
} }
} }
rcu_read_unlock(); rcu_read_unlock();
spin_lock_irq(&engine->timeline->lock);
for (rb = execlists->first; rb; rb = rb_next(rb)) {
struct i915_priolist *p =
rb_entry(rb, typeof(*p), node);
list_for_each_entry(rq, &p->requests,
priotree.link)
print_request(m, rq, "\t\tQ ");
}
spin_unlock_irq(&engine->timeline->lock);
} else if (INTEL_GEN(dev_priv) > 6) { } else if (INTEL_GEN(dev_priv) > 6) {
drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n", drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
I915_READ(RING_PP_DIR_BASE(engine))); I915_READ(RING_PP_DIR_BASE(engine)));
...@@ -1767,6 +1759,18 @@ void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m) ...@@ -1767,6 +1759,18 @@ void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m)
I915_READ(RING_PP_DIR_DCLV(engine))); I915_READ(RING_PP_DIR_DCLV(engine)));
} }
spin_lock_irq(&engine->timeline->lock);
list_for_each_entry(rq, &engine->timeline->requests, link)
print_request(m, rq, "\t\tE ");
for (rb = execlists->first; rb; rb = rb_next(rb)) {
struct i915_priolist *p =
rb_entry(rb, typeof(*p), node);
list_for_each_entry(rq, &p->requests, priotree.link)
print_request(m, rq, "\t\tQ ");
}
spin_unlock_irq(&engine->timeline->lock);
spin_lock_irq(&b->rb_lock); spin_lock_irq(&b->rb_lock);
for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) { for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
struct intel_wait *w = rb_entry(rb, typeof(*w), node); struct intel_wait *w = rb_entry(rb, typeof(*w), node);
......
...@@ -69,9 +69,9 @@ static inline bool no_fbc_on_multiple_pipes(struct drm_i915_private *dev_priv) ...@@ -69,9 +69,9 @@ static inline bool no_fbc_on_multiple_pipes(struct drm_i915_private *dev_priv)
* address we program because it starts at the real start of the buffer, so we * address we program because it starts at the real start of the buffer, so we
* have to take this into consideration here. * have to take this into consideration here.
*/ */
static unsigned int get_crtc_fence_y_offset(struct intel_crtc *crtc) static unsigned int get_crtc_fence_y_offset(struct intel_fbc *fbc)
{ {
return crtc->base.y - crtc->adjusted_y; return fbc->state_cache.plane.y - fbc->state_cache.plane.adjusted_y;
} }
/* /*
...@@ -727,8 +727,8 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc) ...@@ -727,8 +727,8 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
intel_fbc_get_plane_source_size(&fbc->state_cache, &effective_w, intel_fbc_get_plane_source_size(&fbc->state_cache, &effective_w,
&effective_h); &effective_h);
effective_w += crtc->adjusted_x; effective_w += fbc->state_cache.plane.adjusted_x;
effective_h += crtc->adjusted_y; effective_h += fbc->state_cache.plane.adjusted_y;
return effective_w <= max_w && effective_h <= max_h; return effective_w <= max_w && effective_h <= max_h;
} }
...@@ -757,6 +757,9 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc, ...@@ -757,6 +757,9 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
cache->plane.src_w = drm_rect_width(&plane_state->base.src) >> 16; cache->plane.src_w = drm_rect_width(&plane_state->base.src) >> 16;
cache->plane.src_h = drm_rect_height(&plane_state->base.src) >> 16; cache->plane.src_h = drm_rect_height(&plane_state->base.src) >> 16;
cache->plane.visible = plane_state->base.visible; cache->plane.visible = plane_state->base.visible;
cache->plane.adjusted_x = plane_state->main.x;
cache->plane.adjusted_y = plane_state->main.y;
cache->plane.y = plane_state->base.src.y1 >> 16;
if (!cache->plane.visible) if (!cache->plane.visible)
return; return;
...@@ -888,7 +891,7 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc, ...@@ -888,7 +891,7 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
params->crtc.pipe = crtc->pipe; params->crtc.pipe = crtc->pipe;
params->crtc.plane = crtc->plane; params->crtc.plane = crtc->plane;
params->crtc.fence_y_offset = get_crtc_fence_y_offset(crtc); params->crtc.fence_y_offset = get_crtc_fence_y_offset(fbc);
params->fb.format = cache->fb.format; params->fb.format = cache->fb.format;
params->fb.stride = cache->fb.stride; params->fb.stride = cache->fb.stride;
......
...@@ -189,7 +189,7 @@ static int intelfb_create(struct drm_fb_helper *helper, ...@@ -189,7 +189,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
" releasing it\n", " releasing it\n",
intel_fb->base.width, intel_fb->base.height, intel_fb->base.width, intel_fb->base.height,
sizes->fb_width, sizes->fb_height); sizes->fb_width, sizes->fb_height);
drm_framebuffer_unreference(&intel_fb->base); drm_framebuffer_put(&intel_fb->base);
intel_fb = ifbdev->fb = NULL; intel_fb = ifbdev->fb = NULL;
} }
if (!intel_fb || WARN_ON(!intel_fb->obj)) { if (!intel_fb || WARN_ON(!intel_fb->obj)) {
...@@ -627,7 +627,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev, ...@@ -627,7 +627,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
ifbdev->preferred_bpp = fb->base.format->cpp[0] * 8; ifbdev->preferred_bpp = fb->base.format->cpp[0] * 8;
ifbdev->fb = fb; ifbdev->fb = fb;
drm_framebuffer_reference(&ifbdev->fb->base); drm_framebuffer_get(&ifbdev->fb->base);
/* Final pass to check if any active pipes don't have fbs */ /* Final pass to check if any active pipes don't have fbs */
for_each_crtc(dev, crtc) { for_each_crtc(dev, crtc) {
......
...@@ -67,6 +67,99 @@ void intel_guc_init_early(struct intel_guc *guc) ...@@ -67,6 +67,99 @@ void intel_guc_init_early(struct intel_guc *guc)
guc->notify = gen8_guc_raise_irq; guc->notify = gen8_guc_raise_irq;
} }
static u32 get_gt_type(struct drm_i915_private *dev_priv)
{
/* XXX: GT type based on PCI device ID? field seems unused by fw */
return 0;
}
static u32 get_core_family(struct drm_i915_private *dev_priv)
{
u32 gen = INTEL_GEN(dev_priv);
switch (gen) {
case 9:
return GUC_CORE_FAMILY_GEN9;
default:
MISSING_CASE(gen);
return GUC_CORE_FAMILY_UNKNOWN;
}
}
/*
* Initialise the GuC parameter block before starting the firmware
* transfer. These parameters are read by the firmware on startup
* and cannot be changed thereafter.
*/
void intel_guc_init_params(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
u32 params[GUC_CTL_MAX_DWORDS];
int i;
memset(params, 0, sizeof(params));
params[GUC_CTL_DEVICE_INFO] |=
(get_gt_type(dev_priv) << GUC_CTL_GT_TYPE_SHIFT) |
(get_core_family(dev_priv) << GUC_CTL_CORE_FAMILY_SHIFT);
/*
* GuC ARAT increment is 10 ns. GuC default scheduler quantum is one
* second. This ARAR is calculated by:
* Scheduler-Quantum-in-ns / ARAT-increment-in-ns = 1000000000 / 10
*/
params[GUC_CTL_ARAT_HIGH] = 0;
params[GUC_CTL_ARAT_LOW] = 100000000;
params[GUC_CTL_WA] |= GUC_CTL_WA_UK_BY_DRIVER;
params[GUC_CTL_FEATURE] |= GUC_CTL_DISABLE_SCHEDULER |
GUC_CTL_VCS2_ENABLED;
params[GUC_CTL_LOG_PARAMS] = guc->log.flags;
if (i915_modparams.guc_log_level >= 0) {
params[GUC_CTL_DEBUG] =
i915_modparams.guc_log_level << GUC_LOG_VERBOSITY_SHIFT;
} else {
params[GUC_CTL_DEBUG] = GUC_LOG_DISABLED;
}
/* If GuC submission is enabled, set up additional parameters here */
if (i915_modparams.enable_guc_submission) {
u32 ads = guc_ggtt_offset(guc->ads_vma) >> PAGE_SHIFT;
u32 pgs = guc_ggtt_offset(dev_priv->guc.stage_desc_pool);
u32 ctx_in_16 = GUC_MAX_STAGE_DESCRIPTORS / 16;
params[GUC_CTL_DEBUG] |= ads << GUC_ADS_ADDR_SHIFT;
params[GUC_CTL_DEBUG] |= GUC_ADS_ENABLED;
pgs >>= PAGE_SHIFT;
params[GUC_CTL_CTXINFO] = (pgs << GUC_CTL_BASE_ADDR_SHIFT) |
(ctx_in_16 << GUC_CTL_CTXNUM_IN16_SHIFT);
params[GUC_CTL_FEATURE] |= GUC_CTL_KERNEL_SUBMISSIONS;
/* Unmask this bit to enable the GuC's internal scheduler */
params[GUC_CTL_FEATURE] &= ~GUC_CTL_DISABLE_SCHEDULER;
}
/*
* All SOFT_SCRATCH registers are in FORCEWAKE_BLITTER domain and
* they are power context saved so it's ok to release forcewake
* when we are done here and take it again at xfer time.
*/
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_BLITTER);
I915_WRITE(SOFT_SCRATCH(0), 0);
for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
I915_WRITE(SOFT_SCRATCH(1 + i), params[i]);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_BLITTER);
}
int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len) int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len)
{ {
WARN(1, "Unexpected send: action=%#x\n", *action); WARN(1, "Unexpected send: action=%#x\n", *action);
...@@ -263,3 +356,14 @@ struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size) ...@@ -263,3 +356,14 @@ struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
i915_gem_object_put(obj); i915_gem_object_put(obj);
return vma; return vma;
} }
/*
 * intel_guc_wopcm_size - WOPCM space available to the GuC
 * @dev_priv: i915 device private
 *
 * Return: the usable WOPCM size in bytes, accounting for any
 * platform-reserved region at the top.
 */
u32 intel_guc_wopcm_size(struct drm_i915_private *dev_priv)
{
	/* On BXT, the top of WOPCM is reserved for RC6 context */
	u32 reserved = IS_GEN9_LP(dev_priv) ?
		       BXT_GUC_WOPCM_RC6_RESERVED : 0;

	return GUC_WOPCM_TOP - reserved;
}
...@@ -26,6 +26,7 @@ ...@@ -26,6 +26,7 @@
#define _INTEL_GUC_H_ #define _INTEL_GUC_H_
#include "intel_uncore.h" #include "intel_uncore.h"
#include "intel_guc_fw.h"
#include "intel_guc_fwif.h" #include "intel_guc_fwif.h"
#include "intel_guc_ct.h" #include "intel_guc_ct.h"
#include "intel_guc_log.h" #include "intel_guc_log.h"
...@@ -33,6 +34,11 @@ ...@@ -33,6 +34,11 @@
#include "i915_guc_reg.h" #include "i915_guc_reg.h"
#include "i915_vma.h" #include "i915_vma.h"
/*
* Top level structure of GuC. It handles firmware loading and manages client
* pool and doorbells. intel_guc owns a i915_guc_client to replace the legacy
* ExecList submission.
*/
struct intel_guc { struct intel_guc {
struct intel_uc_fw fw; struct intel_uc_fw fw;
struct intel_guc_log log; struct intel_guc_log log;
...@@ -83,6 +89,12 @@ static inline void intel_guc_notify(struct intel_guc *guc) ...@@ -83,6 +89,12 @@ static inline void intel_guc_notify(struct intel_guc *guc)
guc->notify(guc); guc->notify(guc);
} }
/*
* GuC does not allow any gfx GGTT address that falls into range [0, WOPCM_TOP),
* which is reserved for Boot ROM, SRAM and WOPCM. Currently this top address is
* 512K. In order to exclude 0-512K address space from GGTT, all gfx objects
* used by GuC is pinned with PIN_OFFSET_BIAS along with size of WOPCM.
*/
static inline u32 guc_ggtt_offset(struct i915_vma *vma) static inline u32 guc_ggtt_offset(struct i915_vma *vma)
{ {
u32 offset = i915_ggtt_offset(vma); u32 offset = i915_ggtt_offset(vma);
...@@ -95,6 +107,7 @@ static inline u32 guc_ggtt_offset(struct i915_vma *vma) ...@@ -95,6 +107,7 @@ static inline u32 guc_ggtt_offset(struct i915_vma *vma)
void intel_guc_init_early(struct intel_guc *guc); void intel_guc_init_early(struct intel_guc *guc);
void intel_guc_init_send_regs(struct intel_guc *guc); void intel_guc_init_send_regs(struct intel_guc *guc);
void intel_guc_init_params(struct intel_guc *guc);
int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len); int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len);
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len); int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len);
int intel_guc_sample_forcewake(struct intel_guc *guc); int intel_guc_sample_forcewake(struct intel_guc *guc);
...@@ -102,9 +115,6 @@ int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset); ...@@ -102,9 +115,6 @@ int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset);
int intel_guc_suspend(struct drm_i915_private *dev_priv); int intel_guc_suspend(struct drm_i915_private *dev_priv);
int intel_guc_resume(struct drm_i915_private *dev_priv); int intel_guc_resume(struct drm_i915_private *dev_priv);
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size); struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);
int intel_guc_select_fw(struct intel_guc *guc);
int intel_guc_init_hw(struct intel_guc *guc);
u32 intel_guc_wopcm_size(struct drm_i915_private *dev_priv); u32 intel_guc_wopcm_size(struct drm_i915_private *dev_priv);
#endif #endif
...@@ -26,31 +26,9 @@ ...@@ -26,31 +26,9 @@
* Dave Gordon <david.s.gordon@intel.com> * Dave Gordon <david.s.gordon@intel.com>
* Alex Dai <yu.dai@intel.com> * Alex Dai <yu.dai@intel.com>
*/ */
#include "i915_drv.h"
#include "intel_uc.h"
/** #include "intel_guc_fw.h"
* DOC: GuC-specific firmware loader #include "i915_drv.h"
*
* intel_guc:
* Top level structure of guc. It handles firmware loading and manages client
* pool and doorbells. intel_guc owns a i915_guc_client to replace the legacy
* ExecList submission.
*
* Firmware versioning:
* The firmware build process will generate a version header file with major and
* minor version defined. The versions are built into CSS header of firmware.
* i915 kernel driver set the minimal firmware version required per platform.
* The firmware installation package will install (symbolic link) proper version
* of firmware.
*
* GuC address space:
* GuC does not allow any gfx GGTT address that falls into range [0, WOPCM_TOP),
* which is reserved for Boot ROM, SRAM and WOPCM. Currently this top address is
* 512K. In order to exclude 0-512K address space from GGTT, all gfx objects
* used by GuC is pinned with PIN_OFFSET_BIAS along with size of WOPCM.
*
*/
#define SKL_FW_MAJOR 6 #define SKL_FW_MAJOR 6
#define SKL_FW_MINOR 1 #define SKL_FW_MINOR 1
...@@ -78,88 +56,45 @@ MODULE_FIRMWARE(I915_KBL_GUC_UCODE); ...@@ -78,88 +56,45 @@ MODULE_FIRMWARE(I915_KBL_GUC_UCODE);
#define I915_GLK_GUC_UCODE GUC_FW_PATH(glk, GLK_FW_MAJOR, GLK_FW_MINOR) #define I915_GLK_GUC_UCODE GUC_FW_PATH(glk, GLK_FW_MAJOR, GLK_FW_MINOR)
/**
static u32 get_gttype(struct drm_i915_private *dev_priv) * intel_guc_fw_select() - selects GuC firmware for uploading
{ *
/* XXX: GT type based on PCI device ID? field seems unused by fw */ * @guc: intel_guc struct
return 0; *
} * Return: zero when we know firmware, non-zero in other case
static u32 get_core_family(struct drm_i915_private *dev_priv)
{
u32 gen = INTEL_GEN(dev_priv);
switch (gen) {
case 9:
return GUC_CORE_FAMILY_GEN9;
default:
MISSING_CASE(gen);
return GUC_CORE_FAMILY_UNKNOWN;
}
}
/*
* Initialise the GuC parameter block before starting the firmware
* transfer. These parameters are read by the firmware on startup
* and cannot be changed thereafter.
*/ */
static void guc_params_init(struct drm_i915_private *dev_priv) int intel_guc_fw_select(struct intel_guc *guc)
{ {
struct intel_guc *guc = &dev_priv->guc; struct drm_i915_private *dev_priv = guc_to_i915(guc);
u32 params[GUC_CTL_MAX_DWORDS];
int i;
memset(&params, 0, sizeof(params));
params[GUC_CTL_DEVICE_INFO] |=
(get_gttype(dev_priv) << GUC_CTL_GTTYPE_SHIFT) |
(get_core_family(dev_priv) << GUC_CTL_COREFAMILY_SHIFT);
/*
* GuC ARAT increment is 10 ns. GuC default scheduler quantum is one
* second. This ARAR is calculated by:
* Scheduler-Quantum-in-ns / ARAT-increment-in-ns = 1000000000 / 10
*/
params[GUC_CTL_ARAT_HIGH] = 0;
params[GUC_CTL_ARAT_LOW] = 100000000;
params[GUC_CTL_WA] |= GUC_CTL_WA_UK_BY_DRIVER;
params[GUC_CTL_FEATURE] |= GUC_CTL_DISABLE_SCHEDULER |
GUC_CTL_VCS2_ENABLED;
params[GUC_CTL_LOG_PARAMS] = guc->log.flags;
if (i915_modparams.guc_log_level >= 0) {
params[GUC_CTL_DEBUG] =
i915_modparams.guc_log_level << GUC_LOG_VERBOSITY_SHIFT;
} else
params[GUC_CTL_DEBUG] = GUC_LOG_DISABLED;
/* If GuC submission is enabled, set up additional parameters here */
if (i915_modparams.enable_guc_submission) {
u32 ads = guc_ggtt_offset(guc->ads_vma) >> PAGE_SHIFT;
u32 pgs = guc_ggtt_offset(dev_priv->guc.stage_desc_pool);
u32 ctx_in_16 = GUC_MAX_STAGE_DESCRIPTORS / 16;
params[GUC_CTL_DEBUG] |= ads << GUC_ADS_ADDR_SHIFT;
params[GUC_CTL_DEBUG] |= GUC_ADS_ENABLED;
pgs >>= PAGE_SHIFT;
params[GUC_CTL_CTXINFO] = (pgs << GUC_CTL_BASE_ADDR_SHIFT) |
(ctx_in_16 << GUC_CTL_CTXNUM_IN16_SHIFT);
params[GUC_CTL_FEATURE] |= GUC_CTL_KERNEL_SUBMISSIONS; intel_uc_fw_init(&guc->fw, INTEL_UC_FW_TYPE_GUC);
/* Unmask this bit to enable the GuC's internal scheduler */ if (i915_modparams.guc_firmware_path) {
params[GUC_CTL_FEATURE] &= ~GUC_CTL_DISABLE_SCHEDULER; guc->fw.path = i915_modparams.guc_firmware_path;
guc->fw.major_ver_wanted = 0;
guc->fw.minor_ver_wanted = 0;
} else if (IS_SKYLAKE(dev_priv)) {
guc->fw.path = I915_SKL_GUC_UCODE;
guc->fw.major_ver_wanted = SKL_FW_MAJOR;
guc->fw.minor_ver_wanted = SKL_FW_MINOR;
} else if (IS_BROXTON(dev_priv)) {
guc->fw.path = I915_BXT_GUC_UCODE;
guc->fw.major_ver_wanted = BXT_FW_MAJOR;
guc->fw.minor_ver_wanted = BXT_FW_MINOR;
} else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
guc->fw.path = I915_KBL_GUC_UCODE;
guc->fw.major_ver_wanted = KBL_FW_MAJOR;
guc->fw.minor_ver_wanted = KBL_FW_MINOR;
} else if (IS_GEMINILAKE(dev_priv)) {
guc->fw.path = I915_GLK_GUC_UCODE;
guc->fw.major_ver_wanted = GLK_FW_MAJOR;
guc->fw.minor_ver_wanted = GLK_FW_MINOR;
} else {
DRM_ERROR("No GuC firmware known for platform with GuC!\n");
return -ENOENT;
} }
I915_WRITE(SOFT_SCRATCH(0), 0); return 0;
for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
I915_WRITE(SOFT_SCRATCH(1 + i), params[i]);
} }
/* /*
...@@ -250,38 +185,16 @@ static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv, ...@@ -250,38 +185,16 @@ static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv,
return ret; return ret;
} }
u32 intel_guc_wopcm_size(struct drm_i915_private *dev_priv)
{
u32 wopcm_size = GUC_WOPCM_TOP;
/* On BXT, the top of WOPCM is reserved for RC6 context */
if (IS_GEN9_LP(dev_priv))
wopcm_size -= BXT_GUC_WOPCM_RC6_RESERVED;
return wopcm_size;
}
/* /*
* Load the GuC firmware blob into the MinuteIA. * Load the GuC firmware blob into the MinuteIA.
*/ */
static int guc_ucode_xfer(struct drm_i915_private *dev_priv) static int guc_ucode_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma)
{ {
struct intel_uc_fw *guc_fw = &dev_priv->guc.fw; struct intel_guc *guc = container_of(guc_fw, struct intel_guc, fw);
struct i915_vma *vma; struct drm_i915_private *dev_priv = guc_to_i915(guc);
int ret; int ret;
ret = i915_gem_object_set_to_gtt_domain(guc_fw->obj, false); GEM_BUG_ON(guc_fw->type != INTEL_UC_FW_TYPE_GUC);
if (ret) {
DRM_DEBUG_DRIVER("set-domain failed %d\n", ret);
return ret;
}
vma = i915_gem_object_ggtt_pin(guc_fw->obj, NULL, 0, 0,
PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
if (IS_ERR(vma)) {
DRM_DEBUG_DRIVER("pin failed %d\n", (int)PTR_ERR(vma));
return PTR_ERR(vma);
}
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
...@@ -312,23 +225,15 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv) ...@@ -312,23 +225,15 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
I915_WRITE(GUC_ARAT_C6DIS, 0x1FF); I915_WRITE(GUC_ARAT_C6DIS, 0x1FF);
} }
guc_params_init(dev_priv);
ret = guc_ucode_xfer_dma(dev_priv, vma); ret = guc_ucode_xfer_dma(dev_priv, vma);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
/*
* We keep the object pages for reuse during resume. But we can unpin it
* now that DMA has completed, so it doesn't continue to take up space.
*/
i915_vma_unpin(vma);
return ret; return ret;
} }
/** /**
* intel_guc_init_hw() - finish preparing the GuC for activity * intel_guc_fw_upload() - finish preparing the GuC for activity
* @guc: intel_guc structure * @guc: intel_guc structure
* *
* Called during driver loading and also after a GPU reset. * Called during driver loading and also after a GPU reset.
...@@ -340,78 +245,7 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv) ...@@ -340,78 +245,7 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
* *
* Return: non-zero code on error * Return: non-zero code on error
*/ */
int intel_guc_init_hw(struct intel_guc *guc) int intel_guc_fw_upload(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
const char *fw_path = guc->fw.path;
int ret;
DRM_DEBUG_DRIVER("GuC fw status: path %s, fetch %s, load %s\n",
fw_path,
intel_uc_fw_status_repr(guc->fw.fetch_status),
intel_uc_fw_status_repr(guc->fw.load_status));
if (guc->fw.fetch_status != INTEL_UC_FIRMWARE_SUCCESS)
return -EIO;
guc->fw.load_status = INTEL_UC_FIRMWARE_PENDING;
DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n",
intel_uc_fw_status_repr(guc->fw.fetch_status),
intel_uc_fw_status_repr(guc->fw.load_status));
ret = guc_ucode_xfer(dev_priv);
if (ret)
return -EAGAIN;
guc->fw.load_status = INTEL_UC_FIRMWARE_SUCCESS;
DRM_INFO("GuC %s (firmware %s [version %u.%u])\n",
i915_modparams.enable_guc_submission ? "submission enabled" :
"loaded",
guc->fw.path,
guc->fw.major_ver_found, guc->fw.minor_ver_found);
return 0;
}
/**
* intel_guc_select_fw() - selects GuC firmware for loading
* @guc: intel_guc struct
*
* Return: zero when we know firmware, non-zero in other case
*/
int intel_guc_select_fw(struct intel_guc *guc)
{ {
struct drm_i915_private *dev_priv = guc_to_i915(guc); return intel_uc_fw_upload(&guc->fw, guc_ucode_xfer);
intel_uc_fw_init(&guc->fw, INTEL_UC_FW_TYPE_GUC);
if (i915_modparams.guc_firmware_path) {
guc->fw.path = i915_modparams.guc_firmware_path;
guc->fw.major_ver_wanted = 0;
guc->fw.minor_ver_wanted = 0;
} else if (IS_SKYLAKE(dev_priv)) {
guc->fw.path = I915_SKL_GUC_UCODE;
guc->fw.major_ver_wanted = SKL_FW_MAJOR;
guc->fw.minor_ver_wanted = SKL_FW_MINOR;
} else if (IS_BROXTON(dev_priv)) {
guc->fw.path = I915_BXT_GUC_UCODE;
guc->fw.major_ver_wanted = BXT_FW_MAJOR;
guc->fw.minor_ver_wanted = BXT_FW_MINOR;
} else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
guc->fw.path = I915_KBL_GUC_UCODE;
guc->fw.major_ver_wanted = KBL_FW_MAJOR;
guc->fw.minor_ver_wanted = KBL_FW_MINOR;
} else if (IS_GEMINILAKE(dev_priv)) {
guc->fw.path = I915_GLK_GUC_UCODE;
guc->fw.major_ver_wanted = GLK_FW_MAJOR;
guc->fw.minor_ver_wanted = GLK_FW_MINOR;
} else {
DRM_ERROR("No GuC firmware known for platform with GuC!\n");
return -ENOENT;
}
return 0;
} }
/*
* Copyright © 2017 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#ifndef _INTEL_GUC_FW_H_
#define _INTEL_GUC_FW_H_

/* Forward declaration only; the full definition lives in intel_guc.h. */
struct intel_guc;

/* Select the GuC firmware blob (path + wanted version) for this platform. */
int intel_guc_fw_select(struct intel_guc *guc);
/* Upload the previously fetched GuC firmware to the hardware. */
int intel_guc_fw_upload(struct intel_guc *guc);

#endif
...@@ -82,8 +82,8 @@ ...@@ -82,8 +82,8 @@
#define GUC_CTL_ARAT_LOW 2 #define GUC_CTL_ARAT_LOW 2
#define GUC_CTL_DEVICE_INFO 3 #define GUC_CTL_DEVICE_INFO 3
#define GUC_CTL_GTTYPE_SHIFT 0 #define GUC_CTL_GT_TYPE_SHIFT 0
#define GUC_CTL_COREFAMILY_SHIFT 7 #define GUC_CTL_CORE_FAMILY_SHIFT 7
#define GUC_CTL_LOG_PARAMS 4 #define GUC_CTL_LOG_PARAMS 4
#define GUC_LOG_VALID (1 << 0) #define GUC_LOG_VALID (1 << 0)
......
...@@ -70,7 +70,7 @@ static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector) ...@@ -70,7 +70,7 @@ static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector)
return enc_to_intel_hdmi(&intel_attached_encoder(connector)->base); return enc_to_intel_hdmi(&intel_attached_encoder(connector)->base);
} }
static u32 g4x_infoframe_index(enum hdmi_infoframe_type type) static u32 g4x_infoframe_index(unsigned int type)
{ {
switch (type) { switch (type) {
case HDMI_INFOFRAME_TYPE_AVI: case HDMI_INFOFRAME_TYPE_AVI:
...@@ -85,7 +85,7 @@ static u32 g4x_infoframe_index(enum hdmi_infoframe_type type) ...@@ -85,7 +85,7 @@ static u32 g4x_infoframe_index(enum hdmi_infoframe_type type)
} }
} }
static u32 g4x_infoframe_enable(enum hdmi_infoframe_type type) static u32 g4x_infoframe_enable(unsigned int type)
{ {
switch (type) { switch (type) {
case HDMI_INFOFRAME_TYPE_AVI: case HDMI_INFOFRAME_TYPE_AVI:
...@@ -100,9 +100,11 @@ static u32 g4x_infoframe_enable(enum hdmi_infoframe_type type) ...@@ -100,9 +100,11 @@ static u32 g4x_infoframe_enable(enum hdmi_infoframe_type type)
} }
} }
static u32 hsw_infoframe_enable(enum hdmi_infoframe_type type) static u32 hsw_infoframe_enable(unsigned int type)
{ {
switch (type) { switch (type) {
case DP_SDP_VSC:
return VIDEO_DIP_ENABLE_VSC_HSW;
case HDMI_INFOFRAME_TYPE_AVI: case HDMI_INFOFRAME_TYPE_AVI:
return VIDEO_DIP_ENABLE_AVI_HSW; return VIDEO_DIP_ENABLE_AVI_HSW;
case HDMI_INFOFRAME_TYPE_SPD: case HDMI_INFOFRAME_TYPE_SPD:
...@@ -118,10 +120,12 @@ static u32 hsw_infoframe_enable(enum hdmi_infoframe_type type) ...@@ -118,10 +120,12 @@ static u32 hsw_infoframe_enable(enum hdmi_infoframe_type type)
static i915_reg_t static i915_reg_t
hsw_dip_data_reg(struct drm_i915_private *dev_priv, hsw_dip_data_reg(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder, enum transcoder cpu_transcoder,
enum hdmi_infoframe_type type, unsigned int type,
int i) int i)
{ {
switch (type) { switch (type) {
case DP_SDP_VSC:
return HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder, i);
case HDMI_INFOFRAME_TYPE_AVI: case HDMI_INFOFRAME_TYPE_AVI:
return HSW_TVIDEO_DIP_AVI_DATA(cpu_transcoder, i); return HSW_TVIDEO_DIP_AVI_DATA(cpu_transcoder, i);
case HDMI_INFOFRAME_TYPE_SPD: case HDMI_INFOFRAME_TYPE_SPD:
...@@ -136,7 +140,7 @@ hsw_dip_data_reg(struct drm_i915_private *dev_priv, ...@@ -136,7 +140,7 @@ hsw_dip_data_reg(struct drm_i915_private *dev_priv,
static void g4x_write_infoframe(struct drm_encoder *encoder, static void g4x_write_infoframe(struct drm_encoder *encoder,
const struct intel_crtc_state *crtc_state, const struct intel_crtc_state *crtc_state,
enum hdmi_infoframe_type type, unsigned int type,
const void *frame, ssize_t len) const void *frame, ssize_t len)
{ {
const uint32_t *data = frame; const uint32_t *data = frame;
...@@ -191,7 +195,7 @@ static bool g4x_infoframe_enabled(struct drm_encoder *encoder, ...@@ -191,7 +195,7 @@ static bool g4x_infoframe_enabled(struct drm_encoder *encoder,
static void ibx_write_infoframe(struct drm_encoder *encoder, static void ibx_write_infoframe(struct drm_encoder *encoder,
const struct intel_crtc_state *crtc_state, const struct intel_crtc_state *crtc_state,
enum hdmi_infoframe_type type, unsigned int type,
const void *frame, ssize_t len) const void *frame, ssize_t len)
{ {
const uint32_t *data = frame; const uint32_t *data = frame;
...@@ -251,7 +255,7 @@ static bool ibx_infoframe_enabled(struct drm_encoder *encoder, ...@@ -251,7 +255,7 @@ static bool ibx_infoframe_enabled(struct drm_encoder *encoder,
static void cpt_write_infoframe(struct drm_encoder *encoder, static void cpt_write_infoframe(struct drm_encoder *encoder,
const struct intel_crtc_state *crtc_state, const struct intel_crtc_state *crtc_state,
enum hdmi_infoframe_type type, unsigned int type,
const void *frame, ssize_t len) const void *frame, ssize_t len)
{ {
const uint32_t *data = frame; const uint32_t *data = frame;
...@@ -309,7 +313,7 @@ static bool cpt_infoframe_enabled(struct drm_encoder *encoder, ...@@ -309,7 +313,7 @@ static bool cpt_infoframe_enabled(struct drm_encoder *encoder,
static void vlv_write_infoframe(struct drm_encoder *encoder, static void vlv_write_infoframe(struct drm_encoder *encoder,
const struct intel_crtc_state *crtc_state, const struct intel_crtc_state *crtc_state,
enum hdmi_infoframe_type type, unsigned int type,
const void *frame, ssize_t len) const void *frame, ssize_t len)
{ {
const uint32_t *data = frame; const uint32_t *data = frame;
...@@ -368,7 +372,7 @@ static bool vlv_infoframe_enabled(struct drm_encoder *encoder, ...@@ -368,7 +372,7 @@ static bool vlv_infoframe_enabled(struct drm_encoder *encoder,
static void hsw_write_infoframe(struct drm_encoder *encoder, static void hsw_write_infoframe(struct drm_encoder *encoder,
const struct intel_crtc_state *crtc_state, const struct intel_crtc_state *crtc_state,
enum hdmi_infoframe_type type, unsigned int type,
const void *frame, ssize_t len) const void *frame, ssize_t len)
{ {
const uint32_t *data = frame; const uint32_t *data = frame;
...@@ -377,6 +381,8 @@ static void hsw_write_infoframe(struct drm_encoder *encoder, ...@@ -377,6 +381,8 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder); i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
i915_reg_t data_reg; i915_reg_t data_reg;
int data_size = type == DP_SDP_VSC ?
VIDEO_DIP_VSC_DATA_SIZE : VIDEO_DIP_DATA_SIZE;
int i; int i;
u32 val = I915_READ(ctl_reg); u32 val = I915_READ(ctl_reg);
...@@ -392,7 +398,7 @@ static void hsw_write_infoframe(struct drm_encoder *encoder, ...@@ -392,7 +398,7 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
data++; data++;
} }
/* Write every possible data byte to force correct ECC calculation. */ /* Write every possible data byte to force correct ECC calculation. */
for (; i < VIDEO_DIP_DATA_SIZE; i += 4) for (; i < data_size; i += 4)
I915_WRITE(hsw_dip_data_reg(dev_priv, cpu_transcoder, I915_WRITE(hsw_dip_data_reg(dev_priv, cpu_transcoder,
type, i >> 2), 0); type, i >> 2), 0);
mmiowb(); mmiowb();
......
...@@ -77,6 +77,42 @@ MODULE_FIRMWARE(I915_KBL_HUC_UCODE); ...@@ -77,6 +77,42 @@ MODULE_FIRMWARE(I915_KBL_HUC_UCODE);
#define I915_GLK_HUC_UCODE HUC_FW_PATH(glk, GLK_HUC_FW_MAJOR, \ #define I915_GLK_HUC_UCODE HUC_FW_PATH(glk, GLK_HUC_FW_MAJOR, \
GLK_HUC_FW_MINOR, GLK_BLD_NUM) GLK_HUC_FW_MINOR, GLK_BLD_NUM)
/**
* intel_huc_select_fw() - selects HuC firmware for loading
* @huc: intel_huc struct
*/
void intel_huc_select_fw(struct intel_huc *huc)
{
struct drm_i915_private *dev_priv = huc_to_i915(huc);
intel_uc_fw_init(&huc->fw, INTEL_UC_FW_TYPE_HUC);
if (i915_modparams.huc_firmware_path) {
huc->fw.path = i915_modparams.huc_firmware_path;
huc->fw.major_ver_wanted = 0;
huc->fw.minor_ver_wanted = 0;
} else if (IS_SKYLAKE(dev_priv)) {
huc->fw.path = I915_SKL_HUC_UCODE;
huc->fw.major_ver_wanted = SKL_HUC_FW_MAJOR;
huc->fw.minor_ver_wanted = SKL_HUC_FW_MINOR;
} else if (IS_BROXTON(dev_priv)) {
huc->fw.path = I915_BXT_HUC_UCODE;
huc->fw.major_ver_wanted = BXT_HUC_FW_MAJOR;
huc->fw.minor_ver_wanted = BXT_HUC_FW_MINOR;
} else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
huc->fw.path = I915_KBL_HUC_UCODE;
huc->fw.major_ver_wanted = KBL_HUC_FW_MAJOR;
huc->fw.minor_ver_wanted = KBL_HUC_FW_MINOR;
} else if (IS_GEMINILAKE(dev_priv)) {
huc->fw.path = I915_GLK_HUC_UCODE;
huc->fw.major_ver_wanted = GLK_HUC_FW_MAJOR;
huc->fw.minor_ver_wanted = GLK_HUC_FW_MINOR;
} else {
DRM_ERROR("No HuC firmware known for platform with HuC!\n");
return;
}
}
/** /**
* huc_ucode_xfer() - DMA's the firmware * huc_ucode_xfer() - DMA's the firmware
* @dev_priv: the drm_i915_private device * @dev_priv: the drm_i915_private device
...@@ -85,26 +121,15 @@ MODULE_FIRMWARE(I915_KBL_HUC_UCODE); ...@@ -85,26 +121,15 @@ MODULE_FIRMWARE(I915_KBL_HUC_UCODE);
* *
* Return: 0 on success, non-zero on failure * Return: 0 on success, non-zero on failure
*/ */
static int huc_ucode_xfer(struct drm_i915_private *dev_priv) static int huc_ucode_xfer(struct intel_uc_fw *huc_fw, struct i915_vma *vma)
{ {
struct intel_uc_fw *huc_fw = &dev_priv->huc.fw; struct intel_huc *huc = container_of(huc_fw, struct intel_huc, fw);
struct i915_vma *vma; struct drm_i915_private *dev_priv = huc_to_i915(huc);
unsigned long offset = 0; unsigned long offset = 0;
u32 size; u32 size;
int ret; int ret;
ret = i915_gem_object_set_to_gtt_domain(huc_fw->obj, false); GEM_BUG_ON(huc_fw->type != INTEL_UC_FW_TYPE_HUC);
if (ret) {
DRM_DEBUG_DRIVER("set-domain failed %d\n", ret);
return ret;
}
vma = i915_gem_object_ggtt_pin(huc_fw->obj, NULL, 0, 0,
PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
if (IS_ERR(vma)) {
DRM_DEBUG_DRIVER("pin failed %d\n", (int)PTR_ERR(vma));
return PTR_ERR(vma);
}
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
...@@ -135,51 +160,9 @@ static int huc_ucode_xfer(struct drm_i915_private *dev_priv) ...@@ -135,51 +160,9 @@ static int huc_ucode_xfer(struct drm_i915_private *dev_priv)
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
/*
* We keep the object pages for reuse during resume. But we can unpin it
* now that DMA has completed, so it doesn't continue to take up space.
*/
i915_vma_unpin(vma);
return ret; return ret;
} }
/**
* intel_huc_select_fw() - selects HuC firmware for loading
* @huc: intel_huc struct
*/
void intel_huc_select_fw(struct intel_huc *huc)
{
struct drm_i915_private *dev_priv = huc_to_i915(huc);
intel_uc_fw_init(&huc->fw, INTEL_UC_FW_TYPE_HUC);
if (i915_modparams.huc_firmware_path) {
huc->fw.path = i915_modparams.huc_firmware_path;
huc->fw.major_ver_wanted = 0;
huc->fw.minor_ver_wanted = 0;
} else if (IS_SKYLAKE(dev_priv)) {
huc->fw.path = I915_SKL_HUC_UCODE;
huc->fw.major_ver_wanted = SKL_HUC_FW_MAJOR;
huc->fw.minor_ver_wanted = SKL_HUC_FW_MINOR;
} else if (IS_BROXTON(dev_priv)) {
huc->fw.path = I915_BXT_HUC_UCODE;
huc->fw.major_ver_wanted = BXT_HUC_FW_MAJOR;
huc->fw.minor_ver_wanted = BXT_HUC_FW_MINOR;
} else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
huc->fw.path = I915_KBL_HUC_UCODE;
huc->fw.major_ver_wanted = KBL_HUC_FW_MAJOR;
huc->fw.minor_ver_wanted = KBL_HUC_FW_MINOR;
} else if (IS_GEMINILAKE(dev_priv)) {
huc->fw.path = I915_GLK_HUC_UCODE;
huc->fw.major_ver_wanted = GLK_HUC_FW_MAJOR;
huc->fw.minor_ver_wanted = GLK_HUC_FW_MINOR;
} else {
DRM_ERROR("No HuC firmware known for platform with HuC!\n");
return;
}
}
/** /**
* intel_huc_init_hw() - load HuC uCode to device * intel_huc_init_hw() - load HuC uCode to device
* @huc: intel_huc structure * @huc: intel_huc structure
...@@ -194,33 +177,7 @@ void intel_huc_select_fw(struct intel_huc *huc) ...@@ -194,33 +177,7 @@ void intel_huc_select_fw(struct intel_huc *huc)
*/ */
void intel_huc_init_hw(struct intel_huc *huc) void intel_huc_init_hw(struct intel_huc *huc)
{ {
struct drm_i915_private *dev_priv = huc_to_i915(huc); intel_uc_fw_upload(&huc->fw, huc_ucode_xfer);
int err;
DRM_DEBUG_DRIVER("%s fw status: fetch %s, load %s\n",
huc->fw.path,
intel_uc_fw_status_repr(huc->fw.fetch_status),
intel_uc_fw_status_repr(huc->fw.load_status));
if (huc->fw.fetch_status != INTEL_UC_FIRMWARE_SUCCESS)
return;
huc->fw.load_status = INTEL_UC_FIRMWARE_PENDING;
err = huc_ucode_xfer(dev_priv);
huc->fw.load_status = err ?
INTEL_UC_FIRMWARE_FAIL : INTEL_UC_FIRMWARE_SUCCESS;
DRM_DEBUG_DRIVER("%s fw status: fetch %s, load %s\n",
huc->fw.path,
intel_uc_fw_status_repr(huc->fw.fetch_status),
intel_uc_fw_status_repr(huc->fw.load_status));
if (huc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
DRM_ERROR("Failed to complete HuC uCode load with ret %d\n", err);
return;
} }
/** /**
......
...@@ -793,7 +793,6 @@ static void intel_lrc_irq_handler(unsigned long data) ...@@ -793,7 +793,6 @@ static void intel_lrc_irq_handler(unsigned long data)
&engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX]; &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
unsigned int head, tail; unsigned int head, tail;
/* However GVT emulation depends upon intercepting CSB mmio */
if (unlikely(execlists->csb_use_mmio)) { if (unlikely(execlists->csb_use_mmio)) {
buf = (u32 * __force) buf = (u32 * __force)
(dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0))); (dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0)));
...@@ -1094,6 +1093,7 @@ execlists_context_pin(struct intel_engine_cs *engine, ...@@ -1094,6 +1093,7 @@ execlists_context_pin(struct intel_engine_cs *engine,
i915_ggtt_offset(ce->ring->vma); i915_ggtt_offset(ce->ring->vma);
ce->state->obj->mm.dirty = true; ce->state->obj->mm.dirty = true;
ce->state->obj->pin_global++;
i915_gem_context_get(ctx); i915_gem_context_get(ctx);
out: out:
...@@ -1121,6 +1121,7 @@ static void execlists_context_unpin(struct intel_engine_cs *engine, ...@@ -1121,6 +1121,7 @@ static void execlists_context_unpin(struct intel_engine_cs *engine,
intel_ring_unpin(ce->ring); intel_ring_unpin(ce->ring);
ce->state->obj->pin_global--;
i915_gem_object_unpin_map(ce->state->obj); i915_gem_object_unpin_map(ce->state->obj);
i915_vma_unpin(ce->state); i915_vma_unpin(ce->state);
......
...@@ -56,7 +56,7 @@ static enum drm_lspcon_mode lspcon_get_current_mode(struct intel_lspcon *lspcon) ...@@ -56,7 +56,7 @@ static enum drm_lspcon_mode lspcon_get_current_mode(struct intel_lspcon *lspcon)
struct i2c_adapter *adapter = &lspcon_to_intel_dp(lspcon)->aux.ddc; struct i2c_adapter *adapter = &lspcon_to_intel_dp(lspcon)->aux.ddc;
if (drm_lspcon_get_mode(adapter, &current_mode)) { if (drm_lspcon_get_mode(adapter, &current_mode)) {
DRM_ERROR("Error reading LSPCON mode\n"); DRM_DEBUG_KMS("Error reading LSPCON mode\n");
return DRM_LSPCON_MODE_INVALID; return DRM_LSPCON_MODE_INVALID;
} }
return current_mode; return current_mode;
...@@ -68,16 +68,15 @@ static enum drm_lspcon_mode lspcon_wait_mode(struct intel_lspcon *lspcon, ...@@ -68,16 +68,15 @@ static enum drm_lspcon_mode lspcon_wait_mode(struct intel_lspcon *lspcon,
enum drm_lspcon_mode current_mode; enum drm_lspcon_mode current_mode;
current_mode = lspcon_get_current_mode(lspcon); current_mode = lspcon_get_current_mode(lspcon);
if (current_mode == mode || current_mode == DRM_LSPCON_MODE_INVALID) if (current_mode == mode)
goto out; goto out;
DRM_DEBUG_KMS("Waiting for LSPCON mode %s to settle\n", DRM_DEBUG_KMS("Waiting for LSPCON mode %s to settle\n",
lspcon_mode_name(mode)); lspcon_mode_name(mode));
wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode || wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode, 100);
current_mode == DRM_LSPCON_MODE_INVALID, 100);
if (current_mode != mode) if (current_mode != mode)
DRM_DEBUG_KMS("LSPCON mode hasn't settled\n"); DRM_ERROR("LSPCON mode hasn't settled\n");
out: out:
DRM_DEBUG_KMS("Current LSPCON mode %s\n", DRM_DEBUG_KMS("Current LSPCON mode %s\n",
...@@ -133,6 +132,7 @@ static bool lspcon_wake_native_aux_ch(struct intel_lspcon *lspcon) ...@@ -133,6 +132,7 @@ static bool lspcon_wake_native_aux_ch(struct intel_lspcon *lspcon)
static bool lspcon_probe(struct intel_lspcon *lspcon) static bool lspcon_probe(struct intel_lspcon *lspcon)
{ {
int retry;
enum drm_dp_dual_mode_type adaptor_type; enum drm_dp_dual_mode_type adaptor_type;
struct i2c_adapter *adapter = &lspcon_to_intel_dp(lspcon)->aux.ddc; struct i2c_adapter *adapter = &lspcon_to_intel_dp(lspcon)->aux.ddc;
enum drm_lspcon_mode expected_mode; enum drm_lspcon_mode expected_mode;
...@@ -141,10 +141,18 @@ static bool lspcon_probe(struct intel_lspcon *lspcon) ...@@ -141,10 +141,18 @@ static bool lspcon_probe(struct intel_lspcon *lspcon)
DRM_LSPCON_MODE_PCON : DRM_LSPCON_MODE_LS; DRM_LSPCON_MODE_PCON : DRM_LSPCON_MODE_LS;
/* Lets probe the adaptor and check its type */ /* Lets probe the adaptor and check its type */
adaptor_type = drm_dp_dual_mode_detect(adapter); for (retry = 0; retry < 6; retry++) {
if (retry)
usleep_range(500, 1000);
adaptor_type = drm_dp_dual_mode_detect(adapter);
if (adaptor_type == DRM_DP_DUAL_MODE_LSPCON)
break;
}
if (adaptor_type != DRM_DP_DUAL_MODE_LSPCON) { if (adaptor_type != DRM_DP_DUAL_MODE_LSPCON) {
DRM_DEBUG_KMS("No LSPCON detected, found %s\n", DRM_DEBUG_KMS("No LSPCON detected, found %s\n",
drm_dp_get_dual_mode_type_name(adaptor_type)); drm_dp_get_dual_mode_type_name(adaptor_type));
return false; return false;
} }
......
...@@ -6591,7 +6591,7 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv) ...@@ -6591,7 +6591,7 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
{ {
struct intel_engine_cs *engine; struct intel_engine_cs *engine;
enum intel_engine_id id; enum intel_engine_id id;
uint32_t rc6_mask = 0; u32 rc6_mode, rc6_mask = 0;
/* 1a: Software RC state - RC0 */ /* 1a: Software RC state - RC0 */
I915_WRITE(GEN6_RC_STATE, 0); I915_WRITE(GEN6_RC_STATE, 0);
...@@ -6629,8 +6629,15 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv) ...@@ -6629,8 +6629,15 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
rc6_mask = GEN6_RC_CTL_RC6_ENABLE; rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
DRM_INFO("RC6 %s\n", onoff(rc6_mask & GEN6_RC_CTL_RC6_ENABLE)); DRM_INFO("RC6 %s\n", onoff(rc6_mask & GEN6_RC_CTL_RC6_ENABLE));
I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */ I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
/* WaRsUseTimeoutMode:cnl (pre-prod) */
if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_C0))
rc6_mode = GEN7_RC_CTL_TO_MODE;
else
rc6_mode = GEN6_RC_CTL_EI_MODE(1);
I915_WRITE(GEN6_RC_CONTROL, I915_WRITE(GEN6_RC_CONTROL,
GEN6_RC_CTL_HW_ENABLE | GEN6_RC_CTL_EI_MODE(1) | rc6_mask); GEN6_RC_CTL_HW_ENABLE | rc6_mode | rc6_mask);
/* /*
* 3b: Enable Coarse Power Gating only when RC6 is enabled. * 3b: Enable Coarse Power Gating only when RC6 is enabled.
......
...@@ -58,6 +58,9 @@ ...@@ -58,6 +58,9 @@
static bool is_edp_psr(struct intel_dp *intel_dp) static bool is_edp_psr(struct intel_dp *intel_dp)
{ {
if (!intel_dp_is_edp(intel_dp))
return false;
return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED; return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
} }
...@@ -72,37 +75,6 @@ static bool vlv_is_psr_active_on_pipe(struct drm_device *dev, int pipe) ...@@ -72,37 +75,6 @@ static bool vlv_is_psr_active_on_pipe(struct drm_device *dev, int pipe)
(val == VLV_EDP_PSR_ACTIVE_SF_UPDATE); (val == VLV_EDP_PSR_ACTIVE_SF_UPDATE);
} }
static void intel_psr_write_vsc(struct intel_dp *intel_dp,
const struct edp_vsc_psr *vsc_psr)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
uint32_t *data = (uint32_t *) vsc_psr;
unsigned int i;
/* As per BSPec (Pipe Video Data Island Packet), we need to disable
the video DIP being updated before program video DIP data buffer
registers for DIP being updated. */
I915_WRITE(ctl_reg, 0);
POSTING_READ(ctl_reg);
for (i = 0; i < sizeof(*vsc_psr); i += 4) {
I915_WRITE(HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder,
i >> 2), *data);
data++;
}
for (; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4)
I915_WRITE(HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder,
i >> 2), 0);
I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
POSTING_READ(ctl_reg);
}
static void vlv_psr_setup_vsc(struct intel_dp *intel_dp, static void vlv_psr_setup_vsc(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state) const struct intel_crtc_state *crtc_state)
{ {
...@@ -149,7 +121,8 @@ static void hsw_psr_setup_vsc(struct intel_dp *intel_dp, ...@@ -149,7 +121,8 @@ static void hsw_psr_setup_vsc(struct intel_dp *intel_dp,
psr_vsc.sdp_header.HB3 = 0x8; psr_vsc.sdp_header.HB3 = 0x8;
} }
intel_psr_write_vsc(intel_dp, &psr_vsc); intel_dig_port->write_infoframe(&intel_dig_port->base.base, crtc_state,
DP_SDP_VSC, &psr_vsc, sizeof(psr_vsc));
} }
static void vlv_psr_enable_sink(struct intel_dp *intel_dp) static void vlv_psr_enable_sink(struct intel_dp *intel_dp)
...@@ -376,22 +349,25 @@ static void hsw_psr_activate(struct intel_dp *intel_dp) ...@@ -376,22 +349,25 @@ static void hsw_psr_activate(struct intel_dp *intel_dp)
hsw_activate_psr1(intel_dp); hsw_activate_psr1(intel_dp);
} }
static bool intel_psr_match_conditions(struct intel_dp *intel_dp) void intel_psr_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{ {
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev; struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *crtc = dig_port->base.base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
const struct drm_display_mode *adjusted_mode = const struct drm_display_mode *adjusted_mode =
&intel_crtc->config->base.adjusted_mode; &crtc_state->base.adjusted_mode;
int psr_setup_time; int psr_setup_time;
lockdep_assert_held(&dev_priv->psr.lock); if (!HAS_PSR(dev_priv))
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); return;
WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
if (!is_edp_psr(intel_dp))
return;
dev_priv->psr.source_ok = false; if (!i915_modparams.enable_psr) {
DRM_DEBUG_KMS("PSR disable by flag\n");
return;
}
/* /*
* HSW spec explicitly says PSR is tied to port A. * HSW spec explicitly says PSR is tied to port A.
...@@ -402,66 +378,70 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp) ...@@ -402,66 +378,70 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
*/ */
if (HAS_DDI(dev_priv) && dig_port->port != PORT_A) { if (HAS_DDI(dev_priv) && dig_port->port != PORT_A) {
DRM_DEBUG_KMS("PSR condition failed: Port not supported\n"); DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
return false; return;
}
if (!i915_modparams.enable_psr) {
DRM_DEBUG_KMS("PSR disable by flag\n");
return false;
} }
if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
!dev_priv->psr.link_standby) { !dev_priv->psr.link_standby) {
DRM_ERROR("PSR condition failed: Link off requested but not supported on this platform\n"); DRM_ERROR("PSR condition failed: Link off requested but not supported on this platform\n");
return false; return;
} }
if (IS_HASWELL(dev_priv) && if (IS_HASWELL(dev_priv) &&
I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config->cpu_transcoder)) & I915_READ(HSW_STEREO_3D_CTL(crtc_state->cpu_transcoder)) &
S3D_ENABLE) { S3D_ENABLE) {
DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n"); DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
return false; return;
} }
if (IS_HASWELL(dev_priv) && if (IS_HASWELL(dev_priv) &&
adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n"); DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
return false; return;
} }
psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd); psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
if (psr_setup_time < 0) { if (psr_setup_time < 0) {
DRM_DEBUG_KMS("PSR condition failed: Invalid PSR setup time (0x%02x)\n", DRM_DEBUG_KMS("PSR condition failed: Invalid PSR setup time (0x%02x)\n",
intel_dp->psr_dpcd[1]); intel_dp->psr_dpcd[1]);
return false; return;
} }
if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) > if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) { adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
DRM_DEBUG_KMS("PSR condition failed: PSR setup time (%d us) too long\n", DRM_DEBUG_KMS("PSR condition failed: PSR setup time (%d us) too long\n",
psr_setup_time); psr_setup_time);
return false; return;
}
/*
* FIXME psr2_support is messed up. It's both computed
* dynamically during PSR enable, and extracted from sink
* caps during eDP detection.
*/
if (!dev_priv->psr.psr2_support) {
crtc_state->has_psr = true;
return;
} }
/* PSR2 is restricted to work with panel resolutions upto 3200x2000 */ /* PSR2 is restricted to work with panel resolutions upto 3200x2000 */
if (dev_priv->psr.psr2_support && if (adjusted_mode->crtc_hdisplay > 3200 ||
(intel_crtc->config->pipe_src_w > 3200 || adjusted_mode->crtc_vdisplay > 2000) {
intel_crtc->config->pipe_src_h > 2000)) { DRM_DEBUG_KMS("PSR2 disabled, panel resolution too big\n");
dev_priv->psr.psr2_support = false; return;
return false;
} }
/* /*
* FIXME:enable psr2 only for y-cordinate psr2 panels * FIXME:enable psr2 only for y-cordinate psr2 panels
* After gtc implementation , remove this restriction. * After gtc implementation , remove this restriction.
*/ */
if (!dev_priv->psr.y_cord_support && dev_priv->psr.psr2_support) { if (!dev_priv->psr.y_cord_support) {
DRM_DEBUG_KMS("PSR2 disabled, panel does not support Y coordinate\n"); DRM_DEBUG_KMS("PSR2 disabled, panel does not support Y coordinate\n");
return false; return;
} }
dev_priv->psr.source_ok = true; crtc_state->has_psr = true;
return true; crtc_state->has_psr2 = true;
} }
static void intel_psr_activate(struct intel_dp *intel_dp) static void intel_psr_activate(struct intel_dp *intel_dp)
...@@ -531,13 +511,8 @@ void intel_psr_enable(struct intel_dp *intel_dp, ...@@ -531,13 +511,8 @@ void intel_psr_enable(struct intel_dp *intel_dp,
struct drm_device *dev = intel_dig_port->base.base.dev; struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_private *dev_priv = to_i915(dev);
if (!HAS_PSR(dev_priv)) if (!crtc_state->has_psr)
return;
if (!is_edp_psr(intel_dp)) {
DRM_DEBUG_KMS("PSR not supported by this panel\n");
return; return;
}
WARN_ON(dev_priv->drrs.dp); WARN_ON(dev_priv->drrs.dp);
mutex_lock(&dev_priv->psr.lock); mutex_lock(&dev_priv->psr.lock);
...@@ -546,8 +521,8 @@ void intel_psr_enable(struct intel_dp *intel_dp, ...@@ -546,8 +521,8 @@ void intel_psr_enable(struct intel_dp *intel_dp,
goto unlock; goto unlock;
} }
if (!intel_psr_match_conditions(intel_dp)) dev_priv->psr.psr2_support = crtc_state->has_psr2;
goto unlock; dev_priv->psr.source_ok = true;
dev_priv->psr.busy_frontbuffer_bits = 0; dev_priv->psr.busy_frontbuffer_bits = 0;
...@@ -668,7 +643,7 @@ void intel_psr_disable(struct intel_dp *intel_dp, ...@@ -668,7 +643,7 @@ void intel_psr_disable(struct intel_dp *intel_dp,
struct drm_device *dev = intel_dig_port->base.base.dev; struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_private *dev_priv = to_i915(dev);
if (!HAS_PSR(dev_priv)) if (!old_crtc_state->has_psr)
return; return;
mutex_lock(&dev_priv->psr.lock); mutex_lock(&dev_priv->psr.lock);
......
...@@ -484,11 +484,6 @@ static bool stop_ring(struct intel_engine_cs *engine) ...@@ -484,11 +484,6 @@ static bool stop_ring(struct intel_engine_cs *engine)
I915_WRITE_HEAD(engine, 0); I915_WRITE_HEAD(engine, 0);
I915_WRITE_TAIL(engine, 0); I915_WRITE_TAIL(engine, 0);
if (INTEL_GEN(dev_priv) > 2) {
(void)I915_READ_CTL(engine);
I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
}
return (I915_READ_HEAD(engine) & HEAD_ADDR) == 0; return (I915_READ_HEAD(engine) & HEAD_ADDR) == 0;
} }
...@@ -570,6 +565,9 @@ static int init_ring_common(struct intel_engine_cs *engine) ...@@ -570,6 +565,9 @@ static int init_ring_common(struct intel_engine_cs *engine)
intel_engine_init_hangcheck(engine); intel_engine_init_hangcheck(engine);
if (INTEL_GEN(dev_priv) > 2)
I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
out: out:
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
...@@ -1246,6 +1244,8 @@ int intel_ring_pin(struct intel_ring *ring, ...@@ -1246,6 +1244,8 @@ int intel_ring_pin(struct intel_ring *ring,
if (IS_ERR(addr)) if (IS_ERR(addr))
goto err; goto err;
vma->obj->pin_global++;
ring->vaddr = addr; ring->vaddr = addr;
return 0; return 0;
...@@ -1277,6 +1277,7 @@ void intel_ring_unpin(struct intel_ring *ring) ...@@ -1277,6 +1277,7 @@ void intel_ring_unpin(struct intel_ring *ring)
i915_gem_object_unpin_map(ring->vma->obj); i915_gem_object_unpin_map(ring->vma->obj);
ring->vaddr = NULL; ring->vaddr = NULL;
ring->vma->obj->pin_global--;
i915_vma_unpin(ring->vma); i915_vma_unpin(ring->vma);
} }
...@@ -1441,6 +1442,7 @@ intel_ring_context_pin(struct intel_engine_cs *engine, ...@@ -1441,6 +1442,7 @@ intel_ring_context_pin(struct intel_engine_cs *engine,
goto err; goto err;
ce->state->obj->mm.dirty = true; ce->state->obj->mm.dirty = true;
ce->state->obj->pin_global++;
} }
/* The kernel context is only used as a placeholder for flushing the /* The kernel context is only used as a placeholder for flushing the
...@@ -1475,8 +1477,10 @@ static void intel_ring_context_unpin(struct intel_engine_cs *engine, ...@@ -1475,8 +1477,10 @@ static void intel_ring_context_unpin(struct intel_engine_cs *engine,
if (--ce->pin_count) if (--ce->pin_count)
return; return;
if (ce->state) if (ce->state) {
ce->state->obj->pin_global--;
i915_vma_unpin(ce->state); i915_vma_unpin(ce->state);
}
i915_gem_context_put(ctx); i915_gem_context_put(ctx);
} }
......
...@@ -230,7 +230,7 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state) ...@@ -230,7 +230,7 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
#endif #endif
} }
static void void
skl_update_plane(struct intel_plane *plane, skl_update_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state, const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state) const struct intel_plane_state *plane_state)
...@@ -311,7 +311,7 @@ skl_update_plane(struct intel_plane *plane, ...@@ -311,7 +311,7 @@ skl_update_plane(struct intel_plane *plane,
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
} }
static void void
skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc) skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
{ {
struct drm_i915_private *dev_priv = to_i915(plane->base.dev); struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
......
...@@ -68,7 +68,7 @@ void intel_uc_sanitize_options(struct drm_i915_private *dev_priv) ...@@ -68,7 +68,7 @@ void intel_uc_sanitize_options(struct drm_i915_private *dev_priv)
if (HAS_HUC_UCODE(dev_priv)) if (HAS_HUC_UCODE(dev_priv))
intel_huc_select_fw(&dev_priv->huc); intel_huc_select_fw(&dev_priv->huc);
if (intel_guc_select_fw(&dev_priv->guc)) if (intel_guc_fw_select(&dev_priv->guc))
i915_modparams.enable_guc_loading = 0; i915_modparams.enable_guc_loading = 0;
} }
...@@ -195,7 +195,8 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv) ...@@ -195,7 +195,8 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
goto err_submission; goto err_submission;
intel_huc_init_hw(&dev_priv->huc); intel_huc_init_hw(&dev_priv->huc);
ret = intel_guc_init_hw(&dev_priv->guc); intel_guc_init_params(guc);
ret = intel_guc_fw_upload(guc);
if (ret == 0 || ret != -EAGAIN) if (ret == 0 || ret != -EAGAIN)
break; break;
...@@ -221,6 +222,12 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv) ...@@ -221,6 +222,12 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
goto err_interrupts; goto err_interrupts;
} }
dev_info(dev_priv->drm.dev, "GuC %s (firmware %s [version %u.%u])\n",
i915_modparams.enable_guc_submission ? "submission enabled" :
"loaded",
guc->fw.path,
guc->fw.major_ver_found, guc->fw.minor_ver_found);
return 0; return 0;
/* /*
...@@ -243,12 +250,14 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv) ...@@ -243,12 +250,14 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
err_guc: err_guc:
i915_ggtt_disable_guc(dev_priv); i915_ggtt_disable_guc(dev_priv);
DRM_ERROR("GuC init failed\n");
if (i915_modparams.enable_guc_loading > 1 || if (i915_modparams.enable_guc_loading > 1 ||
i915_modparams.enable_guc_submission > 1) i915_modparams.enable_guc_submission > 1) {
DRM_ERROR("GuC init failed. Firmware loading disabled.\n");
ret = -EIO; ret = -EIO;
else } else {
DRM_NOTE("GuC init failed. Firmware loading disabled.\n");
ret = 0; ret = 0;
}
if (i915_modparams.enable_guc_submission) { if (i915_modparams.enable_guc_submission) {
i915_modparams.enable_guc_submission = 0; i915_modparams.enable_guc_submission = 0;
...@@ -256,7 +265,6 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv) ...@@ -256,7 +265,6 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
} }
i915_modparams.enable_guc_loading = 0; i915_modparams.enable_guc_loading = 0;
DRM_NOTE("GuC firmware loading disabled\n");
return ret; return ret;
} }
......
此差异已折叠。
...@@ -25,7 +25,12 @@ ...@@ -25,7 +25,12 @@
#ifndef _INTEL_UC_FW_H_ #ifndef _INTEL_UC_FW_H_
#define _INTEL_UC_FW_H_ #define _INTEL_UC_FW_H_
struct drm_printer;
struct drm_i915_private; struct drm_i915_private;
struct i915_vma;
/* Home of GuC, HuC and DMC firmwares */
#define INTEL_UC_FIRMWARE_URL "https://01.org/linuxgraphics/downloads/firmware"
enum intel_uc_fw_status { enum intel_uc_fw_status {
INTEL_UC_FIRMWARE_FAIL = -1, INTEL_UC_FIRMWARE_FAIL = -1,
...@@ -50,6 +55,11 @@ struct intel_uc_fw { ...@@ -50,6 +55,11 @@ struct intel_uc_fw {
enum intel_uc_fw_status fetch_status; enum intel_uc_fw_status fetch_status;
enum intel_uc_fw_status load_status; enum intel_uc_fw_status load_status;
/*
* The firmware build process will generate a version header file with major and
* minor version defined. The versions are built into CSS header of firmware.
* i915 kernel driver set the minimal firmware version required per platform.
*/
u16 major_ver_wanted; u16 major_ver_wanted;
u16 minor_ver_wanted; u16 minor_ver_wanted;
u16 major_ver_found; u16 major_ver_found;
...@@ -102,6 +112,10 @@ void intel_uc_fw_init(struct intel_uc_fw *uc_fw, enum intel_uc_fw_type type) ...@@ -102,6 +112,10 @@ void intel_uc_fw_init(struct intel_uc_fw *uc_fw, enum intel_uc_fw_type type)
void intel_uc_fw_fetch(struct drm_i915_private *dev_priv, void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
struct intel_uc_fw *uc_fw); struct intel_uc_fw *uc_fw);
int intel_uc_fw_upload(struct intel_uc_fw *uc_fw,
int (*xfer)(struct intel_uc_fw *uc_fw,
struct i915_vma *vma));
void intel_uc_fw_fini(struct intel_uc_fw *uc_fw); void intel_uc_fw_fini(struct intel_uc_fw *uc_fw);
void intel_uc_fw_dump(struct intel_uc_fw *uc_fw, struct drm_printer *p);
#endif #endif
...@@ -1403,6 +1403,9 @@ static void i915_stop_engines(struct drm_i915_private *dev_priv, ...@@ -1403,6 +1403,9 @@ static void i915_stop_engines(struct drm_i915_private *dev_priv,
struct intel_engine_cs *engine; struct intel_engine_cs *engine;
enum intel_engine_id id; enum intel_engine_id id;
if (INTEL_GEN(dev_priv) < 3)
return;
for_each_engine_masked(engine, dev_priv, engine_mask, id) for_each_engine_masked(engine, dev_priv, engine_mask, id)
gen3_stop_engine(engine); gen3_stop_engine(engine);
} }
...@@ -1742,16 +1745,12 @@ static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv) ...@@ -1742,16 +1745,12 @@ static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv)
int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask) int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{ {
reset_func reset; reset_func reset = intel_get_gpu_reset(dev_priv);
int retry; int retry;
int ret; int ret;
might_sleep(); might_sleep();
reset = intel_get_gpu_reset(dev_priv);
if (reset == NULL)
return -ENODEV;
/* If the power well sleeps during the reset, the reset /* If the power well sleeps during the reset, the reset
* request may be dropped and never completes (causing -EIO). * request may be dropped and never completes (causing -EIO).
*/ */
...@@ -1771,7 +1770,9 @@ int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask) ...@@ -1771,7 +1770,9 @@ int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
*/ */
i915_stop_engines(dev_priv, engine_mask); i915_stop_engines(dev_priv, engine_mask);
ret = reset(dev_priv, engine_mask); ret = -ENODEV;
if (reset)
ret = reset(dev_priv, engine_mask);
if (ret != -ETIMEDOUT) if (ret != -ETIMEDOUT)
break; break;
......
...@@ -306,6 +306,14 @@ struct bdb_general_features { ...@@ -306,6 +306,14 @@ struct bdb_general_features {
#define LEGACY_CHILD_DEVICE_CONFIG_SIZE 33 #define LEGACY_CHILD_DEVICE_CONFIG_SIZE 33
/* DDC Bus DDI Type 155+ */
enum vbt_gmbus_ddi {
DDC_BUS_DDI_B = 0x1,
DDC_BUS_DDI_C,
DDC_BUS_DDI_D,
DDC_BUS_DDI_F,
};
/* /*
* The child device config, aka the display device data structure, provides a * The child device config, aka the display device data structure, provides a
* description of a port and its configuration on the platform. * description of a port and its configuration on the platform.
......
...@@ -609,7 +609,7 @@ static int igt_mock_ppgtt_huge_fill(void *arg) ...@@ -609,7 +609,7 @@ static int igt_mock_ppgtt_huge_fill(void *arg)
bool single = false; bool single = false;
LIST_HEAD(objects); LIST_HEAD(objects);
IGT_TIMEOUT(end_time); IGT_TIMEOUT(end_time);
int err; int err = -ENODEV;
for_each_prime_number_from(page_num, 1, max_pages) { for_each_prime_number_from(page_num, 1, max_pages) {
struct drm_i915_gem_object *obj; struct drm_i915_gem_object *obj;
...@@ -1157,7 +1157,7 @@ static int igt_ppgtt_exhaust_huge(void *arg) ...@@ -1157,7 +1157,7 @@ static int igt_ppgtt_exhaust_huge(void *arg)
unsigned int size_mask; unsigned int size_mask;
unsigned int page_mask; unsigned int page_mask;
int n, i; int n, i;
int err; int err = -ENODEV;
/* /*
* Sanity check creating objects with a varying mix of page sizes -- * Sanity check creating objects with a varying mix of page sizes --
......
...@@ -417,7 +417,7 @@ static int fake_aliasing_ppgtt_enable(struct drm_i915_private *i915) ...@@ -417,7 +417,7 @@ static int fake_aliasing_ppgtt_enable(struct drm_i915_private *i915)
if (err) if (err)
return err; return err;
list_for_each_entry(obj, &i915->mm.bound_list, global_link) { list_for_each_entry(obj, &i915->mm.bound_list, mm.link) {
struct i915_vma *vma; struct i915_vma *vma;
vma = i915_vma_instance(obj, &i915->ggtt.base, NULL); vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
......
...@@ -15,6 +15,7 @@ selftest(objects, i915_gem_object_live_selftests) ...@@ -15,6 +15,7 @@ selftest(objects, i915_gem_object_live_selftests)
selftest(dmabuf, i915_gem_dmabuf_live_selftests) selftest(dmabuf, i915_gem_dmabuf_live_selftests)
selftest(coherency, i915_gem_coherency_live_selftests) selftest(coherency, i915_gem_coherency_live_selftests)
selftest(gtt, i915_gem_gtt_live_selftests) selftest(gtt, i915_gem_gtt_live_selftests)
selftest(evict, i915_gem_evict_live_selftests)
selftest(hugepages, i915_gem_huge_page_live_selftests) selftest(hugepages, i915_gem_huge_page_live_selftests)
selftest(contexts, i915_gem_context_live_selftests) selftest(contexts, i915_gem_context_live_selftests)
selftest(hangcheck, intel_hangcheck_live_selftests) selftest(hangcheck, intel_hangcheck_live_selftests)
...@@ -24,6 +24,7 @@ ...@@ -24,6 +24,7 @@
#include <linux/completion.h> #include <linux/completion.h>
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/prime_numbers.h>
#include "../i915_selftest.h" #include "../i915_selftest.h"
...@@ -565,6 +566,46 @@ static int test_ipc(void *arg) ...@@ -565,6 +566,46 @@ static int test_ipc(void *arg)
return ret; return ret;
} }
static int test_timer(void *arg)
{
unsigned long target, delay;
struct timed_fence tf;
timed_fence_init(&tf, target = jiffies);
if (!i915_sw_fence_done(&tf.fence)) {
pr_err("Fence with immediate expiration not signaled\n");
goto err;
}
timed_fence_fini(&tf);
for_each_prime_number(delay, i915_selftest.timeout_jiffies/2) {
timed_fence_init(&tf, target = jiffies + delay);
if (i915_sw_fence_done(&tf.fence)) {
pr_err("Fence with future expiration (%lu jiffies) already signaled\n", delay);
goto err;
}
i915_sw_fence_wait(&tf.fence);
if (!i915_sw_fence_done(&tf.fence)) {
pr_err("Fence not signaled after wait\n");
goto err;
}
if (time_before(jiffies, target)) {
pr_err("Fence signaled too early, target=%lu, now=%lu\n",
target, jiffies);
goto err;
}
timed_fence_fini(&tf);
}
return 0;
err:
timed_fence_fini(&tf);
return -EINVAL;
}
int i915_sw_fence_mock_selftests(void) int i915_sw_fence_mock_selftests(void)
{ {
static const struct i915_subtest tests[] = { static const struct i915_subtest tests[] = {
...@@ -576,6 +617,7 @@ int i915_sw_fence_mock_selftests(void) ...@@ -576,6 +617,7 @@ int i915_sw_fence_mock_selftests(void)
SUBTEST(test_C_AB), SUBTEST(test_C_AB),
SUBTEST(test_chain), SUBTEST(test_chain),
SUBTEST(test_ipc), SUBTEST(test_ipc),
SUBTEST(test_timer),
}; };
return i915_subtests(tests, NULL); return i915_subtests(tests, NULL);
......
/*
* Copyright © 2017 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#include "lib_sw_fence.h"
/* Small library of different fence types useful for writing tests */
static int __i915_sw_fence_call
nop_fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
return NOTIFY_DONE;
}
void __onstack_fence_init(struct i915_sw_fence *fence,
const char *name,
struct lock_class_key *key)
{
debug_fence_init_onstack(fence);
__init_waitqueue_head(&fence->wait, name, key);
atomic_set(&fence->pending, 1);
fence->flags = (unsigned long)nop_fence_notify;
}
void onstack_fence_fini(struct i915_sw_fence *fence)
{
i915_sw_fence_commit(fence);
i915_sw_fence_fini(fence);
}
static void timed_fence_wake(unsigned long data)
{
struct timed_fence *tf = (struct timed_fence *)data;
i915_sw_fence_commit(&tf->fence);
}
void timed_fence_init(struct timed_fence *tf, unsigned long expires)
{
onstack_fence_init(&tf->fence);
setup_timer_on_stack(&tf->timer, timed_fence_wake, (unsigned long)tf);
if (time_after(expires, jiffies))
mod_timer(&tf->timer, expires);
else
i915_sw_fence_commit(&tf->fence);
}
void timed_fence_fini(struct timed_fence *tf)
{
if (del_timer_sync(&tf->timer))
i915_sw_fence_commit(&tf->fence);
destroy_timer_on_stack(&tf->timer);
i915_sw_fence_fini(&tf->fence);
}
此差异已折叠。
...@@ -73,11 +73,7 @@ mock_context(struct drm_i915_private *i915, ...@@ -73,11 +73,7 @@ mock_context(struct drm_i915_private *i915,
void mock_context_close(struct i915_gem_context *ctx) void mock_context_close(struct i915_gem_context *ctx)
{ {
i915_gem_context_set_closed(ctx); context_close(ctx);
i915_ppgtt_close(&ctx->ppgtt->base);
i915_gem_context_put(ctx);
} }
void mock_init_contexts(struct drm_i915_private *i915) void mock_init_contexts(struct drm_i915_private *i915)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册