Commit f0308d76 authored by Dave Airlie

Merge tag 'drm-intel-next-2018-02-07' of git://anongit.freedesktop.org/drm/drm-intel into drm-next

UAPI Changes:

- Userspace whitelist register GEN9_SLICE_COMMON_ECO_CHICKEN1 for GLK (Kenneth)
- Non-existent PMU counters are no longer exposed in sysfs (Tvrtko)
- Add a note to deprecate I915_SET_COLORKEY_NONE and ignore it (Ville)
	* Intel DDX never ended up using it, and the implementation was wonky

Core Changes:

- Moved away from struct timeval into ktime_t in prep for 2038 (Arnd)
	* Merged the i915 portion through drm-tip, no core dependencies

Driver Changes:

- Base support for Icelake and Icelake PCH (Anusha, Rodrigo, Mahesh, Paulo, James, Kelvin)
- Add AUX-F port support for Cannonlake (Rodrigo)
- New DMC firmware (1.07) for Cannonlake (Anusha)
	* Go to linux-firmware.git to get it
- Reject non-cursor planes nearly (3 px) out of screen on GLK/CNL (Imre)
- Y/Yf modifiers restored for SKL+ sprites (Ville)
- Compressed framebuffer support for sprites (Ville)
- Tune down overly aggressive shrinking (Chris)
- Shrink kmem caches when GPU is idle (Chris)
- EDID bit-banging fallback for HDMI EDID (Stefan)
- Don't boost the GPU when the waited request is already running (Chris)
- Avoid GLK/BXT CDCLK frequency locking timeouts (Imre)
- Limit DP link rate according to VBT on CNL+ (Jani)
- Skip post-reset request emission if the engine is not idle (Chris)
- Report link training failures on fixed eDP panels as errors (Manasi)
- DSI panel fixes for Bay Trail (Hans)
- Selftest additions and improvements (Chris, Matt)
- DMA fence test additions and accompanying fixes (Chris)
- Power domain vs. register access fix (Maarten)
- Squelch warnings for people with teensy framebuffers (stride < 512) (Maarten)
- Increase Render/Media power gating hysteresis for Gen9+ (Chris)
- HDMI vswing display workaround for Gen9+ (Ville)
- GuC code cleanup and lockdep fixes (Sagar, Michal Wa.)
- Continuously run hangcheck for simplicity (Chris)
- Execlist debugging improvements (Chris)
- GuC debugging improvements (Sujaritha, Michal Wa., Sagar)
- Command parser boundary checks (Michal Srb)
- Add a workaround for 3DSTATE_SAMPLE_PATTERN on CNL (Rafael)
- Fix PMU enabling race condition (Tvrtko)
- Usual smaller testing and debugging improvements

* tag 'drm-intel-next-2018-02-07' of git://anongit.freedesktop.org/drm/drm-intel: (158 commits)
  drm/i915: Update DRIVER_DATE to 20180207
  drm/i915/pmu: Fix PMU enable vs execlists tasklet race
  drm/i915/cnl: WaPipeControlBefore3DStateSamplePattern
  drm/i915/cmdparser: Do not check past the cmd length.
  drm/i915/cmdparser: Check reg_table_count before derefencing.
  drm/i915: Deprecate I915_SET_COLORKEY_NONE
  drm/i915: Skip post-reset request emission if the engine is not idle
  drm/i915/execlists: Move the reset bits to a more natural home
  drm/i915/selftests: Use a sacrificial context for hang testing
  drm/i915/selftests: Flush old resets between engines
  drm/i915/breadcrumbs: Drop request reference for the signaler thread
  drm/i915: Remove unbannable context spam from reset
  drm/i915/execlists: Remove the startup spam
  drm/i915: Show the GPU state when declaring wedged
  drm/i915: Always update the no_fbc_reason when disabling
  drm/i915: Add some newlines to intel_engine_dump() headers
  drm/i915: Report if an unbannable context is involved in a GPU hang
  drm/i915: Remove spurious DRM_ERROR for cancelled interrupts
  drm/i915/execlists: Flush GTIIR on clearing CS interrupts during reset
  drm/i915: reduce indent in pch detection
  ...
@@ -83,6 +83,7 @@ i915-y += i915_cmd_parser.o \
 i915-y += intel_uc.o \
	  intel_uc_fw.o \
	  intel_guc.o \
+	  intel_guc_ads.o \
	  intel_guc_ct.o \
	  intel_guc_fw.o \
	  intel_guc_log.o \
......
@@ -988,7 +988,10 @@ i915_next_seqno_set(void *data, u64 val)
 	if (ret)
 		return ret;
 
+	intel_runtime_pm_get(dev_priv);
 	ret = i915_gem_set_global_seqno(dev, val);
+	intel_runtime_pm_put(dev_priv);
+
 	mutex_unlock(&dev->struct_mutex);
 
 	return ret;
@@ -2464,24 +2467,11 @@ static int i915_guc_log_control_get(void *data, u64 *val)
 static int i915_guc_log_control_set(void *data, u64 val)
 {
 	struct drm_i915_private *dev_priv = data;
-	int ret;
 
 	if (!HAS_GUC(dev_priv))
 		return -ENODEV;
 
-	if (!dev_priv->guc.log.vma)
-		return -EINVAL;
-
-	ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
-	if (ret)
-		return ret;
-
-	intel_runtime_pm_get(dev_priv);
-	ret = i915_guc_log_control(dev_priv, val);
-	intel_runtime_pm_put(dev_priv);
-
-	mutex_unlock(&dev_priv->drm.struct_mutex);
-	return ret;
+	return intel_guc_log_control(&dev_priv->guc, val);
 }
 
 DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_control_fops,
@@ -2518,15 +2508,19 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
 	u32 stat[3];
 	enum pipe pipe;
 	bool enabled = false;
+	bool sink_support;
 
 	if (!HAS_PSR(dev_priv))
 		return -ENODEV;
 
+	sink_support = dev_priv->psr.sink_support;
+	seq_printf(m, "Sink_Support: %s\n", yesno(sink_support));
+	if (!sink_support)
+		return 0;
+
 	intel_runtime_pm_get(dev_priv);
 
 	mutex_lock(&dev_priv->psr.lock);
-	seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
-	seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));
 	seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled));
 	seq_printf(m, "Active: %s\n", yesno(dev_priv->psr.active));
 	seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
@@ -2584,9 +2578,9 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
 		seq_printf(m, "Performance_Counter: %u\n", psrperf);
 	}
 	if (dev_priv->psr.psr2_support) {
-		u32 psr2 = I915_READ(EDP_PSR2_STATUS_CTL);
+		u32 psr2 = I915_READ(EDP_PSR2_STATUS);
 
-		seq_printf(m, "EDP_PSR2_STATUS_CTL: %x [%s]\n",
+		seq_printf(m, "EDP_PSR2_STATUS: %x [%s]\n",
 			   psr2, psr2_live_status(psr2));
 	}
 	mutex_unlock(&dev_priv->psr.lock);
@@ -2710,7 +2704,8 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
 	if (!HAS_RUNTIME_PM(dev_priv))
 		seq_puts(m, "Runtime power management not supported\n");
 
-	seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
+	seq_printf(m, "GPU idle: %s (epoch %u)\n",
+		   yesno(!dev_priv->gt.awake), dev_priv->gt.epoch);
 	seq_printf(m, "IRQs disabled: %s\n",
 		   yesno(!intel_irqs_enabled(dev_priv)));
 #ifdef CONFIG_PM
@@ -3143,8 +3138,8 @@ static int i915_engine_info(struct seq_file *m, void *unused)
 
 	intel_runtime_pm_get(dev_priv);
 
-	seq_printf(m, "GT awake? %s\n",
-		   yesno(dev_priv->gt.awake));
+	seq_printf(m, "GT awake? %s (epoch %u)\n",
+		   yesno(dev_priv->gt.awake), dev_priv->gt.epoch);
 	seq_printf(m, "Global active requests: %d\n",
 		   dev_priv->gt.active_requests);
 	seq_printf(m, "CS timestamp frequency: %u kHz\n",
@@ -3363,7 +3358,10 @@ static void drrs_status_per_crtc(struct seq_file *m,
 
 	/* disable_drrs() will make drrs->dp NULL */
 	if (!drrs->dp) {
-		seq_puts(m, "Idleness DRRS: Disabled");
+		seq_puts(m, "Idleness DRRS: Disabled\n");
+		if (dev_priv->psr.enabled)
+			seq_puts(m,
+				 "\tAs PSR is enabled, DRRS is not enabled\n");
 		mutex_unlock(&drrs->mutex);
 		return;
 	}
@@ -4606,6 +4604,46 @@ static const struct file_operations i915_hpd_storm_ctl_fops = {
 	.write = i915_hpd_storm_ctl_write
 };
 
+static int i915_drrs_ctl_set(void *data, u64 val)
+{
+	struct drm_i915_private *dev_priv = data;
+	struct drm_device *dev = &dev_priv->drm;
+	struct intel_crtc *intel_crtc;
+	struct intel_encoder *encoder;
+	struct intel_dp *intel_dp;
+
+	if (INTEL_GEN(dev_priv) < 7)
+		return -ENODEV;
+
+	drm_modeset_lock_all(dev);
+	for_each_intel_crtc(dev, intel_crtc) {
+		if (!intel_crtc->base.state->active ||
+		    !intel_crtc->config->has_drrs)
+			continue;
+
+		for_each_encoder_on_crtc(dev, &intel_crtc->base, encoder) {
+			if (encoder->type != INTEL_OUTPUT_EDP)
+				continue;
+
+			DRM_DEBUG_DRIVER("Manually %sabling DRRS. %llu\n",
+					 val ? "en" : "dis", val);
+
+			intel_dp = enc_to_intel_dp(&encoder->base);
+			if (val)
+				intel_edp_drrs_enable(intel_dp,
+						      intel_crtc->config);
+			else
+				intel_edp_drrs_disable(intel_dp,
+						       intel_crtc->config);
+		}
+	}
+	drm_modeset_unlock_all(dev);
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
+
 static const struct drm_info_list i915_debugfs_list[] = {
 	{"i915_capabilities", i915_capabilities, 0},
 	{"i915_gem_objects", i915_gem_object_info, 0},
@@ -4683,7 +4721,8 @@ static const struct i915_debugfs_files {
 	{"i915_dp_test_active", &i915_displayport_test_active_fops},
 	{"i915_guc_log_control", &i915_guc_log_control_fops},
 	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
-	{"i915_ipc_status", &i915_ipc_status_fops}
+	{"i915_ipc_status", &i915_ipc_status_fops},
+	{"i915_drrs_ctl", &i915_drrs_ctl_fops}
 };
 
 int i915_debugfs_register(struct drm_i915_private *dev_priv)
......
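The new i915_drrs_ctl entry above is the standard debugfs idiom: DEFINE_SIMPLE_ATTRIBUTE() builds the file_operations from a getter/setter pair, and passing a NULL getter makes the node write-only. A minimal self-contained sketch of the same pattern (the demo_* names are hypothetical, not part of this series):

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/module.h>

static u64 demo_val;	/* hypothetical state poked from userspace */
static struct dentry *demo_dir;

static int demo_ctl_set(void *data, u64 val)
{
	*(u64 *)data = val;	/* 'data' comes from debugfs_create_file() */
	return 0;
}

/* NULL getter => write-only node, as with i915_drrs_ctl */
DEFINE_SIMPLE_ATTRIBUTE(demo_ctl_fops, NULL, demo_ctl_set, "%llu\n");

static int __init demo_init(void)
{
	demo_dir = debugfs_create_dir("demo", NULL);
	debugfs_create_file("demo_ctl", 0200, demo_dir, &demo_val,
			    &demo_ctl_fops);
	return 0;
}

static void __exit demo_exit(void)
{
	debugfs_remove_recursive(demo_dir);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

With this loaded, echo 1 > /sys/kernel/debug/demo/demo_ctl lands in demo_ctl_set() with val == 1, which is exactly how the DRRS control below is driven.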
@@ -55,6 +55,7 @@
 
 static struct drm_driver driver;
 
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
 static unsigned int i915_load_fail_count;
 
 bool __i915_inject_load_failure(const char *func, int line)
@@ -70,6 +71,7 @@ bool __i915_inject_load_failure(const char *func, int line)
 
 	return false;
 }
+#endif
 
 #define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI"
 #define FDO_BUG_MSG "Please file a bug at " FDO_BUG_URL " against DRM/Intel " \
@@ -107,8 +109,12 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
 
 static bool i915_error_injected(struct drm_i915_private *dev_priv)
 {
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
 	return i915_modparams.inject_load_failure &&
 	       i915_load_fail_count == i915_modparams.inject_load_failure;
+#else
+	return false;
+#endif
 }
 
 #define i915_load_error(dev_priv, fmt, ...) \
@@ -176,96 +182,103 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv)
 	 * of only checking the first one.
 	 */
 	while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
-		if (pch->vendor == PCI_VENDOR_ID_INTEL) {
-			unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
-
-			dev_priv->pch_id = id;
-
-			if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
-				dev_priv->pch_type = PCH_IBX;
-				DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
-				WARN_ON(!IS_GEN5(dev_priv));
-			} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
-				dev_priv->pch_type = PCH_CPT;
-				DRM_DEBUG_KMS("Found CougarPoint PCH\n");
-				WARN_ON(!IS_GEN6(dev_priv) &&
-					!IS_IVYBRIDGE(dev_priv));
-			} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
-				/* PantherPoint is CPT compatible */
-				dev_priv->pch_type = PCH_CPT;
-				DRM_DEBUG_KMS("Found PantherPoint PCH\n");
-				WARN_ON(!IS_GEN6(dev_priv) &&
-					!IS_IVYBRIDGE(dev_priv));
-			} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
-				dev_priv->pch_type = PCH_LPT;
-				DRM_DEBUG_KMS("Found LynxPoint PCH\n");
-				WARN_ON(!IS_HASWELL(dev_priv) &&
-					!IS_BROADWELL(dev_priv));
-				WARN_ON(IS_HSW_ULT(dev_priv) ||
-					IS_BDW_ULT(dev_priv));
-			} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
-				dev_priv->pch_type = PCH_LPT;
-				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
-				WARN_ON(!IS_HASWELL(dev_priv) &&
-					!IS_BROADWELL(dev_priv));
-				WARN_ON(!IS_HSW_ULT(dev_priv) &&
-					!IS_BDW_ULT(dev_priv));
-			} else if (id == INTEL_PCH_WPT_DEVICE_ID_TYPE) {
-				/* WildcatPoint is LPT compatible */
-				dev_priv->pch_type = PCH_LPT;
-				DRM_DEBUG_KMS("Found WildcatPoint PCH\n");
-				WARN_ON(!IS_HASWELL(dev_priv) &&
-					!IS_BROADWELL(dev_priv));
-				WARN_ON(IS_HSW_ULT(dev_priv) ||
-					IS_BDW_ULT(dev_priv));
-			} else if (id == INTEL_PCH_WPT_LP_DEVICE_ID_TYPE) {
-				/* WildcatPoint is LPT compatible */
-				dev_priv->pch_type = PCH_LPT;
-				DRM_DEBUG_KMS("Found WildcatPoint LP PCH\n");
-				WARN_ON(!IS_HASWELL(dev_priv) &&
-					!IS_BROADWELL(dev_priv));
-				WARN_ON(!IS_HSW_ULT(dev_priv) &&
-					!IS_BDW_ULT(dev_priv));
-			} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
-				dev_priv->pch_type = PCH_SPT;
-				DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
-				WARN_ON(!IS_SKYLAKE(dev_priv) &&
-					!IS_KABYLAKE(dev_priv));
-			} else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
-				dev_priv->pch_type = PCH_SPT;
-				DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
-				WARN_ON(!IS_SKYLAKE(dev_priv) &&
-					!IS_KABYLAKE(dev_priv));
-			} else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) {
-				dev_priv->pch_type = PCH_KBP;
-				DRM_DEBUG_KMS("Found Kaby Lake PCH (KBP)\n");
-				WARN_ON(!IS_SKYLAKE(dev_priv) &&
-					!IS_KABYLAKE(dev_priv) &&
-					!IS_COFFEELAKE(dev_priv));
-			} else if (id == INTEL_PCH_CNP_DEVICE_ID_TYPE) {
-				dev_priv->pch_type = PCH_CNP;
-				DRM_DEBUG_KMS("Found Cannon Lake PCH (CNP)\n");
-				WARN_ON(!IS_CANNONLAKE(dev_priv) &&
-					!IS_COFFEELAKE(dev_priv));
-			} else if (id == INTEL_PCH_CNP_LP_DEVICE_ID_TYPE) {
-				dev_priv->pch_type = PCH_CNP;
-				DRM_DEBUG_KMS("Found Cannon Lake LP PCH (CNP-LP)\n");
-				WARN_ON(!IS_CANNONLAKE(dev_priv) &&
-					!IS_COFFEELAKE(dev_priv));
-			} else if (id == INTEL_PCH_P2X_DEVICE_ID_TYPE ||
-				   id == INTEL_PCH_P3X_DEVICE_ID_TYPE ||
-				   (id == INTEL_PCH_QEMU_DEVICE_ID_TYPE &&
-				    pch->subsystem_vendor ==
-					    PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
-				    pch->subsystem_device ==
-					    PCI_SUBDEVICE_ID_QEMU)) {
-				dev_priv->pch_type =
-					intel_virt_detect_pch(dev_priv);
-			} else
-				continue;
-
-			break;
-		}
+		unsigned short id;
+
+		if (pch->vendor != PCI_VENDOR_ID_INTEL)
+			continue;
+
+		id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
+
+		dev_priv->pch_id = id;
+
+		if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
+			dev_priv->pch_type = PCH_IBX;
+			DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
+			WARN_ON(!IS_GEN5(dev_priv));
+		} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
+			dev_priv->pch_type = PCH_CPT;
+			DRM_DEBUG_KMS("Found CougarPoint PCH\n");
+			WARN_ON(!IS_GEN6(dev_priv) &&
+				!IS_IVYBRIDGE(dev_priv));
+		} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
+			/* PantherPoint is CPT compatible */
+			dev_priv->pch_type = PCH_CPT;
+			DRM_DEBUG_KMS("Found PantherPoint PCH\n");
+			WARN_ON(!IS_GEN6(dev_priv) &&
+				!IS_IVYBRIDGE(dev_priv));
+		} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
+			dev_priv->pch_type = PCH_LPT;
+			DRM_DEBUG_KMS("Found LynxPoint PCH\n");
+			WARN_ON(!IS_HASWELL(dev_priv) &&
+				!IS_BROADWELL(dev_priv));
+			WARN_ON(IS_HSW_ULT(dev_priv) ||
+				IS_BDW_ULT(dev_priv));
+		} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
+			dev_priv->pch_type = PCH_LPT;
+			DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
+			WARN_ON(!IS_HASWELL(dev_priv) &&
+				!IS_BROADWELL(dev_priv));
+			WARN_ON(!IS_HSW_ULT(dev_priv) &&
+				!IS_BDW_ULT(dev_priv));
+		} else if (id == INTEL_PCH_WPT_DEVICE_ID_TYPE) {
+			/* WildcatPoint is LPT compatible */
+			dev_priv->pch_type = PCH_LPT;
+			DRM_DEBUG_KMS("Found WildcatPoint PCH\n");
+			WARN_ON(!IS_HASWELL(dev_priv) &&
+				!IS_BROADWELL(dev_priv));
+			WARN_ON(IS_HSW_ULT(dev_priv) ||
+				IS_BDW_ULT(dev_priv));
+		} else if (id == INTEL_PCH_WPT_LP_DEVICE_ID_TYPE) {
+			/* WildcatPoint is LPT compatible */
+			dev_priv->pch_type = PCH_LPT;
+			DRM_DEBUG_KMS("Found WildcatPoint LP PCH\n");
+			WARN_ON(!IS_HASWELL(dev_priv) &&
+				!IS_BROADWELL(dev_priv));
+			WARN_ON(!IS_HSW_ULT(dev_priv) &&
+				!IS_BDW_ULT(dev_priv));
+		} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
+			dev_priv->pch_type = PCH_SPT;
+			DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
+			WARN_ON(!IS_SKYLAKE(dev_priv) &&
+				!IS_KABYLAKE(dev_priv));
+		} else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
+			dev_priv->pch_type = PCH_SPT;
+			DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
+			WARN_ON(!IS_SKYLAKE(dev_priv) &&
+				!IS_KABYLAKE(dev_priv));
+		} else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) {
+			dev_priv->pch_type = PCH_KBP;
+			DRM_DEBUG_KMS("Found Kaby Lake PCH (KBP)\n");
+			WARN_ON(!IS_SKYLAKE(dev_priv) &&
+				!IS_KABYLAKE(dev_priv) &&
+				!IS_COFFEELAKE(dev_priv));
+		} else if (id == INTEL_PCH_CNP_DEVICE_ID_TYPE) {
+			dev_priv->pch_type = PCH_CNP;
+			DRM_DEBUG_KMS("Found Cannon Lake PCH (CNP)\n");
+			WARN_ON(!IS_CANNONLAKE(dev_priv) &&
+				!IS_COFFEELAKE(dev_priv));
+		} else if (id == INTEL_PCH_CNP_LP_DEVICE_ID_TYPE) {
+			dev_priv->pch_type = PCH_CNP;
+			DRM_DEBUG_KMS("Found Cannon Lake LP PCH (CNP-LP)\n");
+			WARN_ON(!IS_CANNONLAKE(dev_priv) &&
+				!IS_COFFEELAKE(dev_priv));
+		} else if (id == INTEL_PCH_ICP_DEVICE_ID_TYPE) {
+			dev_priv->pch_type = PCH_ICP;
+			DRM_DEBUG_KMS("Found Ice Lake PCH\n");
+			WARN_ON(!IS_ICELAKE(dev_priv));
+		} else if (id == INTEL_PCH_P2X_DEVICE_ID_TYPE ||
+			   id == INTEL_PCH_P3X_DEVICE_ID_TYPE ||
+			   (id == INTEL_PCH_QEMU_DEVICE_ID_TYPE &&
+			    pch->subsystem_vendor ==
+			    PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
+			    pch->subsystem_device ==
+			    PCI_SUBDEVICE_ID_QEMU)) {
+			dev_priv->pch_type = intel_virt_detect_pch(dev_priv);
+		} else {
+			continue;
+		}
+
+		break;
 	}
 	if (!pch)
 		DRM_DEBUG_KMS("No PCH found.\n");
@@ -622,7 +635,7 @@ static void i915_gem_fini(struct drm_i915_private *dev_priv)
 	i915_gem_contexts_fini(dev_priv);
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 
-	intel_uc_fini_wq(dev_priv);
+	intel_uc_fini_misc(dev_priv);
 	i915_gem_cleanup_userptr(dev_priv);
 
 	i915_gem_drain_freed_objects(dev_priv);
@@ -2596,6 +2609,11 @@ static int intel_runtime_suspend(struct device *kdev)
 
 		intel_runtime_pm_enable_interrupts(dev_priv);
 
+		intel_guc_resume(dev_priv);
+
+		i915_gem_init_swizzling(dev_priv);
+		i915_gem_restore_fences(dev_priv);
+
 		enable_rpm_wakeref_asserts(dev_priv);
 
 		return ret;
@@ -2661,8 +2679,6 @@ static int intel_runtime_resume(struct device *kdev)
 	if (intel_uncore_unclaimed_mmio(dev_priv))
 		DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");
 
-	intel_guc_resume(dev_priv);
-
 	if (IS_GEN9_LP(dev_priv)) {
 		bxt_disable_dc9(dev_priv);
 		bxt_display_core_init(dev_priv, true);
@@ -2677,6 +2693,10 @@ static int intel_runtime_resume(struct device *kdev)
 
 	intel_uncore_runtime_resume(dev_priv);
 
+	intel_runtime_pm_enable_interrupts(dev_priv);
+
+	intel_guc_resume(dev_priv);
+
 	/*
 	 * No point of rolling back things in case of an error, as the best
 	 * we can do is to hope that things will still work (and disable RPM).
@@ -2684,8 +2704,6 @@ static int intel_runtime_resume(struct device *kdev)
 	i915_gem_init_swizzling(dev_priv);
 	i915_gem_restore_fences(dev_priv);
 
-	intel_runtime_pm_enable_interrupts(dev_priv);
-
 	/*
 	 * On VLV/CHV display interrupts are part of the display
 	 * power well, so hpd is reinitialized from there. For
......
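The CONFIG_DRM_I915_DEBUG guards above compile the fault-injection machinery out of production builds while leaving the call sites untouched, because the release-build macro is a constant false the compiler folds away. The same shape, distilled with hypothetical mydrv_* names (a sketch, not the i915 code):

#include <linux/kernel.h>
#include <linux/module.h>

#if IS_ENABLED(CONFIG_MYDRV_DEBUG)
static unsigned int mydrv_fail_at;	/* which init step should fail (0 = off) */
module_param(mydrv_fail_at, uint, 0400);

static unsigned int mydrv_fail_count;

static bool __mydrv_inject_failure(const char *func, int line)
{
	if (!mydrv_fail_at || ++mydrv_fail_count != mydrv_fail_at)
		return false;

	pr_info("injecting failure at %s:%d\n", func, line);
	return true;	/* fail exactly the Nth instrumented step */
}
#define mydrv_inject_failure() __mydrv_inject_failure(__func__, __LINE__)
#else
#define mydrv_inject_failure() false	/* folds to nothing in release builds */
#endif

/* Call sites stay identical in both configurations:
 *	if (mydrv_inject_failure())
 *		return -ENODEV;
 */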
@@ -83,8 +83,8 @@
 
 #define DRIVER_NAME		"i915"
 #define DRIVER_DESC		"Intel Graphics"
-#define DRIVER_DATE		"20171222"
-#define DRIVER_TIMESTAMP	1513971710
+#define DRIVER_DATE		"20180207"
+#define DRIVER_TIMESTAMP	1517988364
 
 /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
  * WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@ -104,9 +104,13 @@
 #define I915_STATE_WARN_ON(x) \
 	I915_STATE_WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
 
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
 bool __i915_inject_load_failure(const char *func, int line);
 #define i915_inject_load_failure() \
 	__i915_inject_load_failure(__func__, __LINE__)
+#else
+#define i915_inject_load_failure() false
+#endif
 
 typedef struct {
 	uint32_t val;
@@ -453,9 +457,9 @@ struct intel_display_error_state;
 
 struct i915_gpu_state {
 	struct kref ref;
-	struct timeval time;
-	struct timeval boottime;
-	struct timeval uptime;
+	ktime_t time;
+	ktime_t boottime;
+	ktime_t uptime;
 
 	struct drm_i915_private *i915;
@@ -551,6 +555,7 @@ struct i915_gpu_state {
 		int ban_score;
 		int active;
 		int guilty;
+		bool bannable;
 	} context;
 
 	struct drm_i915_error_object {
@@ -754,7 +759,6 @@ struct i915_drrs {
 struct i915_psr {
 	struct mutex lock;
 	bool sink_support;
-	bool source_ok;
 	struct intel_dp *enabled;
 	bool active;
 	struct delayed_work work;
@@ -783,6 +787,7 @@ enum intel_pch {
 	PCH_SPT,	/* Sunrisepoint PCH */
 	PCH_KBP,	/* Kaby Lake PCH */
 	PCH_CNP,	/* Cannon Lake PCH */
+	PCH_ICP,	/* Ice Lake PCH */
 	PCH_NOP,
 };
@@ -1255,6 +1260,7 @@ enum modeset_restore {
 #define DP_AUX_B 0x10
 #define DP_AUX_C 0x20
 #define DP_AUX_D 0x30
+#define DP_AUX_F 0x60
 
 #define DDC_PIN_B  0x05
 #define DDC_PIN_C  0x04
@@ -1281,6 +1287,7 @@ struct ddi_vbt_port_info {
 	uint8_t dp_boost_level;
 	uint8_t hdmi_boost_level;
+	int dp_max_link_rate;		/* 0 for not limited by VBT */
 };
 
 enum psr_lines_to_wait {
@@ -1460,6 +1467,7 @@ struct skl_wm_params {
 	uint_fixed_16_16_t plane_blocks_per_line;
 	uint_fixed_16_16_t y_tile_minimum;
 	uint32_t linetime_us;
+	uint32_t dbuf_block_size;
 };
 
 /*
@@ -1792,7 +1800,7 @@ struct i915_oa_ops {
 };
 
 struct intel_cdclk_state {
-	unsigned int cdclk, vco, ref;
+	unsigned int cdclk, vco, ref, bypass;
 	u8 voltage_level;
 };
@@ -2312,6 +2320,12 @@ struct drm_i915_private {
 		 */
 		bool awake;
 
+		/**
+		 * The number of times we have woken up.
+		 */
+		unsigned int epoch;
+#define I915_EPOCH_INVALID 0
+
 		/**
 		 * We leave the user IRQ off as much as possible,
 		 * but this means that requests will finish and never
@@ -2404,16 +2418,11 @@ enum hdmi_force_audio {
  *
  * We have one bit per pipe and per scanout plane type.
  */
-#define INTEL_MAX_SPRITE_BITS_PER_PIPE 5
 #define INTEL_FRONTBUFFER_BITS_PER_PIPE 8
-#define INTEL_FRONTBUFFER_PRIMARY(pipe) \
-	(1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
-#define INTEL_FRONTBUFFER_CURSOR(pipe) \
-	(1 << (1 + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
-#define INTEL_FRONTBUFFER_SPRITE(pipe, plane) \
-	(1 << (2 + plane + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
+#define INTEL_FRONTBUFFER(pipe, plane_id) \
+	(1 << ((plane_id) + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
 #define INTEL_FRONTBUFFER_OVERLAY(pipe) \
-	(1 << (2 + INTEL_MAX_SPRITE_BITS_PER_PIPE + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
+	(1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE - 1 + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
 #define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
 	(0xff << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
@@ -2595,6 +2604,7 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define IS_GEMINILAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_GEMINILAKE)
 #define IS_COFFEELAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_COFFEELAKE)
 #define IS_CANNONLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_CANNONLAKE)
+#define IS_ICELAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_ICELAKE)
 #define IS_MOBILE(dev_priv)	((dev_priv)->info.is_mobile)
 #define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
 				    (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
@@ -2646,6 +2656,8 @@ intel_info(const struct drm_i915_private *dev_priv)
 				 (dev_priv)->info.gt == 2)
 #define IS_CFL_GT3(dev_priv)	(IS_COFFEELAKE(dev_priv) && \
 				 (dev_priv)->info.gt == 3)
+#define IS_CNL_WITH_PORT_F(dev_priv)	(IS_CANNONLAKE(dev_priv) && \
+					 (INTEL_DEVID(dev_priv) & 0x0004) == 0x0004)
 
 #define IS_ALPHA_SUPPORT(intel_info) ((intel_info)->is_alpha_support)
@@ -2706,6 +2718,7 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define IS_GEN8(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(7)))
 #define IS_GEN9(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(8)))
 #define IS_GEN10(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(9)))
+#define IS_GEN11(dev_priv)	(!!((dev_priv)->info.gen_mask & BIT(10)))
 
 #define IS_LP(dev_priv)	(INTEL_INFO(dev_priv)->is_lp)
 #define IS_GEN9_LP(dev_priv)	(IS_GEN9(dev_priv) && IS_LP(dev_priv))
@@ -2843,11 +2856,13 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define INTEL_PCH_KBP_DEVICE_ID_TYPE		0xA280
 #define INTEL_PCH_CNP_DEVICE_ID_TYPE		0xA300
 #define INTEL_PCH_CNP_LP_DEVICE_ID_TYPE		0x9D80
+#define INTEL_PCH_ICP_DEVICE_ID_TYPE		0x3480
 #define INTEL_PCH_P2X_DEVICE_ID_TYPE		0x7100
 #define INTEL_PCH_P3X_DEVICE_ID_TYPE		0x7000
 #define INTEL_PCH_QEMU_DEVICE_ID_TYPE		0x2900 /* qemu q35 has 2918 */
 
 #define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type)
+#define HAS_PCH_ICP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_ICP)
 #define HAS_PCH_CNP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CNP)
 #define HAS_PCH_CNP_LP(dev_priv) \
 	((dev_priv)->pch_id == INTEL_PCH_CNP_LP_DEVICE_ID_TYPE)
@@ -2950,8 +2965,10 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
 void intel_hpd_init(struct drm_i915_private *dev_priv);
 void intel_hpd_init_work(struct drm_i915_private *dev_priv);
 void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
-enum port intel_hpd_pin_to_port(enum hpd_pin pin);
-enum hpd_pin intel_hpd_pin(enum port port);
+enum port intel_hpd_pin_to_port(struct drm_i915_private *dev_priv,
+				enum hpd_pin pin);
+enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
+				   enum port port);
 bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
 void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
@@ -3718,9 +3735,10 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
 int sandybridge_pcode_write_timeout(struct drm_i915_private *dev_priv, u32 mbox,
-				    u32 val, int timeout_us);
+				    u32 val, int fast_timeout_us,
+				    int slow_timeout_ms);
 #define sandybridge_pcode_write(dev_priv, mbox, val) \
-	sandybridge_pcode_write_timeout(dev_priv, mbox, val, 500)
+	sandybridge_pcode_write_timeout(dev_priv, mbox, val, 500, 0)
 
 int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
 		      u32 reply_mask, u32 reply, int timeout_base_ms);
......
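The INTEL_FRONTBUFFER() rework above collapses the per-type PRIMARY/CURSOR/SPRITE macros into one plane_id-indexed bit, with the overlay claiming the top bit of each pipe's 8-bit window (so plane_id must stay below 7). A compile-checkable sketch of the layout, with the macro names shortened here rather than the kernel's:

#define BITS_PER_PIPE 8
#define FB_BIT(pipe, plane_id) \
	(1 << ((plane_id) + BITS_PER_PIPE * (pipe)))
#define FB_OVERLAY(pipe) \
	(1 << (BITS_PER_PIPE - 1 + BITS_PER_PIPE * (pipe)))
#define FB_ALL_MASK(pipe) (0xff << (BITS_PER_PIPE * (pipe)))

_Static_assert(FB_BIT(0, 0) == 0x0001, "pipe A, plane 0 -> bit 0");
_Static_assert(FB_BIT(1, 2) == 0x0400, "pipe B, plane 2 -> bit 10");
_Static_assert(FB_OVERLAY(1) == 0x8000, "pipe B overlay -> bit 15");
_Static_assert((FB_OVERLAY(1) & FB_ALL_MASK(1)) == FB_OVERLAY(1),
	       "overlay stays inside its pipe's 0xff00 window");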
@@ -369,7 +369,8 @@ i915_gem_object_wait_fence(struct dma_fence *fence,
 	if (i915_gem_request_completed(rq))
 		goto out;
 
-	/* This client is about to stall waiting for the GPU. In many cases
+	/*
+	 * This client is about to stall waiting for the GPU. In many cases
 	 * this is undesirable and limits the throughput of the system, as
 	 * many clients cannot continue processing user input/output whilst
 	 * blocked. RPS autotuning may take tens of milliseconds to respond
@@ -384,11 +385,9 @@ i915_gem_object_wait_fence(struct dma_fence *fence,
 	 * forcing the clocks too high for the whole system, we only allow
 	 * each client to waitboost once in a busy period.
 	 */
-	if (rps_client) {
+	if (rps_client && !i915_gem_request_started(rq)) {
 		if (INTEL_GEN(rq->i915) >= 6)
 			gen6_rps_boost(rq, rps_client);
-		else
-			rps_client = NULL;
 	}
 
 	timeout = i915_wait_request(rq, flags, timeout);
@@ -2824,24 +2823,23 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
 	return 0;
 }
 
-static bool ban_context(const struct i915_gem_context *ctx,
-			unsigned int score)
-{
-	return (i915_gem_context_is_bannable(ctx) &&
-		score >= CONTEXT_SCORE_BAN_THRESHOLD);
-}
-
 static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx)
 {
-	unsigned int score;
 	bool banned;
 
 	atomic_inc(&ctx->guilty_count);
 
-	score = atomic_add_return(CONTEXT_SCORE_GUILTY, &ctx->ban_score);
-	banned = ban_context(ctx, score);
-	DRM_DEBUG_DRIVER("context %s marked guilty (score %d) banned? %s\n",
-			 ctx->name, score, yesno(banned));
+	banned = false;
+	if (i915_gem_context_is_bannable(ctx)) {
+		unsigned int score;
+
+		score = atomic_add_return(CONTEXT_SCORE_GUILTY,
+					  &ctx->ban_score);
+		banned = score >= CONTEXT_SCORE_BAN_THRESHOLD;
+
+		DRM_DEBUG_DRIVER("context %s marked guilty (score %d) banned? %s\n",
+				 ctx->name, score, yesno(banned));
+	}
 	if (!banned)
 		return;
@@ -3135,7 +3133,7 @@ void i915_gem_reset(struct drm_i915_private *dev_priv)
 		 * an incoherent read by the CS (presumably stale TLB). An
 		 * empty request appears sufficient to paper over the glitch.
 		 */
-		if (list_empty(&engine->timeline->requests)) {
+		if (intel_engine_is_idle(engine)) {
 			struct drm_i915_gem_request *rq;
 
 			rq = i915_gem_request_alloc(engine,
@@ -3200,6 +3198,13 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
 
+	if (drm_debug & DRM_UT_DRIVER) {
+		struct drm_printer p = drm_debug_printer(__func__);
+
+		for_each_engine(engine, i915, id)
+			intel_engine_dump(engine, &p, "%s\n", engine->name);
+	}
+
 	/*
 	 * First, stop submission to hw, but do not yet complete requests by
 	 * rolling the global seqno forward (since this would complete requests
@@ -3334,6 +3339,65 @@ i915_gem_retire_work_handler(struct work_struct *work)
 				   round_jiffies_up_relative(HZ));
 }
 
+static void shrink_caches(struct drm_i915_private *i915)
+{
+	/*
+	 * kmem_cache_shrink() discards empty slabs and reorders partially
+	 * filled slabs to prioritise allocating from the mostly full slabs,
+	 * with the aim of reducing fragmentation.
+	 */
+	kmem_cache_shrink(i915->priorities);
+	kmem_cache_shrink(i915->dependencies);
+	kmem_cache_shrink(i915->requests);
+	kmem_cache_shrink(i915->luts);
+	kmem_cache_shrink(i915->vmas);
+	kmem_cache_shrink(i915->objects);
+}
+
+struct sleep_rcu_work {
+	union {
+		struct rcu_head rcu;
+		struct work_struct work;
+	};
+	struct drm_i915_private *i915;
+	unsigned int epoch;
+};
+
+static inline bool
+same_epoch(struct drm_i915_private *i915, unsigned int epoch)
+{
+	/*
+	 * There is a small chance that the epoch wrapped since we started
+	 * sleeping. If we assume that epoch is at least a u32, then it will
+	 * take at least 2^32 * 100ms for it to wrap, or about 326 years.
+	 */
+	return epoch == READ_ONCE(i915->gt.epoch);
+}
+
+static void __sleep_work(struct work_struct *work)
+{
+	struct sleep_rcu_work *s = container_of(work, typeof(*s), work);
+	struct drm_i915_private *i915 = s->i915;
+	unsigned int epoch = s->epoch;
+
+	kfree(s);
+	if (same_epoch(i915, epoch))
+		shrink_caches(i915);
+}
+
+static void __sleep_rcu(struct rcu_head *rcu)
+{
+	struct sleep_rcu_work *s = container_of(rcu, typeof(*s), rcu);
+	struct drm_i915_private *i915 = s->i915;
+
+	if (same_epoch(i915, s->epoch)) {
+		INIT_WORK(&s->work, __sleep_work);
+		queue_work(i915->wq, &s->work);
+	} else {
+		kfree(s);
+	}
+}
+
 static inline bool
 new_requests_since_last_retire(const struct drm_i915_private *i915)
 {
@@ -3346,6 +3410,7 @@ i915_gem_idle_work_handler(struct work_struct *work)
 {
 	struct drm_i915_private *dev_priv =
 		container_of(work, typeof(*dev_priv), gt.idle_work.work);
+	unsigned int epoch = I915_EPOCH_INVALID;
 	bool rearm_hangcheck;
 	ktime_t end;
@@ -3405,6 +3470,8 @@ i915_gem_idle_work_handler(struct work_struct *work)
 	GEM_BUG_ON(!dev_priv->gt.awake);
 	dev_priv->gt.awake = false;
+	epoch = dev_priv->gt.epoch;
+	GEM_BUG_ON(epoch == I915_EPOCH_INVALID);
 	rearm_hangcheck = false;
 
 	if (INTEL_GEN(dev_priv) >= 6)
@@ -3421,6 +3488,23 @@ i915_gem_idle_work_handler(struct work_struct *work)
 		GEM_BUG_ON(!dev_priv->gt.awake);
 		i915_queue_hangcheck(dev_priv);
 	}
+
+	/*
+	 * When we are idle, it is an opportune time to reap our caches.
+	 * However, we have many objects that utilise RCU and the ordered
+	 * i915->wq that this work is executing on. To try and flush any
+	 * pending frees now we are idle, we first wait for an RCU grace
+	 * period, and then queue a task (that will run last on the wq) to
+	 * shrink and re-optimize the caches.
+	 */
+	if (same_epoch(dev_priv, epoch)) {
+		struct sleep_rcu_work *s = kmalloc(sizeof(*s), GFP_KERNEL);
+		if (s) {
+			s->i915 = dev_priv;
+			s->epoch = epoch;
+			call_rcu(&s->rcu, __sleep_rcu);
+		}
+	}
 }
 
 void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
@@ -3566,7 +3650,7 @@ static int wait_for_engines(struct drm_i915_private *i915)
 
 		for_each_engine(engine, i915, id)
 			intel_engine_dump(engine, &p,
-					  "%s", engine->name);
+					  "%s\n", engine->name);
 	}
 
 	i915_gem_set_wedged(i915);
@@ -4698,7 +4782,8 @@ static void __i915_gem_free_work(struct work_struct *work)
 		container_of(work, struct drm_i915_private, mm.free_work);
 	struct llist_node *freed;
 
-	/* All file-owned VMA should have been released by this point through
+	/*
+	 * All file-owned VMA should have been released by this point through
 	 * i915_gem_close_object(), or earlier by i915_gem_context_close().
 	 * However, the object may also be bound into the global GTT (e.g.
 	 * older GPUs without per-process support, or for direct access through
@@ -4725,13 +4810,18 @@ static void __i915_gem_free_object_rcu(struct rcu_head *head)
 		container_of(head, typeof(*obj), rcu);
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 
-	/* We can't simply use call_rcu() from i915_gem_free_object()
-	 * as we need to block whilst unbinding, and the call_rcu
-	 * task may be called from softirq context. So we take a
-	 * detour through a worker.
+	/*
+	 * Since we require blocking on struct_mutex to unbind the freed
+	 * object from the GPU before releasing resources back to the
+	 * system, we can not do that directly from the RCU callback (which may
+	 * be a softirq context), but must instead then defer that work onto a
+	 * kthread. We use the RCU callback rather than move the freed object
+	 * directly onto the work queue so that we can mix between using the
+	 * worker and performing frees directly from subsequent allocations for
+	 * crude but effective memory throttling.
 	 */
 	if (llist_add(&obj->freed, &i915->mm.free_list))
-		schedule_work(&i915->mm.free_work);
+		queue_work(i915->wq, &i915->mm.free_work);
 }
 
 void i915_gem_free_object(struct drm_gem_object *gem_obj)
@@ -4744,7 +4834,8 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 	if (discard_backing_storage(obj))
 		obj->mm.madv = I915_MADV_DONTNEED;
 
-	/* Before we free the object, make sure any pure RCU-only
+	/*
+	 * Before we free the object, make sure any pure RCU-only
 	 * read-side critical sections are complete, e.g.
 	 * i915_gem_busy_ioctl(). For the corresponding synchronized
 	 * lookup see i915_gem_object_lookup_rcu().
@@ -5186,7 +5277,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 	if (ret)
 		return ret;
 
-	ret = intel_uc_init_wq(dev_priv);
+	ret = intel_uc_init_misc(dev_priv);
 	if (ret)
 		return ret;
@@ -5282,7 +5373,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 
-	intel_uc_fini_wq(dev_priv);
+	intel_uc_fini_misc(dev_priv);
 	if (ret != -EIO)
 		i915_gem_cleanup_userptr(dev_priv);
......
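The idle-work additions above are a two-stage deferral: stamp the current wakeup epoch, let an RCU grace period elapse so pending RCU frees become reclaimable, then queue ordinary work that shrinks the slab caches only if the device stayed idle the whole time. The rcu_head/work_struct union is safe because the memory is used strictly as an rcu_head until INIT_WORK() repurposes it. The moving parts of the pattern, reduced to a sketch (not the i915 code; current_epoch is a stand-in for i915->gt.epoch):

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

static unsigned int current_epoch;	/* hypothetical: bumped on each wakeup */

struct idle_shrink {
	union {			/* rcu_head first, then reused as work */
		struct rcu_head rcu;
		struct work_struct work;
	};
	unsigned int epoch;
};

static void idle_shrink_work(struct work_struct *work)
{
	struct idle_shrink *s = container_of(work, typeof(*s), work);
	unsigned int epoch = s->epoch;

	kfree(s);
	if (epoch == READ_ONCE(current_epoch))
		;	/* still idle: kmem_cache_shrink() the caches here */
}

static void idle_shrink_rcu(struct rcu_head *rcu)
{
	struct idle_shrink *s = container_of(rcu, typeof(*s), rcu);

	if (s->epoch == READ_ONCE(current_epoch)) {
		INIT_WORK(&s->work, idle_shrink_work);
		schedule_work(&s->work);	/* i915 uses its own ordered wq */
	} else {
		kfree(s);	/* woke up meanwhile: abandon the shrink */
	}
}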
@@ -230,10 +230,14 @@ static int fence_update(struct drm_i915_fence_reg *fence,
 	}
 
 	if (fence->vma) {
-		ret = i915_gem_active_retire(&fence->vma->last_fence,
-					     &fence->vma->obj->base.dev->struct_mutex);
+		struct i915_vma *old = fence->vma;
+
+		ret = i915_gem_active_retire(&old->last_fence,
+					     &old->obj->base.dev->struct_mutex);
 		if (ret)
 			return ret;
+
+		i915_vma_flush_writes(old);
 	}
 
 	if (fence->vma && fence->vma != vma) {
......
@@ -543,9 +543,7 @@ static void fill_page_dma_32(struct i915_address_space *vm,
 static int
 setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
 {
-	struct page *page = NULL;
-	dma_addr_t addr;
-	int order;
+	unsigned long size;
 
 	/*
 	 * In order to utilize 64K pages for an object with a size < 2M, we will
@@ -559,48 +557,47 @@ setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
 	 * TODO: we should really consider write-protecting the scratch-page and
 	 * sharing between ppgtt
 	 */
+	size = I915_GTT_PAGE_SIZE_4K;
 	if (i915_vm_is_48bit(vm) &&
 	    HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K)) {
-		order = get_order(I915_GTT_PAGE_SIZE_64K);
-		page = alloc_pages(gfp | __GFP_ZERO | __GFP_NOWARN, order);
-		if (page) {
-			addr = dma_map_page(vm->dma, page, 0,
-					    I915_GTT_PAGE_SIZE_64K,
-					    PCI_DMA_BIDIRECTIONAL);
-			if (unlikely(dma_mapping_error(vm->dma, addr))) {
-				__free_pages(page, order);
-				page = NULL;
-			}
-
-			if (!IS_ALIGNED(addr, I915_GTT_PAGE_SIZE_64K)) {
-				dma_unmap_page(vm->dma, addr,
-					       I915_GTT_PAGE_SIZE_64K,
-					       PCI_DMA_BIDIRECTIONAL);
-				__free_pages(page, order);
-				page = NULL;
-			}
-		}
+		size = I915_GTT_PAGE_SIZE_64K;
+		gfp |= __GFP_NOWARN;
 	}
+	gfp |= __GFP_ZERO | __GFP_RETRY_MAYFAIL;
 
-	if (!page) {
-		order = 0;
-		page = alloc_page(gfp | __GFP_ZERO);
+	do {
+		int order = get_order(size);
+		struct page *page;
+		dma_addr_t addr;
+
+		page = alloc_pages(gfp, order);
 		if (unlikely(!page))
-			return -ENOMEM;
+			goto skip;
 
-		addr = dma_map_page(vm->dma, page, 0, PAGE_SIZE,
+		addr = dma_map_page(vm->dma, page, 0, size,
 				    PCI_DMA_BIDIRECTIONAL);
-		if (unlikely(dma_mapping_error(vm->dma, addr))) {
-			__free_page(page);
-			return -ENOMEM;
-		}
-	}
+		if (unlikely(dma_mapping_error(vm->dma, addr)))
+			goto free_page;
 
-	vm->scratch_page.page = page;
-	vm->scratch_page.daddr = addr;
-	vm->scratch_page.order = order;
+		if (unlikely(!IS_ALIGNED(addr, size)))
+			goto unmap_page;
 
-	return 0;
+		vm->scratch_page.page = page;
+		vm->scratch_page.daddr = addr;
+		vm->scratch_page.order = order;
+		return 0;
+
+unmap_page:
+		dma_unmap_page(vm->dma, addr, size, PCI_DMA_BIDIRECTIONAL);
+free_page:
+		__free_pages(page, order);
+skip:
+		if (size == I915_GTT_PAGE_SIZE_4K)
+			return -ENOMEM;
+
+		size = I915_GTT_PAGE_SIZE_4K;
+		gfp &= ~__GFP_NOWARN;
+	} while (1);
 }
 
 static void cleanup_scratch_page(struct i915_address_space *vm)
@@ -2370,9 +2367,10 @@ int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
 			       struct sg_table *pages)
 {
 	do {
-		if (dma_map_sg(&obj->base.dev->pdev->dev,
-			       pages->sgl, pages->nents,
-			       PCI_DMA_BIDIRECTIONAL))
+		if (dma_map_sg_attrs(&obj->base.dev->pdev->dev,
+				     pages->sgl, pages->nents,
+				     PCI_DMA_BIDIRECTIONAL,
+				     DMA_ATTR_NO_WARN))
 			return 0;
 
 		/* If the DMA remap fails, one cause can be that we have
......
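The setup_scratch_page() rewrite above is an instance of "try big, fall back small": the opportunistic 64K attempt carries __GFP_NOWARN (and the sg path uses DMA_ATTR_NO_WARN) so expected failures stay silent, and only the final, mandatory 4K attempt is allowed to warn. Reduced to the allocation policy alone, with a hypothetical helper name:

#include <linux/gfp.h>
#include <linux/mm.h>

static struct page *alloc_big_or_small(unsigned long big, unsigned long small)
{
	unsigned long size = big;
	gfp_t gfp = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN;

	do {
		struct page *page = alloc_pages(gfp, get_order(size));

		if (page || size == small)
			return page;	/* success, or the final attempt failed */

		size = small;		/* retry at the mandatory size... */
		gfp &= ~__GFP_NOWARN;	/* ...and let that attempt warn */
	} while (1);
}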
@@ -161,12 +161,16 @@ i915_priotree_fini(struct drm_i915_private *i915, struct i915_priotree *pt)
 	GEM_BUG_ON(!list_empty(&pt->link));
 
-	/* Everyone we depended upon (the fences we wait to be signaled)
+	/*
+	 * Everyone we depended upon (the fences we wait to be signaled)
 	 * should retire before us and remove themselves from our list.
 	 * However, retirement is run independently on each timeline and
 	 * so we may be called out-of-order.
 	 */
 	list_for_each_entry_safe(dep, next, &pt->signalers_list, signal_link) {
+		GEM_BUG_ON(!i915_priotree_signaled(dep->signaler));
+		GEM_BUG_ON(!list_empty(&dep->dfs_link));
+
 		list_del(&dep->wait_link);
 		if (dep->flags & I915_DEPENDENCY_ALLOC)
 			i915_dependency_free(i915, dep);
@@ -174,6 +178,9 @@ i915_priotree_fini(struct drm_i915_private *i915, struct i915_priotree *pt)
 
 	/* Remove ourselves from everyone who depends upon us */
 	list_for_each_entry_safe(dep, next, &pt->waiters_list, wait_link) {
+		GEM_BUG_ON(dep->signaler != pt);
+		GEM_BUG_ON(!list_empty(&dep->dfs_link));
+
 		list_del(&dep->signal_link);
 		if (dep->flags & I915_DEPENDENCY_ALLOC)
 			i915_dependency_free(i915, dep);
@@ -267,6 +274,8 @@ static void mark_busy(struct drm_i915_private *i915)
 	intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
 
 	i915->gt.awake = true;
+	if (unlikely(++i915->gt.epoch == 0)) /* keep 0 as invalid */
+		i915->gt.epoch = 1;
 
 	intel_enable_gt_powersave(i915);
 	i915_update_gfx_val(i915);
@@ -436,7 +445,10 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
 	spin_lock_irq(&request->lock);
 	if (request->waitboost)
 		atomic_dec(&request->i915->gt_pm.rps.num_waiters);
-	dma_fence_signal_locked(&request->fence);
+	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &request->fence.flags))
+		dma_fence_signal_locked(&request->fence);
+	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
+		intel_engine_cancel_signaling(request);
 	spin_unlock_irq(&request->lock);
 
 	i915_priotree_fini(request->i915, &request->priotree);
@@ -530,6 +542,8 @@ void __i915_gem_request_unsubmit(struct drm_i915_gem_request *request)
 	 */
 	GEM_BUG_ON(!request->global_seqno);
 	GEM_BUG_ON(request->global_seqno != engine->timeline->seqno);
+	GEM_BUG_ON(i915_seqno_passed(intel_engine_get_seqno(engine),
+				     request->global_seqno));
 	engine->timeline->seqno--;
 
 	/* We may be recursing from the signal callback of another i915 fence */
@@ -691,6 +705,17 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
 		if (ret)
 			goto err_unreserve;
 
+		/*
+		 * We've forced the client to stall and catch up with whatever
+		 * backlog there might have been. As we are assuming that we
+		 * caused the mempressure, now is an opportune time to
+		 * recover as much memory from the request pool as is possible.
+		 * Having already penalized the client to stall, we spend
+		 * a little extra time to re-optimise page allocation.
+		 */
+		kmem_cache_shrink(dev_priv->requests);
+		rcu_barrier(); /* Recover the TYPESAFE_BY_RCU pages */
+
 		req = kmem_cache_alloc(dev_priv->requests, GFP_KERNEL);
 		if (!req) {
 			ret = -ENOMEM;
@@ -722,6 +747,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
 
 	/* No zalloc, must clear what we need by hand */
 	req->global_seqno = 0;
+	req->signaling.wait.seqno = 0;
 	req->file_priv = NULL;
 	req->batch = NULL;
 	req->capture_list = NULL;
......
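The new GEM_BUG_ON() in __i915_gem_request_unsubmit() and the started-check that gates waitboosting both lean on wraparound-safe seqno ordering: i915 compares sequence numbers in modular arithmetic so a u32 wrap does not invert the order. The comparison, as in i915_seqno_passed():

static inline bool seqno_passed(u32 seq1, u32 seq2)
{
	/*
	 * True when seq1 is at or after seq2, even across a u32 wrap:
	 * seqno_passed(1, 0xffffffff) is true, since 1 - 0xffffffff == 2.
	 */
	return (s32)(seq1 - seq2) >= 0;
}

"Has this request started?" then reduces to: has the engine's breadcrumb passed the seqno just before ours, i.e. seqno_passed(engine_seqno, global_seqno - 1), which is exactly what the i915_gem_request_started() helper below computes.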
@@ -245,18 +245,6 @@ i915_gem_request_put(struct drm_i915_gem_request *req)
 	dma_fence_put(&req->fence);
 }
 
-static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
-					   struct drm_i915_gem_request *src)
-{
-	if (src)
-		i915_gem_request_get(src);
-
-	if (*pdst)
-		i915_gem_request_put(*pdst);
-
-	*pdst = src;
-}
-
 /**
  * i915_gem_request_global_seqno - report the current global seqno
  * @request - the request
@@ -341,6 +329,27 @@ i915_gem_request_completed(const struct drm_i915_gem_request *req)
 	return __i915_gem_request_completed(req, seqno);
 }
 
+static inline bool
+i915_gem_request_started(const struct drm_i915_gem_request *req)
+{
+	u32 seqno;
+
+	seqno = i915_gem_request_global_seqno(req);
+	if (!seqno)
+		return false;
+
+	return i915_seqno_passed(intel_engine_get_seqno(req->engine),
+				 seqno - 1);
+}
+
+static inline bool i915_priotree_signaled(const struct i915_priotree *pt)
+{
+	const struct drm_i915_gem_request *rq =
+		container_of(pt, const struct drm_i915_gem_request, priotree);
+
+	return i915_gem_request_completed(rq);
+}
+
 /* We treat requests as fences. This is not be to confused with our
  * "fence registers" but pipeline synchronisation objects ala GL_ARB_sync.
  * We use the fences to synchronize access from the CPU with activity on the
......
drivers/gpu/drm/i915/i915_gpu_error.c:
@@ -34,16 +34,25 @@
 
 #include "i915_drv.h"
 
-static const char *engine_str(int engine)
-{
-	switch (engine) {
-	case RCS: return "render";
-	case VCS: return "bsd";
-	case BCS: return "blt";
-	case VECS: return "vebox";
-	case VCS2: return "bsd2";
-	default: return "";
-	}
+static inline const struct intel_engine_cs *
+engine_lookup(const struct drm_i915_private *i915, unsigned int id)
+{
+	if (id >= I915_NUM_ENGINES)
+		return NULL;
+
+	return i915->engine[id];
+}
+
+static inline const char *
+__engine_name(const struct intel_engine_cs *engine)
+{
+	return engine ? engine->name : "";
+}
+
+static const char *
+engine_name(const struct drm_i915_private *i915, unsigned int id)
+{
+	return __engine_name(engine_lookup(i915, id));
 }
 
 static const char *tiling_flag(int tiling)
@@ -345,7 +354,7 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
 		err_puts(m, purgeable_flag(err->purgeable));
 		err_puts(m, err->userptr ? " userptr" : "");
 		err_puts(m, err->engine != -1 ? " " : "");
-		err_puts(m, engine_str(err->engine));
+		err_puts(m, engine_name(m->i915, err->engine));
 		err_puts(m, i915_cache_level_str(m->i915, err->cache_level));
 
 		if (err->name)
@@ -387,6 +396,11 @@ static void error_print_instdone(struct drm_i915_error_state_buf *m,
 			   ee->instdone.row[slice][subslice]);
 }
 
+static const char *bannable(const struct drm_i915_error_context *ctx)
+{
+	return ctx->bannable ? "" : " (unbannable)";
+}
+
 static void error_print_request(struct drm_i915_error_state_buf *m,
 				const char *prefix,
 				const struct drm_i915_error_request *erq)
@@ -405,9 +419,10 @@ static void error_print_context(struct drm_i915_error_state_buf *m,
 				const char *header,
 				const struct drm_i915_error_context *ctx)
 {
-	err_printf(m, "%s%s[%d] user_handle %d hw_id %d, prio %d, ban score %d guilty %d active %d\n",
+	err_printf(m, "%s%s[%d] user_handle %d hw_id %d, prio %d, ban score %d%s guilty %d active %d\n",
 		   header, ctx->comm, ctx->pid, ctx->handle, ctx->hw_id,
-		   ctx->priority, ctx->ban_score, ctx->guilty, ctx->active);
+		   ctx->priority, ctx->ban_score, bannable(ctx),
+		   ctx->guilty, ctx->active);
 }
 
 static void error_print_engine(struct drm_i915_error_state_buf *m,
@@ -415,7 +430,8 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
 {
 	int n;
 
-	err_printf(m, "%s command stream:\n", engine_str(ee->engine_id));
+	err_printf(m, "%s command stream:\n",
+		   engine_name(m->i915, ee->engine_id));
 	err_printf(m, "  IDLE?: %s\n", yesno(ee->idle));
 	err_printf(m, "  START: 0x%08x\n", ee->start);
 	err_printf(m, "  HEAD:  0x%08x [0x%08x]\n", ee->head, ee->rq_head);
@@ -610,6 +626,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 {
 	struct drm_i915_private *dev_priv = m->i915;
 	struct drm_i915_error_object *obj;
+	struct timespec64 ts;
 	int i, j;
 
 	if (!error) {
@@ -620,21 +637,25 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 	if (*error->error_msg)
 		err_printf(m, "%s\n", error->error_msg);
 	err_printf(m, "Kernel: " UTS_RELEASE "\n");
-	err_printf(m, "Time: %ld s %ld us\n",
-		   error->time.tv_sec, error->time.tv_usec);
-	err_printf(m, "Boottime: %ld s %ld us\n",
-		   error->boottime.tv_sec, error->boottime.tv_usec);
-	err_printf(m, "Uptime: %ld s %ld us\n",
-		   error->uptime.tv_sec, error->uptime.tv_usec);
+	ts = ktime_to_timespec64(error->time);
+	err_printf(m, "Time: %lld s %ld us\n",
+		   (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
+	ts = ktime_to_timespec64(error->boottime);
+	err_printf(m, "Boottime: %lld s %ld us\n",
+		   (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
+	ts = ktime_to_timespec64(error->uptime);
+	err_printf(m, "Uptime: %lld s %ld us\n",
+		   (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
 
 	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
 		if (error->engine[i].hangcheck_stalled &&
 		    error->engine[i].context.pid) {
-			err_printf(m, "Active process (on ring %s): %s [%d], score %d\n",
-				   engine_str(i),
+			err_printf(m, "Active process (on ring %s): %s [%d], score %d%s\n",
+				   engine_name(m->i915, i),
 				   error->engine[i].context.comm,
 				   error->engine[i].context.pid,
-				   error->engine[i].context.ban_score);
+				   error->engine[i].context.ban_score,
+				   bannable(&error->engine[i].context));
 		}
 	}
 	err_printf(m, "Reset count: %u\n", error->reset_count);
@@ -722,12 +743,13 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 		if (obj) {
 			err_puts(m, dev_priv->engine[i]->name);
 			if (ee->context.pid)
-				err_printf(m, " (submitted by %s [%d], ctx %d [%d], score %d)",
+				err_printf(m, " (submitted by %s [%d], ctx %d [%d], score %d%s)",
 					   ee->context.comm,
 					   ee->context.pid,
 					   ee->context.handle,
 					   ee->context.hw_id,
-					   ee->context.ban_score);
+					   ee->context.ban_score,
+					   bannable(&ee->context));
 			err_printf(m, " --- gtt_offset = 0x%08x %08x\n",
 				   upper_32_bits(obj->gtt_offset),
 				   lower_32_bits(obj->gtt_offset));
@@ -1369,6 +1391,7 @@ static void record_context(struct drm_i915_error_context *e,
 	e->hw_id = ctx->hw_id;
 	e->priority = ctx->priority;
 	e->ban_score = atomic_read(&ctx->ban_score);
+	e->bannable = i915_gem_context_is_bannable(ctx);
 	e->guilty = atomic_read(&ctx->guilty_count);
 	e->active = atomic_read(&ctx->active_count);
 }
@@ -1737,11 +1760,10 @@ static int capture(void *data)
 {
 	struct i915_gpu_state *error = data;
 
-	do_gettimeofday(&error->time);
-	error->boottime = ktime_to_timeval(ktime_get_boottime());
-	error->uptime =
-		ktime_to_timeval(ktime_sub(ktime_get(),
-					   error->i915->gt.last_init_time));
+	error->time = ktime_get_real();
+	error->boottime = ktime_get_boottime();
+	error->uptime = ktime_sub(ktime_get(),
+				  error->i915->gt.last_init_time);
 
 	capture_params(error);
 	capture_uc_state(error);
...
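Aside: the capture() and dump changes above are the 2038-safe pattern in miniature: store a ktime_t internally and convert to timespec64 only at print time, since tv_sec is then 64-bit even on 32-bit kernels. A minimal sketch of just the conversion (illustrative, not the driver's code):

#include <linux/ktime.h>
#include <linux/time64.h>
#include <linux/printk.h>

static void example_print_time(ktime_t t)
{
	struct timespec64 ts = ktime_to_timespec64(t);

	/* tv_sec is time64_t, so print it via %lld even on 32-bit. */
	pr_info("Time: %lld s %ld us\n",
		(s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
}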
drivers/gpu/drm/i915/i915_irq.c:
@@ -452,6 +452,8 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
 
 void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv)
 {
+	assert_rpm_wakelock_held(dev_priv);
+
 	spin_lock_irq(&dev_priv->irq_lock);
 	gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events);
 	spin_unlock_irq(&dev_priv->irq_lock);
@@ -459,6 +461,8 @@ void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv)
 
 void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv)
 {
+	assert_rpm_wakelock_held(dev_priv);
+
 	spin_lock_irq(&dev_priv->irq_lock);
 	if (!dev_priv->guc.interrupts_enabled) {
 		WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) &
@@ -471,6 +475,8 @@ void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv)
 
 void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv)
 {
+	assert_rpm_wakelock_held(dev_priv);
+
 	spin_lock_irq(&dev_priv->irq_lock);
 	dev_priv->guc.interrupts_enabled = false;
@@ -1407,37 +1413,25 @@ gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
 		tasklet_hi_schedule(&execlists->tasklet);
 }
 
-static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv,
-				   u32 master_ctl,
-				   u32 gt_iir[4])
+static void gen8_gt_irq_ack(struct drm_i915_private *dev_priv,
+			    u32 master_ctl, u32 gt_iir[4])
 {
-	irqreturn_t ret = IRQ_NONE;
-
 	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
 		gt_iir[0] = I915_READ_FW(GEN8_GT_IIR(0));
-		if (gt_iir[0]) {
+		if (gt_iir[0])
 			I915_WRITE_FW(GEN8_GT_IIR(0), gt_iir[0]);
-			ret = IRQ_HANDLED;
-		} else
-			DRM_ERROR("The master control interrupt lied (GT0)!\n");
 	}
 
 	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
 		gt_iir[1] = I915_READ_FW(GEN8_GT_IIR(1));
-		if (gt_iir[1]) {
+		if (gt_iir[1])
 			I915_WRITE_FW(GEN8_GT_IIR(1), gt_iir[1]);
-			ret = IRQ_HANDLED;
-		} else
-			DRM_ERROR("The master control interrupt lied (GT1)!\n");
 	}
 
 	if (master_ctl & GEN8_GT_VECS_IRQ) {
 		gt_iir[3] = I915_READ_FW(GEN8_GT_IIR(3));
-		if (gt_iir[3]) {
+		if (gt_iir[3])
 			I915_WRITE_FW(GEN8_GT_IIR(3), gt_iir[3]);
-			ret = IRQ_HANDLED;
-		} else
-			DRM_ERROR("The master control interrupt lied (GT3)!\n");
 	}
 
 	if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
@@ -1447,12 +1441,8 @@ static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv,
 			I915_WRITE_FW(GEN8_GT_IIR(2),
 				      gt_iir[2] & (dev_priv->pm_rps_events |
 						   dev_priv->pm_guc_events));
-			ret = IRQ_HANDLED;
-		} else
-			DRM_ERROR("The master control interrupt lied (PM)!\n");
+		}
 	}
-
-	return ret;
 }
 
 static void gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
@@ -1568,10 +1558,11 @@ static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
  *
  * Note that the caller is expected to zero out the masks initially.
  */
-static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
-			       u32 hotplug_trigger, u32 dig_hotplug_reg,
-			       const u32 hpd[HPD_NUM_PINS],
-			       bool long_pulse_detect(enum port port, u32 val))
+static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
+			       u32 *pin_mask, u32 *long_mask,
+			       u32 hotplug_trigger, u32 dig_hotplug_reg,
+			       const u32 hpd[HPD_NUM_PINS],
+			       bool long_pulse_detect(enum port port, u32 val))
 {
 	enum port port;
 	int i;
@@ -1582,7 +1573,7 @@ static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
 		*pin_mask |= BIT(i);
 
-		port = intel_hpd_pin_to_port(i);
+		port = intel_hpd_pin_to_port(dev_priv, i);
 		if (port == PORT_NONE)
 			continue;
@@ -1970,8 +1961,9 @@ static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
 		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
 
 		if (hotplug_trigger) {
-			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
-					   hotplug_trigger, hpd_status_g4x,
+			intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+					   hotplug_trigger, hotplug_trigger,
+					   hpd_status_g4x,
 					   i9xx_port_hotplug_long_detect);
 
 			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
@@ -1983,8 +1975,9 @@ static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
 		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
 
 		if (hotplug_trigger) {
-			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
-					   hotplug_trigger, hpd_status_i915,
+			intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+					   hotplug_trigger, hotplug_trigger,
+					   hpd_status_i915,
 					   i9xx_port_hotplug_long_detect);
 			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
 		}
@@ -2185,7 +2178,7 @@ static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
 	if (!hotplug_trigger)
 		return;
 
-	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
+	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
 			   dig_hotplug_reg, hpd,
 			   pch_port_hotplug_long_detect);
 
@@ -2327,8 +2320,8 @@ static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
 		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
 		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
 
-		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
-				   dig_hotplug_reg, hpd_spt,
+		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+				   hotplug_trigger, dig_hotplug_reg, hpd_spt,
 				   spt_port_hotplug_long_detect);
 	}
 
@@ -2338,8 +2331,8 @@ static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
 		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
 		I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);
 
-		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger,
-				   dig_hotplug_reg, hpd_spt,
+		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+				   hotplug2_trigger, dig_hotplug_reg, hpd_spt,
 				   spt_port_hotplug2_long_detect);
 	}
 
@@ -2359,7 +2352,7 @@ static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
 	dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
 	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
 
-	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
+	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
 			   dig_hotplug_reg, hpd,
 			   ilk_port_hotplug_long_detect);
 
@@ -2536,7 +2529,7 @@ static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
 	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
 	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
 
-	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
+	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
 			   dig_hotplug_reg, hpd,
 			   bxt_port_hotplug_long_detect);
 
@@ -2579,6 +2572,9 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
 			    GEN9_AUX_CHANNEL_C |
 			    GEN9_AUX_CHANNEL_D;
 
+		if (IS_CNL_WITH_PORT_F(dev_priv))
+			tmp_mask |= CNL_AUX_CHANNEL_F;
+
 		if (iir & tmp_mask) {
 			dp_aux_irq_handler(dev_priv);
 			found = true;
@@ -2683,7 +2679,6 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 master_ctl;
 	u32 gt_iir[4] = {};
-	irqreturn_t ret;
 
 	if (!intel_irqs_enabled(dev_priv))
 		return IRQ_NONE;
@@ -2699,16 +2694,16 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
 	disable_rpm_wakeref_asserts(dev_priv);
 
 	/* Find, clear, then process each source of interrupt */
-	ret = gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
+	gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
 	gen8_gt_irq_handler(dev_priv, gt_iir);
-	ret |= gen8_de_irq_handler(dev_priv, master_ctl);
+	gen8_de_irq_handler(dev_priv, master_ctl);
 
 	I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
 	POSTING_READ_FW(GEN8_MASTER_IRQ);
 
 	enable_rpm_wakeref_asserts(dev_priv);
 
-	return ret;
+	return IRQ_HANDLED;
 }
 
 struct wedge_me {
@@ -3611,6 +3606,9 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
 			de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
 	}
 
+	if (IS_CNL_WITH_PORT_F(dev_priv))
+		de_port_masked |= CNL_AUX_CHANNEL_F;
+
 	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
 			  GEN8_PIPE_FIFO_UNDERRUN;
...
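Aside: once the master control register has identified the interrupt as ours, per-source "the master control interrupt lied" bookkeeping adds nothing, which is why the handler above can unconditionally return IRQ_HANDLED. A generic, self-contained skeleton of that ack-then-handle flow (everything here is hypothetical, not the driver's code):

#include <linux/interrupt.h>
#include <linux/io.h>

#define MASTER_IRQ		0x00		/* assumed register offset */
#define MASTER_IRQ_ENABLE	(1u << 31)

struct example_dev {
	void __iomem *regs;
};

static irqreturn_t example_irq_handler(int irq, void *arg)
{
	struct example_dev *dev = arg;
	u32 master_ctl;

	master_ctl = readl(dev->regs + MASTER_IRQ) & ~MASTER_IRQ_ENABLE;
	if (!master_ctl)
		return IRQ_NONE;	/* nothing asserted: not our interrupt */

	writel(0, dev->regs + MASTER_IRQ);	/* mask master while we work */

	/*
	 * ...read and clear each per-source IIR named in master_ctl, then
	 * process the latched bits; the master bits already told us the
	 * interrupt was ours, so no per-source IRQ_NONE tracking is needed...
	 */

	writel(MASTER_IRQ_ENABLE, dev->regs + MASTER_IRQ);

	return IRQ_HANDLED;
}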
drivers/gpu/drm/i915/i915_params.c:
@@ -155,7 +155,8 @@ i915_param_named_unsafe(enable_guc, int, 0400,
 	"(-1=auto, 0=disable [default], 1=GuC submission, 2=HuC load)");
 
 i915_param_named(guc_log_level, int, 0400,
-	"GuC firmware logging level (-1:disabled (default), 0-3:enabled)");
+	"GuC firmware logging level. Requires GuC to be loaded. "
+	"(-1=auto [default], 0=disable, 1..4=enable with verbosity min..max)");
 
 i915_param_named_unsafe(guc_firmware_path, charp, 0400,
 	"GuC firmware path to use instead of the default one");
@@ -166,8 +167,10 @@ i915_param_named_unsafe(huc_firmware_path, charp, 0400,
 i915_param_named_unsafe(enable_dp_mst, bool, 0600,
 	"Enable multi-stream transport (MST) for new DisplayPort sinks. (default: true)");
 
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
 i915_param_named_unsafe(inject_load_failure, uint, 0400,
 	"Force an error after a number of failure check points (0:disabled (default), N:force failure at the Nth failure check point)");
+#endif
 
 i915_param_named(enable_dpcd_backlight, bool, 0600,
 	"Enable support for DPCD backlight control (default:false)");
...
drivers/gpu/drm/i915/i915_params.h:
@@ -48,7 +48,7 @@ struct drm_printer;
 	param(int, enable_ips, 1) \
 	param(int, invert_brightness, 0) \
 	param(int, enable_guc, 0) \
-	param(int, guc_log_level, -1) \
+	param(int, guc_log_level, 0) \
 	param(char *, guc_firmware_path, NULL) \
 	param(char *, huc_firmware_path, NULL) \
 	param(int, mmio_debug, 0) \
...
drivers/gpu/drm/i915/i915_pci.c:
@@ -571,7 +571,7 @@ static const struct intel_device_info intel_coffeelake_gt3_info = {
 	.ddb_size = 1024, \
 	GLK_COLORS
 
-static const struct intel_device_info intel_cannonlake_gt2_info = {
+static const struct intel_device_info intel_cannonlake_info = {
 	GEN10_FEATURES,
 	.is_alpha_support = 1,
 	.platform = INTEL_CANNONLAKE,
@@ -579,6 +579,19 @@ static const struct intel_device_info intel_cannonlake_gt2_info = {
 	.gt = 2,
 };
 
+#define GEN11_FEATURES \
+	GEN10_FEATURES, \
+	.gen = 11, \
+	.ddb_size = 2048, \
+	.has_csr = 0
+
+static const struct intel_device_info intel_icelake_11_info = {
+	GEN11_FEATURES,
+	.platform = INTEL_ICELAKE,
+	.is_alpha_support = 1,
+	.has_resource_streamer = 0,
+};
+
 /*
  * Make sure any device matches here are from most specific to most
  * general. For example, since the Quanta match is based on the subsystem
@@ -636,8 +649,7 @@ static const struct pci_device_id pciidlist[] = {
 	INTEL_CFL_U_GT1_IDS(&intel_coffeelake_gt1_info),
 	INTEL_CFL_U_GT2_IDS(&intel_coffeelake_gt2_info),
 	INTEL_CFL_U_GT3_IDS(&intel_coffeelake_gt3_info),
-	INTEL_CNL_U_GT2_IDS(&intel_cannonlake_gt2_info),
-	INTEL_CNL_Y_GT2_IDS(&intel_cannonlake_gt2_info),
+	INTEL_CNL_IDS(&intel_cannonlake_info),
 	{0, 0, 0}
 };
 MODULE_DEVICE_TABLE(pci, pciidlist);
...
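Aside: the ID-table rework above relies on the standard PCI match pattern, where driver_data in each pci_device_id entry points at a per-platform info struct that probe() recovers. A self-contained sketch of the pattern (all names and the device ID are hypothetical):

#include <linux/pci.h>
#include <linux/module.h>

struct example_info {			/* stand-in for intel_device_info */
	int gen;
	unsigned int is_alpha_support:1;
};

static const struct example_info icelake_info = {
	.gen = 11,
	.is_alpha_support = 1,
};

static const struct pci_device_id example_ids[] = {
	/* 0x8086 is the Intel vendor ID; the device ID is a placeholder. */
	{ PCI_DEVICE(0x8086, 0x1234),
	  .driver_data = (kernel_ulong_t)&icelake_info },
	{ }	/* terminator */
};
MODULE_DEVICE_TABLE(pci, example_ids);

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct example_info *info =
		(const struct example_info *)ent->driver_data;

	dev_info(&pdev->dev, "gen%d device%s\n", info->gen,
		 info->is_alpha_support ? " (alpha support)" : "");
	return 0;
}

static struct pci_driver example_driver = {
	.name = "example",
	.id_table = example_ids,
	.probe = example_probe,
};
module_pci_driver(example_driver);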
drivers/gpu/drm/i915/i915_pmu.c:
@@ -285,28 +285,69 @@ static u64 count_interrupts(struct drm_i915_private *i915)
 	return sum;
 }
 
-static void i915_pmu_event_destroy(struct perf_event *event)
+static void engine_event_destroy(struct perf_event *event)
 {
-	WARN_ON(event->parent);
+	struct drm_i915_private *i915 =
+		container_of(event->pmu, typeof(*i915), pmu.base);
+	struct intel_engine_cs *engine;
+
+	engine = intel_engine_lookup_user(i915,
+					  engine_event_class(event),
+					  engine_event_instance(event));
+	if (WARN_ON_ONCE(!engine))
+		return;
+
+	if (engine_event_sample(event) == I915_SAMPLE_BUSY &&
+	    intel_engine_supports_stats(engine))
+		intel_disable_engine_stats(engine);
 }
 
-static int engine_event_init(struct perf_event *event)
+static void i915_pmu_event_destroy(struct perf_event *event)
 {
-	struct drm_i915_private *i915 =
-		container_of(event->pmu, typeof(*i915), pmu.base);
+	WARN_ON(event->parent);
 
-	if (!intel_engine_lookup_user(i915, engine_event_class(event),
-				      engine_event_instance(event)))
-		return -ENODEV;
+	if (is_engine_event(event))
+		engine_event_destroy(event);
+}
 
-	switch (engine_event_sample(event)) {
+static int
+engine_event_status(struct intel_engine_cs *engine,
+		    enum drm_i915_pmu_engine_sample sample)
+{
+	switch (sample) {
 	case I915_SAMPLE_BUSY:
 	case I915_SAMPLE_WAIT:
 		break;
 	case I915_SAMPLE_SEMA:
+		if (INTEL_GEN(engine->i915) < 6)
+			return -ENODEV;
+		break;
+	default:
+		return -ENOENT;
+	}
+
+	return 0;
+}
+
+static int
+config_status(struct drm_i915_private *i915, u64 config)
+{
+	switch (config) {
+	case I915_PMU_ACTUAL_FREQUENCY:
+		if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+			/* Requires a mutex for sampling! */
+			return -ENODEV;
+		/* Fall-through. */
+	case I915_PMU_REQUESTED_FREQUENCY:
 		if (INTEL_GEN(i915) < 6)
 			return -ENODEV;
 		break;
+	case I915_PMU_INTERRUPTS:
+		break;
+	case I915_PMU_RC6_RESIDENCY:
+		if (!HAS_RC6(i915))
+			return -ENODEV;
+		break;
 	default:
 		return -ENOENT;
 	}
@@ -314,6 +355,30 @@ static int engine_event_init(struct perf_event *event)
 	return 0;
 }
 
+static int engine_event_init(struct perf_event *event)
+{
+	struct drm_i915_private *i915 =
+		container_of(event->pmu, typeof(*i915), pmu.base);
+	struct intel_engine_cs *engine;
+	u8 sample;
+	int ret;
+
+	engine = intel_engine_lookup_user(i915, engine_event_class(event),
+					  engine_event_instance(event));
+	if (!engine)
+		return -ENODEV;
+
+	sample = engine_event_sample(event);
+	ret = engine_event_status(engine, sample);
+	if (ret)
+		return ret;
+
+	if (sample == I915_SAMPLE_BUSY && intel_engine_supports_stats(engine))
+		ret = intel_enable_engine_stats(engine);
+
+	return ret;
+}
+
 static int i915_pmu_event_init(struct perf_event *event)
 {
 	struct drm_i915_private *i915 =
@@ -337,30 +402,10 @@ static int i915_pmu_event_init(struct perf_event *event)
 	if (!cpumask_test_cpu(event->cpu, &i915_pmu_cpumask))
 		return -EINVAL;
 
-	if (is_engine_event(event)) {
+	if (is_engine_event(event))
 		ret = engine_event_init(event);
-	} else {
-		ret = 0;
-		switch (event->attr.config) {
-		case I915_PMU_ACTUAL_FREQUENCY:
-			if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
-				/* Requires a mutex for sampling! */
-				ret = -ENODEV;
-		case I915_PMU_REQUESTED_FREQUENCY:
-			if (INTEL_GEN(i915) < 6)
-				ret = -ENODEV;
-			break;
-		case I915_PMU_INTERRUPTS:
-			break;
-		case I915_PMU_RC6_RESIDENCY:
-			if (!HAS_RC6(i915))
-				ret = -ENODEV;
-			break;
-		default:
-			ret = -ENOENT;
-			break;
-		}
-	}
+	else
+		ret = config_status(i915, event->attr.config);
+
 	if (ret)
 		return ret;
@@ -387,7 +432,7 @@ static u64 __i915_pmu_event_read(struct perf_event *event)
 		if (WARN_ON_ONCE(!engine)) {
 			/* Do nothing */
 		} else if (sample == I915_SAMPLE_BUSY &&
-			   engine->pmu.busy_stats) {
+			   intel_engine_supports_stats(engine)) {
 			val = ktime_to_ns(intel_engine_get_busy_time(engine));
 		} else {
 			val = engine->pmu.sample[sample].cur;
@@ -442,12 +487,6 @@ static void i915_pmu_event_read(struct perf_event *event)
 	local64_add(new - prev, &event->count);
 }
 
-static bool engine_needs_busy_stats(struct intel_engine_cs *engine)
-{
-	return intel_engine_supports_stats(engine) &&
-	       (engine->pmu.enable & BIT(I915_SAMPLE_BUSY));
-}
-
 static void i915_pmu_enable(struct perf_event *event)
 {
 	struct drm_i915_private *i915 =
@@ -487,21 +526,7 @@ static void i915_pmu_enable(struct perf_event *event)
 		GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS);
 		GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0);
 
-		if (engine->pmu.enable_count[sample]++ == 0) {
-			/*
-			 * Enable engine busy stats tracking if needed or
-			 * alternatively cancel the scheduled disable.
-			 *
-			 * If the delayed disable was pending, cancel it and
-			 * in this case do not enable since it already is.
-			 */
-			if (engine_needs_busy_stats(engine) &&
-			    !engine->pmu.busy_stats) {
-				engine->pmu.busy_stats = true;
-				if (!cancel_delayed_work(&engine->pmu.disable_busy_stats))
-					intel_enable_engine_stats(engine);
-			}
-		}
+		engine->pmu.enable_count[sample]++;
 	}
 
 	/*
@@ -514,14 +539,6 @@ static void i915_pmu_enable(struct perf_event *event)
 	spin_unlock_irqrestore(&i915->pmu.lock, flags);
 }
 
-static void __disable_busy_stats(struct work_struct *work)
-{
-	struct intel_engine_cs *engine =
-	       container_of(work, typeof(*engine), pmu.disable_busy_stats.work);
-
-	intel_disable_engine_stats(engine);
-}
-
 static void i915_pmu_disable(struct perf_event *event)
 {
 	struct drm_i915_private *i915 =
@@ -545,26 +562,8 @@ static void i915_pmu_disable(struct perf_event *event)
 		 * Decrement the reference count and clear the enabled
 		 * bitmask when the last listener on an event goes away.
 		 */
-		if (--engine->pmu.enable_count[sample] == 0) {
+		if (--engine->pmu.enable_count[sample] == 0)
 			engine->pmu.enable &= ~BIT(sample);
-			if (!engine_needs_busy_stats(engine) &&
-			    engine->pmu.busy_stats) {
-				engine->pmu.busy_stats = false;
-				/*
-				 * We request a delayed disable to handle the
-				 * rapid on/off cycles on events, which can
-				 * happen when tools like perf stat start, in a
-				 * nicer way.
-				 *
-				 * In addition, this also helps with busy stats
-				 * accuracy with background CPU offline/online
-				 * migration events.
-				 */
-				queue_delayed_work(system_wq,
-						   &engine->pmu.disable_busy_stats,
-						   round_jiffies_up_relative(HZ));
-			}
-		}
 	}
 
 	GEM_BUG_ON(bit >= I915_PMU_MASK_BITS);
@@ -657,52 +656,9 @@ static ssize_t i915_pmu_event_show(struct device *dev,
 	return sprintf(buf, "config=0x%lx\n", eattr->val);
 }
 
-#define I915_EVENT_ATTR(_name, _config) \
-	(&((struct i915_ext_attribute[]) { \
-		{ .attr = __ATTR(_name, 0444, i915_pmu_event_show, NULL), \
-		  .val = _config, } \
-	})[0].attr.attr)
-
-#define I915_EVENT_STR(_name, _str) \
-	(&((struct perf_pmu_events_attr[]) { \
-		{ .attr = __ATTR(_name, 0444, perf_event_sysfs_show, NULL), \
-		  .id = 0, \
-		  .event_str = _str, } \
-	})[0].attr.attr)
-
-#define I915_EVENT(_name, _config, _unit) \
-	I915_EVENT_ATTR(_name, _config), \
-	I915_EVENT_STR(_name.unit, _unit)
-
-#define I915_ENGINE_EVENT(_name, _class, _instance, _sample) \
-	I915_EVENT_ATTR(_name, __I915_PMU_ENGINE(_class, _instance, _sample)), \
-	I915_EVENT_STR(_name.unit, "ns")
-
-#define I915_ENGINE_EVENTS(_name, _class, _instance) \
-	I915_ENGINE_EVENT(_name##_instance-busy, _class, _instance, I915_SAMPLE_BUSY), \
-	I915_ENGINE_EVENT(_name##_instance-sema, _class, _instance, I915_SAMPLE_SEMA), \
-	I915_ENGINE_EVENT(_name##_instance-wait, _class, _instance, I915_SAMPLE_WAIT)
-
-static struct attribute *i915_pmu_events_attrs[] = {
-	I915_ENGINE_EVENTS(rcs, I915_ENGINE_CLASS_RENDER, 0),
-	I915_ENGINE_EVENTS(bcs, I915_ENGINE_CLASS_COPY, 0),
-	I915_ENGINE_EVENTS(vcs, I915_ENGINE_CLASS_VIDEO, 0),
-	I915_ENGINE_EVENTS(vcs, I915_ENGINE_CLASS_VIDEO, 1),
-	I915_ENGINE_EVENTS(vecs, I915_ENGINE_CLASS_VIDEO_ENHANCE, 0),
-	I915_EVENT(actual-frequency, I915_PMU_ACTUAL_FREQUENCY, "MHz"),
-	I915_EVENT(requested-frequency, I915_PMU_REQUESTED_FREQUENCY, "MHz"),
-	I915_EVENT_ATTR(interrupts, I915_PMU_INTERRUPTS),
-	I915_EVENT(rc6-residency, I915_PMU_RC6_RESIDENCY, "ns"),
-	NULL,
-};
-
-static const struct attribute_group i915_pmu_events_attr_group = {
+static struct attribute_group i915_pmu_events_attr_group = {
 	.name = "events",
-	.attrs = i915_pmu_events_attrs,
+	/* Patch in attrs at runtime. */
 };
 
 static ssize_t
@@ -720,7 +676,7 @@ static struct attribute *i915_cpumask_attrs[] = {
 	NULL,
 };
 
-static struct attribute_group i915_pmu_cpumask_attr_group = {
+static const struct attribute_group i915_pmu_cpumask_attr_group = {
 	.attrs = i915_cpumask_attrs,
 };
 
@@ -731,6 +687,193 @@ static const struct attribute_group *i915_pmu_attr_groups[] = {
 	NULL
 };
+#define __event(__config, __name, __unit) \
+{ \
+	.config = (__config), \
+	.name = (__name), \
+	.unit = (__unit), \
+}
+
+#define __engine_event(__sample, __name) \
+{ \
+	.sample = (__sample), \
+	.name = (__name), \
+}
+
+static struct i915_ext_attribute *
+add_i915_attr(struct i915_ext_attribute *attr, const char *name, u64 config)
+{
+	sysfs_attr_init(&attr->attr.attr);
+	attr->attr.attr.name = name;
+	attr->attr.attr.mode = 0444;
+	attr->attr.show = i915_pmu_event_show;
+	attr->val = config;
+
+	return ++attr;
+}
+
+static struct perf_pmu_events_attr *
+add_pmu_attr(struct perf_pmu_events_attr *attr, const char *name,
+	     const char *str)
+{
+	sysfs_attr_init(&attr->attr.attr);
+	attr->attr.attr.name = name;
+	attr->attr.attr.mode = 0444;
+	attr->attr.show = perf_event_sysfs_show;
+	attr->event_str = str;
+
+	return ++attr;
+}
+
+static struct attribute **
+create_event_attributes(struct drm_i915_private *i915)
+{
+	static const struct {
+		u64 config;
+		const char *name;
+		const char *unit;
+	} events[] = {
+		__event(I915_PMU_ACTUAL_FREQUENCY, "actual-frequency", "MHz"),
+		__event(I915_PMU_REQUESTED_FREQUENCY, "requested-frequency", "MHz"),
+		__event(I915_PMU_INTERRUPTS, "interrupts", NULL),
+		__event(I915_PMU_RC6_RESIDENCY, "rc6-residency", "ns"),
+	};
+	static const struct {
+		enum drm_i915_pmu_engine_sample sample;
+		char *name;
+	} engine_events[] = {
+		__engine_event(I915_SAMPLE_BUSY, "busy"),
+		__engine_event(I915_SAMPLE_SEMA, "sema"),
+		__engine_event(I915_SAMPLE_WAIT, "wait"),
+	};
+	unsigned int count = 0;
+	struct perf_pmu_events_attr *pmu_attr = NULL, *pmu_iter;
+	struct i915_ext_attribute *i915_attr = NULL, *i915_iter;
+	struct attribute **attr = NULL, **attr_iter;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	unsigned int i;
+
+	/* Count how many counters we will be exposing. */
+	for (i = 0; i < ARRAY_SIZE(events); i++) {
+		if (!config_status(i915, events[i].config))
+			count++;
+	}
+
+	for_each_engine(engine, i915, id) {
+		for (i = 0; i < ARRAY_SIZE(engine_events); i++) {
+			if (!engine_event_status(engine,
+						 engine_events[i].sample))
+				count++;
+		}
+	}
+
+	/* Allocate attribute objects and table. */
+	i915_attr = kcalloc(count, sizeof(*i915_attr), GFP_KERNEL);
+	if (!i915_attr)
+		goto err_alloc;
+
+	pmu_attr = kcalloc(count, sizeof(*pmu_attr), GFP_KERNEL);
+	if (!pmu_attr)
+		goto err_alloc;
+
+	/* Max one pointer of each attribute type plus a termination entry. */
+	attr = kcalloc(count * 2 + 1, sizeof(*attr), GFP_KERNEL);
+	if (!attr)
+		goto err_alloc;
+
+	i915_iter = i915_attr;
+	pmu_iter = pmu_attr;
+	attr_iter = attr;
+
+	/* Initialize supported non-engine counters. */
+	for (i = 0; i < ARRAY_SIZE(events); i++) {
+		char *str;
+
+		if (config_status(i915, events[i].config))
+			continue;
+
+		str = kstrdup(events[i].name, GFP_KERNEL);
+		if (!str)
+			goto err;
+
+		*attr_iter++ = &i915_iter->attr.attr;
+		i915_iter = add_i915_attr(i915_iter, str, events[i].config);
+
+		if (events[i].unit) {
+			str = kasprintf(GFP_KERNEL, "%s.unit", events[i].name);
+			if (!str)
+				goto err;
+
+			*attr_iter++ = &pmu_iter->attr.attr;
+			pmu_iter = add_pmu_attr(pmu_iter, str, events[i].unit);
+		}
+	}
+
+	/* Initialize supported engine counters. */
+	for_each_engine(engine, i915, id) {
+		for (i = 0; i < ARRAY_SIZE(engine_events); i++) {
+			char *str;
+
+			if (engine_event_status(engine,
+						engine_events[i].sample))
+				continue;
+
+			str = kasprintf(GFP_KERNEL, "%s-%s",
+					engine->name, engine_events[i].name);
+			if (!str)
+				goto err;
+
+			*attr_iter++ = &i915_iter->attr.attr;
+			i915_iter =
+				add_i915_attr(i915_iter, str,
+					      __I915_PMU_ENGINE(engine->uabi_class,
+								engine->instance,
+								engine_events[i].sample));
+
+			str = kasprintf(GFP_KERNEL, "%s-%s.unit",
+					engine->name, engine_events[i].name);
+			if (!str)
+				goto err;
+
+			*attr_iter++ = &pmu_iter->attr.attr;
+			pmu_iter = add_pmu_attr(pmu_iter, str, "ns");
+		}
+	}
+
+	i915->pmu.i915_attr = i915_attr;
+	i915->pmu.pmu_attr = pmu_attr;
+
+	return attr;
+
+err:;
+	for (attr_iter = attr; *attr_iter; attr_iter++)
+		kfree((*attr_iter)->name);
+
+err_alloc:
+	kfree(attr);
+	kfree(i915_attr);
+	kfree(pmu_attr);
+
+	return NULL;
+}
+
+static void free_event_attributes(struct drm_i915_private *i915)
+{
+	struct attribute **attr_iter = i915_pmu_events_attr_group.attrs;
+
+	for (; *attr_iter; attr_iter++)
+		kfree((*attr_iter)->name);
+
+	kfree(i915_pmu_events_attr_group.attrs);
+	kfree(i915->pmu.i915_attr);
+	kfree(i915->pmu.pmu_attr);
+
+	i915_pmu_events_attr_group.attrs = NULL;
+	i915->pmu.i915_attr = NULL;
+	i915->pmu.pmu_attr = NULL;
+}
+
 static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
 {
 	struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), node);
@@ -797,8 +940,6 @@ static void i915_pmu_unregister_cpuhp_state(struct drm_i915_private *i915)
 
 void i915_pmu_register(struct drm_i915_private *i915)
 {
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
 	int ret;
 
 	if (INTEL_GEN(i915) <= 2) {
@@ -806,6 +947,12 @@ void i915_pmu_register(struct drm_i915_private *i915)
 		return;
 	}
 
+	i915_pmu_events_attr_group.attrs = create_event_attributes(i915);
+	if (!i915_pmu_events_attr_group.attrs) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
 	i915->pmu.base.attr_groups	= i915_pmu_attr_groups;
 	i915->pmu.base.task_ctx_nr	= perf_invalid_context;
 	i915->pmu.base.event_init	= i915_pmu_event_init;
@@ -820,10 +967,6 @@ void i915_pmu_register(struct drm_i915_private *i915)
 	hrtimer_init(&i915->pmu.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	i915->pmu.timer.function = i915_sample;
 
-	for_each_engine(engine, i915, id)
-		INIT_DELAYED_WORK(&engine->pmu.disable_busy_stats,
-				  __disable_busy_stats);
-
 	ret = perf_pmu_register(&i915->pmu.base, "i915", -1);
 	if (ret)
 		goto err;
@@ -838,14 +981,12 @@ void i915_pmu_register(struct drm_i915_private *i915)
 	perf_pmu_unregister(&i915->pmu.base);
 err:
 	i915->pmu.base.event_init = NULL;
+	free_event_attributes(i915);
 	DRM_NOTE("Failed to register PMU! (err=%d)\n", ret);
 }
 
 void i915_pmu_unregister(struct drm_i915_private *i915)
 {
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-
 	if (!i915->pmu.base.event_init)
 		return;
@@ -853,13 +994,9 @@ void i915_pmu_unregister(struct drm_i915_private *i915)
 
 	hrtimer_cancel(&i915->pmu.timer);
 
-	for_each_engine(engine, i915, id) {
-		GEM_BUG_ON(engine->pmu.busy_stats);
-		flush_delayed_work(&engine->pmu.disable_busy_stats);
-	}
-
 	i915_pmu_unregister_cpuhp_state(i915);
 
 	perf_pmu_unregister(&i915->pmu.base);
 	i915->pmu.base.event_init = NULL;
+	free_event_attributes(i915);
 }
...
drivers/gpu/drm/i915/i915_pmu.h:
@@ -94,6 +94,14 @@ struct i915_pmu {
 	 * struct intel_engine_cs.
 	 */
 	struct i915_pmu_sample sample[__I915_NUM_PMU_SAMPLERS];
+	/**
+	 * @i915_attr: Memory block holding device attributes.
+	 */
+	void *i915_attr;
+	/**
+	 * @pmu_attr: Memory block holding device attributes.
+	 */
+	void *pmu_attr;
 };
 
 #ifdef CONFIG_PERF_EVENTS
...
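Aside: with the attribute table built at runtime, only counters the device actually supports appear under /sys/bus/event_source/devices/i915/events/. A hedged userspace sketch of reading one of them via perf_event_open(2); the type and config values below are placeholders that would normally be parsed from that sysfs directory, and error handling is trimmed:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr = {0};
	uint64_t count;
	int fd;

	attr.size = sizeof(attr);
	attr.type = 16;		/* assumed: read the real value from
				 * /sys/bus/event_source/devices/i915/type */
	attr.config = 3;	/* assumed: parse the real config from e.g.
				 * .../i915/events/interrupts instead */

	/* i915 is an uncore PMU: pid == -1, one CPU from .../i915/cpumask. */
	fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
	if (fd < 0)
		return 1;

	sleep(1);
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("count: %llu\n", (unsigned long long)count);

	close(fd);
	return 0;
}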
...@@ -1304,6 +1304,7 @@ enum i915_power_well_id { ...@@ -1304,6 +1304,7 @@ enum i915_power_well_id {
SKL_DISP_PW_DDI_B, SKL_DISP_PW_DDI_B,
SKL_DISP_PW_DDI_C, SKL_DISP_PW_DDI_C,
SKL_DISP_PW_DDI_D, SKL_DISP_PW_DDI_D,
CNL_DISP_PW_DDI_F = 6,
GLK_DISP_PW_AUX_A = 8, GLK_DISP_PW_AUX_A = 8,
GLK_DISP_PW_AUX_B, GLK_DISP_PW_AUX_B,
...@@ -1312,6 +1313,7 @@ enum i915_power_well_id { ...@@ -1312,6 +1313,7 @@ enum i915_power_well_id {
CNL_DISP_PW_AUX_B = GLK_DISP_PW_AUX_B, CNL_DISP_PW_AUX_B = GLK_DISP_PW_AUX_B,
CNL_DISP_PW_AUX_C = GLK_DISP_PW_AUX_C, CNL_DISP_PW_AUX_C = GLK_DISP_PW_AUX_C,
CNL_DISP_PW_AUX_D, CNL_DISP_PW_AUX_D,
CNL_DISP_PW_AUX_F,
SKL_DISP_PW_1 = 14, SKL_DISP_PW_1 = 14,
SKL_DISP_PW_2, SKL_DISP_PW_2,
...@@ -1963,7 +1965,7 @@ enum i915_power_well_id { ...@@ -1963,7 +1965,7 @@ enum i915_power_well_id {
#define _CNL_PORT_TX_DW2_LN0_B 0x162648 #define _CNL_PORT_TX_DW2_LN0_B 0x162648
#define _CNL_PORT_TX_DW2_LN0_C 0x162C48 #define _CNL_PORT_TX_DW2_LN0_C 0x162C48
#define _CNL_PORT_TX_DW2_LN0_D 0x162E48 #define _CNL_PORT_TX_DW2_LN0_D 0x162E48
#define _CNL_PORT_TX_DW2_LN0_F 0x162A48 #define _CNL_PORT_TX_DW2_LN0_F 0x162848
#define CNL_PORT_TX_DW2_GRP(port) _MMIO_PORT6(port, \ #define CNL_PORT_TX_DW2_GRP(port) _MMIO_PORT6(port, \
_CNL_PORT_TX_DW2_GRP_AE, \ _CNL_PORT_TX_DW2_GRP_AE, \
_CNL_PORT_TX_DW2_GRP_B, \ _CNL_PORT_TX_DW2_GRP_B, \
...@@ -2590,6 +2592,8 @@ enum i915_power_well_id { ...@@ -2590,6 +2592,8 @@ enum i915_power_well_id {
#define GFX_FORWARD_VBLANK_ALWAYS (1<<5) #define GFX_FORWARD_VBLANK_ALWAYS (1<<5)
#define GFX_FORWARD_VBLANK_COND (2<<5) #define GFX_FORWARD_VBLANK_COND (2<<5)
#define GEN11_GFX_DISABLE_LEGACY_MODE (1<<3)
#define VLV_DISPLAY_BASE 0x180000 #define VLV_DISPLAY_BASE 0x180000
#define VLV_MIPI_BASE VLV_DISPLAY_BASE #define VLV_MIPI_BASE VLV_DISPLAY_BASE
#define BXT_MIPI_BASE 0x60000 #define BXT_MIPI_BASE 0x60000
...@@ -2648,6 +2652,31 @@ enum i915_power_well_id { ...@@ -2648,6 +2652,31 @@ enum i915_power_well_id {
#define LM_FIFO_WATERMARK 0x0000001F #define LM_FIFO_WATERMARK 0x0000001F
#define MI_ARB_STATE _MMIO(0x20e4) /* 915+ only */ #define MI_ARB_STATE _MMIO(0x20e4) /* 915+ only */
#define MBUS_ABOX_CTL _MMIO(0x45038)
#define MBUS_ABOX_BW_CREDIT_MASK (3 << 20)
#define MBUS_ABOX_BW_CREDIT(x) ((x) << 20)
#define MBUS_ABOX_B_CREDIT_MASK (0xF << 16)
#define MBUS_ABOX_B_CREDIT(x) ((x) << 16)
#define MBUS_ABOX_BT_CREDIT_POOL2_MASK (0x1F << 8)
#define MBUS_ABOX_BT_CREDIT_POOL2(x) ((x) << 8)
#define MBUS_ABOX_BT_CREDIT_POOL1_MASK (0x1F << 0)
#define MBUS_ABOX_BT_CREDIT_POOL1(x) ((x) << 0)
#define _PIPEA_MBUS_DBOX_CTL 0x7003C
#define _PIPEB_MBUS_DBOX_CTL 0x7103C
#define PIPE_MBUS_DBOX_CTL(pipe) _MMIO_PIPE(pipe, _PIPEA_MBUS_DBOX_CTL, \
_PIPEB_MBUS_DBOX_CTL)
#define MBUS_DBOX_BW_CREDIT_MASK (3 << 14)
#define MBUS_DBOX_BW_CREDIT(x) ((x) << 14)
#define MBUS_DBOX_B_CREDIT_MASK (0x1F << 8)
#define MBUS_DBOX_B_CREDIT(x) ((x) << 8)
#define MBUS_DBOX_A_CREDIT_MASK (0xF << 0)
#define MBUS_DBOX_A_CREDIT(x) ((x) << 0)
#define MBUS_UBOX_CTL _MMIO(0x4503C)
#define MBUS_BBOX_CTL_S1 _MMIO(0x45040)
#define MBUS_BBOX_CTL_S2 _MMIO(0x45044)
/* Make render/texture TLB fetches lower priorty than associated data /* Make render/texture TLB fetches lower priorty than associated data
* fetches. This is not turned on by default * fetches. This is not turned on by default
*/ */
...@@ -3063,7 +3092,12 @@ enum i915_power_well_id { ...@@ -3063,7 +3092,12 @@ enum i915_power_well_id {
#define GMBUS_PIN_2_BXT 2 #define GMBUS_PIN_2_BXT 2
#define GMBUS_PIN_3_BXT 3 #define GMBUS_PIN_3_BXT 3
#define GMBUS_PIN_4_CNP 4 #define GMBUS_PIN_4_CNP 4
#define GMBUS_NUM_PINS 7 /* including 0 */ #define GMBUS_PIN_9_TC1_ICP 9
#define GMBUS_PIN_10_TC2_ICP 10
#define GMBUS_PIN_11_TC3_ICP 11
#define GMBUS_PIN_12_TC4_ICP 12
#define GMBUS_NUM_PINS 13 /* including 0 */
#define GMBUS1 _MMIO(dev_priv->gpio_mmio_base + 0x5104) /* command/status */ #define GMBUS1 _MMIO(dev_priv->gpio_mmio_base + 0x5104) /* command/status */
#define GMBUS_SW_CLR_INT (1<<31) #define GMBUS_SW_CLR_INT (1<<31)
#define GMBUS_SW_RDY (1<<30) #define GMBUS_SW_RDY (1<<30)
...@@ -4066,7 +4100,7 @@ enum { ...@@ -4066,7 +4100,7 @@ enum {
#define EDP_PSR_AUX_CTL _MMIO(dev_priv->psr_mmio_base + 0x10) #define EDP_PSR_AUX_CTL _MMIO(dev_priv->psr_mmio_base + 0x10)
#define EDP_PSR_AUX_DATA(i) _MMIO(dev_priv->psr_mmio_base + 0x14 + (i) * 4) /* 5 registers */ #define EDP_PSR_AUX_DATA(i) _MMIO(dev_priv->psr_mmio_base + 0x14 + (i) * 4) /* 5 registers */
#define EDP_PSR_STATUS_CTL _MMIO(dev_priv->psr_mmio_base + 0x40) #define EDP_PSR_STATUS _MMIO(dev_priv->psr_mmio_base + 0x40)
#define EDP_PSR_STATUS_STATE_MASK (7<<29) #define EDP_PSR_STATUS_STATE_MASK (7<<29)
#define EDP_PSR_STATUS_STATE_IDLE (0<<29) #define EDP_PSR_STATUS_STATE_IDLE (0<<29)
#define EDP_PSR_STATUS_STATE_SRDONACK (1<<29) #define EDP_PSR_STATUS_STATE_SRDONACK (1<<29)
...@@ -4093,7 +4127,7 @@ enum { ...@@ -4093,7 +4127,7 @@ enum {
#define EDP_PSR_PERF_CNT _MMIO(dev_priv->psr_mmio_base + 0x44) #define EDP_PSR_PERF_CNT _MMIO(dev_priv->psr_mmio_base + 0x44)
#define EDP_PSR_PERF_CNT_MASK 0xffffff #define EDP_PSR_PERF_CNT_MASK 0xffffff
#define EDP_PSR_DEBUG_CTL _MMIO(dev_priv->psr_mmio_base + 0x60) #define EDP_PSR_DEBUG _MMIO(dev_priv->psr_mmio_base + 0x60)
#define EDP_PSR_DEBUG_MASK_MAX_SLEEP (1<<28) #define EDP_PSR_DEBUG_MASK_MAX_SLEEP (1<<28)
#define EDP_PSR_DEBUG_MASK_LPSP (1<<27) #define EDP_PSR_DEBUG_MASK_LPSP (1<<27)
#define EDP_PSR_DEBUG_MASK_MEMUP (1<<26) #define EDP_PSR_DEBUG_MASK_MEMUP (1<<26)
...@@ -4116,7 +4150,7 @@ enum { ...@@ -4116,7 +4150,7 @@ enum {
#define EDP_PSR2_IDLE_MASK 0xf #define EDP_PSR2_IDLE_MASK 0xf
#define EDP_PSR2_FRAME_BEFORE_SU(a) ((a)<<4) #define EDP_PSR2_FRAME_BEFORE_SU(a) ((a)<<4)
#define EDP_PSR2_STATUS_CTL _MMIO(0x6f940) #define EDP_PSR2_STATUS _MMIO(0x6f940)
#define EDP_PSR2_STATUS_STATE_MASK (0xf<<28) #define EDP_PSR2_STATUS_STATE_MASK (0xf<<28)
#define EDP_PSR2_STATUS_STATE_SHIFT 28 #define EDP_PSR2_STATUS_STATE_SHIFT 28
...@@ -5278,6 +5312,13 @@ enum { ...@@ -5278,6 +5312,13 @@ enum {
#define _DPD_AUX_CH_DATA4 (dev_priv->info.display_mmio_offset + 0x64320) #define _DPD_AUX_CH_DATA4 (dev_priv->info.display_mmio_offset + 0x64320)
#define _DPD_AUX_CH_DATA5 (dev_priv->info.display_mmio_offset + 0x64324) #define _DPD_AUX_CH_DATA5 (dev_priv->info.display_mmio_offset + 0x64324)
#define _DPF_AUX_CH_CTL (dev_priv->info.display_mmio_offset + 0x64510)
#define _DPF_AUX_CH_DATA1 (dev_priv->info.display_mmio_offset + 0x64514)
#define _DPF_AUX_CH_DATA2 (dev_priv->info.display_mmio_offset + 0x64518)
#define _DPF_AUX_CH_DATA3 (dev_priv->info.display_mmio_offset + 0x6451c)
#define _DPF_AUX_CH_DATA4 (dev_priv->info.display_mmio_offset + 0x64520)
#define _DPF_AUX_CH_DATA5 (dev_priv->info.display_mmio_offset + 0x64524)
#define DP_AUX_CH_CTL(port) _MMIO_PORT(port, _DPA_AUX_CH_CTL, _DPB_AUX_CH_CTL) #define DP_AUX_CH_CTL(port) _MMIO_PORT(port, _DPA_AUX_CH_CTL, _DPB_AUX_CH_CTL)
#define DP_AUX_CH_DATA(port, i) _MMIO(_PORT(port, _DPA_AUX_CH_DATA1, _DPB_AUX_CH_DATA1) + (i) * 4) /* 5 registers */ #define DP_AUX_CH_DATA(port, i) _MMIO(_PORT(port, _DPA_AUX_CH_DATA1, _DPB_AUX_CH_DATA1) + (i) * 4) /* 5 registers */
...@@ -6310,6 +6351,11 @@ enum { ...@@ -6310,6 +6351,11 @@ enum {
#define _PLANE_CTL_3_A 0x70380 #define _PLANE_CTL_3_A 0x70380
#define PLANE_CTL_ENABLE (1 << 31) #define PLANE_CTL_ENABLE (1 << 31)
#define PLANE_CTL_PIPE_GAMMA_ENABLE (1 << 30) /* Pre-GLK */ #define PLANE_CTL_PIPE_GAMMA_ENABLE (1 << 30) /* Pre-GLK */
/*
* ICL+ uses the same PLANE_CTL_FORMAT bits, but the field definition
* expanded to include bit 23 as well. However, the shift-24 based values
* correctly map to the same formats in ICL, as long as bit 23 is set to 0
*/
#define PLANE_CTL_FORMAT_MASK (0xf << 24) #define PLANE_CTL_FORMAT_MASK (0xf << 24)
#define PLANE_CTL_FORMAT_YUV422 ( 0 << 24) #define PLANE_CTL_FORMAT_YUV422 ( 0 << 24)
#define PLANE_CTL_FORMAT_NV12 ( 1 << 24) #define PLANE_CTL_FORMAT_NV12 ( 1 << 24)
...@@ -6319,6 +6365,7 @@ enum { ...@@ -6319,6 +6365,7 @@ enum {
#define PLANE_CTL_FORMAT_AYUV ( 8 << 24) #define PLANE_CTL_FORMAT_AYUV ( 8 << 24)
#define PLANE_CTL_FORMAT_INDEXED ( 12 << 24) #define PLANE_CTL_FORMAT_INDEXED ( 12 << 24)
#define PLANE_CTL_FORMAT_RGB_565 ( 14 << 24) #define PLANE_CTL_FORMAT_RGB_565 ( 14 << 24)
#define ICL_PLANE_CTL_FORMAT_MASK (0x1f << 23)
#define PLANE_CTL_PIPE_CSC_ENABLE (1 << 23) /* Pre-GLK */ #define PLANE_CTL_PIPE_CSC_ENABLE (1 << 23) /* Pre-GLK */
#define PLANE_CTL_KEY_ENABLE_MASK (0x3 << 21) #define PLANE_CTL_KEY_ENABLE_MASK (0x3 << 21)
#define PLANE_CTL_KEY_ENABLE_SOURCE ( 1 << 21) #define PLANE_CTL_KEY_ENABLE_SOURCE ( 1 << 21)
...@@ -6933,6 +6980,7 @@ enum { ...@@ -6933,6 +6980,7 @@ enum {
#define GEN8_DE_PORT_IMR _MMIO(0x44444) #define GEN8_DE_PORT_IMR _MMIO(0x44444)
#define GEN8_DE_PORT_IIR _MMIO(0x44448) #define GEN8_DE_PORT_IIR _MMIO(0x44448)
#define GEN8_DE_PORT_IER _MMIO(0x4444c) #define GEN8_DE_PORT_IER _MMIO(0x4444c)
#define CNL_AUX_CHANNEL_F (1 << 28)
#define GEN9_AUX_CHANNEL_D (1 << 27) #define GEN9_AUX_CHANNEL_D (1 << 27)
#define GEN9_AUX_CHANNEL_C (1 << 26) #define GEN9_AUX_CHANNEL_C (1 << 26)
#define GEN9_AUX_CHANNEL_B (1 << 25) #define GEN9_AUX_CHANNEL_B (1 << 25)
...@@ -6957,6 +7005,69 @@ enum { ...@@ -6957,6 +7005,69 @@ enum {
#define GEN8_PCU_IIR _MMIO(0x444e8) #define GEN8_PCU_IIR _MMIO(0x444e8)
#define GEN8_PCU_IER _MMIO(0x444ec) #define GEN8_PCU_IER _MMIO(0x444ec)
#define GEN11_GFX_MSTR_IRQ _MMIO(0x190010)
#define GEN11_MASTER_IRQ (1 << 31)
#define GEN11_PCU_IRQ (1 << 30)
#define GEN11_DISPLAY_IRQ (1 << 16)
#define GEN11_GT_DW_IRQ(x) (1 << (x))
#define GEN11_GT_DW1_IRQ (1 << 1)
#define GEN11_GT_DW0_IRQ (1 << 0)
#define GEN11_DISPLAY_INT_CTL _MMIO(0x44200)
#define GEN11_DISPLAY_IRQ_ENABLE (1 << 31)
#define GEN11_AUDIO_CODEC_IRQ (1 << 24)
#define GEN11_DE_PCH_IRQ (1 << 23)
#define GEN11_DE_MISC_IRQ (1 << 22)
#define GEN11_DE_PORT_IRQ (1 << 20)
#define GEN11_DE_PIPE_C (1 << 18)
#define GEN11_DE_PIPE_B (1 << 17)
#define GEN11_DE_PIPE_A (1 << 16)
#define GEN11_GT_INTR_DW0 _MMIO(0x190018)
#define GEN11_CSME (31)
#define GEN11_GUNIT (28)
#define GEN11_GUC (25)
#define GEN11_WDPERF (20)
#define GEN11_KCR (19)
#define GEN11_GTPM (16)
#define GEN11_BCS (15)
#define GEN11_RCS0 (0)
#define GEN11_GT_INTR_DW1 _MMIO(0x19001c)
#define GEN11_VECS(x) (31 - (x))
#define GEN11_VCS(x) (x)
#define GEN11_GT_INTR_DW(x) _MMIO(0x190018 + (x * 4))
#define GEN11_INTR_IDENTITY_REG0 _MMIO(0x190060)
#define GEN11_INTR_IDENTITY_REG1 _MMIO(0x190064)
#define GEN11_INTR_DATA_VALID (1 << 31)
#define GEN11_INTR_ENGINE_MASK (0xffff)
#define GEN11_INTR_IDENTITY_REG(x) _MMIO(0x190060 + (x * 4))
#define GEN11_IIR_REG0_SELECTOR _MMIO(0x190070)
#define GEN11_IIR_REG1_SELECTOR _MMIO(0x190074)
#define GEN11_IIR_REG_SELECTOR(x) _MMIO(0x190070 + (x * 4))
#define GEN11_RENDER_COPY_INTR_ENABLE _MMIO(0x190030)
#define GEN11_VCS_VECS_INTR_ENABLE _MMIO(0x190034)
#define GEN11_GUC_SG_INTR_ENABLE _MMIO(0x190038)
#define GEN11_GPM_WGBOXPERF_INTR_ENABLE _MMIO(0x19003c)
#define GEN11_CRYPTO_RSVD_INTR_ENABLE _MMIO(0x190040)
#define GEN11_GUNIT_CSME_INTR_ENABLE _MMIO(0x190044)
#define GEN11_RCS0_RSVD_INTR_MASK _MMIO(0x190090)
#define GEN11_BCS_RSVD_INTR_MASK _MMIO(0x1900a0)
#define GEN11_VCS0_VCS1_INTR_MASK _MMIO(0x1900a8)
#define GEN11_VCS2_VCS3_INTR_MASK _MMIO(0x1900ac)
#define GEN11_VECS0_VECS1_INTR_MASK _MMIO(0x1900d0)
#define GEN11_GUC_SG_INTR_MASK _MMIO(0x1900e8)
#define GEN11_GPM_WGBOXPERF_INTR_MASK _MMIO(0x1900ec)
#define GEN11_CRYPTO_RSVD_INTR_MASK _MMIO(0x1900f0)
#define GEN11_GUNIT_CSME_INTR_MASK _MMIO(0x1900f4)
#define ILK_DISPLAY_CHICKEN2 _MMIO(0x42004) #define ILK_DISPLAY_CHICKEN2 _MMIO(0x42004)
/* Required on all Ironlake and Sandybridge according to the B-Spec. */ /* Required on all Ironlake and Sandybridge according to the B-Spec. */
#define ILK_ELPIN_409_SELECT (1 << 25) #define ILK_ELPIN_409_SELECT (1 << 25)
...@@ -7011,8 +7122,12 @@ enum { ...@@ -7011,8 +7122,12 @@ enum {
#define CHICKEN_TRANS_A 0x420c0 #define CHICKEN_TRANS_A 0x420c0
#define CHICKEN_TRANS_B 0x420c4 #define CHICKEN_TRANS_B 0x420c4
#define CHICKEN_TRANS(trans) _MMIO_TRANS(trans, CHICKEN_TRANS_A, CHICKEN_TRANS_B) #define CHICKEN_TRANS(trans) _MMIO_TRANS(trans, CHICKEN_TRANS_A, CHICKEN_TRANS_B)
#define PSR2_VSC_ENABLE_PROG_HEADER (1<<12) #define DDI_TRAINING_OVERRIDE_ENABLE (1<<19)
#define PSR2_ADD_VERTICAL_LINE_COUNT (1<<15) #define DDI_TRAINING_OVERRIDE_VALUE (1<<18)
#define DDIE_TRAINING_OVERRIDE_ENABLE (1<<17) /* CHICKEN_TRANS_A only */
#define DDIE_TRAINING_OVERRIDE_VALUE (1<<16) /* CHICKEN_TRANS_A only */
#define PSR2_ADD_VERTICAL_LINE_COUNT (1<<15)
#define PSR2_VSC_ENABLE_PROG_HEADER (1<<12)
#define DISP_ARB_CTL _MMIO(0x45000) #define DISP_ARB_CTL _MMIO(0x45000)
#define DISP_FBC_MEMORY_WAKE (1<<31) #define DISP_FBC_MEMORY_WAKE (1<<31)
...@@ -7351,6 +7466,8 @@ enum { ...@@ -7351,6 +7466,8 @@ enum {
#define CNP_RAWCLK_DIV(div) ((div) << 16) #define CNP_RAWCLK_DIV(div) ((div) << 16)
#define CNP_RAWCLK_FRAC_MASK (0xf << 26) #define CNP_RAWCLK_FRAC_MASK (0xf << 26)
#define CNP_RAWCLK_FRAC(frac) ((frac) << 26) #define CNP_RAWCLK_FRAC(frac) ((frac) << 26)
#define ICP_RAWCLK_DEN(den) ((den) << 26)
#define ICP_RAWCLK_NUM(num) ((num) << 11)
#define PCH_DPLL_TMR_CFG _MMIO(0xc6208) #define PCH_DPLL_TMR_CFG _MMIO(0xc6208)
...@@ -8350,6 +8467,19 @@ enum skl_power_gate { ...@@ -8350,6 +8467,19 @@ enum skl_power_gate {
#define SKL_PW_TO_PG(pw) ((pw) - SKL_DISP_PW_1 + SKL_PG1) #define SKL_PW_TO_PG(pw) ((pw) - SKL_DISP_PW_1 + SKL_PG1)
#define SKL_FUSE_PG_DIST_STATUS(pg) (1 << (27 - (pg))) #define SKL_FUSE_PG_DIST_STATUS(pg) (1 << (27 - (pg)))
#define _CNL_AUX_REG_IDX(pw) ((pw) - 9)
#define _CNL_AUX_ANAOVRD1_B 0x162250
#define _CNL_AUX_ANAOVRD1_C 0x162210
#define _CNL_AUX_ANAOVRD1_D 0x1622D0
#define _CNL_AUX_ANAOVRD1_F 0x162A90
#define CNL_AUX_ANAOVRD1(pw) _MMIO(_PICK(_CNL_AUX_REG_IDX(pw), \
_CNL_AUX_ANAOVRD1_B, \
_CNL_AUX_ANAOVRD1_C, \
_CNL_AUX_ANAOVRD1_D, \
_CNL_AUX_ANAOVRD1_F))
#define CNL_AUX_ANAOVRD1_ENABLE (1<<16)
#define CNL_AUX_ANAOVRD1_LDO_BYPASS (1<<23)
/* Per-pipe DDI Function Control */
#define _TRANS_DDI_FUNC_CTL_A 0x60400
#define _TRANS_DDI_FUNC_CTL_B 0x61400
@@ -8663,10 +8793,12 @@ enum skl_power_gate {
 * CNL Clocks
 */
#define DPCLKA_CFGCR0 _MMIO(0x6C200)
-#define DPCLKA_CFGCR0_DDI_CLK_OFF(port) (1 << ((port)+10))
-#define DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port) (3 << ((port)*2))
-#define DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port) ((port)*2)
-#define DPCLKA_CFGCR0_DDI_CLK_SEL(pll, port) ((pll) << ((port)*2))
#define DPCLKA_CFGCR0_DDI_CLK_OFF(port) (1 << ((port) == PORT_F ? 23 : \
					       (port)+10))
#define DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port) ((port) == PORT_F ? 21 : \
					       (port)*2)
#define DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port) (3 << DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port))
#define DPCLKA_CFGCR0_DDI_CLK_SEL(pll, port) ((pll) << DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port))
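
The PORT_F special cases exist because DDI F's fields do not continue the two-bits-per-port pattern in DPCLKA_CFGCR0. A sketch of the resulting layout, assuming the usual enum port values PORT_A..PORT_F = 0..5 (an assumption about the enum, not shown in this hunk):

/* Hypothetical helpers, equivalent to the macros above. */
static inline int dpclka_ddi_clk_sel_shift(enum port port)
{
	/* clock-select fields: A=1:0, B=3:2, C=5:4, D=7:6, E=9:8, F=22:21 */
	return port == PORT_F ? 21 : port * 2;
}

static inline int dpclka_ddi_clk_off_bit(enum port port)
{
	/* clock-off bits: A..E at bits 10..14, F at bit 23 */
	return port == PORT_F ? 23 : port + 10;
}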
/* CNL PLL */
#define DPLL0_ENABLE 0x46010
@@ -8762,6 +8894,7 @@ enum skl_power_gate {
#define SFUSE_STRAP_RAW_FREQUENCY (1<<8)
#define SFUSE_STRAP_DISPLAY_DISABLED (1<<7)
#define SFUSE_STRAP_CRT_DISABLED (1<<6)
#define SFUSE_STRAP_DDIF_DETECTED (1<<3)
#define SFUSE_STRAP_DDIB_DETECTED (1<<2)
#define SFUSE_STRAP_DDIC_DETECTED (1<<1)
#define SFUSE_STRAP_DDID_DETECTED (1<<0)
...
@@ -365,18 +365,31 @@ int i915_sw_fence_await_sw_fence_gfp(struct i915_sw_fence *fence,
struct i915_sw_dma_fence_cb {
	struct dma_fence_cb base;
	struct i915_sw_fence *fence;
};

struct i915_sw_dma_fence_cb_timer {
	struct i915_sw_dma_fence_cb base;
	struct dma_fence *dma;
	struct timer_list timer;
	struct irq_work work;
	struct rcu_head rcu;
};

static void dma_i915_sw_fence_wake(struct dma_fence *dma,
				   struct dma_fence_cb *data)
{
	struct i915_sw_dma_fence_cb *cb = container_of(data, typeof(*cb), base);

	i915_sw_fence_complete(cb->fence);
	kfree(cb);
}

static void timer_i915_sw_fence_wake(struct timer_list *t)
{
-	struct i915_sw_dma_fence_cb *cb = from_timer(cb, t, timer);
	struct i915_sw_dma_fence_cb_timer *cb = from_timer(cb, t, timer);
	struct i915_sw_fence *fence;

-	fence = xchg(&cb->fence, NULL);
	fence = xchg(&cb->base.fence, NULL);
	if (!fence)
		return;
@@ -388,13 +401,14 @@ static void timer_i915_sw_fence_wake(struct timer_list *t)
	i915_sw_fence_complete(fence);
}

-static void dma_i915_sw_fence_wake(struct dma_fence *dma,
-				   struct dma_fence_cb *data)
static void dma_i915_sw_fence_wake_timer(struct dma_fence *dma,
					 struct dma_fence_cb *data)
{
-	struct i915_sw_dma_fence_cb *cb = container_of(data, typeof(*cb), base);
	struct i915_sw_dma_fence_cb_timer *cb =
		container_of(data, typeof(*cb), base.base);
	struct i915_sw_fence *fence;

-	fence = xchg(&cb->fence, NULL);
	fence = xchg(&cb->base.fence, NULL);
	if (fence)
		i915_sw_fence_complete(fence);
@@ -403,7 +417,8 @@ static void dma_i915_sw_fence_wake(struct dma_fence *dma,
static void irq_i915_sw_fence_work(struct irq_work *wrk)
{
-	struct i915_sw_dma_fence_cb *cb = container_of(wrk, typeof(*cb), work);
	struct i915_sw_dma_fence_cb_timer *cb =
		container_of(wrk, typeof(*cb), work);

	del_timer_sync(&cb->timer);
	dma_fence_put(cb->dma);
@@ -417,6 +432,7 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
			  gfp_t gfp)
{
	struct i915_sw_dma_fence_cb *cb;
	dma_fence_func_t func;
	int ret;

	debug_fence_assert(fence);
@@ -425,7 +441,10 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
	if (dma_fence_is_signaled(dma))
		return 0;

-	cb = kmalloc(sizeof(*cb), gfp);
	cb = kmalloc(timeout ?
		     sizeof(struct i915_sw_dma_fence_cb_timer) :
		     sizeof(struct i915_sw_dma_fence_cb),
		     gfp);
	if (!cb) {
		if (!gfpflags_allow_blocking(gfp))
			return -ENOMEM;
@@ -436,19 +455,26 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
	cb->fence = fence;
	i915_sw_fence_await(fence);

-	cb->dma = NULL;
-	timer_setup(&cb->timer, timer_i915_sw_fence_wake, TIMER_IRQSAFE);
-	init_irq_work(&cb->work, irq_i915_sw_fence_work);
	func = dma_i915_sw_fence_wake;
	if (timeout) {
-		cb->dma = dma_fence_get(dma);
-		mod_timer(&cb->timer, round_jiffies_up(jiffies + timeout));
		struct i915_sw_dma_fence_cb_timer *timer =
			container_of(cb, typeof(*timer), base);

		timer->dma = dma_fence_get(dma);
		init_irq_work(&timer->work, irq_i915_sw_fence_work);
		timer_setup(&timer->timer,
			    timer_i915_sw_fence_wake, TIMER_IRQSAFE);
		mod_timer(&timer->timer, round_jiffies_up(jiffies + timeout));

		func = dma_i915_sw_fence_wake_timer;
	}

-	ret = dma_fence_add_callback(dma, &cb->base, dma_i915_sw_fence_wake);
	ret = dma_fence_add_callback(dma, &cb->base, func);
	if (ret == 0) {
		ret = 1;
	} else {
-		dma_i915_sw_fence_wake(dma, &cb->base);
		func(dma, &cb->base);
		if (ret == -ENOENT) /* fence already signaled */
			ret = 0;
	}
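
The shape of this change is a common one: a small base callback struct plus an extended variant recovered via container_of, so the frequent no-timeout path pays only for the base allocation. A self-contained sketch of the idiom, with generic names rather than driver code:

#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct cb_base {
	void (*fn)(struct cb_base *cb);
};

struct cb_timer {
	struct cb_base base;	/* embedded first, so &t->base aliases t */
	unsigned long deadline;
};

static void timer_cb(struct cb_base *cb)
{
	/* Safe only because timer_cb is installed solely on allocations
	 * sized for struct cb_timer, mirroring how the func pointer above
	 * is chosen at the same time as the allocation size. */
	struct cb_timer *t = container_of(cb, struct cb_timer, base);
	(void)t->deadline;
}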
...
...@@ -56,7 +56,6 @@ intel_create_plane_state(struct drm_plane *plane) ...@@ -56,7 +56,6 @@ intel_create_plane_state(struct drm_plane *plane)
state->base.plane = plane; state->base.plane = plane;
state->base.rotation = DRM_MODE_ROTATE_0; state->base.rotation = DRM_MODE_ROTATE_0;
state->ckey.flags = I915_SET_COLORKEY_NONE;
return state; return state;
} }
......
@@ -1146,6 +1146,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
	{DVO_PORT_HDMIC, DVO_PORT_DPC, -1},
	{DVO_PORT_HDMID, DVO_PORT_DPD, -1},
	{DVO_PORT_CRT, DVO_PORT_HDMIE, DVO_PORT_DPE},
	{DVO_PORT_HDMIF, DVO_PORT_DPF, -1},
};

/*
@@ -1273,6 +1274,27 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
		DRM_DEBUG_KMS("VBT HDMI boost level for port %c: %d\n",
			      port_name(port), info->hdmi_boost_level);
	}
/* DP max link rate for CNL+ */
if (bdb_version >= 216) {
switch (child->dp_max_link_rate) {
default:
case VBT_DP_MAX_LINK_RATE_HBR3:
info->dp_max_link_rate = 810000;
break;
case VBT_DP_MAX_LINK_RATE_HBR2:
info->dp_max_link_rate = 540000;
break;
case VBT_DP_MAX_LINK_RATE_HBR:
info->dp_max_link_rate = 270000;
break;
case VBT_DP_MAX_LINK_RATE_LBR:
info->dp_max_link_rate = 162000;
break;
}
DRM_DEBUG_KMS("VBT DP max link rate for port %c: %d\n",
port_name(port), info->dp_max_link_rate);
}
}
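
The decoded values are link rates in kHz of the DP link clock, the same unit used by the rest of the DP code, so e.g. 162000 kHz carries 1.62 Gbps per lane. In table form, with the default arm of the switch falling through to HBR3:

  VBT_DP_MAX_LINK_RATE_LBR  -> 162000 kHz (RBR,  1.62 Gbps/lane)
  VBT_DP_MAX_LINK_RATE_HBR  -> 270000 kHz (HBR,  2.7  Gbps/lane)
  VBT_DP_MAX_LINK_RATE_HBR2 -> 540000 kHz (HBR2, 5.4  Gbps/lane)
  VBT_DP_MAX_LINK_RATE_HBR3 -> 810000 kHz (HBR3, 8.1  Gbps/lane)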
static void parse_ddi_ports(struct drm_i915_private *dev_priv, u8 bdb_version)
@@ -1696,6 +1718,7 @@ bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port por
	[PORT_C] = { DVO_PORT_DPC, DVO_PORT_HDMIC, },
	[PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, },
	[PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, },
	[PORT_F] = { DVO_PORT_DPF, DVO_PORT_HDMIF, },
};
int i;
@@ -1734,6 +1757,7 @@ bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
	[PORT_C] = DVO_PORT_DPC,
	[PORT_D] = DVO_PORT_DPD,
	[PORT_E] = DVO_PORT_DPE,
	[PORT_F] = DVO_PORT_DPF,
};
int i;
@@ -1769,6 +1793,7 @@ static bool child_dev_is_dp_dual_mode(const struct child_device_config *child,
	[PORT_C] = { DVO_PORT_DPC, DVO_PORT_HDMIC, },
	[PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, },
	[PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, },
	[PORT_F] = { DVO_PORT_DPF, DVO_PORT_HDMIF, },
};

if (port == PORT_A || port >= ARRAY_SIZE(port_mapping))
@@ -1935,6 +1960,11 @@ intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv,
		if (port == PORT_D)
			return true;
		break;
	case DVO_PORT_DPF:
	case DVO_PORT_HDMIF:
		if (port == PORT_F)
			return true;
		break;
	default:
		break;
	}
...
@@ -224,7 +224,7 @@ void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
	struct intel_wait *wait, *n;

	if (!b->irq_armed)
-		goto wakeup_signaler;
		return;

	/*
	 * We only disarm the irq when we are idle (all requests completed),
@@ -249,14 +249,6 @@ void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
	b->waiters = RB_ROOT;

	spin_unlock_irq(&b->rb_lock);
-
-	/*
-	 * The signaling thread may be asleep holding a reference to a request,
-	 * that had its signaling cancelled prior to being preempted. We need
-	 * to kick the signaler, just in case, to release any such reference.
-	 */
-wakeup_signaler:
-	wake_up_process(b->signaler);
}

static bool use_fake_irq(const struct intel_breadcrumbs *b)
@@ -385,6 +377,8 @@ static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
	bool first, armed;
	u32 seqno;

	GEM_BUG_ON(!wait->seqno);

	/* Insert the request into the retirement ordered list
	 * of waiters by walking the rbtree. If we are the oldest
	 * seqno in the tree (the first to be retired), then
@@ -631,6 +625,63 @@ static void signaler_set_rtpriority(void)
	sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
}
static void __intel_engine_remove_signal(struct intel_engine_cs *engine,
struct drm_i915_gem_request *request)
{
struct intel_breadcrumbs *b = &engine->breadcrumbs;
lockdep_assert_held(&b->rb_lock);
/*
* Wake up all other completed waiters and select the
* next bottom-half for the next user interrupt.
*/
__intel_engine_remove_wait(engine, &request->signaling.wait);
/*
* Find the next oldest signal. Note that as we have
* not been holding the lock, another client may
* have installed an even older signal than the one
* we just completed - so double check we are still
* the oldest before picking the next one.
*/
if (request->signaling.wait.seqno) {
if (request == rcu_access_pointer(b->first_signal)) {
struct rb_node *rb = rb_next(&request->signaling.node);
rcu_assign_pointer(b->first_signal,
rb ? to_signaler(rb) : NULL);
}
rb_erase(&request->signaling.node, &b->signals);
request->signaling.wait.seqno = 0;
}
}
static struct drm_i915_gem_request *
get_first_signal_rcu(struct intel_breadcrumbs *b)
{
/*
* See the big warnings for i915_gem_active_get_rcu() and similarly
* for dma_fence_get_rcu_safe() that explain the intricacies involved
* here with defeating CPU/compiler speculation and enforcing
* the required memory barriers.
*/
do {
struct drm_i915_gem_request *request;
request = rcu_dereference(b->first_signal);
if (request)
request = i915_gem_request_get_rcu(request);
barrier();
if (!request || request == rcu_access_pointer(b->first_signal))
return rcu_pointer_handoff(request);
i915_gem_request_put(request);
} while (1);
}
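
get_first_signal_rcu() is an instance of the standard RCU "dereference, acquire, re-check" loop. A generic sketch of the same shape, where obj_get_unless_zero()/obj_put() are hypothetical kref-style helpers standing in for i915_gem_request_get_rcu()/i915_gem_request_put():

static struct obj *get_live_ref(struct obj __rcu **slot)
{
	struct obj *o;

	do {
		o = rcu_dereference(*slot);
		if (o)
			o = obj_get_unless_zero(o);	/* hypothetical */
		barrier();	/* keep the re-check after the acquire */
		/* Only trust the reference if the slot still points at o;
		 * otherwise o may have been retired and recycled under us. */
		if (!o || o == rcu_access_pointer(*slot))
			return rcu_pointer_handoff(o);
		obj_put(o);	/* lost the race: drop and retry */
	} while (1);
}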
static int intel_breadcrumbs_signaler(void *arg)
{
	struct intel_engine_cs *engine = arg;
@@ -654,41 +705,21 @@ static int intel_breadcrumbs_signaler(void *arg)
		 * a new client.
		 */
		rcu_read_lock();
-		request = rcu_dereference(b->first_signal);
-		if (request)
-			request = i915_gem_request_get_rcu(request);
		request = get_first_signal_rcu(b);
		rcu_read_unlock();

		if (signal_complete(request)) {
-			local_bh_disable();
-			dma_fence_signal(&request->fence);
-			local_bh_enable(); /* kick start the tasklets */
-
-			spin_lock_irq(&b->rb_lock);
-
-			/* Wake up all other completed waiters and select the
-			 * next bottom-half for the next user interrupt.
-			 */
-			__intel_engine_remove_wait(engine,
-						   &request->signaling.wait);
-
-			/* Find the next oldest signal. Note that as we have
-			 * not been holding the lock, another client may
-			 * have installed an even older signal than the one
-			 * we just completed - so double check we are still
-			 * the oldest before picking the next one.
-			 */
-			if (request == rcu_access_pointer(b->first_signal)) {
-				struct rb_node *rb =
-					rb_next(&request->signaling.node);
-				rcu_assign_pointer(b->first_signal,
-						   rb ? to_signaler(rb) : NULL);
-			}
-			rb_erase(&request->signaling.node, &b->signals);
-			RB_CLEAR_NODE(&request->signaling.node);
-
-			spin_unlock_irq(&b->rb_lock);
-
-			i915_gem_request_put(request);
			if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
				      &request->fence.flags)) {
				local_bh_disable();
				dma_fence_signal(&request->fence);
				local_bh_enable(); /* kick start the tasklets */
			}

			if (READ_ONCE(request->signaling.wait.seqno)) {
				spin_lock_irq(&b->rb_lock);
				__intel_engine_remove_signal(engine, request);
				spin_unlock_irq(&b->rb_lock);
			}

			/* If the engine is saturated we may be continually
			 * processing completed requests. This angers the
@@ -699,19 +730,17 @@ static int intel_breadcrumbs_signaler(void *arg)
			 */
			do_schedule = need_resched();
		}

		i915_gem_request_put(request);

		if (unlikely(do_schedule)) {
			if (kthread_should_park())
				kthread_parkme();

-			if (unlikely(kthread_should_stop())) {
-				i915_gem_request_put(request);
-				break;
-			}
			if (unlikely(kthread_should_stop()))
				break;

			schedule();
		}
-		i915_gem_request_put(request);
	} while (1);

	__set_current_state(TASK_RUNNING);
@@ -740,12 +769,12 @@ void intel_engine_enable_signaling(struct drm_i915_gem_request *request,
	if (!seqno)
		return;

-	spin_lock(&b->rb_lock);
	GEM_BUG_ON(request->signaling.wait.seqno);
	request->signaling.wait.tsk = b->signaler;
	request->signaling.wait.request = request;
	request->signaling.wait.seqno = seqno;
-	i915_gem_request_get(request);

	spin_lock(&b->rb_lock);

	/* First add ourselves into the list of waiters, but register our
	 * bottom-half as the signaller thread. As per usual, only the oldest
@@ -784,7 +813,7 @@ void intel_engine_enable_signaling(struct drm_i915_gem_request *request,
		rcu_assign_pointer(b->first_signal, request);
	} else {
		__intel_engine_remove_wait(engine, &request->signaling.wait);
-		i915_gem_request_put(request);
		request->signaling.wait.seqno = 0;
		wakeup = false;
	}
@@ -796,32 +825,17 @@ void intel_engine_enable_signaling(struct drm_i915_gem_request *request,
void intel_engine_cancel_signaling(struct drm_i915_gem_request *request)
{
-	struct intel_engine_cs *engine = request->engine;
-	struct intel_breadcrumbs *b = &engine->breadcrumbs;
-
	GEM_BUG_ON(!irqs_disabled());
	lockdep_assert_held(&request->lock);
-	GEM_BUG_ON(!request->signaling.wait.seqno);

-	spin_lock(&b->rb_lock);
-
-	if (!RB_EMPTY_NODE(&request->signaling.node)) {
-		if (request == rcu_access_pointer(b->first_signal)) {
-			struct rb_node *rb =
-				rb_next(&request->signaling.node);
-			rcu_assign_pointer(b->first_signal,
-					   rb ? to_signaler(rb) : NULL);
-		}
-		rb_erase(&request->signaling.node, &b->signals);
-		RB_CLEAR_NODE(&request->signaling.node);
-		i915_gem_request_put(request);
-	}
-
-	__intel_engine_remove_wait(engine, &request->signaling.wait);
-	spin_unlock(&b->rb_lock);
	if (READ_ONCE(request->signaling.wait.seqno)) {
		struct intel_engine_cs *engine = request->engine;
		struct intel_breadcrumbs *b = &engine->breadcrumbs;

		spin_lock(&b->rb_lock);
		__intel_engine_remove_signal(engine, request);
		spin_unlock(&b->rb_lock);
	}
}
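
After this rework, wait.seqno doubles as the "queued for signaling" marker, which is what lets both the signaler thread and the cancel path do a lockless READ_ONCE() pre-check and only take rb_lock when there is actually work. The general shape of that idiom, as a sketch rather than driver code:

/* key != 0 means "on the tree"; cleared under the lock by whoever erases. */
if (READ_ONCE(obj->key)) {		/* racy but cheap pre-check */
	spin_lock(&tree_lock);
	if (obj->key) {			/* authoritative re-check */
		tree_erase(obj);
		obj->key = 0;
	}
	spin_unlock(&tree_lock);
}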
int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
...
@@ -858,7 +858,7 @@ static void skl_get_cdclk(struct drm_i915_private *dev_priv,
	skl_dpll0_update(dev_priv, cdclk_state);

-	cdclk_state->cdclk = cdclk_state->ref;
	cdclk_state->cdclk = cdclk_state->bypass = cdclk_state->ref;

	if (cdclk_state->vco == 0)
		goto out;
@@ -1006,7 +1006,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
	/* Choose frequency for this cdclk */
	switch (cdclk) {
	default:
-		WARN_ON(cdclk != dev_priv->cdclk.hw.ref);
		WARN_ON(cdclk != dev_priv->cdclk.hw.bypass);
		WARN_ON(vco != 0);
		/* fall through */
	case 308571:
@@ -1085,7 +1085,7 @@ static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
	/* Is PLL enabled and locked ? */
	if (dev_priv->cdclk.hw.vco == 0 ||
-	    dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.ref)
	    dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.bypass)
		goto sanitize;

	/* DPLL okay; verify the cdclock
@@ -1159,7 +1159,7 @@ void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
{
	struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw;

-	cdclk_state.cdclk = cdclk_state.ref;
	cdclk_state.cdclk = cdclk_state.bypass;
	cdclk_state.vco = 0;
	cdclk_state.voltage_level = skl_calc_voltage_level(cdclk_state.cdclk);
@@ -1199,7 +1199,7 @@ static int bxt_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
{
	int ratio;

-	if (cdclk == dev_priv->cdclk.hw.ref)
	if (cdclk == dev_priv->cdclk.hw.bypass)
		return 0;

	switch (cdclk) {
@@ -1224,7 +1224,7 @@ static int glk_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
{
	int ratio;

-	if (cdclk == dev_priv->cdclk.hw.ref)
	if (cdclk == dev_priv->cdclk.hw.bypass)
		return 0;

	switch (cdclk) {
@@ -1268,7 +1268,7 @@ static void bxt_get_cdclk(struct drm_i915_private *dev_priv,
	bxt_de_pll_update(dev_priv, cdclk_state);

-	cdclk_state->cdclk = cdclk_state->ref;
	cdclk_state->cdclk = cdclk_state->bypass = cdclk_state->ref;

	if (cdclk_state->vco == 0)
		goto out;
@@ -1352,7 +1352,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
	/* cdclk = vco / 2 / div{1,1.5,2,4} */
	switch (DIV_ROUND_CLOSEST(vco, cdclk)) {
	default:
-		WARN_ON(cdclk != dev_priv->cdclk.hw.ref);
		WARN_ON(cdclk != dev_priv->cdclk.hw.bypass);
		WARN_ON(vco != 0);
		/* fall through */
	case 2:
@@ -1378,7 +1378,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
	mutex_lock(&dev_priv->pcu_lock);
	ret = sandybridge_pcode_write_timeout(dev_priv,
					      HSW_PCODE_DE_WRITE_FREQ_REQ,
-					      0x80000000, 2000);
					      0x80000000, 150, 2);
	mutex_unlock(&dev_priv->pcu_lock);

	if (ret) {
@@ -1417,7 +1417,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
	 */
	ret = sandybridge_pcode_write_timeout(dev_priv,
					      HSW_PCODE_DE_WRITE_FREQ_REQ,
-					      cdclk_state->voltage_level, 2000);
					      cdclk_state->voltage_level, 150, 2);
	mutex_unlock(&dev_priv->pcu_lock);

	if (ret) {
@@ -1437,7 +1437,7 @@ static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
	intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");

	if (dev_priv->cdclk.hw.vco == 0 ||
-	    dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.ref)
	    dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.bypass)
		goto sanitize;

	/* DPLL okay; verify the cdclock
@@ -1526,7 +1526,7 @@ void bxt_uninit_cdclk(struct drm_i915_private *dev_priv)
{
	struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw;

-	cdclk_state.cdclk = cdclk_state.ref;
	cdclk_state.cdclk = cdclk_state.bypass;
	cdclk_state.vco = 0;
	cdclk_state.voltage_level = bxt_calc_voltage_level(cdclk_state.cdclk);
@@ -1586,7 +1586,7 @@ static void cnl_get_cdclk(struct drm_i915_private *dev_priv,
	cnl_cdclk_pll_update(dev_priv, cdclk_state);

-	cdclk_state->cdclk = cdclk_state->ref;
	cdclk_state->cdclk = cdclk_state->bypass = cdclk_state->ref;

	if (cdclk_state->vco == 0)
		goto out;
@@ -1672,7 +1672,7 @@ static void cnl_set_cdclk(struct drm_i915_private *dev_priv,
	/* cdclk = vco / 2 / div{1,2} */
	switch (DIV_ROUND_CLOSEST(vco, cdclk)) {
	default:
-		WARN_ON(cdclk != dev_priv->cdclk.hw.ref);
		WARN_ON(cdclk != dev_priv->cdclk.hw.bypass);
		WARN_ON(vco != 0);
		/* fall through */
	case 2:
@@ -1717,7 +1717,7 @@ static int cnl_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
{
	int ratio;

-	if (cdclk == dev_priv->cdclk.hw.ref)
	if (cdclk == dev_priv->cdclk.hw.bypass)
		return 0;

	switch (cdclk) {
@@ -1744,7 +1744,7 @@ static void cnl_sanitize_cdclk(struct drm_i915_private *dev_priv)
	intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");

	if (dev_priv->cdclk.hw.vco == 0 ||
-	    dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.ref)
	    dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.bypass)
		goto sanitize;

	/* DPLL okay; verify the cdclock
@@ -1817,7 +1817,7 @@ void cnl_uninit_cdclk(struct drm_i915_private *dev_priv)
{
	struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw;

-	cdclk_state.cdclk = cdclk_state.ref;
	cdclk_state.cdclk = cdclk_state.bypass;
	cdclk_state.vco = 0;
	cdclk_state.voltage_level = cnl_calc_voltage_level(cdclk_state.cdclk);
@@ -1858,9 +1858,10 @@ bool intel_cdclk_changed(const struct intel_cdclk_state *a,
void intel_dump_cdclk_state(const struct intel_cdclk_state *cdclk_state,
			    const char *context)
{
-	DRM_DEBUG_DRIVER("%s %d kHz, VCO %d kHz, ref %d kHz, voltage level %d\n",
-			 context, cdclk_state->cdclk, cdclk_state->vco,
-			 cdclk_state->ref, cdclk_state->voltage_level);
	DRM_DEBUG_DRIVER("%s %d kHz, VCO %d kHz, ref %d kHz, bypass %d kHz, voltage level %d\n",
			 context, cdclk_state->cdclk, cdclk_state->vco,
			 cdclk_state->ref, cdclk_state->bypass,
			 cdclk_state->voltage_level);
}

/**
@@ -1952,6 +1953,14 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
	if (crtc_state->has_audio && INTEL_GEN(dev_priv) >= 9)
		min_cdclk = max(2 * 96000, min_cdclk);

	/*
	 * On Valleyview some DSI panels lose (v|h)sync when the clock is lower
	 * than 320000 kHz.
	 */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) &&
	    IS_VALLEYVIEW(dev_priv))
		min_cdclk = max(320000, min_cdclk);

	if (min_cdclk > dev_priv->max_cdclk_freq) {
		DRM_DEBUG_KMS("required cdclk (%d kHz) exceeds max (%d kHz)\n",
			      min_cdclk, dev_priv->max_cdclk_freq);
@@ -2346,6 +2355,30 @@ static int cnp_rawclk(struct drm_i915_private *dev_priv)
	return divider + fraction;
}
static int icp_rawclk(struct drm_i915_private *dev_priv)
{
u32 rawclk;
int divider, numerator, denominator, frequency;
if (I915_READ(SFUSE_STRAP) & SFUSE_STRAP_RAW_FREQUENCY) {
frequency = 24000;
divider = 23;
numerator = 0;
denominator = 0;
} else {
frequency = 19200;
divider = 18;
numerator = 1;
denominator = 4;
}
rawclk = CNP_RAWCLK_DIV(divider) | ICP_RAWCLK_NUM(numerator) |
ICP_RAWCLK_DEN(denominator);
I915_WRITE(PCH_RAWCLK_FREQ, rawclk);
return frequency;
}
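
A worked check of the two branches: the constants appear to satisfy freq_MHz = (divider + 1) + numerator / (denominator + 1), though that relationship is inferred from these values rather than stated in the patch.

/*   24 MHz strap:   (23 + 1) + 0          = 24.0 MHz
 *   19.2 MHz strap: (18 + 1) + 1/(4 + 1)  = 19.2 MHz
 */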
static int pch_rawclk(struct drm_i915_private *dev_priv)
{
	return (I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK) * 1000;
@@ -2393,8 +2426,9 @@ static int g4x_hrawclk(struct drm_i915_private *dev_priv)
 */
void intel_update_rawclk(struct drm_i915_private *dev_priv)
{
-	if (HAS_PCH_CNP(dev_priv))
	if (HAS_PCH_ICP(dev_priv))
		dev_priv->rawclk_freq = icp_rawclk(dev_priv);
	else if (HAS_PCH_CNP(dev_priv))
		dev_priv->rawclk_freq = cnp_rawclk(dev_priv);
	else if (HAS_PCH_SPLIT(dev_priv))
		dev_priv->rawclk_freq = pch_rawclk(dev_priv);
...
@@ -37,8 +37,9 @@
#define I915_CSR_GLK "i915/glk_dmc_ver1_04.bin"
#define GLK_CSR_VERSION_REQUIRED CSR_VERSION(1, 4)

-#define I915_CSR_CNL "i915/cnl_dmc_ver1_06.bin"
-#define CNL_CSR_VERSION_REQUIRED CSR_VERSION(1, 6)
#define I915_CSR_CNL "i915/cnl_dmc_ver1_07.bin"
MODULE_FIRMWARE(I915_CSR_CNL);
#define CNL_CSR_VERSION_REQUIRED CSR_VERSION(1, 7)

#define I915_CSR_KBL "i915/kbl_dmc_ver1_04.bin"
MODULE_FIRMWARE(I915_CSR_KBL);
...
@@ -2404,6 +2404,48 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
				  crtc_state->hdmi_high_tmds_clock_ratio,
				  crtc_state->hdmi_scrambling);
/* Display WA #1143: skl,kbl,cfl */
if (IS_GEN9_BC(dev_priv)) {
/*
* For some reason these chicken bits have been
* stuffed into a transcoder register, even though
* the bits affect a specific DDI port rather than
* a specific transcoder.
*/
static const enum transcoder port_to_transcoder[] = {
[PORT_A] = TRANSCODER_EDP,
[PORT_B] = TRANSCODER_A,
[PORT_C] = TRANSCODER_B,
[PORT_D] = TRANSCODER_C,
[PORT_E] = TRANSCODER_A,
};
enum transcoder transcoder = port_to_transcoder[port];
u32 val;
val = I915_READ(CHICKEN_TRANS(transcoder));
if (port == PORT_E)
val |= DDIE_TRAINING_OVERRIDE_ENABLE |
DDIE_TRAINING_OVERRIDE_VALUE;
else
val |= DDI_TRAINING_OVERRIDE_ENABLE |
DDI_TRAINING_OVERRIDE_VALUE;
I915_WRITE(CHICKEN_TRANS(transcoder), val);
POSTING_READ(CHICKEN_TRANS(transcoder));
udelay(1);
if (port == PORT_E)
val &= ~(DDIE_TRAINING_OVERRIDE_ENABLE |
DDIE_TRAINING_OVERRIDE_VALUE);
else
val &= ~(DDI_TRAINING_OVERRIDE_ENABLE |
DDI_TRAINING_OVERRIDE_VALUE);
I915_WRITE(CHICKEN_TRANS(transcoder), val);
}
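
The workaround is a pulse: set the override bits, force the write out to the hardware, hold for roughly a microsecond, then clear. The generic shape of such a register pulse, as a sketch:

u32 val = I915_READ(reg);

I915_WRITE(reg, val | mask);
POSTING_READ(reg);	/* flush so the delay times the hardware, not a write buffer */
udelay(1);
I915_WRITE(reg, val & ~mask);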
	/* In HDMI/DVI mode, the port width, and swing/emphasis values
	 * are ignored so nothing special needs to be done besides
	 * enabling the port.
@@ -2868,6 +2910,10 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
		intel_dig_port->ddi_io_power_domain =
			POWER_DOMAIN_PORT_DDI_E_IO;
		break;
	case PORT_F:
		intel_dig_port->ddi_io_power_domain =
			POWER_DOMAIN_PORT_DDI_F_IO;
		break;
	default:
		MISSING_CASE(port);
	}
...
@@ -56,6 +56,7 @@ static const char * const platform_names[] = {
	PLATFORM_NAME(GEMINILAKE),
	PLATFORM_NAME(COFFEELAKE),
	PLATFORM_NAME(CANNONLAKE),
	PLATFORM_NAME(ICELAKE),
};
#undef PLATFORM_NAME
...
@@ -69,6 +69,8 @@ enum intel_platform {
	INTEL_COFFEELAKE,
	/* gen10 */
	INTEL_CANNONLAKE,
	/* gen11 */
	INTEL_ICELAKE,
	INTEL_MAX_PLATFORMS
};
...
@@ -2387,6 +2387,20 @@ static unsigned int intel_fb_modifier_to_tiling(uint64_t fb_modifier)
	}
}
/*
* From the Sky Lake PRM:
* "The Color Control Surface (CCS) contains the compression status of
* the cache-line pairs. The compression state of the cache-line pair
* is specified by 2 bits in the CCS. Each CCS cache-line represents
* an area on the main surface of 16x16 sets of 128 byte Y-tiled
* cache-line-pairs. CCS is always Y tiled."
*
* Since cache line pairs refers to horizontally adjacent cache lines,
* each cache line in the CCS corresponds to an area of 32x16 cache
* lines on the main surface. Since each pixel is 4 bytes, this gives
* us a ratio of one byte in the CCS for each 8x16 pixels in the
* main surface.
*/
static const struct drm_format_info ccs_formats[] = {
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
@@ -2917,14 +2931,19 @@ static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state
	return true;
}
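
Worked example of the ratio described above, for a hypothetical 3840x2160 XRGB8888 framebuffer: with one CCS byte covering an 8x16-pixel block of the main surface, the CCS plane is (3840 / 8) x (2160 / 16) = 480 x 135 = 64800 bytes, which is exactly what .cpp = { 4, 1 }, .hsub = 8, .vsub = 16 in ccs_formats[] encodes.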
-static int skl_check_main_surface(struct intel_plane_state *plane_state)
static int skl_check_main_surface(const struct intel_crtc_state *crtc_state,
				  struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	int x = plane_state->base.src.x1 >> 16;
	int y = plane_state->base.src.y1 >> 16;
	int w = drm_rect_width(&plane_state->base.src) >> 16;
	int h = drm_rect_height(&plane_state->base.src) >> 16;
	int dst_x = plane_state->base.dst.x1;
	int pipe_src_w = crtc_state->pipe_src_w;
	int max_width = skl_max_plane_width(fb, 0, rotation);
	int max_height = 4096;
	u32 alignment, offset, aux_offset = plane_state->aux.offset;
@@ -2935,6 +2954,24 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
		return -EINVAL;
	}
/*
* Display WA #1175: cnl,glk
* Planes other than the cursor may cause FIFO underflow and display
* corruption if starting less than 4 pixels from the right edge of
* the screen.
	 * Besides the above WA, fix the similar problem where planes other
	 * than the cursor that end less than 4 pixels from the left edge of
	 * the screen may also cause FIFO underflow and display corruption.
*/
if ((IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
(dst_x + w < 4 || dst_x > pipe_src_w - 4)) {
DRM_DEBUG_KMS("requested plane X %s position %d invalid (valid range %d-%d)\n",
dst_x + w < 4 ? "end" : "start",
dst_x + w < 4 ? dst_x + w : dst_x,
4, pipe_src_w - 4);
return -ERANGE;
}
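
Worked example with hypothetical numbers: for pipe_src_w = 1920, a plane of width 100 passes only if dst_x + 100 >= 4 and dst_x <= 1916. Positions dst_x = 1917..1919 (starting within the last 3 pixels before the right edge) or any placement ending left of x = 4 are rejected with -ERANGE.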
	intel_add_fb_offsets(&x, &y, plane_state, 0);
	offset = intel_compute_tile_offset(&x, &y, plane_state, 0);
	alignment = intel_surf_alignment(fb, 0);
@@ -3027,6 +3064,7 @@ static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	struct intel_crtc *crtc = to_intel_crtc(plane_state->base.crtc);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	int src_x = plane_state->base.src.x1 >> 16;
@@ -3037,17 +3075,8 @@ static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
	int y = src_y / vsub;
	u32 offset;

-	switch (plane->id) {
-	case PLANE_PRIMARY:
-	case PLANE_SPRITE0:
-		break;
-	default:
-		DRM_DEBUG_KMS("RC support only on plane 1 and 2\n");
-		return -EINVAL;
-	}
-
-	if (crtc->pipe == PIPE_C) {
-		DRM_DEBUG_KMS("No RC support on pipe C\n");
	if (!skl_plane_has_ccs(dev_priv, crtc->pipe, plane->id)) {
		DRM_DEBUG_KMS("No RC support on %s\n", plane->base.name);
		return -EINVAL;
	}
@@ -3067,7 +3096,8 @@ static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
	return 0;
}

-int skl_check_plane_surface(struct intel_plane_state *plane_state)
int skl_check_plane_surface(const struct intel_crtc_state *crtc_state,
			    struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
@@ -3107,7 +3137,7 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
		plane_state->aux.y = 0;
	}

-	ret = skl_check_main_surface(plane_state);
	ret = skl_check_main_surface(crtc_state, plane_state);
	if (ret)
		return ret;
@@ -4757,7 +4787,7 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
		return ret;

	/* check colorkey */
-	if (plane_state->ckey.flags != I915_SET_COLORKEY_NONE) {
	if (plane_state->ckey.flags) {
		DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed",
			      intel_plane->base.base.id,
			      intel_plane->base.name);
@@ -5641,6 +5671,8 @@ enum intel_display_power_domain intel_port_to_power_domain(enum port port)
		return POWER_DOMAIN_PORT_DDI_D_LANES;
	case PORT_E:
		return POWER_DOMAIN_PORT_DDI_E_LANES;
	case PORT_F:
		return POWER_DOMAIN_PORT_DDI_F_LANES;
	default:
		MISSING_CASE(port);
		return POWER_DOMAIN_PORT_OTHER;
@@ -8495,7 +8527,10 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
	val = I915_READ(PLANE_CTL(pipe, plane_id));

-	pixel_format = val & PLANE_CTL_FORMAT_MASK;
	if (INTEL_GEN(dev_priv) >= 11)
		pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK;
	else
		pixel_format = val & PLANE_CTL_FORMAT_MASK;

	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
		alpha = I915_READ(PLANE_COLOR_CTL(pipe, plane_id));
@@ -12519,7 +12554,13 @@ static int do_rps_boost(struct wait_queue_entry *_wait,
	struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
	struct drm_i915_gem_request *rq = wait->request;

-	gen6_rps_boost(rq, NULL);
	/*
	 * If we missed the vblank, but the request is already running it
	 * is reasonable to assume that it will complete before the next
	 * vblank without our intervention, so leave RPS alone.
	 */
	if (!i915_gem_request_started(rq))
		gen6_rps_boost(rq, NULL);
	i915_gem_request_put(rq);

	drm_crtc_vblank_put(wait->crtc);
@@ -12747,7 +12788,7 @@ intel_check_primary_plane(struct intel_plane *plane,
	if (INTEL_GEN(dev_priv) >= 9) {
		/* use scaler when colorkey is not required */
-		if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
		if (!state->ckey.flags) {
			min_scale = 1;
			max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
		}
@@ -12766,7 +12807,7 @@ intel_check_primary_plane(struct intel_plane *plane,
		return 0;

	if (INTEL_GEN(dev_priv) >= 9) {
-		ret = skl_check_plane_surface(state);
		ret = skl_check_plane_surface(crtc_state, state);
		if (ret)
			return ret;
@@ -12944,8 +12985,6 @@ static bool intel_primary_plane_format_mod_supported(struct drm_plane *plane,
		return i965_mod_supported(format, modifier);
	else
		return i8xx_mod_supported(format, modifier);
-
-	unreachable();
}

static bool intel_cursor_plane_format_mod_supported(struct drm_plane *plane,
@@ -13153,21 +13192,14 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
	else
		primary->i9xx_plane = (enum i9xx_plane_id) pipe;
	primary->id = PLANE_PRIMARY;
-	primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe);
	primary->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, primary->id);
	primary->check_plane = intel_check_primary_plane;

-	if (INTEL_GEN(dev_priv) >= 10) {
-		intel_primary_formats = skl_primary_formats;
-		num_formats = ARRAY_SIZE(skl_primary_formats);
-		modifiers = skl_format_modifiers_ccs;
-
-		primary->update_plane = skl_update_plane;
-		primary->disable_plane = skl_disable_plane;
-		primary->get_hw_state = skl_plane_get_hw_state;
-	} else if (INTEL_GEN(dev_priv) >= 9) {
	if (INTEL_GEN(dev_priv) >= 9) {
		intel_primary_formats = skl_primary_formats;
		num_formats = ARRAY_SIZE(skl_primary_formats);

-		if (pipe < PIPE_C)
		if (skl_plane_has_ccs(dev_priv, pipe, PLANE_PRIMARY))
			modifiers = skl_format_modifiers_ccs;
		else
			modifiers = skl_format_modifiers_noccs;
@@ -13281,7 +13313,7 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
	cursor->pipe = pipe;
	cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
	cursor->id = PLANE_CURSOR;
-	cursor->frontbuffer_bit = INTEL_FRONTBUFFER_CURSOR(pipe);
	cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id);

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
		cursor->update_plane = i845_update_cursor;
@@ -13597,7 +13629,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
		if (found || IS_GEN9_BC(dev_priv))
			intel_ddi_init(dev_priv, PORT_A);

-		/* DDI B, C and D detection is indicated by the SFUSE_STRAP
		/* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP
		 * register */
		found = I915_READ(SFUSE_STRAP);

@@ -13607,6 +13639,8 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
			intel_ddi_init(dev_priv, PORT_C);
		if (found & SFUSE_STRAP_DDID_DETECTED)
			intel_ddi_init(dev_priv, PORT_D);
		if (found & SFUSE_STRAP_DDIF_DETECTED)
			intel_ddi_init(dev_priv, PORT_F);
		/*
		 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
		 */
...
@@ -119,6 +119,7 @@ enum port {
	PORT_C,
	PORT_D,
	PORT_E,
	PORT_F,

	I915_MAX_PORTS
};
@@ -156,11 +157,13 @@ enum intel_display_power_domain {
	POWER_DOMAIN_PORT_DDI_C_LANES,
	POWER_DOMAIN_PORT_DDI_D_LANES,
	POWER_DOMAIN_PORT_DDI_E_LANES,
	POWER_DOMAIN_PORT_DDI_F_LANES,
	POWER_DOMAIN_PORT_DDI_A_IO,
	POWER_DOMAIN_PORT_DDI_B_IO,
	POWER_DOMAIN_PORT_DDI_C_IO,
	POWER_DOMAIN_PORT_DDI_D_IO,
	POWER_DOMAIN_PORT_DDI_E_IO,
	POWER_DOMAIN_PORT_DDI_F_IO,
	POWER_DOMAIN_PORT_DSI,
	POWER_DOMAIN_PORT_CRT,
	POWER_DOMAIN_PORT_OTHER,
@@ -171,6 +174,7 @@ enum intel_display_power_domain {
	POWER_DOMAIN_AUX_B,
	POWER_DOMAIN_AUX_C,
	POWER_DOMAIN_AUX_D,
	POWER_DOMAIN_AUX_F,
	POWER_DOMAIN_GMBUS,
	POWER_DOMAIN_MODESET,
	POWER_DOMAIN_GT_IRQ,
...
@@ -155,6 +155,28 @@ static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
	intel_dp->num_sink_rates = i;
}
/* Get length of rates array potentially limited by max_rate. */
static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate)
{
int i;
/* Limit results by potentially reduced max rate */
for (i = 0; i < len; i++) {
if (rates[len - i - 1] <= max_rate)
return len - i;
}
return 0;
}
/* Get length of common rates array potentially limited by max_rate. */
static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp,
int max_rate)
{
return intel_dp_rate_limit_len(intel_dp->common_rates,
intel_dp->num_common_rates, max_rate);
}
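
Worked example, assuming the driver's three-entry default_rates table { 162000, 270000, 540000 }:

  intel_dp_rate_limit_len(default_rates, 3, 270000) == 2   /* keeps 162000, 270000 */
  intel_dp_rate_limit_len(default_rates, 3, 100000) == 0   /* no rate fits */

The scan starts from the last entry because the rate arrays are sorted ascending, so the first element that fits determines the truncated length.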
/* Theoretical max between source and sink */
static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
{
@@ -218,15 +240,38 @@ intel_dp_downstream_max_dotclock(struct intel_dp *intel_dp)
	return max_dotclk;
}
static int cnl_max_source_rate(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
enum port port = dig_port->base.port;
u32 voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
/* Low voltage SKUs are limited to max of 5.4G */
if (voltage == VOLTAGE_INFO_0_85V)
return 540000;
/* For this SKU 8.1G is supported in all ports */
if (IS_CNL_WITH_PORT_F(dev_priv))
return 810000;
/* For other SKUs, max rate on ports A and D is 5.4G */
if (port == PORT_A || port == PORT_D)
return 540000;
return 810000;
}
static void
intel_dp_set_source_rates(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
-	enum port port = dig_port->base.port;
	const struct ddi_vbt_port_info *info =
		&dev_priv->vbt.ddi_port_info[dig_port->base.port];
	const int *source_rates;
-	int size;
-	u32 voltage;
	int size, max_rate = 0, vbt_max_rate = info->dp_max_link_rate;

	/* This should only be done once */
	WARN_ON(intel_dp->source_rates || intel_dp->num_source_rates);
@@ -237,10 +282,7 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
	} else if (IS_CANNONLAKE(dev_priv)) {
		source_rates = cnl_rates;
		size = ARRAY_SIZE(cnl_rates);
-		voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
-		if (port == PORT_A || port == PORT_D ||
-		    voltage == VOLTAGE_INFO_0_85V)
-			size -= 2;
		max_rate = cnl_max_source_rate(intel_dp);
	} else if (IS_GEN9_BC(dev_priv)) {
		source_rates = skl_rates;
		size = ARRAY_SIZE(skl_rates);
@@ -253,6 +295,14 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
		size = ARRAY_SIZE(default_rates) - 1;
	}

	if (max_rate && vbt_max_rate)
		max_rate = min(max_rate, vbt_max_rate);
	else if (vbt_max_rate)
		max_rate = vbt_max_rate;

	if (max_rate)
		size = intel_dp_rate_limit_len(source_rates, size, max_rate);

	intel_dp->source_rates = source_rates;
	intel_dp->num_source_rates = size;
}
@@ -309,22 +359,6 @@ static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
	}
}

-/* get length of common rates potentially limited by max_rate */
-static int intel_dp_common_len_rate_limit(struct intel_dp *intel_dp,
-					  int max_rate)
-{
-	const int *common_rates = intel_dp->common_rates;
-	int i, common_len = intel_dp->num_common_rates;
-
-	/* Limit results by potentially reduced max rate */
-	for (i = 0; i < common_len; i++) {
-		if (common_rates[common_len - i - 1] <= max_rate)
-			return common_len - i;
-	}
-
-	return 0;
-}
-
static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
				       uint8_t lane_count)
{
@@ -794,7 +828,8 @@ static void intel_pps_get_registers(struct intel_dp *intel_dp,
	regs->pp_stat = PP_STATUS(pps_idx);
	regs->pp_on = PP_ON_DELAYS(pps_idx);
	regs->pp_off = PP_OFF_DELAYS(pps_idx);
-	if (!IS_GEN9_LP(dev_priv) && !HAS_PCH_CNP(dev_priv))
	if (!IS_GEN9_LP(dev_priv) && !HAS_PCH_CNP(dev_priv) &&
	    !HAS_PCH_ICP(dev_priv))
		regs->pp_div = PP_DIVISOR(pps_idx);
}
@@ -1298,6 +1333,9 @@ static enum port intel_aux_port(struct drm_i915_private *dev_priv,
	case DP_AUX_D:
		aux_port = PORT_D;
		break;
	case DP_AUX_F:
		aux_port = PORT_F;
		break;
	default:
		MISSING_CASE(info->alternate_aux_channel);
		aux_port = PORT_A;
@@ -1378,6 +1416,7 @@ static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
	case PORT_B:
	case PORT_C:
	case PORT_D:
	case PORT_F:
		return DP_AUX_CH_CTL(port);
	default:
		MISSING_CASE(port);
@@ -1393,6 +1432,7 @@ static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv,
	case PORT_B:
	case PORT_C:
	case PORT_D:
	case PORT_F:
		return DP_AUX_CH_DATA(port, index);
	default:
		MISSING_CASE(port);
@@ -4455,173 +4495,174 @@ edp_detect(struct intel_dp *intel_dp)
	return status;
}

-static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
-				       struct intel_digital_port *port)
static bool ibx_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	u32 bit;

-	switch (port->base.port) {
-	case PORT_B:
	switch (encoder->hpd_pin) {
	case HPD_PORT_B:
		bit = SDE_PORTB_HOTPLUG;
		break;
-	case PORT_C:
	case HPD_PORT_C:
		bit = SDE_PORTC_HOTPLUG;
		break;
-	case PORT_D:
	case HPD_PORT_D:
		bit = SDE_PORTD_HOTPLUG;
		break;
	default:
-		MISSING_CASE(port->base.port);
		MISSING_CASE(encoder->hpd_pin);
		return false;
	}

	return I915_READ(SDEISR) & bit;
}

-static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
-				       struct intel_digital_port *port)
static bool cpt_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	u32 bit;

-	switch (port->base.port) {
-	case PORT_B:
	switch (encoder->hpd_pin) {
	case HPD_PORT_B:
		bit = SDE_PORTB_HOTPLUG_CPT;
		break;
-	case PORT_C:
	case HPD_PORT_C:
		bit = SDE_PORTC_HOTPLUG_CPT;
		break;
-	case PORT_D:
	case HPD_PORT_D:
		bit = SDE_PORTD_HOTPLUG_CPT;
		break;
	default:
-		MISSING_CASE(port->base.port);
		MISSING_CASE(encoder->hpd_pin);
		return false;
	}

	return I915_READ(SDEISR) & bit;
}

-static bool spt_digital_port_connected(struct drm_i915_private *dev_priv,
-				       struct intel_digital_port *port)
static bool spt_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	u32 bit;

-	switch (port->base.port) {
-	case PORT_A:
	switch (encoder->hpd_pin) {
	case HPD_PORT_A:
		bit = SDE_PORTA_HOTPLUG_SPT;
		break;
-	case PORT_E:
	case HPD_PORT_E:
		bit = SDE_PORTE_HOTPLUG_SPT;
		break;
	default:
-		return cpt_digital_port_connected(dev_priv, port);
		return cpt_digital_port_connected(encoder);
	}

	return I915_READ(SDEISR) & bit;
}

-static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
-				       struct intel_digital_port *port)
static bool g4x_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	u32 bit;

-	switch (port->base.port) {
-	case PORT_B:
	switch (encoder->hpd_pin) {
	case HPD_PORT_B:
		bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
		break;
-	case PORT_C:
	case HPD_PORT_C:
		bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
		break;
-	case PORT_D:
	case HPD_PORT_D:
		bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
		break;
	default:
-		MISSING_CASE(port->base.port);
		MISSING_CASE(encoder->hpd_pin);
		return false;
	}

	return I915_READ(PORT_HOTPLUG_STAT) & bit;
}

-static bool gm45_digital_port_connected(struct drm_i915_private *dev_priv,
-					struct intel_digital_port *port)
static bool gm45_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	u32 bit;

-	switch (port->base.port) {
-	case PORT_B:
	switch (encoder->hpd_pin) {
	case HPD_PORT_B:
		bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
		break;
-	case PORT_C:
	case HPD_PORT_C:
		bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
		break;
-	case PORT_D:
	case HPD_PORT_D:
		bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
		break;
	default:
-		MISSING_CASE(port->base.port);
		MISSING_CASE(encoder->hpd_pin);
		return false;
	}

	return I915_READ(PORT_HOTPLUG_STAT) & bit;
}

-static bool ilk_digital_port_connected(struct drm_i915_private *dev_priv,
-				       struct intel_digital_port *port)
static bool ilk_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

-	if (port->base.port == PORT_A)
	if (encoder->hpd_pin == HPD_PORT_A)
		return I915_READ(DEISR) & DE_DP_A_HOTPLUG;
	else
-		return ibx_digital_port_connected(dev_priv, port);
		return ibx_digital_port_connected(encoder);
}

-static bool snb_digital_port_connected(struct drm_i915_private *dev_priv,
-				       struct intel_digital_port *port)
static bool snb_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
if (encoder->hpd_pin == HPD_PORT_A)
return I915_READ(DEISR) & DE_DP_A_HOTPLUG; return I915_READ(DEISR) & DE_DP_A_HOTPLUG;
else else
return cpt_digital_port_connected(dev_priv, port); return cpt_digital_port_connected(encoder);
} }
static bool ivb_digital_port_connected(struct drm_i915_private *dev_priv, static bool ivb_digital_port_connected(struct intel_encoder *encoder)
struct intel_digital_port *port)
{ {
if (port->base.port == PORT_A) struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
if (encoder->hpd_pin == HPD_PORT_A)
return I915_READ(DEISR) & DE_DP_A_HOTPLUG_IVB; return I915_READ(DEISR) & DE_DP_A_HOTPLUG_IVB;
else else
return cpt_digital_port_connected(dev_priv, port); return cpt_digital_port_connected(encoder);
} }
static bool bdw_digital_port_connected(struct drm_i915_private *dev_priv, static bool bdw_digital_port_connected(struct intel_encoder *encoder)
struct intel_digital_port *port)
{ {
if (port->base.port == PORT_A) struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
if (encoder->hpd_pin == HPD_PORT_A)
return I915_READ(GEN8_DE_PORT_ISR) & GEN8_PORT_DP_A_HOTPLUG; return I915_READ(GEN8_DE_PORT_ISR) & GEN8_PORT_DP_A_HOTPLUG;
else else
return cpt_digital_port_connected(dev_priv, port); return cpt_digital_port_connected(encoder);
} }
static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv, static bool bxt_digital_port_connected(struct intel_encoder *encoder)
struct intel_digital_port *intel_dig_port)
{ {
struct intel_encoder *intel_encoder = &intel_dig_port->base; struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port;
u32 bit; u32 bit;
port = intel_hpd_pin_to_port(intel_encoder->hpd_pin); switch (encoder->hpd_pin) {
switch (port) { case HPD_PORT_A:
case PORT_A:
bit = BXT_DE_PORT_HP_DDIA; bit = BXT_DE_PORT_HP_DDIA;
break; break;
case PORT_B: case HPD_PORT_B:
bit = BXT_DE_PORT_HP_DDIB; bit = BXT_DE_PORT_HP_DDIB;
break; break;
case PORT_C: case HPD_PORT_C:
bit = BXT_DE_PORT_HP_DDIC; bit = BXT_DE_PORT_HP_DDIC;
break; break;
default: default:
MISSING_CASE(port); MISSING_CASE(encoder->hpd_pin);
return false; return false;
} }
...@@ -4630,33 +4671,33 @@ static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv, ...@@ -4630,33 +4671,33 @@ static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
/* /*
* intel_digital_port_connected - is the specified port connected? * intel_digital_port_connected - is the specified port connected?
* @dev_priv: i915 private structure * @encoder: intel_encoder
* @port: the port to test
* *
* Return %true if @port is connected, %false otherwise. * Return %true if port is connected, %false otherwise.
*/ */
bool intel_digital_port_connected(struct drm_i915_private *dev_priv, bool intel_digital_port_connected(struct intel_encoder *encoder)
struct intel_digital_port *port)
{ {
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
if (HAS_GMCH_DISPLAY(dev_priv)) { if (HAS_GMCH_DISPLAY(dev_priv)) {
if (IS_GM45(dev_priv)) if (IS_GM45(dev_priv))
return gm45_digital_port_connected(dev_priv, port); return gm45_digital_port_connected(encoder);
else else
return g4x_digital_port_connected(dev_priv, port); return g4x_digital_port_connected(encoder);
} }
if (IS_GEN5(dev_priv)) if (IS_GEN5(dev_priv))
return ilk_digital_port_connected(dev_priv, port); return ilk_digital_port_connected(encoder);
else if (IS_GEN6(dev_priv)) else if (IS_GEN6(dev_priv))
return snb_digital_port_connected(dev_priv, port); return snb_digital_port_connected(encoder);
else if (IS_GEN7(dev_priv)) else if (IS_GEN7(dev_priv))
return ivb_digital_port_connected(dev_priv, port); return ivb_digital_port_connected(encoder);
else if (IS_GEN8(dev_priv)) else if (IS_GEN8(dev_priv))
return bdw_digital_port_connected(dev_priv, port); return bdw_digital_port_connected(encoder);
else if (IS_GEN9_LP(dev_priv)) else if (IS_GEN9_LP(dev_priv))
return bxt_digital_port_connected(dev_priv, port); return bxt_digital_port_connected(encoder);
else else
return spt_digital_port_connected(dev_priv, port); return spt_digital_port_connected(encoder);
} }
static struct edid * static struct edid *
...@@ -4715,8 +4756,7 @@ intel_dp_long_pulse(struct intel_connector *connector) ...@@ -4715,8 +4756,7 @@ intel_dp_long_pulse(struct intel_connector *connector)
/* Can't disconnect eDP, but you can close the lid... */ /* Can't disconnect eDP, but you can close the lid... */
if (intel_dp_is_edp(intel_dp)) if (intel_dp_is_edp(intel_dp))
status = edp_detect(intel_dp); status = edp_detect(intel_dp);
else if (intel_digital_port_connected(dev_priv, else if (intel_digital_port_connected(&dp_to_dig_port(intel_dp)->base))
dp_to_dig_port(intel_dp)))
status = intel_dp_detect_dpcd(intel_dp); status = intel_dp_detect_dpcd(intel_dp);
else else
status = connector_status_disconnected; status = connector_status_disconnected;
...@@ -5227,7 +5267,8 @@ intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq) ...@@ -5227,7 +5267,8 @@ intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
pp_on = I915_READ(regs.pp_on); pp_on = I915_READ(regs.pp_on);
pp_off = I915_READ(regs.pp_off); pp_off = I915_READ(regs.pp_off);
if (!IS_GEN9_LP(dev_priv) && !HAS_PCH_CNP(dev_priv)) { if (!IS_GEN9_LP(dev_priv) && !HAS_PCH_CNP(dev_priv) &&
!HAS_PCH_ICP(dev_priv)) {
I915_WRITE(regs.pp_ctrl, pp_ctl); I915_WRITE(regs.pp_ctrl, pp_ctl);
pp_div = I915_READ(regs.pp_div); pp_div = I915_READ(regs.pp_div);
} }
...@@ -5245,7 +5286,8 @@ intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq) ...@@ -5245,7 +5286,8 @@ intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
seq->t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >> seq->t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
PANEL_POWER_DOWN_DELAY_SHIFT; PANEL_POWER_DOWN_DELAY_SHIFT;
if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv)) { if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv) ||
HAS_PCH_ICP(dev_priv)) {
seq->t11_t12 = ((pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >> seq->t11_t12 = ((pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
BXT_POWER_CYCLE_DELAY_SHIFT) * 1000; BXT_POWER_CYCLE_DELAY_SHIFT) * 1000;
} else { } else {
...@@ -5416,7 +5458,8 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp, ...@@ -5416,7 +5458,8 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
(seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT); (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
/* Compute the divisor for the pp clock, simply match the Bspec /* Compute the divisor for the pp clock, simply match the Bspec
* formula. */ * formula. */
if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv)) { if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv) ||
HAS_PCH_ICP(dev_priv)) {
pp_div = I915_READ(regs.pp_ctrl); pp_div = I915_READ(regs.pp_ctrl);
pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK; pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000) pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
...@@ -5442,7 +5485,8 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp, ...@@ -5442,7 +5485,8 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
I915_WRITE(regs.pp_on, pp_on); I915_WRITE(regs.pp_on, pp_on);
I915_WRITE(regs.pp_off, pp_off); I915_WRITE(regs.pp_off, pp_off);
if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv)) if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv) ||
HAS_PCH_ICP(dev_priv))
I915_WRITE(regs.pp_ctrl, pp_div); I915_WRITE(regs.pp_ctrl, pp_div);
else else
I915_WRITE(regs.pp_div, pp_div); I915_WRITE(regs.pp_div, pp_div);
...@@ -5450,7 +5494,8 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp, ...@@ -5450,7 +5494,8 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n", DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
I915_READ(regs.pp_on), I915_READ(regs.pp_on),
I915_READ(regs.pp_off), I915_READ(regs.pp_off),
(IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv)) ? (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv) ||
HAS_PCH_ICP(dev_priv)) ?
(I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK) : (I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK) :
I915_READ(regs.pp_div)); I915_READ(regs.pp_div));
} }
...@@ -5970,8 +6015,10 @@ intel_dp_init_connector_port_info(struct intel_digital_port *intel_dig_port) ...@@ -5970,8 +6015,10 @@ intel_dp_init_connector_port_info(struct intel_digital_port *intel_dig_port)
{ {
struct intel_encoder *encoder = &intel_dig_port->base; struct intel_encoder *encoder = &intel_dig_port->base;
struct intel_dp *intel_dp = &intel_dig_port->dp; struct intel_dp *intel_dp = &intel_dig_port->dp;
struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
encoder->hpd_pin = intel_hpd_pin(encoder->port); encoder->hpd_pin = intel_hpd_pin_default(dev_priv, encoder->port);
switch (encoder->port) { switch (encoder->port) {
case PORT_A: case PORT_A:
...@@ -5990,6 +6037,9 @@ intel_dp_init_connector_port_info(struct intel_digital_port *intel_dig_port) ...@@ -5990,6 +6037,9 @@ intel_dp_init_connector_port_info(struct intel_digital_port *intel_dig_port)
/* FIXME: Check VBT for actual wiring of PORT E */ /* FIXME: Check VBT for actual wiring of PORT E */
intel_dp->aux_power_domain = POWER_DOMAIN_AUX_D; intel_dp->aux_power_domain = POWER_DOMAIN_AUX_D;
break; break;
case PORT_F:
intel_dp->aux_power_domain = POWER_DOMAIN_AUX_F;
break;
default: default:
MISSING_CASE(encoder->port); MISSING_CASE(encoder->port);
} }
...@@ -6116,7 +6166,8 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, ...@@ -6116,7 +6166,8 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
/* init MST on ports that can support it */ /* init MST on ports that can support it */
if (HAS_DP_MST(dev_priv) && !intel_dp_is_edp(intel_dp) && if (HAS_DP_MST(dev_priv) && !intel_dp_is_edp(intel_dp) &&
(port == PORT_B || port == PORT_C || port == PORT_D)) (port == PORT_B || port == PORT_C ||
port == PORT_D || port == PORT_F))
intel_dp_mst_encoder_init(intel_dig_port, intel_dp_mst_encoder_init(intel_dig_port,
intel_connector->base.base.id); intel_connector->base.base.id);
......
@@ -1507,7 +1507,8 @@ u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
		  const struct intel_plane_state *plane_state);
u32 skl_plane_stride(const struct drm_framebuffer *fb, int plane,
		     unsigned int rotation);
-int skl_check_plane_surface(struct intel_plane_state *plane_state);
+int skl_check_plane_surface(const struct intel_crtc_state *crtc_state,
+			    struct intel_plane_state *plane_state);
int i9xx_check_plane_surface(struct intel_plane_state *plane_state);

/* intel_csr.c */

@@ -1590,8 +1591,7 @@ static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
bool intel_dp_read_dpcd(struct intel_dp *intel_dp);
int intel_dp_link_required(int pixel_clock, int bpp);
int intel_dp_max_data_rate(int max_link_clock, int max_lanes);
-bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
-				  struct intel_digital_port *port);
+bool intel_digital_port_connected(struct intel_encoder *encoder);

/* intel_dp_aux_backlight.c */
int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector);

@@ -1760,6 +1760,7 @@ static inline void intel_backlight_device_unregister(struct intel_connector *con

/* intel_psr.c */
+#define CAN_PSR(dev_priv) (HAS_PSR(dev_priv) && dev_priv->psr.sink_support)
void intel_psr_enable(struct intel_dp *intel_dp,
		      const struct intel_crtc_state *crtc_state);
void intel_psr_disable(struct intel_dp *intel_dp,

@@ -1932,6 +1933,8 @@ void skl_update_plane(struct intel_plane *plane,
		      const struct intel_plane_state *plane_state);
void skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc);
bool skl_plane_get_hw_state(struct intel_plane *plane);
+bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
+		       enum pipe pipe, enum plane_id plane_id);

/* intel_tv.c */
void intel_tv_init(struct drm_i915_private *dev_priv);
......
@@ -38,9 +38,11 @@
 */
#define HSW_CXT_TOTAL_SIZE		(17 * PAGE_SIZE)

+#define DEFAULT_LR_CONTEXT_RENDER_SIZE	(22 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_RENDER_SIZE	(20 * PAGE_SIZE)
#define GEN9_LR_CONTEXT_RENDER_SIZE	(22 * PAGE_SIZE)
#define GEN10_LR_CONTEXT_RENDER_SIZE	(18 * PAGE_SIZE)
+#define GEN11_LR_CONTEXT_RENDER_SIZE	(14 * PAGE_SIZE)

#define GEN8_LR_CONTEXT_OTHER_SIZE	( 2 * PAGE_SIZE)

@@ -157,6 +159,9 @@ __intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
		switch (INTEL_GEN(dev_priv)) {
		default:
			MISSING_CASE(INTEL_GEN(dev_priv));
+			return DEFAULT_LR_CONTEXT_RENDER_SIZE;
+		case 11:
+			return GEN11_LR_CONTEXT_RENDER_SIZE;
		case 10:
			return GEN10_LR_CONTEXT_RENDER_SIZE;
		case 9:

@@ -1389,7 +1394,8 @@ int init_workarounds_ring(struct intel_engine_cs *engine)
	struct drm_i915_private *dev_priv = engine->i915;
	int err;

-	WARN_ON(engine->id != RCS);
+	if (GEM_WARN_ON(engine->id != RCS))
+		return -EINVAL;

	dev_priv->workarounds.count = 0;
	dev_priv->workarounds.hw_whitelist_count[engine->id] = 0;

@@ -1943,16 +1949,22 @@ intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance)
 */
int intel_enable_engine_stats(struct intel_engine_cs *engine)
{
+	struct intel_engine_execlists *execlists = &engine->execlists;
	unsigned long flags;
+	int err = 0;

	if (!intel_engine_supports_stats(engine))
		return -ENODEV;

+	tasklet_disable(&execlists->tasklet);
	spin_lock_irqsave(&engine->stats.lock, flags);
-	if (engine->stats.enabled == ~0)
-		goto busy;
+
+	if (unlikely(engine->stats.enabled == ~0)) {
+		err = -EBUSY;
+		goto unlock;
+	}
+
	if (engine->stats.enabled++ == 0) {
-		struct intel_engine_execlists *execlists = &engine->execlists;
		const struct execlist_port *port = execlists->port;
		unsigned int num_ports = execlists_num_ports(execlists);

@@ -1967,14 +1979,12 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
		if (engine->stats.active)
			engine->stats.start = engine->stats.enabled_at;
	}
-	spin_unlock_irqrestore(&engine->stats.lock, flags);

-	return 0;
-
-busy:
+unlock:
	spin_unlock_irqrestore(&engine->stats.lock, flags);
+	tasklet_enable(&execlists->tasklet);

-	return -EBUSY;
+	return err;
}

static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine)
......
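
The intel_enable_engine_stats() change above is an instance of a common kernel pattern: quiesce the tasklet before sampling state it mutates, then re-enable it. A hedged generic sketch of that pattern (names hypothetical, not i915 API):

/* Hypothetical sketch of the quiesce-then-sample pattern used above. */
static int sample_under_tasklet(struct tasklet_struct *t, spinlock_t *lock)
{
	unsigned long flags;
	int err = 0;

	tasklet_disable(t);		/* waits for a running tasklet to finish */
	spin_lock_irqsave(lock, flags);

	/* ... sample or seed state the tasklet would otherwise race with ... */

	spin_unlock_irqrestore(lock, flags);
	tasklet_enable(t);		/* re-arm deferred work */
	return err;
}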
@@ -492,7 +492,8 @@ static void intel_fbc_schedule_activation(struct intel_crtc *crtc)
	schedule_work(&work->work);
}

-static void intel_fbc_deactivate(struct drm_i915_private *dev_priv)
+static void intel_fbc_deactivate(struct drm_i915_private *dev_priv,
+				 const char *reason)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

@@ -505,6 +506,8 @@ static void intel_fbc_deactivate(struct drm_i915_private *dev_priv)

	if (fbc->active)
		intel_fbc_hw_deactivate(dev_priv);
+
+	fbc->no_fbc_reason = reason;
}

static bool multiple_pipes_ok(struct intel_crtc *crtc,

@@ -668,11 +671,13 @@ void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
static bool stride_is_valid(struct drm_i915_private *dev_priv,
			    unsigned int stride)
{
-	/* These should have been caught earlier. */
-	WARN_ON(stride < 512);
-	WARN_ON((stride & (64 - 1)) != 0);
+	/* This should have been caught earlier. */
+	if (WARN_ON_ONCE((stride & (64 - 1)) != 0))
+		return false;

	/* Below are the additional FBC restrictions. */
+	if (stride < 512)
+		return false;

	if (IS_GEN2(dev_priv) || IS_GEN3(dev_priv))
		return stride == 4096 || stride == 8192;

@@ -921,6 +926,7 @@ void intel_fbc_pre_update(struct intel_crtc *crtc,
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
+	const char *reason = "update pending";

	if (!fbc_supported(dev_priv))
		return;

@@ -928,7 +934,7 @@ void intel_fbc_pre_update(struct intel_crtc *crtc,
	mutex_lock(&fbc->lock);

	if (!multiple_pipes_ok(crtc, plane_state)) {
-		fbc->no_fbc_reason = "more than one pipe active";
+		reason = "more than one pipe active";
		goto deactivate;
	}

@@ -938,7 +944,7 @@ void intel_fbc_pre_update(struct intel_crtc *crtc,
	intel_fbc_update_state_cache(crtc, crtc_state, plane_state);

deactivate:
-	intel_fbc_deactivate(dev_priv);
+	intel_fbc_deactivate(dev_priv, reason);
unlock:
	mutex_unlock(&fbc->lock);
}

@@ -971,9 +977,8 @@ static void __intel_fbc_post_update(struct intel_crtc *crtc)
	    intel_fbc_reg_params_equal(&old_params, &fbc->params))
		return;

-	intel_fbc_deactivate(dev_priv);
+	intel_fbc_deactivate(dev_priv, "FBC enabled (active or scheduled)");
	intel_fbc_schedule_activation(crtc);
-	fbc->no_fbc_reason = "FBC enabled (active or scheduled)";
}

void intel_fbc_post_update(struct intel_crtc *crtc)

@@ -1014,7 +1019,7 @@ void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
	fbc->busy_bits |= intel_fbc_get_frontbuffer_bit(fbc) & frontbuffer_bits;

	if (fbc->enabled && fbc->busy_bits)
-		intel_fbc_deactivate(dev_priv);
+		intel_fbc_deactivate(dev_priv, "frontbuffer write");

	mutex_unlock(&fbc->lock);
}

@@ -1244,7 +1249,7 @@ static void intel_fbc_underrun_work_fn(struct work_struct *work)
	DRM_DEBUG_KMS("Disabling FBC due to FIFO underrun.\n");

	fbc->underrun_detected = true;
-	intel_fbc_deactivate(dev_priv);
+	intel_fbc_deactivate(dev_priv, "FIFO underrun");
out:
	mutex_unlock(&fbc->lock);
}

@@ -1371,7 +1376,7 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
	for_each_pipe(dev_priv, pipe) {
		fbc->possible_framebuffer_bits |=
-			INTEL_FRONTBUFFER_PRIMARY(pipe);
+			INTEL_FRONTBUFFER(pipe, PLANE_PRIMARY);

		if (fbc_on_pipe_a_only(dev_priv))
			break;
......
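
The recurring change in this file funnels every FBC deactivation through one helper that also records why, so the debugfs reason string can never go stale. Sketched with hypothetical names (not the driver's types):

/* Hypothetical sketch: record the reason at the single deactivation point. */
struct fbc_state {
	bool active;
	const char *no_fbc_reason;
};

static void fbc_deactivate(struct fbc_state *fbc, const char *reason)
{
	if (fbc->active)
		fbc->active = false;	/* stands in for the hw disable */

	fbc->no_fbc_reason = reason;	/* later reported via debugfs */
}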
@@ -23,6 +23,7 @@
 */

#include "intel_guc.h"
+#include "intel_guc_ads.h"
#include "intel_guc_submission.h"
#include "i915_drv.h"

@@ -63,6 +64,7 @@ void intel_guc_init_early(struct intel_guc *guc)
{
	intel_guc_fw_init_early(guc);
	intel_guc_ct_init_early(&guc->ct);
+	intel_guc_log_init_early(guc);

	mutex_init(&guc->send_mutex);
	guc->send = intel_guc_send_nop;

@@ -86,8 +88,10 @@ int intel_guc_init_wq(struct intel_guc *guc)
	 */
	guc->log.runtime.flush_wq = alloc_ordered_workqueue("i915-guc_log",
							    WQ_HIGHPRI | WQ_FREEZABLE);
-	if (!guc->log.runtime.flush_wq)
+	if (!guc->log.runtime.flush_wq) {
+		DRM_ERROR("Couldn't allocate workqueue for GuC log\n");
		return -ENOMEM;
+	}

	/*
	 * Even though both sending GuC action, and adding a new workitem to

@@ -108,6 +112,8 @@ int intel_guc_init_wq(struct intel_guc *guc)
						  WQ_HIGHPRI);
		if (!guc->preempt_wq) {
			destroy_workqueue(guc->log.runtime.flush_wq);
+			DRM_ERROR("Couldn't allocate workqueue for GuC "
+				  "preemption\n");
			return -ENOMEM;
		}
	}

@@ -163,10 +169,25 @@ int intel_guc_init(struct intel_guc *guc)
		return ret;
	GEM_BUG_ON(!guc->shared_data);

+	ret = intel_guc_log_create(guc);
+	if (ret)
+		goto err_shared;
+
+	ret = intel_guc_ads_create(guc);
+	if (ret)
+		goto err_log;
+	GEM_BUG_ON(!guc->ads_vma);
+
	/* We need to notify the guc whenever we change the GGTT */
	i915_ggtt_enable_guc(dev_priv);

	return 0;
+
+err_log:
+	intel_guc_log_destroy(guc);
+err_shared:
+	guc_shared_data_destroy(guc);
+	return ret;
}

void intel_guc_fini(struct intel_guc *guc)

@@ -174,6 +195,8 @@ void intel_guc_fini(struct intel_guc *guc)
	struct drm_i915_private *dev_priv = guc_to_i915(guc);

	i915_ggtt_disable_guc(dev_priv);
+	intel_guc_ads_destroy(guc);
+	intel_guc_log_destroy(guc);
	guc_shared_data_destroy(guc);
}

@@ -197,6 +220,19 @@ static u32 get_core_family(struct drm_i915_private *dev_priv)
	}
}

+static u32 get_log_verbosity_flags(void)
+{
+	if (i915_modparams.guc_log_level > 0) {
+		u32 verbosity = i915_modparams.guc_log_level - 1;
+
+		GEM_BUG_ON(verbosity > GUC_LOG_VERBOSITY_MAX);
+		return verbosity << GUC_LOG_VERBOSITY_SHIFT;
+	}
+
+	GEM_BUG_ON(i915_modparams.enable_guc < 0);
+	return GUC_LOG_DISABLED;
+}
+
/*
 * Initialise the GuC parameter block before starting the firmware
 * transfer. These parameters are read by the firmware on startup

@@ -229,12 +265,7 @@ void intel_guc_init_params(struct intel_guc *guc)

	params[GUC_CTL_LOG_PARAMS] = guc->log.flags;

-	if (i915_modparams.guc_log_level >= 0) {
-		params[GUC_CTL_DEBUG] =
-			i915_modparams.guc_log_level << GUC_LOG_VERBOSITY_SHIFT;
-	} else {
-		params[GUC_CTL_DEBUG] = GUC_LOG_DISABLED;
-	}
+	params[GUC_CTL_DEBUG] = get_log_verbosity_flags();

	/* If GuC submission is enabled, set up additional parameters here */
	if (USES_GUC_SUBMISSION(dev_priv)) {

@@ -427,7 +458,7 @@ int intel_guc_resume(struct drm_i915_private *dev_priv)
	if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
		return 0;

-	if (i915_modparams.guc_log_level >= 0)
+	if (i915_modparams.guc_log_level)
		gen9_enable_guc_interrupts(dev_priv);

	data[0] = INTEL_GUC_ACTION_EXIT_S_STATE;
......
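
Several hunks above (and in intel_guc_log.c below) hinge on one re-encoding of i915.guc_log_level: 0 now means disabled and N means verbosity N-1, where the old scheme used -1 for disabled and the raw value as verbosity. A hedged sketch of the debugfs-side half of that mapping (helper name assumed, not driver API):

/*
 * Assumed mapping, mirroring the code in this series:
 *   control_val == 0      -> logging disabled
 *   control_val == 1 + V  -> logging enabled at verbosity V
 * and i915.guc_log_level is stored back as 0 or 1 + V accordingly.
 */
static void decode_log_control(u64 control_val, bool *enable, u32 *verbosity)
{
	*enable = control_val > 0;
	*verbosity = *enable ? (u32)(control_val - 1) : 0;
}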
/*
* Copyright © 2014-2017 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#include "intel_guc_ads.h"
#include "intel_uc.h"
#include "i915_drv.h"
/*
* The Additional Data Struct (ADS) has pointers for different buffers used by
* the GuC. One single gem object contains the ADS struct itself (guc_ads), the
* scheduling policies (guc_policies), a structure describing a collection of
* register sets (guc_mmio_reg_state) and some extra pages for the GuC to save
* its internal state for sleep.
*/
static void guc_policy_init(struct guc_policy *policy)
{
policy->execution_quantum = POLICY_DEFAULT_EXECUTION_QUANTUM_US;
policy->preemption_time = POLICY_DEFAULT_PREEMPTION_TIME_US;
policy->fault_time = POLICY_DEFAULT_FAULT_TIME_US;
policy->policy_flags = 0;
}
static void guc_policies_init(struct guc_policies *policies)
{
struct guc_policy *policy;
u32 p, i;
policies->dpc_promote_time = POLICY_DEFAULT_DPC_PROMOTE_TIME_US;
policies->max_num_work_items = POLICY_MAX_NUM_WI;
for (p = 0; p < GUC_CLIENT_PRIORITY_NUM; p++) {
for (i = GUC_RENDER_ENGINE; i < GUC_MAX_ENGINES_NUM; i++) {
policy = &policies->policy[p][i];
guc_policy_init(policy);
}
}
policies->is_valid = 1;
}
/*
* The first 80 dwords of the register state context, containing the
* execlists and ppgtt registers.
*/
#define LR_HW_CONTEXT_SIZE (80 * sizeof(u32))
/**
* intel_guc_ads_create() - creates GuC ADS
* @guc: intel_guc struct
*
*/
int intel_guc_ads_create(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct i915_vma *vma;
struct page *page;
/* The ads obj includes the struct itself and buffers passed to GuC */
struct {
struct guc_ads ads;
struct guc_policies policies;
struct guc_mmio_reg_state reg_state;
u8 reg_state_buffer[GUC_S3_SAVE_SPACE_PAGES * PAGE_SIZE];
} __packed *blob;
struct intel_engine_cs *engine;
enum intel_engine_id id;
const u32 skipped_offset = LRC_HEADER_PAGES * PAGE_SIZE;
const u32 skipped_size = LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SIZE;
u32 base;
GEM_BUG_ON(guc->ads_vma);
vma = intel_guc_allocate_vma(guc, PAGE_ALIGN(sizeof(*blob)));
if (IS_ERR(vma))
return PTR_ERR(vma);
guc->ads_vma = vma;
page = i915_vma_first_page(vma);
blob = kmap(page);
/* GuC scheduling policies */
guc_policies_init(&blob->policies);
/* MMIO reg state */
for_each_engine(engine, dev_priv, id) {
blob->reg_state.white_list[engine->guc_id].mmio_start =
engine->mmio_base + GUC_MMIO_WHITE_LIST_START;
/* Nothing to be saved or restored for now. */
blob->reg_state.white_list[engine->guc_id].count = 0;
}
/*
* The GuC requires a "Golden Context" when it reinitialises
* engines after a reset. Here we use the Render ring default
* context, which must already exist and be pinned in the GGTT,
* so its address won't change after we've told the GuC where
* to find it. Note that we have to skip our header (1 page),
* because our GuC shared data is there.
*/
blob->ads.golden_context_lrca =
guc_ggtt_offset(dev_priv->kernel_context->engine[RCS].state) +
skipped_offset;
/*
* The GuC expects us to exclude the portion of the context image that
* it skips from the size it is to read. It starts reading from after
* the execlist context (so skipping the first page [PPHWSP] and 80
* dwords). Weird guc is weird.
*/
for_each_engine(engine, dev_priv, id)
blob->ads.eng_state_size[engine->guc_id] =
engine->context_size - skipped_size;
base = guc_ggtt_offset(vma);
blob->ads.scheduler_policies = base + ptr_offset(blob, policies);
blob->ads.reg_state_buffer = base + ptr_offset(blob, reg_state_buffer);
blob->ads.reg_state_addr = base + ptr_offset(blob, reg_state);
kunmap(page);
return 0;
}
void intel_guc_ads_destroy(struct intel_guc *guc)
{
i915_vma_unpin_and_release(&guc->ads_vma);
}
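
The ADS above is one packed allocation whose GuC-visible pointers are derived from the C struct layout; assuming ptr_offset() reduces to offsetof()-style arithmetic, the address computation is essentially the following self-contained sketch (struct contents are illustrative stand-ins, not the real GuC layouts):

#include <stddef.h>
#include <stdint.h>

/* Illustrative stand-ins for the real guc_ads/guc_policies layouts. */
struct ads { uint32_t scheduler_policies; };
struct policies { uint32_t is_valid; };

struct ads_blob {
	struct ads ads;
	struct policies policies;
	/* reg state and S3 save pages follow in the real blob */
};

/* GuC-visible pointer = GGTT base of the blob + field offset within it. */
static uint32_t policies_addr(uint32_t ggtt_base)
{
	return ggtt_base + (uint32_t)offsetof(struct ads_blob, policies);
}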
/*
* Copyright © 2014-2017 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#ifndef _INTEL_GUC_ADS_H_
#define _INTEL_GUC_ADS_H_
struct intel_guc;
int intel_guc_ads_create(struct intel_guc *guc);
void intel_guc_ads_destroy(struct intel_guc *guc);
#endif
@@ -33,11 +33,10 @@ static void guc_log_capture_logs(struct intel_guc *guc);
/**
 * DOC: GuC firmware log
 *
- * Firmware log is enabled by setting i915.guc_log_level to non-negative level.
+ * Firmware log is enabled by setting i915.guc_log_level to the positive level.
 * Log data is printed out via reading debugfs i915_guc_log_dump. Reading from
 * i915_guc_load_status will print out firmware loading status and scratch
 * registers value.
- *
 */

static int guc_log_flush_complete(struct intel_guc *guc)

@@ -59,11 +58,15 @@ static int guc_log_flush(struct intel_guc *guc)
	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

-static int guc_log_control(struct intel_guc *guc, u32 control_val)
+static int guc_log_control(struct intel_guc *guc, bool enable, u32 verbosity)
{
+	union guc_log_control control_val = {
+		.logging_enabled = enable,
+		.verbosity = verbosity,
+	};
	u32 action[] = {
		INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING,
-		control_val
+		control_val.value
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));

@@ -78,7 +81,8 @@ static int subbuf_start_callback(struct rchan_buf *buf,
				 void *prev_subbuf,
				 size_t prev_padding)
{
-	/* Use no-overwrite mode by default, where relay will stop accepting
+	/*
+	 * Use no-overwrite mode by default, where relay will stop accepting
	 * new data if there are no empty sub buffers left.
	 * There is no strict synchronization enforced by relay between Consumer
	 * and Producer. In overwrite mode, there is a possibility of getting

@@ -104,7 +108,8 @@ static struct dentry *create_buf_file_callback(const char *filename,
{
	struct dentry *buf_file;

-	/* This to enable the use of a single buffer for the relay channel and
+	/*
+	 * This to enable the use of a single buffer for the relay channel and
	 * correspondingly have a single file exposed to User, through which
	 * it can collect the logs in order without any post-processing.
	 * Need to set 'is_global' even if parent is NULL for early logging.

@@ -114,7 +119,8 @@ static struct dentry *create_buf_file_callback(const char *filename,
	if (!parent)
		return NULL;

-	/* Not using the channel filename passed as an argument, since for each
+	/*
+	 * Not using the channel filename passed as an argument, since for each
	 * channel relay appends the corresponding CPU number to the filename
	 * passed in relay_open(). This should be fine as relay just needs a
	 * dentry of the file associated with the channel buffer and that file's

@@ -147,13 +153,16 @@ static int guc_log_relay_file_create(struct intel_guc *guc)
	struct dentry *log_dir;
	int ret;

-	if (i915_modparams.guc_log_level < 0)
+	if (!i915_modparams.guc_log_level)
		return 0;

+	mutex_lock(&guc->log.runtime.relay_lock);
+
	/* For now create the log file in /sys/kernel/debug/dri/0 dir */
	log_dir = dev_priv->drm.primary->debugfs_root;

-	/* If /sys/kernel/debug/dri/0 location do not exist, then debugfs is
+	/*
+	 * If /sys/kernel/debug/dri/0 location do not exist, then debugfs is
	 * not mounted and so can't create the relay file.
	 * The relay API seems to fit well with debugfs only, for availing relay
	 * there are 3 requirements which can be met for debugfs file only in a

@@ -166,25 +175,41 @@ static int guc_log_relay_file_create(struct intel_guc *guc)
	 */
	if (!log_dir) {
		DRM_ERROR("Debugfs dir not available yet for GuC log file\n");
-		return -ENODEV;
+		ret = -ENODEV;
+		goto out_unlock;
	}

	ret = relay_late_setup_files(guc->log.runtime.relay_chan, "guc_log", log_dir);
	if (ret < 0 && ret != -EEXIST) {
		DRM_ERROR("Couldn't associate relay chan with file %d\n", ret);
-		return ret;
+		goto out_unlock;
	}

-	return 0;
+	ret = 0;
+
+out_unlock:
+	mutex_unlock(&guc->log.runtime.relay_lock);
+	return ret;
+}
+
+static bool guc_log_has_relay(struct intel_guc *guc)
+{
+	lockdep_assert_held(&guc->log.runtime.relay_lock);
+
+	return guc->log.runtime.relay_chan != NULL;
}

static void guc_move_to_next_buf(struct intel_guc *guc)
{
-	/* Make sure the updates made in the sub buffer are visible when
+	/*
+	 * Make sure the updates made in the sub buffer are visible when
	 * Consumer sees the following update to offset inside the sub buffer.
	 */
	smp_wmb();

+	if (!guc_log_has_relay(guc))
+		return;
+
	/* All data has been written, so now move the offset of sub buffer. */
	relay_reserve(guc->log.runtime.relay_chan, guc->log.vma->obj->base.size);

@@ -194,10 +219,11 @@ static void guc_move_to_next_buf(struct intel_guc *guc)
static void *guc_get_write_buffer(struct intel_guc *guc)
{
-	if (!guc->log.runtime.relay_chan)
+	if (!guc_log_has_relay(guc))
		return NULL;

-	/* Just get the base address of a new sub buffer and copy data into it
+	/*
+	 * Just get the base address of a new sub buffer and copy data into it
	 * ourselves. NULL will be returned in no-overwrite mode, if all sub
	 * buffers are full. Could have used the relay_write() to indirectly
	 * copy the data, but that would have been bit convoluted, as we need to

@@ -262,15 +288,30 @@ static void guc_read_update_log_buffer(struct intel_guc *guc)
	/* Get the pointer to shared GuC log buffer */
	log_buf_state = src_data = guc->log.runtime.buf_addr;

+	mutex_lock(&guc->log.runtime.relay_lock);
+
	/* Get the pointer to local buffer to store the logs */
	log_buf_snapshot_state = dst_data = guc_get_write_buffer(guc);

+	if (unlikely(!log_buf_snapshot_state)) {
+		/*
+		 * Used rate limited to avoid deluge of messages, logs might be
+		 * getting consumed by User at a slow rate.
+		 */
+		DRM_ERROR_RATELIMITED("no sub-buffer to capture logs\n");
+		guc->log.capture_miss_count++;
+		mutex_unlock(&guc->log.runtime.relay_lock);
+
+		return;
+	}
+
	/* Actual logs are present from the 2nd page */
	src_data += PAGE_SIZE;
	dst_data += PAGE_SIZE;

	for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
-		/* Make a copy of the state structure, inside GuC log buffer
+		/*
+		 * Make a copy of the state structure, inside GuC log buffer
		 * (which is uncached mapped), on the stack to avoid reading
		 * from it multiple times.
		 */

@@ -290,14 +331,12 @@ static void guc_read_update_log_buffer(struct intel_guc *guc)
		log_buf_state->flush_to_file = 0;
		log_buf_state++;

-		if (unlikely(!log_buf_snapshot_state))
-			continue;
-
		/* First copy the state structure in snapshot buffer */
		memcpy(log_buf_snapshot_state, &log_buf_state_local,
		       sizeof(struct guc_log_buffer_state));

-		/* The write pointer could have been updated by GuC firmware,
+		/*
+		 * The write pointer could have been updated by GuC firmware,
		 * after sending the flush interrupt to Host, for consistency
		 * set write pointer value to same value of sampled_write_ptr
		 * in the snapshot buffer.

@@ -332,15 +371,9 @@ static void guc_read_update_log_buffer(struct intel_guc *guc)
		dst_data += buffer_size;
	}

-	if (log_buf_snapshot_state)
-		guc_move_to_next_buf(guc);
-	else {
-		/* Used rate limited to avoid deluge of messages, logs might be
-		 * getting consumed by User at a slow rate.
-		 */
-		DRM_ERROR_RATELIMITED("no sub-buffer to capture logs\n");
-		guc->log.capture_miss_count++;
-	}
+	guc_move_to_next_buf(guc);
+
+	mutex_unlock(&guc->log.runtime.relay_lock);
}

static void capture_logs_work(struct work_struct *work)

@@ -360,19 +393,21 @@ static int guc_log_runtime_create(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	void *vaddr;
-	struct rchan *guc_log_relay_chan;
-	size_t n_subbufs, subbuf_size;
	int ret;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

+	if (!guc->log.vma)
+		return -ENODEV;
+
	GEM_BUG_ON(guc_log_has_runtime(guc));

	ret = i915_gem_object_set_to_wc_domain(guc->log.vma->obj, true);
	if (ret)
		return ret;

-	/* Create a WC (Uncached for read) vmalloc mapping of log
+	/*
+	 * Create a WC (Uncached for read) vmalloc mapping of log
	 * buffer pages, so that we can directly get the data
	 * (up-to-date) from memory.
	 */

@@ -384,17 +419,55 @@ static int guc_log_runtime_create(struct intel_guc *guc)
	guc->log.runtime.buf_addr = vaddr;

+	return 0;
+}
+
+static void guc_log_runtime_destroy(struct intel_guc *guc)
+{
+	/*
+	 * It's possible that the runtime stuff was never allocated because
+	 * GuC log was disabled at the boot time.
+	 */
+	if (!guc_log_has_runtime(guc))
+		return;
+
+	i915_gem_object_unpin_map(guc->log.vma->obj);
+	guc->log.runtime.buf_addr = NULL;
+}
+
+void intel_guc_log_init_early(struct intel_guc *guc)
+{
+	mutex_init(&guc->log.runtime.relay_lock);
+	INIT_WORK(&guc->log.runtime.flush_work, capture_logs_work);
+}
+
+int intel_guc_log_relay_create(struct intel_guc *guc)
+{
+	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	struct rchan *guc_log_relay_chan;
+	size_t n_subbufs, subbuf_size;
+	int ret;
+
+	if (!i915_modparams.guc_log_level)
+		return 0;
+
+	mutex_lock(&guc->log.runtime.relay_lock);
+
+	GEM_BUG_ON(guc_log_has_relay(guc));
+
	/* Keep the size of sub buffers same as shared log buffer */
-	subbuf_size = guc->log.vma->obj->base.size;
+	subbuf_size = GUC_LOG_SIZE;

-	/* Store up to 8 snapshots, which is large enough to buffer sufficient
+	/*
+	 * Store up to 8 snapshots, which is large enough to buffer sufficient
	 * boot time logs and provides enough leeway to User, in terms of
	 * latency, for consuming the logs from relay. Also doesn't take
	 * up too much memory.
	 */
	n_subbufs = 8;

-	/* Create a relay channel, so that we have buffers for storing
+	/*
+	 * Create a relay channel, so that we have buffers for storing
	 * the GuC firmware logs, the channel will be linked with a file
	 * later on when debugfs is registered.
	 */

@@ -404,33 +477,39 @@ static int guc_log_runtime_create(struct intel_guc *guc)
		DRM_ERROR("Couldn't create relay chan for GuC logging\n");

		ret = -ENOMEM;
-		goto err_vaddr;
+		goto err;
	}

	GEM_BUG_ON(guc_log_relay_chan->subbuf_size < subbuf_size);
	guc->log.runtime.relay_chan = guc_log_relay_chan;

-	INIT_WORK(&guc->log.runtime.flush_work, capture_logs_work);
+	mutex_unlock(&guc->log.runtime.relay_lock);
+
	return 0;

-err_vaddr:
-	i915_gem_object_unpin_map(guc->log.vma->obj);
-	guc->log.runtime.buf_addr = NULL;
+err:
+	mutex_unlock(&guc->log.runtime.relay_lock);
+	/* logging will be off */
+	i915_modparams.guc_log_level = 0;
	return ret;
}
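
For sizing intuition: with subbuf_size pinned to the shared log buffer size and eight sub-buffers, the relay channel costs eight times the log buffer. A hedged arithmetic sketch (page counts illustrative; the real values come from the GUC_LOG_*_PAGES macros shown being removed further below):

/* Illustrative page counts; each region carries an overlap page, plus one
 * leading page for the log buffer state, as in the old inline computation. */
#define DPC_PAGES	7
#define ISR_PAGES	7
#define CRASH_PAGES	1
#define PAGE_SHIFT_	12

#define LOG_SIZE ((1 + DPC_PAGES + 1 + ISR_PAGES + 1 + CRASH_PAGES + 1) \
		  << PAGE_SHIFT_)		/* 19 pages with these counts */

#define N_SUBBUFS	8
#define RELAY_FOOTPRINT	(N_SUBBUFS * LOG_SIZE)	/* 8 buffered snapshots */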
static void guc_log_runtime_destroy(struct intel_guc *guc) void intel_guc_log_relay_destroy(struct intel_guc *guc)
{ {
mutex_lock(&guc->log.runtime.relay_lock);
/* /*
* It's possible that the runtime stuff was never allocated because * It's possible that the relay was never allocated because
* guc_log_level was < 0 at the time * GuC log was disabled at the boot time.
**/ */
if (!guc_log_has_runtime(guc)) if (!guc_log_has_relay(guc))
return; goto out_unlock;
relay_close(guc->log.runtime.relay_chan); relay_close(guc->log.runtime.relay_chan);
i915_gem_object_unpin_map(guc->log.vma->obj); guc->log.runtime.relay_chan = NULL;
guc->log.runtime.buf_addr = NULL;
out_unlock:
mutex_unlock(&guc->log.runtime.relay_lock);
} }
static int guc_log_late_setup(struct intel_guc *guc) static int guc_log_late_setup(struct intel_guc *guc)
...@@ -438,16 +517,24 @@ static int guc_log_late_setup(struct intel_guc *guc) ...@@ -438,16 +517,24 @@ static int guc_log_late_setup(struct intel_guc *guc)
struct drm_i915_private *dev_priv = guc_to_i915(guc); struct drm_i915_private *dev_priv = guc_to_i915(guc);
int ret; int ret;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
if (!guc_log_has_runtime(guc)) { if (!guc_log_has_runtime(guc)) {
/* If log_level was set as -1 at boot time, then setup needed to /*
* handle log buffer flush interrupts would not have been done yet, * If log was disabled at boot time, then setup needed to handle
* so do that now. * log buffer flush interrupts would not have been done yet, so
* do that now.
*/ */
ret = guc_log_runtime_create(guc); ret = intel_guc_log_relay_create(guc);
if (ret) if (ret)
goto err; goto err;
mutex_lock(&dev_priv->drm.struct_mutex);
intel_runtime_pm_get(dev_priv);
ret = guc_log_runtime_create(guc);
intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev_priv->drm.struct_mutex);
if (ret)
goto err_relay;
} }
ret = guc_log_relay_file_create(guc); ret = guc_log_relay_file_create(guc);
...@@ -457,10 +544,14 @@ static int guc_log_late_setup(struct intel_guc *guc) ...@@ -457,10 +544,14 @@ static int guc_log_late_setup(struct intel_guc *guc)
return 0; return 0;
err_runtime: err_runtime:
mutex_lock(&dev_priv->drm.struct_mutex);
guc_log_runtime_destroy(guc); guc_log_runtime_destroy(guc);
mutex_unlock(&dev_priv->drm.struct_mutex);
err_relay:
intel_guc_log_relay_destroy(guc);
err: err:
/* logging will remain off */ /* logging will remain off */
i915_modparams.guc_log_level = -1; i915_modparams.guc_log_level = 0;
return ret; return ret;
} }
...@@ -470,7 +561,8 @@ static void guc_log_capture_logs(struct intel_guc *guc) ...@@ -470,7 +561,8 @@ static void guc_log_capture_logs(struct intel_guc *guc)
guc_read_update_log_buffer(guc); guc_read_update_log_buffer(guc);
/* Generally device is expected to be active only at this /*
* Generally device is expected to be active only at this
* time, so get/put should be really quick. * time, so get/put should be really quick.
*/ */
intel_runtime_pm_get(dev_priv); intel_runtime_pm_get(dev_priv);
...@@ -482,20 +574,26 @@ static void guc_flush_logs(struct intel_guc *guc) ...@@ -482,20 +574,26 @@ static void guc_flush_logs(struct intel_guc *guc)
{ {
struct drm_i915_private *dev_priv = guc_to_i915(guc); struct drm_i915_private *dev_priv = guc_to_i915(guc);
if (!USES_GUC_SUBMISSION(dev_priv) || if (!USES_GUC_SUBMISSION(dev_priv) || !i915_modparams.guc_log_level)
(i915_modparams.guc_log_level < 0))
return; return;
/* First disable the interrupts, will be renabled afterwards */ /* First disable the interrupts, will be renabled afterwards */
mutex_lock(&dev_priv->drm.struct_mutex);
intel_runtime_pm_get(dev_priv);
gen9_disable_guc_interrupts(dev_priv); gen9_disable_guc_interrupts(dev_priv);
intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev_priv->drm.struct_mutex);
/* Before initiating the forceful flush, wait for any pending/ongoing /*
* Before initiating the forceful flush, wait for any pending/ongoing
* flush to complete otherwise forceful flush may not actually happen. * flush to complete otherwise forceful flush may not actually happen.
*/ */
flush_work(&guc->log.runtime.flush_work); flush_work(&guc->log.runtime.flush_work);
/* Ask GuC to update the log buffer state */ /* Ask GuC to update the log buffer state */
intel_runtime_pm_get(dev_priv);
guc_log_flush(guc); guc_log_flush(guc);
intel_runtime_pm_put(dev_priv);
/* GuC would have updated log buffer by now, so capture it */ /* GuC would have updated log buffer by now, so capture it */
guc_log_capture_logs(guc); guc_log_capture_logs(guc);
@@ -506,21 +604,12 @@ int intel_guc_log_create(struct intel_guc *guc)
 	struct i915_vma *vma;
 	unsigned long offset;
 	u32 flags;
-	u32 size;
 	int ret;
 
 	GEM_BUG_ON(guc->log.vma);
 
-	if (i915_modparams.guc_log_level > GUC_LOG_VERBOSITY_MAX)
-		i915_modparams.guc_log_level = GUC_LOG_VERBOSITY_MAX;
-
-	/* The first page is to save log buffer state. Allocate one
-	 * extra page for others in case for overlap */
-	size = (1 + GUC_LOG_DPC_PAGES + 1 +
-		GUC_LOG_ISR_PAGES + 1 +
-		GUC_LOG_CRASH_PAGES + 1) << PAGE_SHIFT;
-
-	/* We require SSE 4.1 for fast reads from the GuC log buffer and
+	/*
+	 * We require SSE 4.1 for fast reads from the GuC log buffer and
 	 * it should be present on the chipsets supporting GuC based
 	 * submisssions.
 	 */
@@ -529,7 +618,7 @@ int intel_guc_log_create(struct intel_guc *guc)
 		goto err;
 	}
 
-	vma = intel_guc_allocate_vma(guc, size);
+	vma = intel_guc_allocate_vma(guc, GUC_LOG_SIZE);
 	if (IS_ERR(vma)) {
 		ret = PTR_ERR(vma);
 		goto err;
@@ -537,7 +626,7 @@ int intel_guc_log_create(struct intel_guc *guc)
 
 	guc->log.vma = vma;
 
-	if (i915_modparams.guc_log_level >= 0) {
+	if (i915_modparams.guc_log_level) {
 		ret = guc_log_runtime_create(guc);
 		if (ret < 0)
 			goto err_vma;
@@ -558,7 +647,7 @@ int intel_guc_log_create(struct intel_guc *guc)
 	i915_vma_unpin_and_release(&guc->log.vma);
 err:
 	/* logging will be off */
-	i915_modparams.guc_log_level = -1;
+	i915_modparams.guc_log_level = 0;
 	return ret;
 }
@@ -568,35 +657,46 @@ void intel_guc_log_destroy(struct intel_guc *guc)
 	i915_vma_unpin_and_release(&guc->log.vma);
 }
 
-int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val)
+int intel_guc_log_control(struct intel_guc *guc, u64 control_val)
 {
-	struct intel_guc *guc = &dev_priv->guc;
-	union guc_log_control log_param;
+	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	bool enable_logging = control_val > 0;
+	u32 verbosity;
 	int ret;
 
-	log_param.value = control_val;
-
-	if (log_param.verbosity < GUC_LOG_VERBOSITY_MIN ||
-	    log_param.verbosity > GUC_LOG_VERBOSITY_MAX)
+	if (!guc->log.vma)
+		return -ENODEV;
+
+	BUILD_BUG_ON(GUC_LOG_VERBOSITY_MIN);
+	if (control_val > 1 + GUC_LOG_VERBOSITY_MAX)
 		return -EINVAL;
 
 	/* This combination doesn't make sense & won't have any effect */
-	if (!log_param.logging_enabled && (i915_modparams.guc_log_level < 0))
+	if (!enable_logging && !i915_modparams.guc_log_level)
 		return 0;
 
-	ret = guc_log_control(guc, log_param.value);
+	verbosity = enable_logging ? control_val - 1 : 0;
+
+	ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
+	if (ret)
+		return ret;
+	intel_runtime_pm_get(dev_priv);
+	ret = guc_log_control(guc, enable_logging, verbosity);
+	intel_runtime_pm_put(dev_priv);
+	mutex_unlock(&dev_priv->drm.struct_mutex);
+
 	if (ret < 0) {
 		DRM_DEBUG_DRIVER("guc_logging_control action failed %d\n", ret);
 		return ret;
 	}
 
-	if (log_param.logging_enabled) {
-		i915_modparams.guc_log_level = log_param.verbosity;
+	if (enable_logging) {
+		i915_modparams.guc_log_level = 1 + verbosity;
 
-		/* If log_level was set as -1 at boot time, then the relay channel file
-		 * wouldn't have been created by now and interrupts also would not have
-		 * been enabled. Try again now, just in case.
+		/*
+		 * If log was disabled at boot time, then the relay channel file
+		 * wouldn't have been created by now and interrupts also would
+		 * not have been enabled. Try again now, just in case.
 		 */
 		ret = guc_log_late_setup(guc);
 		if (ret < 0) {
@@ -605,9 +705,14 @@ int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val)
 		}
 
 		/* GuC logging is currently the only user of Guc2Host interrupts */
+		mutex_lock(&dev_priv->drm.struct_mutex);
+		intel_runtime_pm_get(dev_priv);
 		gen9_enable_guc_interrupts(dev_priv);
+		intel_runtime_pm_put(dev_priv);
+		mutex_unlock(&dev_priv->drm.struct_mutex);
 	} else {
-		/* Once logging is disabled, GuC won't generate logs & send an
+		/*
+		 * Once logging is disabled, GuC won't generate logs & send an
 		 * interrupt. But there could be some data in the log buffer
 		 * which is yet to be captured. So request GuC to update the log
 		 * buffer state and then collect the left over logs.
@@ -615,7 +720,7 @@ int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val)
 		guc_flush_logs(guc);
 
 		/* As logging is disabled, update log level to reflect that */
-		i915_modparams.guc_log_level = -1;
+		i915_modparams.guc_log_level = 0;
 	}
 
 	return ret;
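Taken together, the rewritten intel_guc_log_control() maps the debugfs control value directly onto the modparam. Assuming the usual fwif constants (GUC_LOG_VERBOSITY_MIN = 0, GUC_LOG_VERBOSITY_MAX = 3), the mapping works out as:

    control_val = 0  ->  disable logging,            guc_log_level = 0
    control_val = 1  ->  enable, verbosity 0,        guc_log_level = 1
    control_val = 2  ->  enable, verbosity 1,        guc_log_level = 2
    control_val = 4  ->  enable, verbosity 3 (max),  guc_log_level = 4
    control_val = 5  ->  -EINVAL (exceeds 1 + GUC_LOG_VERBOSITY_MAX)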
@@ -623,23 +728,27 @@ int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val)
 
 void i915_guc_log_register(struct drm_i915_private *dev_priv)
 {
-	if (!USES_GUC_SUBMISSION(dev_priv) ||
-	    (i915_modparams.guc_log_level < 0))
+	if (!USES_GUC_SUBMISSION(dev_priv) || !i915_modparams.guc_log_level)
 		return;
 
+	mutex_lock(&dev_priv->drm.struct_mutex);
 	guc_log_late_setup(&dev_priv->guc);
+	mutex_unlock(&dev_priv->drm.struct_mutex);
 }
 
 void i915_guc_log_unregister(struct drm_i915_private *dev_priv)
 {
+	struct intel_guc *guc = &dev_priv->guc;
+
 	if (!USES_GUC_SUBMISSION(dev_priv))
 		return;
 
 	mutex_lock(&dev_priv->drm.struct_mutex);
 	/* GuC logging is currently the only user of Guc2Host interrupts */
+	intel_runtime_pm_get(dev_priv);
 	gen9_disable_guc_interrupts(dev_priv);
-	guc_log_runtime_destroy(&dev_priv->guc);
+	intel_runtime_pm_put(dev_priv);
+	guc_log_runtime_destroy(guc);
 	mutex_unlock(&dev_priv->drm.struct_mutex);
+	intel_guc_log_relay_destroy(guc);
 }
@@ -32,6 +32,13 @@
 struct drm_i915_private;
 struct intel_guc;
 
+/*
+ * The first page is to save log buffer state. Allocate one
+ * extra page for others in case for overlap
+ */
+#define GUC_LOG_SIZE	((1 + GUC_LOG_DPC_PAGES + 1 + GUC_LOG_ISR_PAGES + \
+			  1 + GUC_LOG_CRASH_PAGES + 1) << PAGE_SHIFT)
+
 struct intel_guc_log {
 	u32 flags;
 	struct i915_vma *vma;
@@ -41,6 +48,8 @@ struct intel_guc_log {
 		struct workqueue_struct *flush_wq;
 		struct work_struct flush_work;
 		struct rchan *relay_chan;
+		/* To serialize the access to relay_chan */
+		struct mutex relay_lock;
 	} runtime;
 	/* logging related stats */
 	u32 capture_miss_count;
@@ -52,7 +61,10 @@ struct intel_guc_log {
 
 int intel_guc_log_create(struct intel_guc *guc);
 void intel_guc_log_destroy(struct intel_guc *guc);
-int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val);
+void intel_guc_log_init_early(struct intel_guc *guc);
+int intel_guc_log_relay_create(struct intel_guc *guc);
+void intel_guc_log_relay_destroy(struct intel_guc *guc);
+int intel_guc_log_control(struct intel_guc *guc, u64 control_val);
 void i915_guc_log_register(struct drm_i915_private *dev_priv);
 void i915_guc_log_unregister(struct drm_i915_private *dev_priv);
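GUC_LOG_SIZE simply hoists the old inline computation from intel_guc_log_create() into the header: one page for log buffer state, plus each log region with one extra page to absorb overlap. Assuming the fwif page counts of this era (GUC_LOG_DPC_PAGES = 7, GUC_LOG_ISR_PAGES = 7, GUC_LOG_CRASH_PAGES = 1) and 4 KiB pages, the arithmetic works out as:

    (1 + 7+1 + 7+1 + 1+1) << PAGE_SHIFT  =  19 pages  =  76 KiB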
...
@@ -359,7 +359,7 @@ static void hangcheck_accumulate_sample(struct intel_engine_cs *engine,
 	case ENGINE_DEAD:
 		if (drm_debug & DRM_UT_DRIVER) {
 			struct drm_printer p = drm_debug_printer("hangcheck");
-			intel_engine_dump(engine, &p, "%s", engine->name);
+			intel_engine_dump(engine, &p, "%s\n", engine->name);
 		}
 		break;
...
@@ -1567,7 +1567,10 @@ intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector, bool has_edid)
 	 * there's nothing connected to the port.
 	 */
 	if (type == DRM_DP_DUAL_MODE_UNKNOWN) {
-		if (has_edid &&
+		/* An overridden EDID imply that we want this port for testing.
+		 * Make sure not to set limits for that port.
+		 */
+		if (has_edid && !connector->override_edid &&
 		    intel_bios_is_port_dp_dual_mode(dev_priv, port)) {
 			DRM_DEBUG_KMS("Assuming DP dual mode adaptor presence based on VBT\n");
 			type = DRM_DP_DUAL_MODE_TYPE1_DVI;
@@ -1932,6 +1935,9 @@ static u8 cnp_port_to_ddc_pin(struct drm_i915_private *dev_priv,
 	case PORT_D:
 		ddc_pin = GMBUS_PIN_4_CNP;
 		break;
+	case PORT_F:
+		ddc_pin = GMBUS_PIN_3_BXT;
+		break;
 	default:
 		MISSING_CASE(port);
 		ddc_pin = GMBUS_PIN_1_BXT;
@@ -1940,6 +1946,37 @@
 	return ddc_pin;
 }
 
+static u8 icl_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
+{
+	u8 ddc_pin;
+
+	switch (port) {
+	case PORT_A:
+		ddc_pin = GMBUS_PIN_1_BXT;
+		break;
+	case PORT_B:
+		ddc_pin = GMBUS_PIN_2_BXT;
+		break;
+	case PORT_C:
+		ddc_pin = GMBUS_PIN_9_TC1_ICP;
+		break;
+	case PORT_D:
+		ddc_pin = GMBUS_PIN_10_TC2_ICP;
+		break;
+	case PORT_E:
+		ddc_pin = GMBUS_PIN_11_TC3_ICP;
+		break;
+	case PORT_F:
+		ddc_pin = GMBUS_PIN_12_TC4_ICP;
+		break;
+	default:
+		MISSING_CASE(port);
+		ddc_pin = GMBUS_PIN_2_BXT;
+		break;
+	}
+	return ddc_pin;
+}
+
 static u8 g4x_port_to_ddc_pin(struct drm_i915_private *dev_priv,
 			      enum port port)
 {
@@ -1982,6 +2019,8 @@ static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv,
 		ddc_pin = bxt_port_to_ddc_pin(dev_priv, port);
 	else if (HAS_PCH_CNP(dev_priv))
 		ddc_pin = cnp_port_to_ddc_pin(dev_priv, port);
+	else if (IS_ICELAKE(dev_priv))
+		ddc_pin = icl_port_to_ddc_pin(dev_priv, port);
 	else
 		ddc_pin = g4x_port_to_ddc_pin(dev_priv, port);
@@ -2052,7 +2091,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
 	if (WARN_ON(port == PORT_A))
 		return;
 
-	intel_encoder->hpd_pin = intel_hpd_pin(port);
+	intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);
 
 	if (HAS_DDI(dev_priv))
 		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
...
@@ -75,11 +75,22 @@ static const struct gmbus_pin gmbus_pins_cnp[] = {
 	[GMBUS_PIN_4_CNP] = { "dpd", GPIOE },
 };
 
+static const struct gmbus_pin gmbus_pins_icp[] = {
+	[GMBUS_PIN_1_BXT] = { "dpa", GPIOA },
+	[GMBUS_PIN_2_BXT] = { "dpb", GPIOB },
+	[GMBUS_PIN_9_TC1_ICP] = { "tc1", GPIOC },
+	[GMBUS_PIN_10_TC2_ICP] = { "tc2", GPIOD },
+	[GMBUS_PIN_11_TC3_ICP] = { "tc3", GPIOE },
+	[GMBUS_PIN_12_TC4_ICP] = { "tc4", GPIOF },
+};
+
 /* pin is expected to be valid */
 static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *dev_priv,
 					     unsigned int pin)
 {
-	if (HAS_PCH_CNP(dev_priv))
+	if (HAS_PCH_ICP(dev_priv))
+		return &gmbus_pins_icp[pin];
+	else if (HAS_PCH_CNP(dev_priv))
 		return &gmbus_pins_cnp[pin];
 	else if (IS_GEN9_LP(dev_priv))
 		return &gmbus_pins_bxt[pin];
@@ -96,7 +107,9 @@ bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
 {
 	unsigned int size;
 
-	if (HAS_PCH_CNP(dev_priv))
+	if (HAS_PCH_ICP(dev_priv))
+		size = ARRAY_SIZE(gmbus_pins_icp);
+	else if (HAS_PCH_CNP(dev_priv))
 		size = ARRAY_SIZE(gmbus_pins_cnp);
 	else if (IS_GEN9_LP(dev_priv))
 		size = ARRAY_SIZE(gmbus_pins_bxt);
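With gmbus_pins_icp sparse-indexed by pin number, validation keeps its old shape: a pin is valid when it indexes into the selected table and the entry is populated. A sketch of the check that follows the table selection above, assuming the function body is otherwise unchanged:

    /* Sketch: valid = in range for the selected table and entry populated. */
    return pin < size && get_gmbus_pin(dev_priv, pin)->name;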
...