Commit 3f1f0b1c authored by Dave Airlie

Merge tag 'drm-intel-next-2017-12-01' of git://anongit.freedesktop.org/drm/drm-intel into drm-next

[airlied: fix conflict in intel_dsi.c]

drm-intel-next-2017-12-01:

- Init clock gate fix (Ville)
- Execlists event handling corrections (Chris, Michel)
- Improvements to GPU cache invalidation and context switching (Chris)
- More perf OA changes (Lionel)
- More selftests improvements and fixes (Chris, Matthew)
- Clean-up of module parameters (Chris)
- Clean-up around old ringbuffer submission and hw semaphores on old platforms (Chris)
- More Cannonlake stabilization effort (David, James)
- Display planes clean-up and improvements (Ville)
- New PMU interface for perf queries... (Tvrtko)
- ... and other subsequent PMU changes and fixes (Tvrtko, Chris)
- Remove success dmesg noise from rotation (Chris)
- New DMC for Kabylake (Anusha)
- Fixes around atomic commits (Daniel)
- GuC updates and fixes (Sagar, Michal, Chris)
- A couple of gmbus/i2c fixes (Ville)
- Use exponential backoff for all our wait_for() (Chris); a short sketch of the idea follows this list
- Fixes for i915/fbdev (Chris)
- Backlight fixes (Arnd)
- Updates on shrinker (Chris)
- Make hotplug enabling more robust (Chris)
- Disable transparent huge pages (THP) when a needed workaround is missing (Joonas)
- New GuC images for SKL, KBL, BXT (Sagar)
- Add HW Workaround for Geminilake performance (Valtteri)
- Fixes for PPS timings (Imre)
- More IPS fixes (Maarten)
- Many fixes for Display Port on gen2-gen4 (Ville)
- Retry the GPU reset, making recovery from hangs more robust (Chris)
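The "exponential backoff for wait_for()" item above refers to polling a condition with a sleep that grows after each miss. The following is an illustrative userspace sketch of that idea only; it is not the kernel's wait_for() macro, and the function name, step sizes and 1 ms cap are invented for the example.

/*
 * Poll a condition, sleeping a little longer after each miss, with the
 * sleep capped so worst-case wake-up latency stays bounded.
 */
#include <stdbool.h>
#include <unistd.h>

bool wait_for_cond(bool (*cond)(void), unsigned int timeout_us)
{
	unsigned int slept_us = 0;
	unsigned int delay_us = 10;	/* first back-off step */

	while (slept_us < timeout_us) {
		if (cond())
			return true;	/* condition met while polling */

		usleep(delay_us);
		slept_us += delay_us;
		if (delay_us < 1000)
			delay_us *= 2;	/* grow exponentially, cap at ~1 ms */
	}

	return cond();			/* one final check after the timeout */
}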

* tag 'drm-intel-next-2017-12-01' of git://anongit.freedesktop.org/drm/drm-intel: (101 commits)
  drm/i915: Update DRIVER_DATE to 20171201
  drm/i915/cnl: Mask previous DDI - PLL mapping
  drm/i915: Remove unsafe i915.enable_rc6
  drm/i915: Sleep and retry a GPU reset if at first we don't succeed
  drm/i915: Interlaced DP output doesn't work on VLV/CHV
  drm/i915: Pass crtc state to intel_pipe_{enable,disable}()
  drm/i915: Wait for pipe to start on i830 as well
  drm/i915: Fix vblank timestamp/frame counter jumps on gen2
  drm/i915: Fix deadlock in i830_disable_pipe()
  drm/i915: Fix has_audio readout for DDI A
  drm/i915: Don't add the "force audio" property to DP connectors that don't support audio
  drm/i915: Disable DP audio for g4x
  drm/i915/selftests: Wake the device before executing requests on the GPU
  drm/i915: Set fake_vma.size as well as fake_vma.node.size for capture
  drm/i915: Tidy up signed/unsigned comparison
  drm/i915: Enable IPS with only sprite plane visible too, v4.
  drm/i915: Make ips_enabled a property depending on whether IPS is enabled, v3.
  drm/i915: Avoid PPS HW/SW state mismatch due to rounding
  drm/i915: Skip switch-to-kernel-context on suspend when wedged
  drm/i915/glk: Apply WaProgramL3SqcReg1DefaultForPerf for GLK too
  ...
@@ -18,6 +18,7 @@ config DRM_I915_WERROR
 config DRM_I915_DEBUG
 	bool "Enable additional driver debugging"
 	depends on DRM_I915
+	select DEBUG_FS
 	select PREEMPT_COUNT
 	select I2C_CHARDEV
 	select DRM_DP_AUX_CHARDEV
......
@@ -46,6 +46,7 @@ i915-y := i915_drv.o \
 i915-$(CONFIG_COMPAT)   += i915_ioc32.o
 i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o intel_pipe_crc.o
+i915-$(CONFIG_PERF_EVENTS) += i915_pmu.o
 
 # GEM code
 i915-y += i915_cmd_parser.o \
......
@@ -294,8 +294,7 @@ static void switch_mmio_to_vgpu(struct intel_vgpu *vgpu, int ring_id)
 		 * write.
 		 */
 		if (mmio->in_context &&
-		    ((ctx_ctrl & inhibit_mask) != inhibit_mask) &&
-		    i915_modparams.enable_execlists)
+		    (ctx_ctrl & inhibit_mask) != inhibit_mask)
 			continue;
 
 		if (mmio->mask)
......
@@ -26,6 +26,7 @@
  */
 
 #include "i915_drv.h"
+#include "intel_ringbuffer.h"
 
 /**
  * DOC: batch buffer command parser
@@ -940,7 +941,7 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
 		return;
 	}
 
-	engine->needs_cmd_parser = true;
+	engine->flags |= I915_ENGINE_NEEDS_CMD_PARSER;
 }
 
 /**
@@ -952,7 +953,7 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
  */
 void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine)
 {
-	if (!engine->needs_cmd_parser)
+	if (!intel_engine_needs_cmd_parser(engine))
 		return;
 
 	fini_hash_table(engine);
@@ -1350,7 +1351,7 @@ int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv)
 	/* If the command parser is not enabled, report 0 - unsupported */
 	for_each_engine(engine, dev_priv, id) {
-		if (engine->needs_cmd_parser) {
+		if (intel_engine_needs_cmd_parser(engine)) {
 			active = true;
 			break;
 		}
......
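The hunks above replace the per-engine bool needs_cmd_parser with an I915_ENGINE_NEEDS_CMD_PARSER bit in engine->flags, queried through intel_engine_needs_cmd_parser(). Below is a minimal self-contained sketch of that flag-plus-helper pattern; the stand-in struct and the demo main() are invented for the example and do not reflect the real intel_engine_cs layout.

#include <stdbool.h>
#include <stdio.h>

#define I915_ENGINE_NEEDS_CMD_PARSER	(1U << 0)

struct engine_example {
	unsigned int flags;	/* bitmask replaces individual bool members */
};

static inline bool
intel_engine_needs_cmd_parser(const struct engine_example *engine)
{
	return engine->flags & I915_ENGINE_NEEDS_CMD_PARSER;
}

int main(void)
{
	struct engine_example engine = { .flags = 0 };

	engine.flags |= I915_ENGINE_NEEDS_CMD_PARSER;	/* parser enabled */
	printf("needs cmd parser: %d\n", intel_engine_needs_cmd_parser(&engine));
	return 0;
}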
@@ -1151,13 +1151,8 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
 		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
 		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
-		if (INTEL_GEN(dev_priv) >= 9)
-			cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
-		else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
-			cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
-		else
-			cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
-		cagf = intel_gpu_freq(dev_priv, cagf);
+		cagf = intel_gpu_freq(dev_priv,
+				      intel_get_cagf(dev_priv, rpstat));
 
 		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
@@ -1989,75 +1984,6 @@ static int i915_context_status(struct seq_file *m, void *unused)
 	return 0;
 }
 
-static void i915_dump_lrc_obj(struct seq_file *m,
-			      struct i915_gem_context *ctx,
-			      struct intel_engine_cs *engine)
-{
-	struct i915_vma *vma = ctx->engine[engine->id].state;
-	struct page *page;
-	int j;
-
-	seq_printf(m, "CONTEXT: %s %u\n", engine->name, ctx->hw_id);
-	if (!vma) {
-		seq_puts(m, "\tFake context\n");
-		return;
-	}
-
-	if (vma->flags & I915_VMA_GLOBAL_BIND)
-		seq_printf(m, "\tBound in GGTT at 0x%08x\n",
-			   i915_ggtt_offset(vma));
-
-	if (i915_gem_object_pin_pages(vma->obj)) {
-		seq_puts(m, "\tFailed to get pages for context object\n\n");
-		return;
-	}
-
-	page = i915_gem_object_get_page(vma->obj, LRC_STATE_PN);
-	if (page) {
-		u32 *reg_state = kmap_atomic(page);
-
-		for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) {
-			seq_printf(m,
-				   "\t[0x%04x] 0x%08x 0x%08x 0x%08x 0x%08x\n",
-				   j * 4,
-				   reg_state[j], reg_state[j + 1],
-				   reg_state[j + 2], reg_state[j + 3]);
-		}
-		kunmap_atomic(reg_state);
-	}
-
-	i915_gem_object_unpin_pages(vma->obj);
-	seq_putc(m, '\n');
-}
-
-static int i915_dump_lrc(struct seq_file *m, void *unused)
-{
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct drm_device *dev = &dev_priv->drm;
-	struct intel_engine_cs *engine;
-	struct i915_gem_context *ctx;
-	enum intel_engine_id id;
-	int ret;
-
-	if (!i915_modparams.enable_execlists) {
-		seq_printf(m, "Logical Ring Contexts are disabled\n");
-		return 0;
-	}
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-
-	list_for_each_entry(ctx, &dev_priv->contexts.list, link)
-		for_each_engine(engine, dev_priv, id)
-			i915_dump_lrc_obj(m, ctx, engine);
-
-	mutex_unlock(&dev->struct_mutex);
-
-	return 0;
-}
-
 static const char *swizzle_string(unsigned swizzle)
 {
 	switch (swizzle) {
@@ -3304,69 +3230,6 @@ static int i915_shrinker_info(struct seq_file *m, void *unused)
 	return 0;
 }
 
-static int i915_semaphore_status(struct seq_file *m, void *unused)
-{
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct drm_device *dev = &dev_priv->drm;
-	struct intel_engine_cs *engine;
-	int num_rings = INTEL_INFO(dev_priv)->num_rings;
-	enum intel_engine_id id;
-	int j, ret;
-
-	if (!i915_modparams.semaphores) {
-		seq_puts(m, "Semaphores are disabled\n");
-		return 0;
-	}
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-	intel_runtime_pm_get(dev_priv);
-
-	if (IS_BROADWELL(dev_priv)) {
-		struct page *page;
-		uint64_t *seqno;
-
-		page = i915_gem_object_get_page(dev_priv->semaphore->obj, 0);
-
-		seqno = (uint64_t *)kmap_atomic(page);
-		for_each_engine(engine, dev_priv, id) {
-			uint64_t offset;
-
-			seq_printf(m, "%s\n", engine->name);
-
-			seq_puts(m, "  Last signal:");
-			for (j = 0; j < num_rings; j++) {
-				offset = id * I915_NUM_ENGINES + j;
-				seq_printf(m, "0x%08llx (0x%02llx) ",
-					   seqno[offset], offset * 8);
-			}
-			seq_putc(m, '\n');
-
-			seq_puts(m, "  Last wait: ");
-			for (j = 0; j < num_rings; j++) {
-				offset = id + (j * I915_NUM_ENGINES);
-				seq_printf(m, "0x%08llx (0x%02llx) ",
-					   seqno[offset], offset * 8);
-			}
-			seq_putc(m, '\n');
-		}
-		kunmap_atomic(seqno);
-	} else {
-		seq_puts(m, "  Last signal:");
-		for_each_engine(engine, dev_priv, id)
-			for (j = 0; j < num_rings; j++)
-				seq_printf(m, "0x%08x\n",
-					   I915_READ(engine->semaphore.mbox.signal[j]));
-		seq_putc(m, '\n');
-	}
-
-	intel_runtime_pm_put(dev_priv);
-	mutex_unlock(&dev->struct_mutex);
-	return 0;
-}
-
 static int i915_shared_dplls_info(struct seq_file *m, void *unused)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -4833,7 +4696,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
 	{"i915_vbt", i915_vbt, 0},
 	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
 	{"i915_context_status", i915_context_status, 0},
-	{"i915_dump_lrc", i915_dump_lrc, 0},
 	{"i915_forcewake_domains", i915_forcewake_domains, 0},
 	{"i915_swizzle_info", i915_swizzle_info, 0},
 	{"i915_ppgtt_info", i915_ppgtt_info, 0},
@@ -4847,7 +4709,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
 	{"i915_display_info", i915_display_info, 0},
 	{"i915_engine_info", i915_engine_info, 0},
 	{"i915_shrinker_info", i915_shrinker_info, 0},
-	{"i915_semaphore_status", i915_semaphore_status, 0},
 	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
 	{"i915_dp_mst_info", i915_dp_mst_info, 0},
 	{"i915_wa_registers", i915_wa_registers, 0},
......
@@ -48,6 +48,7 @@
 #include "i915_drv.h"
 #include "i915_trace.h"
+#include "i915_pmu.h"
 #include "i915_vgpu.h"
 #include "intel_drv.h"
 #include "intel_uc.h"
@@ -321,7 +322,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
 		value = USES_PPGTT(dev_priv);
 		break;
 	case I915_PARAM_HAS_SEMAPHORES:
-		value = i915_modparams.semaphores;
+		value = HAS_LEGACY_SEMAPHORES(dev_priv);
 		break;
 	case I915_PARAM_HAS_SECURE_BATCHES:
 		value = capable(CAP_SYS_ADMIN);
@@ -371,9 +372,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
 		if (dev_priv->engine[RCS] && dev_priv->engine[RCS]->schedule) {
 			value |= I915_SCHEDULER_CAP_ENABLED;
 			value |= I915_SCHEDULER_CAP_PRIORITY;
 
-			if (HAS_LOGICAL_RING_PREEMPTION(dev_priv) &&
-			    i915_modparams.enable_execlists)
+			if (HAS_LOGICAL_RING_PREEMPTION(dev_priv))
 				value |= I915_SCHEDULER_CAP_PREEMPTION;
 		}
 		break;
@@ -694,8 +693,6 @@ static int i915_load_modeset_init(struct drm_device *dev)
 	/* Only enable hotplug handling once the fbdev is fully set up. */
 	intel_hpd_init(dev_priv);
 
-	drm_kms_helper_poll_init(dev);
-
 	return 0;
 
 cleanup_gem:
@@ -936,8 +933,6 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
 	intel_detect_preproduction_hw(dev_priv);
 
-	i915_perf_init(dev_priv);
-
 	return 0;
 
 err_irq:
@@ -954,7 +949,6 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
  */
 static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
 {
-	i915_perf_fini(dev_priv);
 	i915_gem_load_cleanup(dev_priv);
 	intel_irq_fini(dev_priv);
 	i915_workqueues_cleanup(dev_priv);
@@ -1057,10 +1051,6 @@ static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
 static void intel_sanitize_options(struct drm_i915_private *dev_priv)
 {
-	i915_modparams.enable_execlists =
-		intel_sanitize_enable_execlists(dev_priv,
-						i915_modparams.enable_execlists);
-
 	/*
 	 * i915.enable_ppgtt is read-only, so do an early pass to validate the
 	 * user's requested state against the hardware/driver capabilities. We
@@ -1072,11 +1062,6 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv)
 					    i915_modparams.enable_ppgtt);
 	DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915_modparams.enable_ppgtt);
 
-	i915_modparams.semaphores =
-		intel_sanitize_semaphores(dev_priv, i915_modparams.semaphores);
-	DRM_DEBUG_DRIVER("use GPU semaphores? %s\n",
-			 yesno(i915_modparams.semaphores));
-
 	intel_uc_sanitize_options(dev_priv);
 
 	intel_gvt_sanitize_options(dev_priv);
@@ -1101,6 +1086,8 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 	intel_sanitize_options(dev_priv);
 
+	i915_perf_init(dev_priv);
+
 	ret = i915_ggtt_probe_hw(dev_priv);
 	if (ret)
 		return ret;
@@ -1206,6 +1193,8 @@ static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv)
 {
 	struct pci_dev *pdev = dev_priv->drm.pdev;
 
+	i915_perf_fini(dev_priv);
+
 	if (pdev->msi_enabled)
 		pci_disable_msi(pdev);
@@ -1224,7 +1213,8 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
 {
 	struct drm_device *dev = &dev_priv->drm;
 
-	i915_gem_shrinker_init(dev_priv);
+	i915_gem_shrinker_register(dev_priv);
+	i915_pmu_register(dev_priv);
 
 	/*
 	 * Notify a valid surface after modesetting,
@@ -1263,6 +1253,13 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
 	 * cannot run before the connectors are registered.
 	 */
 	intel_fbdev_initial_config_async(dev);
+
+	/*
+	 * We need to coordinate the hotplugs with the asynchronous fbdev
+	 * configuration, for which we use the fbdev->async_cookie.
+	 */
+	if (INTEL_INFO(dev_priv)->num_pipes)
+		drm_kms_helper_poll_init(dev);
 }
 
 /**
@@ -1274,17 +1271,25 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
 	intel_fbdev_unregister(dev_priv);
 	intel_audio_deinit(dev_priv);
 
+	/*
+	 * After flushing the fbdev (incl. a late async config which will
+	 * have delayed queuing of a hotplug event), then flush the hotplug
+	 * events.
+	 */
+	drm_kms_helper_poll_fini(&dev_priv->drm);
+
 	intel_gpu_ips_teardown();
 	acpi_video_unregister();
 	intel_opregion_unregister(dev_priv);
 
 	i915_perf_unregister(dev_priv);
+	i915_pmu_unregister(dev_priv);
 
 	i915_teardown_sysfs(dev_priv);
 	i915_guc_log_unregister(dev_priv);
 	drm_dev_unregister(&dev_priv->drm);
 
-	i915_gem_shrinker_cleanup(dev_priv);
+	i915_gem_shrinker_unregister(dev_priv);
 }
 
 /**
@@ -1872,7 +1877,9 @@ void i915_reset(struct drm_i915_private *i915, unsigned int flags)
 {
 	struct i915_gpu_error *error = &i915->gpu_error;
 	int ret;
+	int i;
 
+	might_sleep();
 	lockdep_assert_held(&i915->drm.struct_mutex);
 	GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &error->flags));
@@ -1895,12 +1902,20 @@ void i915_reset(struct drm_i915_private *i915, unsigned int flags)
 		goto error;
 	}
 
-	ret = intel_gpu_reset(i915, ALL_ENGINES);
+	if (!intel_has_gpu_reset(i915)) {
+		DRM_DEBUG_DRIVER("GPU reset disabled\n");
+		goto error;
+	}
+
+	for (i = 0; i < 3; i++) {
+		ret = intel_gpu_reset(i915, ALL_ENGINES);
+		if (ret == 0)
+			break;
+
+		msleep(100);
+	}
 	if (ret) {
-		if (ret != -ENODEV)
-			DRM_ERROR("Failed to reset chip: %i\n", ret);
-		else
-			DRM_DEBUG_DRIVER("GPU reset disabled\n");
+		dev_err(i915->drm.dev, "Failed to reset chip\n");
 		goto error;
 	}
@@ -2512,7 +2527,7 @@ static int intel_runtime_suspend(struct device *kdev)
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	int ret;
 
-	if (WARN_ON_ONCE(!(dev_priv->gt_pm.rc6.enabled && intel_rc6_enabled())))
+	if (WARN_ON_ONCE(!(dev_priv->gt_pm.rc6.enabled && HAS_RC6(dev_priv))))
 		return -ENODEV;
 
 	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
......
@@ -40,6 +40,7 @@
 #include <linux/hash.h>
 #include <linux/intel-iommu.h>
 #include <linux/kref.h>
+#include <linux/perf_event.h>
 #include <linux/pm_qos.h>
 #include <linux/reservation.h>
 #include <linux/shmem_fs.h>
@@ -79,8 +80,8 @@
 #define DRIVER_NAME		"i915"
 #define DRIVER_DESC		"Intel Graphics"
-#define DRIVER_DATE		"20171117"
-#define DRIVER_TIMESTAMP	1510958822
+#define DRIVER_DATE		"20171201"
+#define DRIVER_TIMESTAMP	1512176839
 
 /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
  * WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@ -304,9 +305,9 @@ static inline bool transcoder_is_dsi(enum transcoder transcoder)
 /*
  * Global legacy plane identifier. Valid only for primary/sprite
- * planes on pre-g4x, and only for primary planes on g4x+.
+ * planes on pre-g4x, and only for primary planes on g4x-bdw.
  */
-enum plane {
+enum i9xx_plane_id {
 	PLANE_A,
 	PLANE_B,
 	PLANE_C,
@@ -560,13 +561,13 @@ struct i915_hotplug {
 	for_each_power_well_rev(__dev_priv, __power_well)		\
 		for_each_if ((__power_well)->domains & (__domain_mask))
 
-#define for_each_intel_plane_in_state(__state, plane, plane_state, __i) \
+#define for_each_new_intel_plane_in_state(__state, plane, new_plane_state, __i) \
 	for ((__i) = 0; \
 	     (__i) < (__state)->base.dev->mode_config.num_total_plane && \
 	     ((plane) = to_intel_plane((__state)->base.planes[__i].ptr), \
-	      (plane_state) = to_intel_plane_state((__state)->base.planes[__i].state), 1); \
+	      (new_plane_state) = to_intel_plane_state((__state)->base.planes[__i].new_state), 1); \
 	     (__i)++) \
-		for_each_if (plane_state)
+		for_each_if (plane)
 
 #define for_each_new_intel_crtc_in_state(__state, crtc, new_crtc_state, __i) \
 	for ((__i) = 0; \
@@ -576,7 +577,6 @@ struct i915_hotplug {
 	     (__i)++) \
 		for_each_if (crtc)
 
-
 #define for_each_oldnew_intel_plane_in_state(__state, plane, old_plane_state, new_plane_state, __i) \
 	for ((__i) = 0; \
 	     (__i) < (__state)->base.dev->mode_config.num_total_plane && \
@@ -698,7 +698,8 @@ struct drm_i915_display_funcs {
 			     struct intel_cdclk_state *cdclk_state);
 	void (*set_cdclk)(struct drm_i915_private *dev_priv,
 			  const struct intel_cdclk_state *cdclk_state);
-	int (*get_fifo_size)(struct drm_i915_private *dev_priv, int plane);
+	int (*get_fifo_size)(struct drm_i915_private *dev_priv,
+			     enum i9xx_plane_id i9xx_plane);
 	int (*compute_pipe_wm)(struct intel_crtc_state *cstate);
 	int (*compute_intermediate_wm)(struct drm_device *dev,
 				       struct intel_crtc *intel_crtc,
@@ -942,7 +943,6 @@ struct i915_gpu_state {
 	u64 fence[I915_MAX_NUM_FENCES];
 	struct intel_overlay_error_state *overlay;
 	struct intel_display_error_state *display;
-	struct drm_i915_error_object *semaphore;
 
 	struct drm_i915_error_engine {
 		int engine_id;
@@ -1009,6 +1009,7 @@ struct i915_gpu_state {
 		long user_bo_count;
 
 		struct drm_i915_error_object *wa_ctx;
+		struct drm_i915_error_object *default_state;
 
 		struct drm_i915_error_request {
 			long jiffies;
@@ -1145,7 +1146,7 @@ struct intel_fbc {
 	struct {
 		enum pipe pipe;
-		enum plane plane;
+		enum i9xx_plane_id i9xx_plane;
 		unsigned int fence_y_offset;
 	} crtc;
@@ -2291,7 +2292,8 @@ struct drm_i915_private {
 	struct i915_gem_context *kernel_context;
 	/* Context only to be used for injecting preemption commands */
 	struct i915_gem_context *preempt_context;
-	struct i915_vma *semaphore;
+	struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1]
+					    [MAX_ENGINE_INSTANCE + 1];
 
 	struct drm_dma_handle *status_page_dmah;
 	struct resource mch_res;
@@ -2619,7 +2621,6 @@ struct drm_i915_private {
 		bool periodic;
 		int period_exponent;
-		int timestamp_frequency;
 
 		struct i915_oa_config test_config;
@@ -2764,6 +2765,8 @@ struct drm_i915_private {
 		int irq;
 	} lpe_audio;
 
+	struct i915_pmu pmu;
+
 	/*
 	 * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
 	 * will be rejected. Instead look for a better place.
@@ -3142,6 +3145,8 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define HAS_BLT(dev_priv)	HAS_ENGINE(dev_priv, BCS)
 #define HAS_VEBOX(dev_priv)	HAS_ENGINE(dev_priv, VECS)
 
+#define HAS_LEGACY_SEMAPHORES(dev_priv) IS_GEN7(dev_priv)
+
 #define HAS_LLC(dev_priv)	((dev_priv)->info.has_llc)
 #define HAS_SNOOP(dev_priv)	((dev_priv)->info.has_snoop)
 #define HAS_EDRAM(dev_priv)	(!!((dev_priv)->edram_cap & EDRAM_ENABLED))
@@ -3154,6 +3159,9 @@ intel_info(const struct drm_i915_private *dev_priv)
 		((dev_priv)->info.has_logical_ring_contexts)
 #define HAS_LOGICAL_RING_PREEMPTION(dev_priv) \
 		((dev_priv)->info.has_logical_ring_preemption)
+
+#define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv)
+
 #define USES_PPGTT(dev_priv)		(i915_modparams.enable_ppgtt)
 #define USES_FULL_PPGTT(dev_priv)	(i915_modparams.enable_ppgtt >= 2)
 #define USES_FULL_48BIT_PPGTT(dev_priv)	(i915_modparams.enable_ppgtt == 3)
@@ -3205,8 +3213,10 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define HAS_DDI(dev_priv)		 ((dev_priv)->info.has_ddi)
 #define HAS_FPGA_DBG_UNCLAIMED(dev_priv) ((dev_priv)->info.has_fpga_dbg)
 #define HAS_PSR(dev_priv)		 ((dev_priv)->info.has_psr)
+
 #define HAS_RC6(dev_priv)		 ((dev_priv)->info.has_rc6)
 #define HAS_RC6p(dev_priv)		 ((dev_priv)->info.has_rc6p)
+#define HAS_RC6pp(dev_priv)		 (false) /* HW was never validated */
 
 #define HAS_CSR(dev_priv)	((dev_priv)->info.has_csr)
@@ -3302,8 +3312,6 @@ intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *dev_priv)
 int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
 				int enable_ppgtt);
 
-bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value);
-
 /* i915_drv.c */
 void __printf(3, 4)
 __i915_printk(struct drm_i915_private *dev_priv, const char *level,
@@ -3905,7 +3913,7 @@ i915_gem_object_create_internal(struct drm_i915_private *dev_priv,
 			      phys_addr_t size);
 
 /* i915_gem_shrinker.c */
-unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
+unsigned long i915_gem_shrink(struct drm_i915_private *i915,
 			      unsigned long target,
 			      unsigned long *nr_scanned,
 			      unsigned flags);
@@ -3914,9 +3922,9 @@ unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
 #define I915_SHRINK_BOUND	0x4
 #define I915_SHRINK_ACTIVE	0x8
 #define I915_SHRINK_VMAPS	0x10
-unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
-void i915_gem_shrinker_init(struct drm_i915_private *dev_priv);
-void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv);
+unsigned long i915_gem_shrink_all(struct drm_i915_private *i915);
+void i915_gem_shrinker_register(struct drm_i915_private *i915);
+void i915_gem_shrinker_unregister(struct drm_i915_private *i915);
 
 /* i915_gem_tiling.c */
@@ -4223,9 +4231,17 @@ void vlv_phy_reset_lanes(struct intel_encoder *encoder,
 int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
 int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
-u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
+u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
 			   const i915_reg_t reg);
+u32 intel_get_cagf(struct drm_i915_private *dev_priv, u32 rpstat1);
+
+static inline u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
+					 const i915_reg_t reg)
+{
+	return DIV_ROUND_UP_ULL(intel_rc6_residency_ns(dev_priv, reg), 1000);
+}
 
 #define I915_READ8(reg)		dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
 #define I915_WRITE8(reg, val)	dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)
......
@@ -3371,7 +3371,9 @@ i915_gem_idle_work_handler(struct work_struct *work)
 	synchronize_irq(dev_priv->drm.irq);
 
 	intel_engines_park(dev_priv);
-	i915_gem_timelines_mark_idle(dev_priv);
+	i915_gem_timelines_park(dev_priv);
+
+	i915_pmu_gt_parked(dev_priv);
 
 	GEM_BUG_ON(!dev_priv->gt.awake);
 	dev_priv->gt.awake = false;
@@ -4772,17 +4774,19 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
 	 * state. Fortunately, the kernel_context is disposable and we do
 	 * not rely on its state.
 	 */
-	ret = i915_gem_switch_to_kernel_context(dev_priv);
-	if (ret)
-		goto err_unlock;
-
-	ret = i915_gem_wait_for_idle(dev_priv,
-				     I915_WAIT_INTERRUPTIBLE |
-				     I915_WAIT_LOCKED);
-	if (ret && ret != -EIO)
-		goto err_unlock;
-
-	assert_kernel_context_is_current(dev_priv);
+	if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
+		ret = i915_gem_switch_to_kernel_context(dev_priv);
+		if (ret)
+			goto err_unlock;
+
+		ret = i915_gem_wait_for_idle(dev_priv,
+					     I915_WAIT_INTERRUPTIBLE |
+					     I915_WAIT_LOCKED);
+		if (ret && ret != -EIO)
+			goto err_unlock;
+
+		assert_kernel_context_is_current(dev_priv);
+	}
 	i915_gem_contexts_lost(dev_priv);
 	mutex_unlock(&dev->struct_mutex);
@@ -4997,25 +5001,6 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
 	return ret;
 }
 
-bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value)
-{
-	if (INTEL_INFO(dev_priv)->gen < 6)
-		return false;
-
-	/* TODO: make semaphores and Execlists play nicely together */
-	if (i915_modparams.enable_execlists)
-		return false;
-
-	if (value >= 0)
-		return value;
-
-	/* Enable semaphores on SNB when IO remapping is off */
-	if (IS_GEN6(dev_priv) && intel_vtd_active())
-		return false;
-
-	return true;
-}
-
 static int __intel_engines_record_defaults(struct drm_i915_private *i915)
 {
 	struct i915_gem_context *ctx;
@@ -5045,7 +5030,7 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
 			goto out_ctx;
 		}
 
-		err = i915_switch_context(rq);
+		err = 0;
 		if (engine->init_context)
 			err = engine->init_context(rq);
@@ -5134,8 +5119,6 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 {
 	int ret;
 
-	mutex_lock(&dev_priv->drm.struct_mutex);
-
 	/*
 	 * We need to fallback to 4K pages since gvt gtt handling doesn't
 	 * support huge page entries - we will need to check either hypervisor
@@ -5147,26 +5130,27 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 	dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);
 
-	if (!i915_modparams.enable_execlists) {
-		dev_priv->gt.resume = intel_legacy_submission_resume;
-		dev_priv->gt.cleanup_engine = intel_engine_cleanup;
-	} else {
+	if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
 		dev_priv->gt.resume = intel_lr_context_resume;
 		dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
+	} else {
+		dev_priv->gt.resume = intel_legacy_submission_resume;
+		dev_priv->gt.cleanup_engine = intel_engine_cleanup;
 	}
 
+	ret = i915_gem_init_userptr(dev_priv);
+	if (ret)
+		return ret;
+
 	/* This is just a security blanket to placate dragons.
 	 * On some systems, we very sporadically observe that the first TLBs
 	 * used by the CS may be stale, despite us poking the TLB reset. If
 	 * we hold the forcewake during initialisation these problems
 	 * just magically go away.
 	 */
+	mutex_lock(&dev_priv->drm.struct_mutex);
 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
-	ret = i915_gem_init_userptr(dev_priv);
-	if (ret)
-		goto out_unlock;
-
 	ret = i915_gem_init_ggtt(dev_priv);
 	if (ret)
 		goto out_unlock;
......
@@ -460,14 +460,6 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
 	INIT_WORK(&dev_priv->contexts.free_work, contexts_free_worker);
 	init_llist_head(&dev_priv->contexts.free_list);
 
-	if (intel_vgpu_active(dev_priv) &&
-	    HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
-		if (!i915_modparams.enable_execlists) {
-			DRM_INFO("Only EXECLIST mode is supported in vgpu.\n");
-			return -EINVAL;
-		}
-	}
-
 	/* Using the simple ida interface, the max is limited by sizeof(int) */
 	BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
 	ida_init(&dev_priv->contexts.hw_ida);
@@ -515,6 +507,7 @@ void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)
 	for_each_engine(engine, dev_priv, id) {
 		engine->legacy_active_context = NULL;
+		engine->legacy_active_ppgtt = NULL;
 
 		if (!engine->last_retired_context)
 			continue;
@@ -574,300 +567,6 @@ void i915_gem_context_close(struct drm_file *file)
 	idr_destroy(&file_priv->context_idr);
 }
 
-static inline int
-mi_set_context(struct drm_i915_gem_request *req, u32 flags)
-{
-	struct drm_i915_private *dev_priv = req->i915;
-	struct intel_engine_cs *engine = req->engine;
-	enum intel_engine_id id;
-	const int num_rings =
-		/* Use an extended w/a on gen7 if signalling from other rings */
-		(i915_modparams.semaphores && INTEL_GEN(dev_priv) == 7) ?
-		INTEL_INFO(dev_priv)->num_rings - 1 :
-		0;
-	int len;
-	u32 *cs;
-
-	flags |= MI_MM_SPACE_GTT;
-	if (IS_HASWELL(dev_priv) || INTEL_GEN(dev_priv) >= 8)
-		/* These flags are for resource streamer on HSW+ */
-		flags |= HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN;
-	else
-		flags |= MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN;
-
-	len = 4;
-	if (INTEL_GEN(dev_priv) >= 7)
-		len += 2 + (num_rings ? 4*num_rings + 6 : 0);
-
-	cs = intel_ring_begin(req, len);
-	if (IS_ERR(cs))
-		return PTR_ERR(cs);
-
-	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
-	if (INTEL_GEN(dev_priv) >= 7) {
-		*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
-		if (num_rings) {
-			struct intel_engine_cs *signaller;
-
-			*cs++ = MI_LOAD_REGISTER_IMM(num_rings);
-			for_each_engine(signaller, dev_priv, id) {
-				if (signaller == engine)
-					continue;
-
-				*cs++ = i915_mmio_reg_offset(
-					   RING_PSMI_CTL(signaller->mmio_base));
-				*cs++ = _MASKED_BIT_ENABLE(
-						GEN6_PSMI_SLEEP_MSG_DISABLE);
-			}
-		}
-	}
-
-	*cs++ = MI_NOOP;
-	*cs++ = MI_SET_CONTEXT;
-	*cs++ = i915_ggtt_offset(req->ctx->engine[RCS].state) | flags;
-	/*
-	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
-	 * WaMiSetContext_Hang:snb,ivb,vlv
-	 */
-	*cs++ = MI_NOOP;
-
-	if (INTEL_GEN(dev_priv) >= 7) {
-		if (num_rings) {
-			struct intel_engine_cs *signaller;
-			i915_reg_t last_reg = {}; /* keep gcc quiet */
-
-			*cs++ = MI_LOAD_REGISTER_IMM(num_rings);
-			for_each_engine(signaller, dev_priv, id) {
-				if (signaller == engine)
-					continue;
-
-				last_reg = RING_PSMI_CTL(signaller->mmio_base);
-				*cs++ = i915_mmio_reg_offset(last_reg);
-				*cs++ = _MASKED_BIT_DISABLE(
-						GEN6_PSMI_SLEEP_MSG_DISABLE);
-			}
-
-			/* Insert a delay before the next switch! */
-			*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
-			*cs++ = i915_mmio_reg_offset(last_reg);
-			*cs++ = i915_ggtt_offset(engine->scratch);
-			*cs++ = MI_NOOP;
-		}
-		*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
-	}
-
-	intel_ring_advance(req, cs);
-
-	return 0;
-}
-
-static int remap_l3(struct drm_i915_gem_request *req, int slice)
-{
-	u32 *cs, *remap_info = req->i915->l3_parity.remap_info[slice];
-	int i;
-
-	if (!remap_info)
-		return 0;
-
-	cs = intel_ring_begin(req, GEN7_L3LOG_SIZE/4 * 2 + 2);
-	if (IS_ERR(cs))
-		return PTR_ERR(cs);
-
-	/*
-	 * Note: We do not worry about the concurrent register cacheline hang
-	 * here because no other code should access these registers other than
-	 * at initialization time.
-	 */
-	*cs++ = MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4);
-	for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) {
-		*cs++ = i915_mmio_reg_offset(GEN7_L3LOG(slice, i));
-		*cs++ = remap_info[i];
-	}
-	*cs++ = MI_NOOP;
-	intel_ring_advance(req, cs);
-
-	return 0;
-}
-
-static inline bool skip_rcs_switch(struct i915_hw_ppgtt *ppgtt,
-				   struct intel_engine_cs *engine,
-				   struct i915_gem_context *to)
-{
-	if (to->remap_slice)
-		return false;
-
-	if (ppgtt && (intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
-		return false;
-
-	return to == engine->legacy_active_context;
-}
-
-static bool
-needs_pd_load_pre(struct i915_hw_ppgtt *ppgtt, struct intel_engine_cs *engine)
-{
-	struct i915_gem_context *from = engine->legacy_active_context;
-
-	if (!ppgtt)
-		return false;
-
-	/* Always load the ppgtt on first use */
-	if (!from)
-		return true;
-
-	/* Same context without new entries, skip */
-	if ((!from->ppgtt || from->ppgtt == ppgtt) &&
-	    !(intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
-		return false;
-
-	if (engine->id != RCS)
-		return true;
-
-	if (INTEL_GEN(engine->i915) < 8)
-		return true;
-
-	return false;
-}
-
-static bool
-needs_pd_load_post(struct i915_hw_ppgtt *ppgtt,
-		   struct i915_gem_context *to,
-		   u32 hw_flags)
-{
-	if (!ppgtt)
-		return false;
-
-	if (!IS_GEN8(to->i915))
-		return false;
-
-	if (hw_flags & MI_RESTORE_INHIBIT)
-		return true;
-
-	return false;
-}
-
-static int do_rcs_switch(struct drm_i915_gem_request *req)
-{
-	struct i915_gem_context *to = req->ctx;
-	struct intel_engine_cs *engine = req->engine;
-	struct i915_hw_ppgtt *ppgtt = to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
-	struct i915_gem_context *from = engine->legacy_active_context;
-	u32 hw_flags;
-	int ret, i;
-
-	GEM_BUG_ON(engine->id != RCS);
-
-	if (skip_rcs_switch(ppgtt, engine, to))
-		return 0;
-
-	if (needs_pd_load_pre(ppgtt, engine)) {
-		/* Older GENs and non render rings still want the load first,
-		 * "PP_DCLV followed by PP_DIR_BASE register through Load
-		 * Register Immediate commands in Ring Buffer before submitting
-		 * a context."*/
-		trace_switch_mm(engine, to);
-		ret = ppgtt->switch_mm(ppgtt, req);
-		if (ret)
-			return ret;
-	}
-
-	if (i915_gem_context_is_kernel(to))
-		/*
-		 * The kernel context(s) is treated as pure scratch and is not
-		 * expected to retain any state (as we sacrifice it during
-		 * suspend and on resume it may be corrupted). This is ok,
-		 * as nothing actually executes using the kernel context; it
-		 * is purely used for flushing user contexts.
-		 */
-		hw_flags = MI_RESTORE_INHIBIT;
-	else if (ppgtt && intel_engine_flag(engine) & ppgtt->pd_dirty_rings)
-		hw_flags = MI_FORCE_RESTORE;
-	else
-		hw_flags = 0;
-
-	if (to != from || (hw_flags & MI_FORCE_RESTORE)) {
-		ret = mi_set_context(req, hw_flags);
-		if (ret)
-			return ret;
-
-		engine->legacy_active_context = to;
-	}
-
-	/* GEN8 does *not* require an explicit reload if the PDPs have been
-	 * setup, and we do not wish to move them.
-	 */
-	if (needs_pd_load_post(ppgtt, to, hw_flags)) {
-		trace_switch_mm(engine, to);
-		ret = ppgtt->switch_mm(ppgtt, req);
-		/* The hardware context switch is emitted, but we haven't
-		 * actually changed the state - so it's probably safe to bail
-		 * here. Still, let the user know something dangerous has
-		 * happened.
-		 */
-		if (ret)
-			return ret;
-	}
-
-	if (ppgtt)
-		ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
-
-	for (i = 0; i < MAX_L3_SLICES; i++) {
-		if (!(to->remap_slice & (1<<i)))
-			continue;
-
-		ret = remap_l3(req, i);
-		if (ret)
-			return ret;
-
-		to->remap_slice &= ~(1<<i);
-	}
-
-	return 0;
-}
-
-/**
- * i915_switch_context() - perform a GPU context switch.
- * @req: request for which we'll execute the context switch
- *
- * The context life cycle is simple. The context refcount is incremented and
- * decremented by 1 and create and destroy. If the context is in use by the GPU,
- * it will have a refcount > 1. This allows us to destroy the context abstract
- * object while letting the normal object tracking destroy the backing BO.
- *
- * This function should not be used in execlists mode. Instead the context is
- * switched by writing to the ELSP and requests keep a reference to their
- * context.
- */
-int i915_switch_context(struct drm_i915_gem_request *req)
-{
-	struct intel_engine_cs *engine = req->engine;
-
-	lockdep_assert_held(&req->i915->drm.struct_mutex);
-	if (i915_modparams.enable_execlists)
-		return 0;
-
-	if (!req->ctx->engine[engine->id].state) {
-		struct i915_gem_context *to = req->ctx;
-		struct i915_hw_ppgtt *ppgtt =
-			to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
-
-		if (needs_pd_load_pre(ppgtt, engine)) {
-			int ret;
-
-			trace_switch_mm(engine, to);
-			ret = ppgtt->switch_mm(ppgtt, req);
-			if (ret)
-				return ret;
-
-			ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
-		}
-
-		engine->legacy_active_context = to;
-		return 0;
-	}
-
-	return do_rcs_switch(req);
-}
-
 static bool engine_has_idle_kernel_context(struct intel_engine_cs *engine)
 {
 	struct i915_gem_timeline *timeline;
@@ -899,7 +598,6 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
 	for_each_engine(engine, dev_priv, id) {
 		struct drm_i915_gem_request *req;
-		int ret;
 
 		if (engine_has_idle_kernel_context(engine))
 			continue;
@@ -922,10 +620,14 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
 						 GFP_KERNEL);
 		}
 
-		ret = i915_switch_context(req);
-		i915_add_request(req);
-		if (ret)
-			return ret;
+		/*
+		 * Force a flush after the switch to ensure that all rendering
+		 * and operations prior to switching to the kernel context hits
+		 * memory. This should be guaranteed by the previous request,
+		 * but an extra layer of paranoia before we declare the system
+		 * idle (on suspend etc) is advisable!
+		 */
+		__i915_add_request(req, true);
 	}
 
 	return 0;
......
@@ -271,7 +271,7 @@ static inline u64 gen8_noncanonical_addr(u64 address)
 static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
 {
-	return eb->engine->needs_cmd_parser && eb->batch_len;
+	return intel_engine_needs_cmd_parser(eb->engine) && eb->batch_len;
 }
 
 static int eb_create(struct i915_execbuffer *eb)
@@ -1111,14 +1111,6 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
 	if (err)
 		goto err_request;
 
-	err = eb->engine->emit_flush(rq, EMIT_INVALIDATE);
-	if (err)
-		goto err_request;
-
-	err = i915_switch_context(rq);
-	if (err)
-		goto err_request;
-
 	err = eb->engine->emit_bb_start(rq,
 					batch->node.start, PAGE_SIZE,
 					cache->gen > 5 ? 0 : I915_DISPATCH_SECURE);
@@ -1818,8 +1810,7 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
 	/* Unconditionally flush any chipset caches (for streaming writes). */
 	i915_gem_chipset_flush(eb->i915);
 
-	/* Unconditionally invalidate GPU caches and TLBs. */
-	return eb->engine->emit_flush(eb->request, EMIT_INVALIDATE);
+	return 0;
 }
 
 static bool i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
@@ -1965,10 +1956,6 @@ static int eb_submit(struct i915_execbuffer *eb)
 	if (err)
 		return err;
 
-	err = i915_switch_context(eb->request);
-	if (err)
-		return err;
-
 	if (eb->args->flags & I915_EXEC_GEN7_SOL_RESET) {
 		err = i915_reset_gen7_sol_offsets(eb->request);
 		if (err)
......
@@ -178,7 +178,7 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
 		return 0;
 	}
 
-	if (INTEL_GEN(dev_priv) >= 8 && i915_modparams.enable_execlists) {
+	if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
 		if (has_full_48bit_ppgtt)
 			return 3;
@@ -2162,7 +2162,7 @@ int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
 	/* In the case of execlists, PPGTT is enabled by the context descriptor
 	 * and the PDPs are contained within the context itself.  We don't
 	 * need to do anything here. */
-	if (i915_modparams.enable_execlists)
+	if (HAS_LOGICAL_RING_CONTEXTS(dev_priv))
 		return 0;
 
 	if (!USES_PPGTT(dev_priv))
@@ -3737,9 +3737,6 @@ intel_rotate_pages(struct intel_rotation_info *rot_info,
 					  rot_info->plane[i].stride, st, sg);
 	}
 
-	DRM_DEBUG_KMS("Created rotated page mapping for object size %zu (%ux%u tiles, %u pages)\n",
-		      obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
-
 	kvfree(page_addr_list);
 
 	return st;
@@ -3749,8 +3746,8 @@ intel_rotate_pages(struct intel_rotation_info *rot_info,
 err_st_alloc:
 	kvfree(page_addr_list);
 
-	DRM_DEBUG_KMS("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
-		      obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
+	DRM_DEBUG_DRIVER("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
+			 obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
 
 	return ERR_PTR(ret);
 }
......
@@ -208,10 +208,6 @@ int i915_gem_render_state_emit(struct drm_i915_gem_request *rq)
 	if (err)
 		goto err_unpin;
 
-	err = engine->emit_flush(rq, EMIT_INVALIDATE);
-	if (err)
-		goto err_unpin;
-
 	err = engine->emit_bb_start(rq,
 				    so.batch_offset, so.batch_size,
 				    I915_DISPATCH_SECURE);
......
@@ -258,6 +258,7 @@ static void mark_busy(struct drm_i915_private *i915)
 	i915_update_gfx_val(i915);
 	if (INTEL_GEN(i915) >= 6)
 		gen6_rps_busy(i915);
+	i915_pmu_gt_unparked(i915);
 
 	intel_engines_unpark(i915);
@@ -624,6 +625,10 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
 	if (ret)
 		goto err_unpin;
 
+	ret = intel_ring_wait_for_space(ring, MIN_SPACE_FOR_ADD_REQUEST);
+	if (ret)
+		goto err_unreserve;
+
 	/* Move the oldest request to the slab-cache (if not in use!) */
 	req = list_first_entry_or_null(&engine->timeline->requests,
 				       typeof(*req), link);
@@ -703,22 +708,30 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
 	req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
 	GEM_BUG_ON(req->reserved_space < engine->emit_breadcrumb_sz);
 
-	ret = engine->request_alloc(req);
-	if (ret)
-		goto err_ctx;
-
-	/* Record the position of the start of the request so that
+	/*
+	 * Record the position of the start of the request so that
 	 * should we detect the updated seqno part-way through the
 	 * GPU processing the request, we never over-estimate the
 	 * position of the head.
 	 */
 	req->head = req->ring->emit;
 
+	/* Unconditionally invalidate GPU caches and TLBs. */
+	ret = engine->emit_flush(req, EMIT_INVALIDATE);
+	if (ret)
+		goto err_unwind;
+
+	ret = engine->request_alloc(req);
+	if (ret)
+		goto err_unwind;
+
 	/* Check that we didn't interrupt ourselves with a new request */
 	GEM_BUG_ON(req->timeline->seqno != req->fence.seqno);
 	return req;
 
-err_ctx:
+err_unwind:
+	req->ring->emit = req->head;
+
 	/* Make sure we didn't add ourselves to external state before freeing */
 	GEM_BUG_ON(!list_empty(&req->active_list));
 	GEM_BUG_ON(!list_empty(&req->priotree.signalers_list));
......
@@ -35,9 +35,9 @@
 #include "i915_drv.h"
 #include "i915_trace.h"
 
-static bool shrinker_lock(struct drm_i915_private *dev_priv, bool *unlock)
+static bool shrinker_lock(struct drm_i915_private *i915, bool *unlock)
 {
-	switch (mutex_trylock_recursive(&dev_priv->drm.struct_mutex)) {
+	switch (mutex_trylock_recursive(&i915->drm.struct_mutex)) {
 	case MUTEX_TRYLOCK_RECURSIVE:
 		*unlock = false;
 		return true;
@@ -47,7 +47,7 @@ static bool shrinker_lock(struct drm_i915_private *dev_priv, bool *unlock)
 		preempt_disable();
 		do {
 			cpu_relax();
-			if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
+			if (mutex_trylock(&i915->drm.struct_mutex)) {
 				*unlock = true;
 				break;
 			}
@@ -63,12 +63,12 @@ static bool shrinker_lock(struct drm_i915_private *dev_priv, bool *unlock)
 	BUG();
 }
 
-static void shrinker_unlock(struct drm_i915_private *dev_priv, bool unlock)
+static void shrinker_unlock(struct drm_i915_private *i915, bool unlock)
 {
 	if (!unlock)
 		return;
 
-	mutex_unlock(&dev_priv->drm.struct_mutex);
+	mutex_unlock(&i915->drm.struct_mutex);
 }
 
 static bool swap_available(void)
@@ -118,7 +118,7 @@ static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
 /**
  * i915_gem_shrink - Shrink buffer object caches
- * @dev_priv: i915 device
+ * @i915: i915 device
  * @target: amount of memory to make available, in pages
 * @nr_scanned: optional output for number of pages scanned (incremental)
 * @flags: control flags for selecting cache types
@@ -142,7 +142,7 @@ static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
 * The number of pages of backing storage actually released.
 */
 unsigned long
-i915_gem_shrink(struct drm_i915_private *dev_priv,
+i915_gem_shrink(struct drm_i915_private *i915,
		unsigned long target,
		unsigned long *nr_scanned,
		unsigned flags)
@@ -151,15 +151,15 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 		struct list_head *list;
 		unsigned int bit;
 	} phases[] = {
-		{ &dev_priv->mm.unbound_list, I915_SHRINK_UNBOUND },
-		{ &dev_priv->mm.bound_list, I915_SHRINK_BOUND },
+		{ &i915->mm.unbound_list, I915_SHRINK_UNBOUND },
+		{ &i915->mm.bound_list, I915_SHRINK_BOUND },
 		{ NULL, 0 },
 	}, *phase;
 	unsigned long count = 0;
 	unsigned long scanned = 0;
 	bool unlock;
 
-	if (!shrinker_lock(dev_priv, &unlock))
+	if (!shrinker_lock(i915, &unlock))
 		return 0;
 
 	/*
@@ -172,10 +172,10 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 	 * we will free as much as we can and hope to get a second chance.
 	 */
 	if (flags & I915_SHRINK_ACTIVE)
-		i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED);
+		i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);
 
-	trace_i915_gem_shrink(dev_priv, target, flags);
-	i915_gem_retire_requests(dev_priv);
+	trace_i915_gem_shrink(i915, target, flags);
+	i915_gem_retire_requests(i915);
 
 	/*
 	 * Unbinding of objects will require HW access; Let us not wake the
@@ -183,7 +183,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 	 * we will force the wake during oom-notifier.
 	 */
 	if ((flags & I915_SHRINK_BOUND) &&
-	    !intel_runtime_pm_get_if_in_use(dev_priv))
+	    !intel_runtime_pm_get_if_in_use(i915))
 		flags &= ~I915_SHRINK_BOUND;
 
 	/*
@@ -221,7 +221,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 	 * to be able to shrink their pages, so they remain on
* the unbound/bound list until actually freed. * the unbound/bound list until actually freed.
*/ */
spin_lock(&dev_priv->mm.obj_lock); spin_lock(&i915->mm.obj_lock);
while (count < target && while (count < target &&
(obj = list_first_entry_or_null(phase->list, (obj = list_first_entry_or_null(phase->list,
typeof(*obj), typeof(*obj),
...@@ -244,7 +244,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv, ...@@ -244,7 +244,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
if (!can_release_pages(obj)) if (!can_release_pages(obj))
continue; continue;
spin_unlock(&dev_priv->mm.obj_lock); spin_unlock(&i915->mm.obj_lock);
if (unsafe_drop_pages(obj)) { if (unsafe_drop_pages(obj)) {
/* May arrive from get_pages on another bo */ /* May arrive from get_pages on another bo */
...@@ -258,18 +258,18 @@ i915_gem_shrink(struct drm_i915_private *dev_priv, ...@@ -258,18 +258,18 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
} }
scanned += obj->base.size >> PAGE_SHIFT; scanned += obj->base.size >> PAGE_SHIFT;
spin_lock(&dev_priv->mm.obj_lock); spin_lock(&i915->mm.obj_lock);
} }
list_splice_tail(&still_in_list, phase->list); list_splice_tail(&still_in_list, phase->list);
spin_unlock(&dev_priv->mm.obj_lock); spin_unlock(&i915->mm.obj_lock);
} }
if (flags & I915_SHRINK_BOUND) if (flags & I915_SHRINK_BOUND)
intel_runtime_pm_put(dev_priv); intel_runtime_pm_put(i915);
i915_gem_retire_requests(dev_priv); i915_gem_retire_requests(i915);
shrinker_unlock(dev_priv, unlock); shrinker_unlock(i915, unlock);
if (nr_scanned) if (nr_scanned)
*nr_scanned += scanned; *nr_scanned += scanned;
...@@ -278,7 +278,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv, ...@@ -278,7 +278,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
/** /**
* i915_gem_shrink_all - Shrink buffer object caches completely * i915_gem_shrink_all - Shrink buffer object caches completely
* @dev_priv: i915 device * @i915: i915 device
* *
 * This is a simple wrapper around i915_gem_shrink() to aggressively shrink all  * This is a simple wrapper around i915_gem_shrink() to aggressively shrink all
* caches completely. It also first waits for and retires all outstanding * caches completely. It also first waits for and retires all outstanding
...@@ -290,16 +290,16 @@ i915_gem_shrink(struct drm_i915_private *dev_priv, ...@@ -290,16 +290,16 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
* Returns: * Returns:
* The number of pages of backing storage actually released. * The number of pages of backing storage actually released.
*/ */
unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv) unsigned long i915_gem_shrink_all(struct drm_i915_private *i915)
{ {
unsigned long freed; unsigned long freed;
intel_runtime_pm_get(dev_priv); intel_runtime_pm_get(i915);
freed = i915_gem_shrink(dev_priv, -1UL, NULL, freed = i915_gem_shrink(i915, -1UL, NULL,
I915_SHRINK_BOUND | I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND | I915_SHRINK_UNBOUND |
I915_SHRINK_ACTIVE); I915_SHRINK_ACTIVE);
intel_runtime_pm_put(dev_priv); intel_runtime_pm_put(i915);
return freed; return freed;
} }
...@@ -347,53 +347,53 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc) ...@@ -347,53 +347,53 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
static unsigned long static unsigned long
i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc) i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{ {
struct drm_i915_private *dev_priv = struct drm_i915_private *i915 =
container_of(shrinker, struct drm_i915_private, mm.shrinker); container_of(shrinker, struct drm_i915_private, mm.shrinker);
unsigned long freed; unsigned long freed;
bool unlock; bool unlock;
sc->nr_scanned = 0; sc->nr_scanned = 0;
if (!shrinker_lock(dev_priv, &unlock)) if (!shrinker_lock(i915, &unlock))
return SHRINK_STOP; return SHRINK_STOP;
freed = i915_gem_shrink(dev_priv, freed = i915_gem_shrink(i915,
sc->nr_to_scan, sc->nr_to_scan,
&sc->nr_scanned, &sc->nr_scanned,
I915_SHRINK_BOUND | I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND | I915_SHRINK_UNBOUND |
I915_SHRINK_PURGEABLE); I915_SHRINK_PURGEABLE);
if (freed < sc->nr_to_scan) if (freed < sc->nr_to_scan)
freed += i915_gem_shrink(dev_priv, freed += i915_gem_shrink(i915,
sc->nr_to_scan - sc->nr_scanned, sc->nr_to_scan - sc->nr_scanned,
&sc->nr_scanned, &sc->nr_scanned,
I915_SHRINK_BOUND | I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND); I915_SHRINK_UNBOUND);
if (freed < sc->nr_to_scan && current_is_kswapd()) { if (freed < sc->nr_to_scan && current_is_kswapd()) {
intel_runtime_pm_get(dev_priv); intel_runtime_pm_get(i915);
freed += i915_gem_shrink(dev_priv, freed += i915_gem_shrink(i915,
sc->nr_to_scan - sc->nr_scanned, sc->nr_to_scan - sc->nr_scanned,
&sc->nr_scanned, &sc->nr_scanned,
I915_SHRINK_ACTIVE | I915_SHRINK_ACTIVE |
I915_SHRINK_BOUND | I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND); I915_SHRINK_UNBOUND);
intel_runtime_pm_put(dev_priv); intel_runtime_pm_put(i915);
} }
shrinker_unlock(dev_priv, unlock); shrinker_unlock(i915, unlock);
return sc->nr_scanned ? freed : SHRINK_STOP; return sc->nr_scanned ? freed : SHRINK_STOP;
} }
static bool static bool
shrinker_lock_uninterruptible(struct drm_i915_private *dev_priv, bool *unlock, shrinker_lock_uninterruptible(struct drm_i915_private *i915, bool *unlock,
int timeout_ms) int timeout_ms)
{ {
unsigned long timeout = jiffies + msecs_to_jiffies_timeout(timeout_ms); unsigned long timeout = jiffies + msecs_to_jiffies_timeout(timeout_ms);
do { do {
if (i915_gem_wait_for_idle(dev_priv, 0) == 0 && if (i915_gem_wait_for_idle(i915, 0) == 0 &&
shrinker_lock(dev_priv, unlock)) shrinker_lock(i915, unlock))
break; break;
schedule_timeout_killable(1); schedule_timeout_killable(1);
...@@ -412,32 +412,32 @@ shrinker_lock_uninterruptible(struct drm_i915_private *dev_priv, bool *unlock, ...@@ -412,32 +412,32 @@ shrinker_lock_uninterruptible(struct drm_i915_private *dev_priv, bool *unlock,
static int static int
i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr) i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
{ {
struct drm_i915_private *dev_priv = struct drm_i915_private *i915 =
container_of(nb, struct drm_i915_private, mm.oom_notifier); container_of(nb, struct drm_i915_private, mm.oom_notifier);
struct drm_i915_gem_object *obj; struct drm_i915_gem_object *obj;
unsigned long unevictable, bound, unbound, freed_pages; unsigned long unevictable, bound, unbound, freed_pages;
freed_pages = i915_gem_shrink_all(dev_priv); freed_pages = i915_gem_shrink_all(i915);
/* Because we may be allocating inside our own driver, we cannot /* Because we may be allocating inside our own driver, we cannot
* assert that there are no objects with pinned pages that are not * assert that there are no objects with pinned pages that are not
* being pointed to by hardware. * being pointed to by hardware.
*/ */
unbound = bound = unevictable = 0; unbound = bound = unevictable = 0;
spin_lock(&dev_priv->mm.obj_lock); spin_lock(&i915->mm.obj_lock);
list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) { list_for_each_entry(obj, &i915->mm.unbound_list, mm.link) {
if (!can_release_pages(obj)) if (!can_release_pages(obj))
unevictable += obj->base.size >> PAGE_SHIFT; unevictable += obj->base.size >> PAGE_SHIFT;
else else
unbound += obj->base.size >> PAGE_SHIFT; unbound += obj->base.size >> PAGE_SHIFT;
} }
list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) { list_for_each_entry(obj, &i915->mm.bound_list, mm.link) {
if (!can_release_pages(obj)) if (!can_release_pages(obj))
unevictable += obj->base.size >> PAGE_SHIFT; unevictable += obj->base.size >> PAGE_SHIFT;
else else
bound += obj->base.size >> PAGE_SHIFT; bound += obj->base.size >> PAGE_SHIFT;
} }
spin_unlock(&dev_priv->mm.obj_lock); spin_unlock(&i915->mm.obj_lock);
if (freed_pages || unbound || bound) if (freed_pages || unbound || bound)
pr_info("Purging GPU memory, %lu pages freed, " pr_info("Purging GPU memory, %lu pages freed, "
...@@ -455,74 +455,74 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr) ...@@ -455,74 +455,74 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
static int static int
i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr) i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{ {
struct drm_i915_private *dev_priv = struct drm_i915_private *i915 =
container_of(nb, struct drm_i915_private, mm.vmap_notifier); container_of(nb, struct drm_i915_private, mm.vmap_notifier);
struct i915_vma *vma, *next; struct i915_vma *vma, *next;
unsigned long freed_pages = 0; unsigned long freed_pages = 0;
bool unlock; bool unlock;
int ret; int ret;
if (!shrinker_lock_uninterruptible(dev_priv, &unlock, 5000)) if (!shrinker_lock_uninterruptible(i915, &unlock, 5000))
return NOTIFY_DONE; return NOTIFY_DONE;
/* Force everything onto the inactive lists */ /* Force everything onto the inactive lists */
ret = i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED); ret = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);
if (ret) if (ret)
goto out; goto out;
intel_runtime_pm_get(dev_priv); intel_runtime_pm_get(i915);
freed_pages += i915_gem_shrink(dev_priv, -1UL, NULL, freed_pages += i915_gem_shrink(i915, -1UL, NULL,
I915_SHRINK_BOUND | I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND | I915_SHRINK_UNBOUND |
I915_SHRINK_ACTIVE | I915_SHRINK_ACTIVE |
I915_SHRINK_VMAPS); I915_SHRINK_VMAPS);
intel_runtime_pm_put(dev_priv); intel_runtime_pm_put(i915);
/* We also want to clear any cached iomaps as they wrap vmap */ /* We also want to clear any cached iomaps as they wrap vmap */
list_for_each_entry_safe(vma, next, list_for_each_entry_safe(vma, next,
&dev_priv->ggtt.base.inactive_list, vm_link) { &i915->ggtt.base.inactive_list, vm_link) {
unsigned long count = vma->node.size >> PAGE_SHIFT; unsigned long count = vma->node.size >> PAGE_SHIFT;
if (vma->iomap && i915_vma_unbind(vma) == 0) if (vma->iomap && i915_vma_unbind(vma) == 0)
freed_pages += count; freed_pages += count;
} }
out: out:
shrinker_unlock(dev_priv, unlock); shrinker_unlock(i915, unlock);
*(unsigned long *)ptr += freed_pages; *(unsigned long *)ptr += freed_pages;
return NOTIFY_DONE; return NOTIFY_DONE;
} }
/** /**
* i915_gem_shrinker_init - Initialize i915 shrinker * i915_gem_shrinker_register - Register the i915 shrinker
* @dev_priv: i915 device * @i915: i915 device
* *
* This function registers and sets up the i915 shrinker and OOM handler. * This function registers and sets up the i915 shrinker and OOM handler.
*/ */
void i915_gem_shrinker_init(struct drm_i915_private *dev_priv) void i915_gem_shrinker_register(struct drm_i915_private *i915)
{ {
dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan; i915->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count; i915->mm.shrinker.count_objects = i915_gem_shrinker_count;
dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS; i915->mm.shrinker.seeks = DEFAULT_SEEKS;
dev_priv->mm.shrinker.batch = 4096; i915->mm.shrinker.batch = 4096;
WARN_ON(register_shrinker(&dev_priv->mm.shrinker)); WARN_ON(register_shrinker(&i915->mm.shrinker));
dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom; i915->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
WARN_ON(register_oom_notifier(&dev_priv->mm.oom_notifier)); WARN_ON(register_oom_notifier(&i915->mm.oom_notifier));
dev_priv->mm.vmap_notifier.notifier_call = i915_gem_shrinker_vmap; i915->mm.vmap_notifier.notifier_call = i915_gem_shrinker_vmap;
WARN_ON(register_vmap_purge_notifier(&dev_priv->mm.vmap_notifier)); WARN_ON(register_vmap_purge_notifier(&i915->mm.vmap_notifier));
} }
/** /**
 * i915_gem_shrinker_cleanup - Clean up i915 shrinker  * i915_gem_shrinker_unregister - Unregister the i915 shrinker
* @dev_priv: i915 device * @i915: i915 device
* *
* This function unregisters the i915 shrinker and OOM handler. * This function unregisters the i915 shrinker and OOM handler.
*/ */
void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv) void i915_gem_shrinker_unregister(struct drm_i915_private *i915)
{ {
WARN_ON(unregister_vmap_purge_notifier(&dev_priv->mm.vmap_notifier)); WARN_ON(unregister_vmap_purge_notifier(&i915->mm.vmap_notifier));
WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier)); WARN_ON(unregister_oom_notifier(&i915->mm.oom_notifier));
unregister_shrinker(&dev_priv->mm.shrinker); unregister_shrinker(&i915->mm.shrinker);
} }
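i915_gem_shrinker_scan() above escalates in passes: purgeable objects first, then everything on the bound/unbound lists, and, only when called from kswapd, a final pass that also drops active objects under a runtime-PM reference. A stand-alone sketch of that escalation strategy, with a stubbed shrink() standing in for i915_gem_shrink() (the flag names below are illustrative):

#include <stdbool.h>
#include <stdio.h>

#define SHRINK_PURGEABLE	(1u << 0)
#define SHRINK_UNBOUND		(1u << 1)
#define SHRINK_BOUND		(1u << 2)
#define SHRINK_ACTIVE		(1u << 3)

/* Stub: pretend each pass frees half of what was asked for. */
static unsigned long shrink(unsigned long target, unsigned int flags)
{
	(void)flags;
	return target / 2;
}

/* Escalate through the cheaper passes first, as the shrinker scan does. */
static unsigned long shrinker_scan(unsigned long nr_to_scan, bool is_kswapd)
{
	unsigned long freed;

	freed = shrink(nr_to_scan, SHRINK_PURGEABLE | SHRINK_UNBOUND | SHRINK_BOUND);
	if (freed < nr_to_scan)
		freed += shrink(nr_to_scan - freed, SHRINK_UNBOUND | SHRINK_BOUND);
	if (freed < nr_to_scan && is_kswapd)
		/* the real driver takes a runtime-PM reference around this pass */
		freed += shrink(nr_to_scan - freed,
				SHRINK_ACTIVE | SHRINK_UNBOUND | SHRINK_BOUND);
	return freed;
}

int main(void)
{
	printf("freed %lu of 128 pages\n", shrinker_scan(128, true));
	return 0;
}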
...@@ -107,8 +107,8 @@ int i915_gem_timeline_init__global(struct drm_i915_private *i915) ...@@ -107,8 +107,8 @@ int i915_gem_timeline_init__global(struct drm_i915_private *i915)
} }
/** /**
* i915_gem_timelines_mark_idle -- called when the driver idles * i915_gem_timelines_park - called when the driver idles
* @i915 - the drm_i915_private device * @i915: the drm_i915_private device
* *
* When the driver is completely idle, we know that all of our sync points * When the driver is completely idle, we know that all of our sync points
* have been signaled and our tracking is then entirely redundant. Any request * have been signaled and our tracking is then entirely redundant. Any request
...@@ -116,7 +116,7 @@ int i915_gem_timeline_init__global(struct drm_i915_private *i915) ...@@ -116,7 +116,7 @@ int i915_gem_timeline_init__global(struct drm_i915_private *i915)
* the fence is signaled and therefore we will not even look them up in the * the fence is signaled and therefore we will not even look them up in the
* sync point map. * sync point map.
*/ */
void i915_gem_timelines_mark_idle(struct drm_i915_private *i915) void i915_gem_timelines_park(struct drm_i915_private *i915)
{ {
struct i915_gem_timeline *timeline; struct i915_gem_timeline *timeline;
int i; int i;
......
...@@ -93,7 +93,7 @@ int i915_gem_timeline_init(struct drm_i915_private *i915, ...@@ -93,7 +93,7 @@ int i915_gem_timeline_init(struct drm_i915_private *i915,
struct i915_gem_timeline *tl, struct i915_gem_timeline *tl,
const char *name); const char *name);
int i915_gem_timeline_init__global(struct drm_i915_private *i915); int i915_gem_timeline_init__global(struct drm_i915_private *i915);
void i915_gem_timelines_mark_idle(struct drm_i915_private *i915); void i915_gem_timelines_park(struct drm_i915_private *i915);
void i915_gem_timeline_fini(struct i915_gem_timeline *tl); void i915_gem_timeline_fini(struct i915_gem_timeline *tl);
static inline int __intel_timeline_sync_set(struct intel_timeline *tl, static inline int __intel_timeline_sync_set(struct intel_timeline *tl,
......
...@@ -791,9 +791,10 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, ...@@ -791,9 +791,10 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
print_error_obj(m, dev_priv->engine[i], print_error_obj(m, dev_priv->engine[i],
"WA batchbuffer", ee->wa_batchbuffer); "WA batchbuffer", ee->wa_batchbuffer);
}
print_error_obj(m, NULL, "Semaphores", error->semaphore); print_error_obj(m, dev_priv->engine[i],
"NULL context", ee->default_state);
}
if (error->overlay) if (error->overlay)
intel_overlay_print_error_state(m, error->overlay); intel_overlay_print_error_state(m, error->overlay);
...@@ -903,8 +904,6 @@ void __i915_gpu_state_free(struct kref *error_ref) ...@@ -903,8 +904,6 @@ void __i915_gpu_state_free(struct kref *error_ref)
kfree(ee->waiters); kfree(ee->waiters);
} }
i915_error_object_free(error->semaphore);
for (i = 0; i < ARRAY_SIZE(error->active_bo); i++) for (i = 0; i < ARRAY_SIZE(error->active_bo); i++)
kfree(error->active_bo[i]); kfree(error->active_bo[i]);
kfree(error->pinned_bo); kfree(error->pinned_bo);
...@@ -1116,34 +1115,6 @@ gen8_engine_sync_index(struct intel_engine_cs *engine, ...@@ -1116,34 +1115,6 @@ gen8_engine_sync_index(struct intel_engine_cs *engine,
return idx; return idx;
} }
static void gen8_record_semaphore_state(struct i915_gpu_state *error,
struct intel_engine_cs *engine,
struct drm_i915_error_engine *ee)
{
struct drm_i915_private *dev_priv = engine->i915;
struct intel_engine_cs *to;
enum intel_engine_id id;
if (!error->semaphore)
return;
for_each_engine(to, dev_priv, id) {
int idx;
u16 signal_offset;
u32 *tmp;
if (engine == to)
continue;
signal_offset =
(GEN8_SIGNAL_OFFSET(engine, id) & (PAGE_SIZE - 1)) / 4;
tmp = error->semaphore->pages[0];
idx = gen8_engine_sync_index(engine, to);
ee->semaphore_mboxes[idx] = tmp[signal_offset];
}
}
static void gen6_record_semaphore_state(struct intel_engine_cs *engine, static void gen6_record_semaphore_state(struct intel_engine_cs *engine,
struct drm_i915_error_engine *ee) struct drm_i915_error_engine *ee)
{ {
...@@ -1218,7 +1189,6 @@ static void error_record_engine_registers(struct i915_gpu_state *error, ...@@ -1218,7 +1189,6 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
if (INTEL_GEN(dev_priv) >= 6) { if (INTEL_GEN(dev_priv) >= 6) {
ee->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base)); ee->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base));
if (INTEL_GEN(dev_priv) >= 8) { if (INTEL_GEN(dev_priv) >= 8) {
gen8_record_semaphore_state(error, engine, ee);
ee->fault_reg = I915_READ(GEN8_RING_FAULT_REG); ee->fault_reg = I915_READ(GEN8_RING_FAULT_REG);
} else { } else {
gen6_record_semaphore_state(engine, ee); gen6_record_semaphore_state(engine, ee);
...@@ -1447,15 +1417,30 @@ static void request_record_user_bo(struct drm_i915_gem_request *request, ...@@ -1447,15 +1417,30 @@ static void request_record_user_bo(struct drm_i915_gem_request *request,
ee->user_bo_count = count; ee->user_bo_count = count;
} }
static struct drm_i915_error_object *
capture_object(struct drm_i915_private *dev_priv,
struct drm_i915_gem_object *obj)
{
if (obj && i915_gem_object_has_pages(obj)) {
struct i915_vma fake = {
.node = { .start = U64_MAX, .size = obj->base.size },
.size = obj->base.size,
.pages = obj->mm.pages,
.obj = obj,
};
return i915_error_object_create(dev_priv, &fake);
} else {
return NULL;
}
}
static void i915_gem_record_rings(struct drm_i915_private *dev_priv, static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
struct i915_gpu_state *error) struct i915_gpu_state *error)
{ {
struct i915_ggtt *ggtt = &dev_priv->ggtt; struct i915_ggtt *ggtt = &dev_priv->ggtt;
int i; int i;
error->semaphore =
i915_error_object_create(dev_priv, dev_priv->semaphore);
for (i = 0; i < I915_NUM_ENGINES; i++) { for (i = 0; i < I915_NUM_ENGINES; i++) {
struct intel_engine_cs *engine = dev_priv->engine[i]; struct intel_engine_cs *engine = dev_priv->engine[i];
struct drm_i915_error_engine *ee = &error->engine[i]; struct drm_i915_error_engine *ee = &error->engine[i];
...@@ -1521,6 +1506,9 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv, ...@@ -1521,6 +1506,9 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
ee->wa_ctx = ee->wa_ctx =
i915_error_object_create(dev_priv, engine->wa_ctx.vma); i915_error_object_create(dev_priv, engine->wa_ctx.vma);
ee->default_state =
capture_object(dev_priv, engine->default_state);
} }
} }
......
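capture_object() above builds a throw-away struct i915_vma on the stack so the existing i915_error_object_create() path can snapshot an object (here the engine's default context state) that has backing pages but no real binding; node.start is set to U64_MAX to mark it as unmapped. A simplified, self-contained sketch of the same "fake descriptor" idea, with made-up struct names standing in for the driver's types:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-ins for drm_i915_gem_object / i915_vma. */
struct gem_object {
	size_t size;
	void *pages;
};

struct vma_desc {
	uint64_t start;		/* UINT64_MAX => not mapped anywhere */
	size_t size;
	void *pages;
	const struct gem_object *obj;
};

/* Pretend error-capture helper that only understands vma descriptors. */
static void capture_vma(const struct vma_desc *vma)
{
	printf("capturing %zu bytes, start=%#llx\n",
	       vma->size, (unsigned long long)vma->start);
}

/* Wrap an object that has pages but no binding in a temporary descriptor. */
static void capture_object(const struct gem_object *obj)
{
	struct vma_desc fake = {
		.start = UINT64_MAX,
		.size = obj->size,
		.pages = obj->pages,
		.obj = obj,
	};

	if (obj->pages)
		capture_vma(&fake);
}

int main(void)
{
	char backing[4096];
	struct gem_object obj = { .size = sizeof(backing), .pages = backing };

	capture_object(&obj);
	return 0;
}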
...@@ -3068,7 +3068,7 @@ static void vlv_display_irq_reset(struct drm_i915_private *dev_priv) ...@@ -3068,7 +3068,7 @@ static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
i9xx_pipestat_irq_reset(dev_priv); i9xx_pipestat_irq_reset(dev_priv);
GEN3_IRQ_RESET(VLV_); GEN3_IRQ_RESET(VLV_);
dev_priv->irq_mask = ~0; dev_priv->irq_mask = ~0u;
} }
static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
...@@ -3093,7 +3093,7 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) ...@@ -3093,7 +3093,7 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT | enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
I915_LPE_PIPE_C_INTERRUPT; I915_LPE_PIPE_C_INTERRUPT;
WARN_ON(dev_priv->irq_mask != ~0); WARN_ON(dev_priv->irq_mask != ~0u);
dev_priv->irq_mask = ~enable_mask; dev_priv->irq_mask = ~enable_mask;
......
...@@ -46,17 +46,6 @@ i915_param_named_unsafe(panel_ignore_lid, int, 0600, ...@@ -46,17 +46,6 @@ i915_param_named_unsafe(panel_ignore_lid, int, 0600,
"Override lid status (0=autodetect, 1=autodetect disabled [default], " "Override lid status (0=autodetect, 1=autodetect disabled [default], "
"-1=force lid closed, -2=force lid open)"); "-1=force lid closed, -2=force lid open)");
i915_param_named_unsafe(semaphores, int, 0400,
"Use semaphores for inter-ring sync "
"(default: -1 (use per-chip defaults))");
i915_param_named_unsafe(enable_rc6, int, 0400,
"Enable power-saving render C-state 6. "
"Different stages can be selected via bitmask values "
"(0 = disable; 1 = enable rc6; 2 = enable deep rc6; 4 = enable deepest rc6). "
"For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. "
"default: -1 (use per-chip default)");
i915_param_named_unsafe(enable_dc, int, 0400, i915_param_named_unsafe(enable_dc, int, 0400,
"Enable power-saving display C-states. " "Enable power-saving display C-states. "
"(-1=auto [default]; 0=disable; 1=up to DC5; 2=up to DC6)"); "(-1=auto [default]; 0=disable; 1=up to DC5; 2=up to DC6)");
...@@ -99,10 +88,6 @@ i915_param_named_unsafe(enable_ppgtt, int, 0400, ...@@ -99,10 +88,6 @@ i915_param_named_unsafe(enable_ppgtt, int, 0400,
"Override PPGTT usage. " "Override PPGTT usage. "
"(-1=auto [default], 0=disabled, 1=aliasing, 2=full, 3=full with extended address space)"); "(-1=auto [default], 0=disabled, 1=aliasing, 2=full, 3=full with extended address space)");
i915_param_named_unsafe(enable_execlists, int, 0400,
"Override execlists usage. "
"(-1=auto [default], 0=disabled, 1=enabled)");
i915_param_named_unsafe(enable_psr, int, 0600, i915_param_named_unsafe(enable_psr, int, 0600,
"Enable PSR " "Enable PSR "
"(0=disabled, 1=enabled - link mode chosen per-platform, 2=force link-standby mode, 3=force link-off mode) " "(0=disabled, 1=enabled - link mode chosen per-platform, 2=force link-standby mode, 3=force link-off mode) "
......
...@@ -31,15 +31,12 @@ ...@@ -31,15 +31,12 @@
param(char *, vbt_firmware, NULL) \ param(char *, vbt_firmware, NULL) \
param(int, modeset, -1) \ param(int, modeset, -1) \
param(int, panel_ignore_lid, 1) \ param(int, panel_ignore_lid, 1) \
param(int, semaphores, -1) \
param(int, lvds_channel_mode, 0) \ param(int, lvds_channel_mode, 0) \
param(int, panel_use_ssc, -1) \ param(int, panel_use_ssc, -1) \
param(int, vbt_sdvo_panel_type, -1) \ param(int, vbt_sdvo_panel_type, -1) \
param(int, enable_rc6, -1) \
param(int, enable_dc, -1) \ param(int, enable_dc, -1) \
param(int, enable_fbc, -1) \ param(int, enable_fbc, -1) \
param(int, enable_ppgtt, -1) \ param(int, enable_ppgtt, -1) \
param(int, enable_execlists, -1) \
param(int, enable_psr, -1) \ param(int, enable_psr, -1) \
param(int, disable_power_well, -1) \ param(int, disable_power_well, -1) \
param(int, enable_ips, 1) \ param(int, enable_ips, 1) \
......
...@@ -209,6 +209,8 @@ static const struct intel_device_info intel_gm45_info __initconst = { ...@@ -209,6 +209,8 @@ static const struct intel_device_info intel_gm45_info __initconst = {
.has_hotplug = 1, \ .has_hotplug = 1, \
.ring_mask = RENDER_RING | BSD_RING, \ .ring_mask = RENDER_RING | BSD_RING, \
.has_snoop = true, \ .has_snoop = true, \
/* ilk does support rc6, but we do not implement [power] contexts */ \
.has_rc6 = 0, \
GEN_DEFAULT_PIPEOFFSETS, \ GEN_DEFAULT_PIPEOFFSETS, \
GEN_DEFAULT_PAGE_SIZES, \ GEN_DEFAULT_PAGE_SIZES, \
CURSOR_OFFSETS CURSOR_OFFSETS
......
...@@ -1216,9 +1216,9 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream) ...@@ -1216,9 +1216,9 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
{ {
struct drm_i915_private *dev_priv = stream->dev_priv; struct drm_i915_private *dev_priv = stream->dev_priv;
if (i915_modparams.enable_execlists) if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
dev_priv->perf.oa.specific_ctx_id = stream->ctx->hw_id; dev_priv->perf.oa.specific_ctx_id = stream->ctx->hw_id;
else { } else {
struct intel_engine_cs *engine = dev_priv->engine[RCS]; struct intel_engine_cs *engine = dev_priv->engine[RCS];
struct intel_ring *ring; struct intel_ring *ring;
int ret; int ret;
...@@ -1262,7 +1262,7 @@ static void oa_put_render_ctx_id(struct i915_perf_stream *stream) ...@@ -1262,7 +1262,7 @@ static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
{ {
struct drm_i915_private *dev_priv = stream->dev_priv; struct drm_i915_private *dev_priv = stream->dev_priv;
if (i915_modparams.enable_execlists) { if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID; dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
} else { } else {
struct intel_engine_cs *engine = dev_priv->engine[RCS]; struct intel_engine_cs *engine = dev_priv->engine[RCS];
...@@ -1726,10 +1726,9 @@ static int gen8_switch_to_updated_kernel_context(struct drm_i915_private *dev_pr ...@@ -1726,10 +1726,9 @@ static int gen8_switch_to_updated_kernel_context(struct drm_i915_private *dev_pr
GFP_KERNEL); GFP_KERNEL);
} }
ret = i915_switch_context(req);
i915_add_request(req); i915_add_request(req);
return ret; return 0;
} }
/* /*
...@@ -2691,8 +2690,8 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv, ...@@ -2691,8 +2690,8 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
static u64 oa_exponent_to_ns(struct drm_i915_private *dev_priv, int exponent) static u64 oa_exponent_to_ns(struct drm_i915_private *dev_priv, int exponent)
{ {
return div_u64(1000000000ULL * (2ULL << exponent), return div64_u64(1000000000ULL * (2ULL << exponent),
dev_priv->perf.oa.timestamp_frequency); 1000ULL * INTEL_INFO(dev_priv)->cs_timestamp_frequency_khz);
} }
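The conversion above computes the OA sampling period as 10^9 * 2^(exponent + 1) / timestamp_frequency, with the frequency now taken from the device info in kHz (hence the extra factor of 1000). A minimal user-space sketch of the same arithmetic, assuming a hypothetical 12000 kHz command-streamer timestamp clock:

#include <stdint.h>
#include <stdio.h>

/* Same formula shape as oa_exponent_to_ns(): period in ns for an OA exponent. */
static uint64_t oa_exponent_to_ns(uint64_t timestamp_frequency_khz, int exponent)
{
	return (1000000000ULL * (2ULL << exponent)) /
	       (1000ULL * timestamp_frequency_khz);
}

int main(void)
{
	/* Hypothetical 12 MHz (12000 kHz) timestamp clock. */
	uint64_t freq_khz = 12000;
	int exponent;

	for (exponent = 0; exponent < 4; exponent++)
		printf("exponent %d -> %llu ns\n", exponent,
		       (unsigned long long)oa_exponent_to_ns(freq_khz, exponent));
	return 0;
}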
/** /**
...@@ -3007,7 +3006,7 @@ static bool gen8_is_valid_flex_addr(struct drm_i915_private *dev_priv, u32 addr) ...@@ -3007,7 +3006,7 @@ static bool gen8_is_valid_flex_addr(struct drm_i915_private *dev_priv, u32 addr)
int i; int i;
for (i = 0; i < ARRAY_SIZE(flex_eu_regs); i++) { for (i = 0; i < ARRAY_SIZE(flex_eu_regs); i++) {
if (flex_eu_regs[i].reg == addr) if (i915_mmio_reg_offset(flex_eu_regs[i]) == addr)
return true; return true;
} }
return false; return false;
...@@ -3015,38 +3014,47 @@ static bool gen8_is_valid_flex_addr(struct drm_i915_private *dev_priv, u32 addr) ...@@ -3015,38 +3014,47 @@ static bool gen8_is_valid_flex_addr(struct drm_i915_private *dev_priv, u32 addr)
static bool gen7_is_valid_b_counter_addr(struct drm_i915_private *dev_priv, u32 addr) static bool gen7_is_valid_b_counter_addr(struct drm_i915_private *dev_priv, u32 addr)
{ {
return (addr >= OASTARTTRIG1.reg && addr <= OASTARTTRIG8.reg) || return (addr >= i915_mmio_reg_offset(OASTARTTRIG1) &&
(addr >= OAREPORTTRIG1.reg && addr <= OAREPORTTRIG8.reg) || addr <= i915_mmio_reg_offset(OASTARTTRIG8)) ||
(addr >= OACEC0_0.reg && addr <= OACEC7_1.reg); (addr >= i915_mmio_reg_offset(OAREPORTTRIG1) &&
addr <= i915_mmio_reg_offset(OAREPORTTRIG8)) ||
(addr >= i915_mmio_reg_offset(OACEC0_0) &&
addr <= i915_mmio_reg_offset(OACEC7_1));
} }
static bool gen7_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr) static bool gen7_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
{ {
return addr == HALF_SLICE_CHICKEN2.reg || return addr == i915_mmio_reg_offset(HALF_SLICE_CHICKEN2) ||
(addr >= MICRO_BP0_0.reg && addr <= NOA_WRITE.reg) || (addr >= i915_mmio_reg_offset(MICRO_BP0_0) &&
(addr >= OA_PERFCNT1_LO.reg && addr <= OA_PERFCNT2_HI.reg) || addr <= i915_mmio_reg_offset(NOA_WRITE)) ||
(addr >= OA_PERFMATRIX_LO.reg && addr <= OA_PERFMATRIX_HI.reg); (addr >= i915_mmio_reg_offset(OA_PERFCNT1_LO) &&
addr <= i915_mmio_reg_offset(OA_PERFCNT2_HI)) ||
(addr >= i915_mmio_reg_offset(OA_PERFMATRIX_LO) &&
addr <= i915_mmio_reg_offset(OA_PERFMATRIX_HI));
} }
static bool gen8_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr) static bool gen8_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
{ {
return gen7_is_valid_mux_addr(dev_priv, addr) || return gen7_is_valid_mux_addr(dev_priv, addr) ||
addr == WAIT_FOR_RC6_EXIT.reg || addr == i915_mmio_reg_offset(WAIT_FOR_RC6_EXIT) ||
(addr >= RPM_CONFIG0.reg && addr <= NOA_CONFIG(8).reg); (addr >= i915_mmio_reg_offset(RPM_CONFIG0) &&
addr <= i915_mmio_reg_offset(NOA_CONFIG(8)));
} }
static bool gen10_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr) static bool gen10_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
{ {
return gen8_is_valid_mux_addr(dev_priv, addr) || return gen8_is_valid_mux_addr(dev_priv, addr) ||
(addr >= OA_PERFCNT3_LO.reg && addr <= OA_PERFCNT4_HI.reg); (addr >= i915_mmio_reg_offset(OA_PERFCNT3_LO) &&
addr <= i915_mmio_reg_offset(OA_PERFCNT4_HI));
} }
static bool hsw_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr) static bool hsw_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
{ {
return gen7_is_valid_mux_addr(dev_priv, addr) || return gen7_is_valid_mux_addr(dev_priv, addr) ||
(addr >= 0x25100 && addr <= 0x2FF90) || (addr >= 0x25100 && addr <= 0x2FF90) ||
(addr >= HSW_MBVID2_NOA0.reg && addr <= HSW_MBVID2_NOA9.reg) || (addr >= i915_mmio_reg_offset(HSW_MBVID2_NOA0) &&
addr == HSW_MBVID2_MISR0.reg; addr <= i915_mmio_reg_offset(HSW_MBVID2_NOA9)) ||
addr == i915_mmio_reg_offset(HSW_MBVID2_MISR0);
} }
static bool chv_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr) static bool chv_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
...@@ -3061,14 +3069,14 @@ static uint32_t mask_reg_value(u32 reg, u32 val) ...@@ -3061,14 +3069,14 @@ static uint32_t mask_reg_value(u32 reg, u32 val)
* WaDisableSTUnitPowerOptimization workaround. Make sure the value * WaDisableSTUnitPowerOptimization workaround. Make sure the value
* programmed by userspace doesn't change this. * programmed by userspace doesn't change this.
*/ */
if (HALF_SLICE_CHICKEN2.reg == reg) if (i915_mmio_reg_offset(HALF_SLICE_CHICKEN2) == reg)
val = val & ~_MASKED_BIT_ENABLE(GEN8_ST_PO_DISABLE); val = val & ~_MASKED_BIT_ENABLE(GEN8_ST_PO_DISABLE);
	/* WAIT_FOR_RC6_EXIT has only one bit fulfilling the function 	/* WAIT_FOR_RC6_EXIT has only one bit fulfilling the function
* indicated by its name and a bunch of selection fields used by OA * indicated by its name and a bunch of selection fields used by OA
* configs. * configs.
*/ */
if (WAIT_FOR_RC6_EXIT.reg == reg) if (i915_mmio_reg_offset(WAIT_FOR_RC6_EXIT) == reg)
val = val & ~_MASKED_BIT_ENABLE(HSW_WAIT_FOR_RC6_EXIT_ENABLE); val = val & ~_MASKED_BIT_ENABLE(HSW_WAIT_FOR_RC6_EXIT_ENABLE);
return val; return val;
...@@ -3415,8 +3423,6 @@ static struct ctl_table dev_root[] = { ...@@ -3415,8 +3423,6 @@ static struct ctl_table dev_root[] = {
*/ */
void i915_perf_init(struct drm_i915_private *dev_priv) void i915_perf_init(struct drm_i915_private *dev_priv)
{ {
dev_priv->perf.oa.timestamp_frequency = 0;
if (IS_HASWELL(dev_priv)) { if (IS_HASWELL(dev_priv)) {
dev_priv->perf.oa.ops.is_valid_b_counter_reg = dev_priv->perf.oa.ops.is_valid_b_counter_reg =
gen7_is_valid_b_counter_addr; gen7_is_valid_b_counter_addr;
...@@ -3432,10 +3438,8 @@ void i915_perf_init(struct drm_i915_private *dev_priv) ...@@ -3432,10 +3438,8 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
dev_priv->perf.oa.ops.oa_hw_tail_read = dev_priv->perf.oa.ops.oa_hw_tail_read =
gen7_oa_hw_tail_read; gen7_oa_hw_tail_read;
dev_priv->perf.oa.timestamp_frequency = 12500000;
dev_priv->perf.oa.oa_formats = hsw_oa_formats; dev_priv->perf.oa.oa_formats = hsw_oa_formats;
} else if (i915_modparams.enable_execlists) { } else if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
		/* Note that although we could theoretically also support the 		/* Note that although we could theoretically also support the
* legacy ringbuffer mode on BDW (and earlier iterations of * legacy ringbuffer mode on BDW (and earlier iterations of
* this driver, before upstreaming did this) it didn't seem * this driver, before upstreaming did this) it didn't seem
...@@ -3477,23 +3481,6 @@ void i915_perf_init(struct drm_i915_private *dev_priv) ...@@ -3477,23 +3481,6 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<16); dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<16);
} }
switch (dev_priv->info.platform) {
case INTEL_BROADWELL:
dev_priv->perf.oa.timestamp_frequency = 12500000;
break;
case INTEL_BROXTON:
case INTEL_GEMINILAKE:
dev_priv->perf.oa.timestamp_frequency = 19200000;
break;
case INTEL_SKYLAKE:
case INTEL_KABYLAKE:
case INTEL_COFFEELAKE:
dev_priv->perf.oa.timestamp_frequency = 12000000;
break;
default:
break;
}
} else if (IS_GEN10(dev_priv)) { } else if (IS_GEN10(dev_priv)) {
dev_priv->perf.oa.ops.is_valid_b_counter_reg = dev_priv->perf.oa.ops.is_valid_b_counter_reg =
gen7_is_valid_b_counter_addr; gen7_is_valid_b_counter_addr;
...@@ -3509,15 +3496,10 @@ void i915_perf_init(struct drm_i915_private *dev_priv) ...@@ -3509,15 +3496,10 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
dev_priv->perf.oa.ctx_flexeu0_offset = 0x3de; dev_priv->perf.oa.ctx_flexeu0_offset = 0x3de;
dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<16); dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<16);
/* Default frequency, although we need to read it from
* the register as it might vary between parts.
*/
dev_priv->perf.oa.timestamp_frequency = 12000000;
} }
} }
if (dev_priv->perf.oa.timestamp_frequency) { if (dev_priv->perf.oa.ops.enable_metric_set) {
hrtimer_init(&dev_priv->perf.oa.poll_check_timer, hrtimer_init(&dev_priv->perf.oa.poll_check_timer,
CLOCK_MONOTONIC, HRTIMER_MODE_REL); CLOCK_MONOTONIC, HRTIMER_MODE_REL);
dev_priv->perf.oa.poll_check_timer.function = oa_poll_check_timer_cb; dev_priv->perf.oa.poll_check_timer.function = oa_poll_check_timer_cb;
...@@ -3527,8 +3509,8 @@ void i915_perf_init(struct drm_i915_private *dev_priv) ...@@ -3527,8 +3509,8 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
mutex_init(&dev_priv->perf.lock); mutex_init(&dev_priv->perf.lock);
spin_lock_init(&dev_priv->perf.oa.oa_buffer.ptr_lock); spin_lock_init(&dev_priv->perf.oa.oa_buffer.ptr_lock);
oa_sample_rate_hard_limit = oa_sample_rate_hard_limit = 1000 *
dev_priv->perf.oa.timestamp_frequency / 2; (INTEL_INFO(dev_priv)->cs_timestamp_frequency_khz / 2);
dev_priv->perf.sysctl_header = register_sysctl_table(dev_root); dev_priv->perf.sysctl_header = register_sysctl_table(dev_root);
mutex_init(&dev_priv->perf.metrics_lock); mutex_init(&dev_priv->perf.metrics_lock);
......
This diff has been collapsed.
/*
* Copyright © 2017 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#ifndef __I915_PMU_H__
#define __I915_PMU_H__
enum {
__I915_SAMPLE_FREQ_ACT = 0,
__I915_SAMPLE_FREQ_REQ,
__I915_NUM_PMU_SAMPLERS
};
/**
* How many different events we track in the global PMU mask.
*
 * It is also used to know the needed number of event reference counters.
*/
#define I915_PMU_MASK_BITS \
((1 << I915_PMU_SAMPLE_BITS) + \
(I915_PMU_LAST + 1 - __I915_PMU_OTHER(0)))
struct i915_pmu_sample {
u64 cur;
};
struct i915_pmu {
/**
* @node: List node for CPU hotplug handling.
*/
struct hlist_node node;
/**
* @base: PMU base.
*/
struct pmu base;
/**
* @lock: Lock protecting enable mask and ref count handling.
*/
spinlock_t lock;
/**
* @timer: Timer for internal i915 PMU sampling.
*/
struct hrtimer timer;
/**
* @enable: Bitmask of all currently enabled events.
*
	 * Bits are derived from uAPI event numbers such that the low 16 bits
	 * correspond to engine event _sample_ _type_ (I915_SAMPLE_QUEUED is
	 * bit 0), and higher bits correspond to other events (for instance
	 * I915_PMU_ACTUAL_FREQUENCY is bit 16, etc).
*
* In other words, low 16 bits are not per engine but per engine
* sampler type, while the upper bits are directly mapped to other
* event types.
*/
u64 enable;
/**
* @enable_count: Reference counts for the enabled events.
*
* Array indices are mapped in the same way as bits in the @enable field
* and they are used to control sampling on/off when multiple clients
* are using the PMU API.
*/
unsigned int enable_count[I915_PMU_MASK_BITS];
/**
* @timer_enabled: Should the internal sampling timer be running.
*/
bool timer_enabled;
/**
* @sample: Current and previous (raw) counters for sampling events.
*
* These counters are updated from the i915 PMU sampling timer.
*
* Only global counters are held here, while the per-engine ones are in
* struct intel_engine_cs.
*/
struct i915_pmu_sample sample[__I915_NUM_PMU_SAMPLERS];
};
#ifdef CONFIG_PERF_EVENTS
void i915_pmu_register(struct drm_i915_private *i915);
void i915_pmu_unregister(struct drm_i915_private *i915);
void i915_pmu_gt_parked(struct drm_i915_private *i915);
void i915_pmu_gt_unparked(struct drm_i915_private *i915);
#else
static inline void i915_pmu_register(struct drm_i915_private *i915) {}
static inline void i915_pmu_unregister(struct drm_i915_private *i915) {}
static inline void i915_pmu_gt_parked(struct drm_i915_private *i915) {}
static inline void i915_pmu_gt_unparked(struct drm_i915_private *i915) {}
#endif
#endif
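The @enable mask documented above packs two kinds of events into one u64: the low bits index per-engine sample types, and the bits directly above them index the "other" (global) events such as the frequency counters. A hypothetical user-space sketch of that bit derivation; the helper name and the concrete constants below are illustrative, not the driver's actual functions:

#include <stdint.h>
#include <stdio.h>

/* Illustrative values only; the real widths come from the i915 uAPI. */
#define SAMPLE_BITS	4		/* stands in for I915_PMU_SAMPLE_BITS */
#define OTHER_BASE	0x10000		/* stands in for __I915_PMU_OTHER(0) */

/* Map an event config to its bit in the global enable mask. */
static unsigned int config_to_enable_bit(uint64_t config)
{
	if (config < OTHER_BASE)
		/* engine event: the sample type selects one of the low bits */
		return (unsigned int)(config & ((1u << SAMPLE_BITS) - 1));

	/* global event: packed directly above the per-engine sample bits */
	return (1u << SAMPLE_BITS) + (unsigned int)(config - OTHER_BASE);
}

int main(void)
{
	printf("sample type 0 -> bit %u\n", config_to_enable_bit(0));
	printf("first 'other' event -> bit %u\n", config_to_enable_bit(OTHER_BASE));
	return 0;
}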
...@@ -186,6 +186,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) ...@@ -186,6 +186,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define VIDEO_ENHANCEMENT_CLASS 2 #define VIDEO_ENHANCEMENT_CLASS 2
#define COPY_ENGINE_CLASS 3 #define COPY_ENGINE_CLASS 3
#define OTHER_CLASS 4 #define OTHER_CLASS 4
#define MAX_ENGINE_CLASS 4
#define MAX_ENGINE_INSTANCE 1
/* PCI config space */ /* PCI config space */
......
...@@ -42,14 +42,30 @@ static inline struct drm_i915_private *kdev_minor_to_i915(struct device *kdev) ...@@ -42,14 +42,30 @@ static inline struct drm_i915_private *kdev_minor_to_i915(struct device *kdev)
static u32 calc_residency(struct drm_i915_private *dev_priv, static u32 calc_residency(struct drm_i915_private *dev_priv,
i915_reg_t reg) i915_reg_t reg)
{ {
return DIV_ROUND_CLOSEST_ULL(intel_rc6_residency_us(dev_priv, reg), u64 res;
1000);
intel_runtime_pm_get(dev_priv);
res = intel_rc6_residency_us(dev_priv, reg);
intel_runtime_pm_put(dev_priv);
return DIV_ROUND_CLOSEST_ULL(res, 1000);
} }
static ssize_t static ssize_t
show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf) show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
{ {
return snprintf(buf, PAGE_SIZE, "%x\n", intel_rc6_enabled()); struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
unsigned int mask;
mask = 0;
if (HAS_RC6(dev_priv))
mask |= BIT(0);
if (HAS_RC6p(dev_priv))
mask |= BIT(1);
if (HAS_RC6pp(dev_priv))
mask |= BIT(2);
return snprintf(buf, PAGE_SIZE, "%x\n", mask);
} }
static ssize_t static ssize_t
...@@ -252,14 +268,9 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev, ...@@ -252,14 +268,9 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,
freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
ret = intel_gpu_freq(dev_priv, (freq >> 8) & 0xff); ret = intel_gpu_freq(dev_priv, (freq >> 8) & 0xff);
} else { } else {
u32 rpstat = I915_READ(GEN6_RPSTAT1); ret = intel_gpu_freq(dev_priv,
if (INTEL_GEN(dev_priv) >= 9) intel_get_cagf(dev_priv,
ret = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT; I915_READ(GEN6_RPSTAT1)));
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
ret = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
else
ret = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
ret = intel_gpu_freq(dev_priv, ret);
} }
mutex_unlock(&dev_priv->pcu_lock); mutex_unlock(&dev_priv->pcu_lock);
......
...@@ -1896,7 +1896,7 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state) ...@@ -1896,7 +1896,7 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
min_cdclk = intel_pixel_rate_to_cdclk(dev_priv, crtc_state->pixel_rate); min_cdclk = intel_pixel_rate_to_cdclk(dev_priv, crtc_state->pixel_rate);
/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */ /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled) if (IS_BROADWELL(dev_priv) && hsw_crtc_state_ips_capable(crtc_state))
min_cdclk = DIV_ROUND_UP(min_cdclk * 100, 95); min_cdclk = DIV_ROUND_UP(min_cdclk * 100, 95);
/* BSpec says "Do not use DisplayPort with CDCLK less than 432 MHz, /* BSpec says "Do not use DisplayPort with CDCLK less than 432 MHz,
......
...@@ -40,9 +40,9 @@ ...@@ -40,9 +40,9 @@
#define I915_CSR_CNL "i915/cnl_dmc_ver1_06.bin" #define I915_CSR_CNL "i915/cnl_dmc_ver1_06.bin"
#define CNL_CSR_VERSION_REQUIRED CSR_VERSION(1, 6) #define CNL_CSR_VERSION_REQUIRED CSR_VERSION(1, 6)
#define I915_CSR_KBL "i915/kbl_dmc_ver1_01.bin" #define I915_CSR_KBL "i915/kbl_dmc_ver1_04.bin"
MODULE_FIRMWARE(I915_CSR_KBL); MODULE_FIRMWARE(I915_CSR_KBL);
#define KBL_CSR_VERSION_REQUIRED CSR_VERSION(1, 1) #define KBL_CSR_VERSION_REQUIRED CSR_VERSION(1, 4)
#define I915_CSR_SKL "i915/skl_dmc_ver1_26.bin" #define I915_CSR_SKL "i915/skl_dmc_ver1_26.bin"
MODULE_FIRMWARE(I915_CSR_SKL); MODULE_FIRMWARE(I915_CSR_SKL);
......
...@@ -2098,6 +2098,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder, ...@@ -2098,6 +2098,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
if (IS_CANNONLAKE(dev_priv)) { if (IS_CANNONLAKE(dev_priv)) {
/* Configure DPCLKA_CFGCR0 to map the DPLL to the DDI. */ /* Configure DPCLKA_CFGCR0 to map the DPLL to the DDI. */
val = I915_READ(DPCLKA_CFGCR0); val = I915_READ(DPCLKA_CFGCR0);
val &= ~DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->id, port); val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->id, port);
I915_WRITE(DPCLKA_CFGCR0, val); I915_WRITE(DPCLKA_CFGCR0, val);
...@@ -2513,17 +2514,17 @@ void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp) ...@@ -2513,17 +2514,17 @@ void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
udelay(600); udelay(600);
} }
bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv, static bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
struct intel_crtc *intel_crtc) enum transcoder cpu_transcoder)
{ {
u32 temp; if (cpu_transcoder == TRANSCODER_EDP)
return false;
if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO)) { if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO))
temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD); return false;
if (temp & AUDIO_OUTPUT_ENABLE(intel_crtc->pipe))
return true; return I915_READ(HSW_AUD_PIN_ELD_CP_VLD) &
} AUDIO_OUTPUT_ENABLE(cpu_transcoder);
return false;
} }
void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv, void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
...@@ -2616,7 +2617,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder, ...@@ -2616,7 +2617,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
} }
pipe_config->has_audio = pipe_config->has_audio =
intel_ddi_is_audio_enabled(dev_priv, intel_crtc); intel_ddi_is_audio_enabled(dev_priv, cpu_transcoder);
if (encoder->type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.bpp && if (encoder->type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.bpp &&
pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) { pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
......
...@@ -1643,7 +1643,7 @@ intel_dp_compute_config(struct intel_encoder *encoder, ...@@ -1643,7 +1643,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
pipe_config->has_pch_encoder = true; pipe_config->has_pch_encoder = true;
pipe_config->has_drrs = false; pipe_config->has_drrs = false;
if (port == PORT_A) if (IS_G4X(dev_priv) || port == PORT_A)
pipe_config->has_audio = false; pipe_config->has_audio = false;
else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO) else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
pipe_config->has_audio = intel_dp->has_audio; pipe_config->has_audio = intel_dp->has_audio;
...@@ -1677,6 +1677,10 @@ intel_dp_compute_config(struct intel_encoder *encoder, ...@@ -1677,6 +1677,10 @@ intel_dp_compute_config(struct intel_encoder *encoder,
conn_state->scaling_mode); conn_state->scaling_mode);
} }
if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
return false;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
return false; return false;
...@@ -4277,6 +4281,8 @@ intel_dp_check_link_status(struct intel_dp *intel_dp) ...@@ -4277,6 +4281,8 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
{ {
struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base; struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
struct drm_connector_state *conn_state =
intel_dp->attached_connector->base.state;
u8 link_status[DP_LINK_STATUS_SIZE]; u8 link_status[DP_LINK_STATUS_SIZE];
WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex)); WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
...@@ -4286,10 +4292,16 @@ intel_dp_check_link_status(struct intel_dp *intel_dp) ...@@ -4286,10 +4292,16 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
return; return;
} }
if (!intel_encoder->base.crtc) if (!conn_state->crtc)
return;
WARN_ON(!drm_modeset_is_locked(&conn_state->crtc->mutex));
if (!conn_state->crtc->state->active)
return; return;
if (!to_intel_crtc(intel_encoder->base.crtc)->active) if (conn_state->commit &&
!try_wait_for_completion(&conn_state->commit->hw_done))
return; return;
/* /*
...@@ -4364,9 +4376,8 @@ intel_dp_short_pulse(struct intel_dp *intel_dp) ...@@ -4364,9 +4376,8 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)
DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n"); DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
} }
drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, NULL);
intel_dp_check_link_status(intel_dp); intel_dp_check_link_status(intel_dp);
drm_modeset_unlock(&dev_priv->drm.mode_config.connection_mutex);
if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) { if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
DRM_DEBUG_KMS("Link Training Compliance Test requested\n"); DRM_DEBUG_KMS("Link Training Compliance Test requested\n");
/* Send a Hotplug Uevent to userspace to start modeset */ /* Send a Hotplug Uevent to userspace to start modeset */
...@@ -4814,8 +4825,19 @@ intel_dp_detect(struct drm_connector *connector, ...@@ -4814,8 +4825,19 @@ intel_dp_detect(struct drm_connector *connector,
connector->base.id, connector->name); connector->base.id, connector->name);
/* If full detect is not performed yet, do a full detect */ /* If full detect is not performed yet, do a full detect */
if (!intel_dp->detect_done) if (!intel_dp->detect_done) {
struct drm_crtc *crtc;
int ret;
crtc = connector->state->crtc;
if (crtc) {
ret = drm_modeset_lock(&crtc->mutex, ctx);
if (ret)
return ret;
}
status = intel_dp_long_pulse(intel_dp->attached_connector); status = intel_dp_long_pulse(intel_dp->attached_connector);
}
intel_dp->detect_done = false; intel_dp->detect_done = false;
...@@ -5097,7 +5119,38 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) ...@@ -5097,7 +5119,38 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
} }
if (!intel_dp->is_mst) { if (!intel_dp->is_mst) {
if (!intel_dp_short_pulse(intel_dp)) { struct drm_modeset_acquire_ctx ctx;
struct drm_connector *connector = &intel_dp->attached_connector->base;
struct drm_crtc *crtc;
int iret;
bool handled = false;
drm_modeset_acquire_init(&ctx, 0);
retry:
iret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, &ctx);
if (iret)
goto err;
crtc = connector->state->crtc;
if (crtc) {
iret = drm_modeset_lock(&crtc->mutex, &ctx);
if (iret)
goto err;
}
handled = intel_dp_short_pulse(intel_dp);
err:
if (iret == -EDEADLK) {
drm_modeset_backoff(&ctx);
goto retry;
}
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
WARN(iret, "Acquiring modeset locks failed with %i\n", iret);
if (!handled) {
intel_dp->detect_done = false; intel_dp->detect_done = false;
goto put_power; goto put_power;
} }
...@@ -5131,8 +5184,11 @@ static void ...@@ -5131,8 +5184,11 @@ static void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector) intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{ {
struct drm_i915_private *dev_priv = to_i915(connector->dev); struct drm_i915_private *dev_priv = to_i915(connector->dev);
enum port port = dp_to_dig_port(intel_dp)->base.port;
if (!IS_G4X(dev_priv) && port != PORT_A)
intel_attach_force_audio_property(connector);
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector); intel_attach_broadcast_rgb_property(connector);
if (intel_dp_is_edp(intel_dp)) { if (intel_dp_is_edp(intel_dp)) {
...@@ -5306,6 +5362,12 @@ intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp) ...@@ -5306,6 +5362,12 @@ intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp)
*/ */
final->t8 = 1; final->t8 = 1;
final->t9 = 1; final->t9 = 1;
/*
* HW has only a 100msec granularity for t11_t12 so round it up
* accordingly.
*/
final->t11_t12 = roundup(final->t11_t12, 100 * 10);
} }
static void static void
...@@ -6034,7 +6096,8 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, ...@@ -6034,7 +6096,8 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
drm_connector_init(dev, connector, &intel_dp_connector_funcs, type); drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
connector->interlace_allowed = true; if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
connector->interlace_allowed = true;
connector->doublescan_allowed = 0; connector->doublescan_allowed = 0;
intel_dp_init_connector_port_info(intel_dig_port); intel_dp_init_connector_port_info(intel_dig_port);
......
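The panel power sequencer hunk above rounds t11_t12 up because the hardware field only has 100 ms granularity; since the stored value appears to be kept in 100 us units (hence the factor of 100 * 10), the rounding step is 1000 units. A small sketch of that arithmetic with a hypothetical raw value of 510 ms:

#include <stdio.h>

/* Round v up to the nearest multiple of to (both positive). */
#define ROUNDUP(v, to)	((((v) + (to) - 1) / (to)) * (to))

int main(void)
{
	/* Value in 100 us units: 5100 => 510 ms. */
	unsigned int t11_t12 = 5100;

	/* HW granularity is 100 ms, i.e. 100 * 10 units of 100 us. */
	t11_t12 = ROUNDUP(t11_t12, 100 * 10);
	printf("t11_t12 rounded up to %u units (%u ms)\n", t11_t12, t11_t12 / 10);
	return 0;
}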
...@@ -48,8 +48,9 @@ ...@@ -48,8 +48,9 @@
* having timed out, since the timeout could be due to preemption or similar and * having timed out, since the timeout could be due to preemption or similar and
* we've never had a chance to check the condition before the timeout. * we've never had a chance to check the condition before the timeout.
*/ */
#define _wait_for(COND, US, W) ({ \ #define _wait_for(COND, US, Wmin, Wmax) ({ \
unsigned long timeout__ = jiffies + usecs_to_jiffies(US) + 1; \ unsigned long timeout__ = jiffies + usecs_to_jiffies(US) + 1; \
long wait__ = (Wmin); /* recommended min for usleep is 10 us */ \
int ret__; \ int ret__; \
might_sleep(); \ might_sleep(); \
for (;;) { \ for (;;) { \
...@@ -62,12 +63,14 @@ ...@@ -62,12 +63,14 @@
ret__ = -ETIMEDOUT; \ ret__ = -ETIMEDOUT; \
break; \ break; \
} \ } \
usleep_range((W), (W) * 2); \ usleep_range(wait__, wait__ * 2); \
if (wait__ < (Wmax)) \
wait__ <<= 1; \
} \ } \
ret__; \ ret__; \
}) })
#define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 1000) #define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 10, 1000)
/* If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false. */ /* If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false. */
#if defined(CONFIG_DRM_I915_DEBUG) && defined(CONFIG_PREEMPT_COUNT) #if defined(CONFIG_DRM_I915_DEBUG) && defined(CONFIG_PREEMPT_COUNT)
...@@ -116,7 +119,7 @@ ...@@ -116,7 +119,7 @@
int ret__; \ int ret__; \
BUILD_BUG_ON(!__builtin_constant_p(US)); \ BUILD_BUG_ON(!__builtin_constant_p(US)); \
if ((US) > 10) \ if ((US) > 10) \
ret__ = _wait_for((COND), (US), 10); \ ret__ = _wait_for((COND), (US), 10, 10); \
else \ else \
ret__ = _wait_for_atomic((COND), (US), 0); \ ret__ = _wait_for_atomic((COND), (US), 0); \
ret__; \ ret__; \
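The reworked _wait_for() above starts polling with a short sleep (Wmin) and doubles the sleep on every miss up to Wmax, instead of sleeping a fixed interval. A stand-alone sketch of the same exponential backoff idea, using usleep() in user space purely for illustration:

#include <stdbool.h>
#include <time.h>
#include <unistd.h>

/*
 * Poll cond() until it returns true or timeout_us elapses. The sleep
 * between polls starts at wait_min_us and doubles up to wait_max_us,
 * mirroring the backoff in _wait_for().
 */
static int wait_for_cond(bool (*cond)(void),
			 long timeout_us, long wait_min_us, long wait_max_us)
{
	struct timespec start, now;
	long wait_us = wait_min_us;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		if (cond())
			return 0;

		clock_gettime(CLOCK_MONOTONIC, &now);
		if ((now.tv_sec - start.tv_sec) * 1000000L +
		    (now.tv_nsec - start.tv_nsec) / 1000L >= timeout_us)
			return -1;	/* timed out */

		usleep(wait_us);
		if (wait_us < wait_max_us)
			wait_us <<= 1;
	}
}

static int polls;
static bool ready_after_three_polls(void)
{
	return ++polls >= 3;
}

int main(void)
{
	return wait_for_cond(ready_after_three_polls, 1000000, 10, 1000);
}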
@@ -799,7 +802,6 @@ struct intel_crtc_state {
 struct intel_crtc {
 	struct drm_crtc base;
 	enum pipe pipe;
-	enum plane plane;
 	/*
 	 * Whether the crtc and the connected output pipeline is active. Implies
 	 * that crtc->enabled is set, i.e. the current mode configuration has
@@ -844,7 +846,7 @@ struct intel_crtc {
 struct intel_plane {
 	struct drm_plane base;
-	u8 plane;
+	enum i9xx_plane_id i9xx_plane;
 	enum plane_id id;
 	enum pipe pipe;
 	bool can_scale;
@@ -866,6 +868,7 @@ struct intel_plane {
 			     const struct intel_plane_state *plane_state);
 	void (*disable_plane)(struct intel_plane *plane,
 			      struct intel_crtc *crtc);
+	bool (*get_hw_state)(struct intel_plane *plane);
 	int (*check_plane)(struct intel_plane *plane,
 			   struct intel_crtc_state *crtc_state,
 			   struct intel_plane_state *state);
@@ -1129,7 +1132,7 @@ intel_get_crtc_for_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
 }
 static inline struct intel_crtc *
-intel_get_crtc_for_plane(struct drm_i915_private *dev_priv, enum plane plane)
+intel_get_crtc_for_plane(struct drm_i915_private *dev_priv, enum i9xx_plane_id plane)
 {
 	return dev_priv->plane_to_crtc_mapping[plane];
 }
@@ -1285,8 +1288,6 @@ intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state);
 void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state);
 void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp);
 bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
-bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
-				struct intel_crtc *intel_crtc);
 void intel_ddi_get_config(struct intel_encoder *encoder,
 			  struct intel_crtc_state *pipe_config);
@@ -1485,6 +1486,7 @@ bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
 int chv_calc_dpll_params(int refclk, struct dpll *pll_clock);
 bool intel_crtc_active(struct intel_crtc *crtc);
+bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state);
 void hsw_enable_ips(const struct intel_crtc_state *crtc_state);
 void hsw_disable_ips(const struct intel_crtc_state *crtc_state);
 enum intel_display_power_domain intel_port_to_power_domain(enum port port);
@@ -1651,7 +1653,7 @@ static inline void intel_fbdev_restore_mode(struct drm_device *dev)
 /* intel_fbc.c */
 void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
-			   struct drm_atomic_state *state);
+			   struct intel_atomic_state *state);
 bool intel_fbc_is_active(struct drm_i915_private *dev_priv);
 void intel_fbc_pre_update(struct intel_crtc *crtc,
 			  struct intel_crtc_state *crtc_state,
@@ -1906,15 +1908,10 @@ bool skl_ddb_allocation_overlaps(struct drm_i915_private *dev_priv,
 				 const struct skl_ddb_entry *ddb,
 				 int ignore);
 bool ilk_disable_lp_wm(struct drm_device *dev);
-int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6);
 int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
 				  struct intel_crtc_state *cstate);
 void intel_init_ipc(struct drm_i915_private *dev_priv);
 void intel_enable_ipc(struct drm_i915_private *dev_priv);
-static inline int intel_rc6_enabled(void)
-{
-	return i915_modparams.enable_rc6;
-}
 /* intel_sdvo.c */
 bool intel_sdvo_init(struct drm_i915_private *dev_priv,
@@ -1934,6 +1931,7 @@ void skl_update_plane(struct intel_plane *plane,
 		      const struct intel_crtc_state *crtc_state,
 		      const struct intel_plane_state *plane_state);
 void skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc);
+bool skl_plane_get_hw_state(struct intel_plane *plane);
 /* intel_tv.c */
 void intel_tv_init(struct drm_i915_private *dev_priv);
......
@@ -1670,7 +1670,7 @@ static int intel_dsi_get_panel_orientation(struct intel_connector *connector)
 {
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	int orientation = DRM_MODE_PANEL_ORIENTATION_NORMAL;
-	enum plane plane;
+	enum i9xx_plane_id plane;
 	u32 val;
 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
......
@@ -37,8 +37,6 @@
  * Resource Streamer, is 66944 bytes, which rounds to 17 pages.
  */
 #define HSW_CXT_TOTAL_SIZE (17 * PAGE_SIZE)
-/* Same as Haswell, but 72064 bytes now. */
-#define GEN8_CXT_TOTAL_SIZE (18 * PAGE_SIZE)
 #define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
 #define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
@@ -164,9 +162,7 @@ __intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
 	case 9:
 		return GEN9_LR_CONTEXT_RENDER_SIZE;
 	case 8:
-		return i915_modparams.enable_execlists ?
-		       GEN8_LR_CONTEXT_RENDER_SIZE :
-		       GEN8_CXT_TOTAL_SIZE;
+		return GEN8_LR_CONTEXT_RENDER_SIZE;
 	case 7:
 		if (IS_HASWELL(dev_priv))
 			return HSW_CXT_TOTAL_SIZE;
@@ -209,6 +205,15 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
 	GEM_BUG_ON(info->class >= ARRAY_SIZE(intel_engine_classes));
 	class_info = &intel_engine_classes[info->class];
+	if (GEM_WARN_ON(info->class > MAX_ENGINE_CLASS))
+		return -EINVAL;
+	if (GEM_WARN_ON(info->instance > MAX_ENGINE_INSTANCE))
+		return -EINVAL;
+	if (GEM_WARN_ON(dev_priv->engine_class[info->class][info->instance]))
+		return -EINVAL;
 	GEM_BUG_ON(dev_priv->engine[id]);
 	engine = kzalloc(sizeof(*engine), GFP_KERNEL);
 	if (!engine)
@@ -236,8 +241,11 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
 	/* Nothing to do here, execute in order of dependencies */
 	engine->schedule = NULL;
+	spin_lock_init(&engine->stats.lock);
 	ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);
+	dev_priv->engine_class[info->class][info->instance] = engine;
 	dev_priv->engine[id] = engine;
 	return 0;
 }
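The new GEM_WARN_ON checks guard a two-level lookup table: class and instance are bounds-checked, a duplicate slot is rejected, and the engine is then recorded under engine_class[class][instance]. A compact standalone sketch of that registration pattern, with invented types and limits:

    /* Illustrative registration into a [class][instance] table; not driver types. */
    #include <stdio.h>

    #define MAX_CLASS    3
    #define MAX_INSTANCE 1

    struct engine { const char *name; };

    static struct engine *engine_class[MAX_CLASS + 1][MAX_INSTANCE + 1];

    static int register_engine(struct engine *e, unsigned int class, unsigned int instance)
    {
        if (class > MAX_CLASS || instance > MAX_INSTANCE)
            return -1;                      /* out of range, like the GEM_WARN_ON checks */
        if (engine_class[class][instance])
            return -1;                      /* slot already taken */
        engine_class[class][instance] = e;
        return 0;
    }

    int main(void)
    {
        struct engine rcs0 = { "rcs0" };

        printf("first : %d\n", register_engine(&rcs0, 0, 0));  /* 0  */
        printf("dupe  : %d\n", register_engine(&rcs0, 0, 0));  /* -1 */
        printf("bounds: %d\n", register_engine(&rcs0, 9, 0));  /* -1 */
        return 0;
    }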
@@ -316,7 +324,7 @@ int intel_engines_init(struct drm_i915_private *dev_priv)
 			&intel_engine_classes[engine->class];
 		int (*init)(struct intel_engine_cs *engine);
-		if (i915_modparams.enable_execlists)
+		if (HAS_EXECLISTS(dev_priv))
 			init = class_info->init_execlists;
 		else
 			init = class_info->init_legacy;
@@ -366,18 +374,6 @@ void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno)
 		if (HAS_VEBOX(dev_priv))
 			I915_WRITE(RING_SYNC_2(engine->mmio_base), 0);
 	}
-	if (dev_priv->semaphore) {
-		struct page *page = i915_vma_first_page(dev_priv->semaphore);
-		void *semaphores;
-		/* Semaphores are in noncoherent memory, flush to be safe */
-		semaphores = kmap_atomic(page);
-		memset(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0),
-		       0, I915_NUM_ENGINES * gen8_semaphore_seqno_size);
-		drm_clflush_virt_range(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0),
-				       I915_NUM_ENGINES * gen8_semaphore_seqno_size);
-		kunmap_atomic(semaphores);
-	}
 	intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
 	clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
@@ -1071,6 +1067,15 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
 	/* WaDisableSTUnitPowerOptimization:skl,bxt,kbl,glk,cfl */
 	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
+	/* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
+	if (IS_GEN9_LP(dev_priv)) {
+		u32 val = I915_READ(GEN8_L3SQCREG1);
+		val &= ~L3_PRIO_CREDITS_MASK;
+		val |= L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2);
+		I915_WRITE(GEN8_L3SQCREG1, val);
+	}
 	/* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
 	I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) |
 				    GEN8_LQSC_FLUSH_COHERENT_LINES));
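Moving WaProgramL3SqcReg1DefaultForPerf here keeps the usual read-modify-write shape: read the register, clear the priority-credits field, OR in the new general/high credit values, and write it back. The standalone model below shows only that mask-and-merge arithmetic; the shifts and widths are invented for illustration and do not reflect the real register layout.

    /* Mask-and-merge field update, modelled in userspace; made-up bit layout. */
    #include <stdio.h>
    #include <stdint.h>

    #define GENERAL_SHIFT 19
    #define HIGH_SHIFT    14
    #define PRIO_CREDITS_MASK ((0x7fu << GENERAL_SHIFT) | (0x1fu << HIGH_SHIFT))

    static uint32_t set_prio_credits(uint32_t reg, uint32_t general, uint32_t high)
    {
        reg &= ~PRIO_CREDITS_MASK;                         /* clear the whole field */
        reg |= (general << GENERAL_SHIFT) | (high << HIGH_SHIFT);
        return reg;
    }

    int main(void)
    {
        uint32_t val = 0xffffffffu;                        /* pretend register readback */

        printf("0x%08x\n", set_prio_credits(val, 62, 2));
        return 0;
    }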
@@ -1188,7 +1193,6 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
 static int bxt_init_workarounds(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
-	u32 val;
 	int ret;
 	ret = gen9_init_workarounds(engine);
@@ -1203,12 +1207,6 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
 	I915_WRITE(FF_SLICE_CS_CHICKEN2,
 		   _MASKED_BIT_ENABLE(GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE));
-	/* WaProgramL3SqcReg1DefaultForPerf:bxt */
-	val = I915_READ(GEN8_L3SQCREG1);
-	val &= ~L3_PRIO_CREDITS_MASK;
-	val |= L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2);
-	I915_WRITE(GEN8_L3SQCREG1, val);
 	/* WaToEnableHwFixForPushConstHWBug:bxt */
 	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
 			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
@@ -1729,6 +1727,15 @@ void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m)
 		   I915_READ(RING_MI_MODE(engine->mmio_base)),
 		   I915_READ(RING_MI_MODE(engine->mmio_base)) & (MODE_IDLE) ? " [idle]" : "");
 	}
+	if (HAS_LEGACY_SEMAPHORES(dev_priv)) {
+		drm_printf(m, "\tSYNC_0: 0x%08x\n",
+			   I915_READ(RING_SYNC_0(engine->mmio_base)));
+		drm_printf(m, "\tSYNC_1: 0x%08x\n",
+			   I915_READ(RING_SYNC_1(engine->mmio_base)));
+		if (HAS_VEBOX(dev_priv))
+			drm_printf(m, "\tSYNC_2: 0x%08x\n",
+				   I915_READ(RING_SYNC_2(engine->mmio_base)));
+	}
 	rcu_read_unlock();
@@ -1739,7 +1746,7 @@ void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m)
 	drm_printf(m, "\tBBADDR: 0x%08x_%08x\n",
 		   upper_32_bits(addr), lower_32_bits(addr));
-	if (i915_modparams.enable_execlists) {
+	if (HAS_EXECLISTS(dev_priv)) {
 		const u32 *hws = &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
 		u32 ptr, read, write;
 		unsigned int idx;
@@ -1823,6 +1830,114 @@ void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m)
 	drm_printf(m, "\n");
 }
+static u8 user_class_map[] = {
+	[I915_ENGINE_CLASS_RENDER] = RENDER_CLASS,
+	[I915_ENGINE_CLASS_COPY] = COPY_ENGINE_CLASS,
+	[I915_ENGINE_CLASS_VIDEO] = VIDEO_DECODE_CLASS,
+	[I915_ENGINE_CLASS_VIDEO_ENHANCE] = VIDEO_ENHANCEMENT_CLASS,
+};
+struct intel_engine_cs *
+intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance)
+{
+	if (class >= ARRAY_SIZE(user_class_map))
+		return NULL;
+	class = user_class_map[class];
+	GEM_BUG_ON(class > MAX_ENGINE_CLASS);
+	if (instance > MAX_ENGINE_INSTANCE)
+		return NULL;
+	return i915->engine_class[class][instance];
+}
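intel_engine_lookup_user() first translates the uABI class index through user_class_map (rejecting anything outside the map), then indexes the per-class/instance table populated at setup time. A simplified standalone sketch of that remap-then-lookup flow, with invented class ids and engine names:

    /* uABI class -> internal class remap followed by table lookup; illustrative only. */
    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    enum { UABI_RENDER, UABI_COPY, UABI_VIDEO, UABI_VIDEO_ENHANCE }; /* user-visible */
    enum { RENDER, VIDEO, VIDEO_ENHANCE, COPY, MAX_CLASS = COPY };   /* internal ids  */

    static const unsigned char user_class_map[] = {
        [UABI_RENDER]        = RENDER,
        [UABI_COPY]          = COPY,
        [UABI_VIDEO]         = VIDEO,
        [UABI_VIDEO_ENHANCE] = VIDEO_ENHANCE,
    };

    static const char *engine_class[MAX_CLASS + 1][2] = {
        [RENDER] = { "rcs0" },
        [VIDEO]  = { "vcs0", "vcs1" },
    };

    static const char *lookup_user(unsigned int class, unsigned int instance)
    {
        if (class >= ARRAY_SIZE(user_class_map) || instance >= 2)
            return NULL;                        /* unknown uABI class or instance */
        return engine_class[user_class_map[class]][instance];
    }

    int main(void)
    {
        const char *hit  = lookup_user(UABI_VIDEO, 1);
        const char *miss = lookup_user(7, 0);

        printf("%s\n", hit ? hit : "(none)");   /* vcs1   */
        printf("%s\n", miss ? miss : "(none)"); /* (none) */
        return 0;
    }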
+/**
+ * intel_enable_engine_stats() - Enable engine busy tracking on engine
+ * @engine: engine to enable stats collection
+ *
+ * Start collecting the engine busyness data for @engine.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+int intel_enable_engine_stats(struct intel_engine_cs *engine)
+{
+	unsigned long flags;
+	if (!intel_engine_supports_stats(engine))
+		return -ENODEV;
+	spin_lock_irqsave(&engine->stats.lock, flags);
+	if (engine->stats.enabled == ~0)
+		goto busy;
+	if (engine->stats.enabled++ == 0)
+		engine->stats.enabled_at = ktime_get();
+	spin_unlock_irqrestore(&engine->stats.lock, flags);
+	return 0;
+busy:
+	spin_unlock_irqrestore(&engine->stats.lock, flags);
+	return -EBUSY;
+}
+static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine)
+{
+	ktime_t total = engine->stats.total;
+	/*
+	 * If the engine is executing something at the moment
+	 * add it to the total.
+	 */
+	if (engine->stats.active)
+		total = ktime_add(total,
+				  ktime_sub(ktime_get(), engine->stats.start));
+	return total;
+}
+/**
+ * intel_engine_get_busy_time() - Return current accumulated engine busyness
+ * @engine: engine to report on
+ *
+ * Returns accumulated time @engine was busy since engine stats were enabled.
+ */
+ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine)
+{
+	ktime_t total;
+	unsigned long flags;
+	spin_lock_irqsave(&engine->stats.lock, flags);
+	total = __intel_engine_get_busy_time(engine);
+	spin_unlock_irqrestore(&engine->stats.lock, flags);
+	return total;
+}
+/**
+ * intel_disable_engine_stats() - Disable engine busy tracking on engine
+ * @engine: engine to disable stats collection
+ *
+ * Stops collecting the engine busyness data for @engine.
+ */
+void intel_disable_engine_stats(struct intel_engine_cs *engine)
+{
+	unsigned long flags;
+	if (!intel_engine_supports_stats(engine))
+		return;
+	spin_lock_irqsave(&engine->stats.lock, flags);
+	WARN_ON_ONCE(engine->stats.enabled == 0);
+	if (--engine->stats.enabled == 0) {
+		engine->stats.total = __intel_engine_get_busy_time(engine);
+		engine->stats.active = 0;
+	}
+	spin_unlock_irqrestore(&engine->stats.lock, flags);
+}
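Taken together, the accounting is reference counted: the first intel_enable_engine_stats() caller starts the bookkeeping, intel_engine_get_busy_time() returns the accumulated total plus the still-running span when the engine is active, and the last intel_disable_engine_stats() caller folds any open span back into the total. The userspace model below mirrors those assumed semantics with hand-fed timestamps; it is not the driver code.

    /* Simplified model of busy-time accounting with injected timestamps (ns). */
    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    struct stats {
        bool     active;  /* a request is executing right now */
        uint64_t start;   /* when the current busy span began */
        uint64_t total;   /* accumulated busy time so far     */
    };

    static uint64_t busy_time(const struct stats *s, uint64_t now)
    {
        uint64_t t = s->total;

        if (s->active)
            t += now - s->start;              /* include the span still in flight */
        return t;
    }

    int main(void)
    {
        struct stats s = { 0 };

        s.active = true;  s.start = 1000;                /* context switched in  */
        s.total += 4000 - s.start; s.active = false;     /* switched out: +3000  */
        s.active = true;  s.start = 6000;                /* busy again           */

        printf("busy at t=7500: %llu ns\n",
               (unsigned long long)busy_time(&s, 7500)); /* 3000 + 1500 = 4500 */
        return 0;
    }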
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftests/mock_engine.c"
 #endif
(This file's diff is collapsed.)
@@ -231,8 +231,7 @@ int intel_guc_sample_forcewake(struct intel_guc *guc)
 	action[0] = INTEL_GUC_ACTION_SAMPLE_FORCEWAKE;
 	/* WaRsDisableCoarsePowerGating:skl,bxt */
-	if (!intel_rc6_enabled() ||
-	    NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
+	if (!HAS_RC6(dev_priv) || NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
 		action[1] = 0;
 	else
 		/* bit 0 and 1 are for Render and Media domain separately */
......
@@ -30,8 +30,8 @@
 #include "intel_guc_fwif.h"
 #include "intel_guc_ct.h"
 #include "intel_guc_log.h"
+#include "intel_guc_reg.h"
 #include "intel_uc_fw.h"
-#include "i915_guc_reg.h"
 #include "i915_vma.h"
 struct guc_preempt_work {
......
@@ -30,14 +30,14 @@
 #include "intel_guc_fw.h"
 #include "i915_drv.h"
-#define SKL_FW_MAJOR 6
+#define SKL_FW_MAJOR 9
-#define SKL_FW_MINOR 1
+#define SKL_FW_MINOR 33
-#define BXT_FW_MAJOR 8
+#define BXT_FW_MAJOR 9
-#define BXT_FW_MINOR 7
+#define BXT_FW_MINOR 29
 #define KBL_FW_MAJOR 9
-#define KBL_FW_MINOR 14
+#define KBL_FW_MINOR 39
 #define GLK_FW_MAJOR 10
 #define GLK_FW_MINOR 56
@@ -130,14 +130,14 @@ static int guc_xfer_rsa(struct intel_guc *guc, struct i915_vma *vma)
 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
 	struct intel_uc_fw *guc_fw = &guc->fw;
 	struct sg_table *sg = vma->pages;
-	u32 rsa[UOS_RSA_SCRATCH_MAX_COUNT];
+	u32 rsa[UOS_RSA_SCRATCH_COUNT];
 	int i;
 	if (sg_pcopy_to_buffer(sg->sgl, sg->nents, rsa, sizeof(rsa),
 			       guc_fw->rsa_offset) != sizeof(rsa))
 		return -EINVAL;
-	for (i = 0; i < UOS_RSA_SCRATCH_MAX_COUNT; i++)
+	for (i = 0; i < UOS_RSA_SCRATCH_COUNT; i++)
 		I915_WRITE(UOS_RSA_SCRATCH(i), rsa[i]);
 	return 0;
......
@@ -21,8 +21,8 @@
  * IN THE SOFTWARE.
  *
  */
-#ifndef _I915_GUC_REG_H_
+#ifndef _INTEL_GUC_REG_H_
-#define _I915_GUC_REG_H_
+#define _INTEL_GUC_REG_H_
 /* Definitions of GuC H/W registers, bits, etc */
@@ -52,7 +52,8 @@
 #define SOFT_SCRATCH_COUNT 16
 #define UOS_RSA_SCRATCH(i) _MMIO(0xc200 + (i) * 4)
-#define UOS_RSA_SCRATCH_MAX_COUNT 64
+#define UOS_RSA_SCRATCH_COUNT 64
 #define DMA_ADDR_0_LOW _MMIO(0xc300)
 #define DMA_ADDR_0_HIGH _MMIO(0xc304)
 #define DMA_ADDR_1_LOW _MMIO(0xc308)
......
@@ -22,8 +22,8 @@
  *
  */
-#ifndef _I915_GUC_SUBMISSION_H_
+#ifndef _INTEL_GUC_SUBMISSION_H_
-#define _I915_GUC_SUBMISSION_H_
+#define _INTEL_GUC_SUBMISSION_H_
 #include <linux/spinlock.h>
......
(The remaining file diffs are collapsed.)