Commit 5e30ca1e authored by Linus Torvalds

Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
 "Intel and radeon fixes.

  Post KS/LC git requests from i915 and radeon stacked up.  They are all
  fixes along with some new pci ids for radeon, and one maintainers file
  entry.

   - i915: display fixes and irq fixes
   - radeon: pci ids, and misc gpuvm, dpm and hdp cache"

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux: (29 commits)
  MAINTAINERS: Add entry for Renesas DRM drivers
  drm/radeon: add additional SI pci ids
  drm/radeon: add new bonaire pci ids
  drm/radeon: add new KV pci id
  Revert "drm/radeon: Use write-combined CPU mappings of ring buffers with PCIe"
  drm/radeon: fix active_cu mask on SI and CIK after re-init (v3)
  drm/radeon: fix active cu count for SI and CIK
  drm/radeon: re-enable selective GPUVM flushing
  drm/radeon: Sync ME and PFP after CP semaphore waits v4
  drm/radeon: fix display handling in radeon_gpu_reset
  drm/radeon: fix pm handling in radeon_gpu_reset
  drm/radeon: Only flush HDP cache for indirect buffers from userspace
  drm/radeon: properly document reloc priority mask
  drm/i915: don't try to retrain a DP link on an inactive CRTC
  drm/i915: make sure VDD is turned off during system suspend
  drm/i915: cancel hotplug and dig_port work during suspend and unload
  drm/i915: fix HPD IRQ reenable work cancelation
  drm/i915: take display port power domain in DP HPD handler
  drm/i915: Don't try to enable cursor from setplane when crtc is disabled
  drm/i915: Skip load detect when intel_crtc->new_enable==true
  ...
@@ -3121,6 +3121,17 @@ F: include/linux/host1x.h
 F: include/uapi/drm/tegra_drm.h
 F: Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
 
+DRM DRIVERS FOR RENESAS
+M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+L: dri-devel@lists.freedesktop.org
+L: linux-sh@vger.kernel.org
+T: git git://people.freedesktop.org/~airlied/linux
+S: Supported
+F: drivers/gpu/drm/rcar-du/
+F: drivers/gpu/drm/shmobile/
+F: include/linux/platform_data/rcar-du.h
+F: include/linux/platform_data/shmob_drm.h
+
 DSBR100 USB FM RADIO DRIVER
 M: Alexey Klimov <klimov.linux@gmail.com>
 L: linux-media@vger.kernel.org
......
@@ -494,6 +494,36 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
     return true;
 }
 
+void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
+{
+    spin_lock_irq(&dev_priv->irq_lock);
+
+    dev_priv->long_hpd_port_mask = 0;
+    dev_priv->short_hpd_port_mask = 0;
+    dev_priv->hpd_event_bits = 0;
+
+    spin_unlock_irq(&dev_priv->irq_lock);
+
+    cancel_work_sync(&dev_priv->dig_port_work);
+    cancel_work_sync(&dev_priv->hotplug_work);
+    cancel_delayed_work_sync(&dev_priv->hotplug_reenable_work);
+}
+
+static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
+{
+    struct drm_device *dev = dev_priv->dev;
+    struct drm_encoder *encoder;
+
+    drm_modeset_lock_all(dev);
+    list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+        struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+
+        if (intel_encoder->suspend)
+            intel_encoder->suspend(intel_encoder);
+    }
+    drm_modeset_unlock_all(dev);
+}
+
 static int i915_drm_freeze(struct drm_device *dev)
 {
     struct drm_i915_private *dev_priv = dev->dev_private;
@@ -538,6 +568,9 @@ static int i915_drm_freeze(struct drm_device *dev)
     flush_delayed_work(&dev_priv->rps.delayed_resume_work);
 
     intel_runtime_pm_disable_interrupts(dev);
+    intel_hpd_cancel_work(dev_priv);
 
+    intel_suspend_encoders(dev_priv);
+
     intel_suspend_gt_powersave(dev);
......
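The two helpers above establish a fixed teardown order on freeze. A condensed sketch of the resulting i915_drm_freeze() flow, restating only what the hunks add (surrounding bodies elided):

    /* condensed from the hunks above; not the full function */
    intel_runtime_pm_disable_interrupts(dev);  /* no new HPD interrupts */
    intel_hpd_cancel_work(dev_priv);           /* drain queued HPD/dig-port work */
    intel_suspend_encoders(dev_priv);          /* per-encoder hook, e.g. eDP VDD off */
    intel_suspend_gt_powersave(dev);

Clearing the HPD masks under irq_lock before the cancel_*_sync() calls means no handler can re-queue itself between the drain and the encoder suspend.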
@@ -1458,7 +1458,7 @@ struct drm_i915_private {
         } hpd_mark;
     } hpd_stats[HPD_NUM_PINS];
     u32 hpd_event_bits;
-    struct timer_list hotplug_reenable_timer;
+    struct delayed_work hotplug_reenable_work;
 
     struct i915_fbc fbc;
     struct i915_drrs drrs;
@@ -2178,6 +2178,7 @@ extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
 extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
 extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
 int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
+void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
 
 extern void intel_console_resume(struct work_struct *work);
......
@@ -1189,8 +1189,8 @@ static void i915_hotplug_work_func(struct work_struct *work)
      * some connectors */
     if (hpd_disabled) {
         drm_kms_helper_poll_enable(dev);
-        mod_timer(&dev_priv->hotplug_reenable_timer,
-              jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
+        mod_delayed_work(system_wq, &dev_priv->hotplug_reenable_work,
+                 msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
     }
 
     spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
@@ -1213,11 +1213,6 @@ static void i915_hotplug_work_func(struct work_struct *work)
     drm_kms_helper_hotplug_event(dev);
 }
 
-static void intel_hpd_irq_uninstall(struct drm_i915_private *dev_priv)
-{
-    del_timer_sync(&dev_priv->hotplug_reenable_timer);
-}
-
 static void ironlake_rps_change_irq_handler(struct drm_device *dev)
 {
     struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3892,8 +3887,6 @@ static void gen8_irq_uninstall(struct drm_device *dev)
     if (!dev_priv)
         return;
 
-    intel_hpd_irq_uninstall(dev_priv);
-
     gen8_irq_reset(dev);
 }
@@ -3908,8 +3901,6 @@ static void valleyview_irq_uninstall(struct drm_device *dev)
     I915_WRITE(VLV_MASTER_IER, 0);
 
-    intel_hpd_irq_uninstall(dev_priv);
-
     for_each_pipe(pipe)
         I915_WRITE(PIPESTAT(pipe), 0xffff);
@@ -3988,8 +3979,6 @@ static void ironlake_irq_uninstall(struct drm_device *dev)
     if (!dev_priv)
         return;
 
-    intel_hpd_irq_uninstall(dev_priv);
-
     ironlake_irq_reset(dev);
 }
@@ -4360,8 +4349,6 @@ static void i915_irq_uninstall(struct drm_device * dev)
     struct drm_i915_private *dev_priv = dev->dev_private;
     int pipe;
 
-    intel_hpd_irq_uninstall(dev_priv);
-
     if (I915_HAS_HOTPLUG(dev)) {
         I915_WRITE(PORT_HOTPLUG_EN, 0);
         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
@@ -4598,8 +4585,6 @@ static void i965_irq_uninstall(struct drm_device * dev)
     if (!dev_priv)
         return;
 
-    intel_hpd_irq_uninstall(dev_priv);
-
     I915_WRITE(PORT_HOTPLUG_EN, 0);
     I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
@@ -4615,14 +4600,18 @@ static void i965_irq_uninstall(struct drm_device * dev)
     I915_WRITE(IIR, I915_READ(IIR));
 }
 
-static void intel_hpd_irq_reenable(unsigned long data)
+static void intel_hpd_irq_reenable(struct work_struct *work)
 {
-    struct drm_i915_private *dev_priv = (struct drm_i915_private *)data;
+    struct drm_i915_private *dev_priv =
+        container_of(work, typeof(*dev_priv),
+                 hotplug_reenable_work.work);
     struct drm_device *dev = dev_priv->dev;
     struct drm_mode_config *mode_config = &dev->mode_config;
     unsigned long irqflags;
     int i;
 
+    intel_runtime_pm_get(dev_priv);
+
     spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
     for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
         struct drm_connector *connector;
@@ -4648,6 +4637,8 @@ static void intel_hpd_irq_reenable(unsigned long data)
     if (dev_priv->display.hpd_irq_setup)
         dev_priv->display.hpd_irq_setup(dev);
     spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+    intel_runtime_pm_put(dev_priv);
 }
@@ -4670,8 +4661,8 @@ void intel_irq_init(struct drm_device *dev)
     setup_timer(&dev_priv->gpu_error.hangcheck_timer,
             i915_hangcheck_elapsed,
             (unsigned long) dev);
-    setup_timer(&dev_priv->hotplug_reenable_timer, intel_hpd_irq_reenable,
-            (unsigned long) dev_priv);
+    INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work,
+              intel_hpd_irq_reenable);
 
     pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
......
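The i915_irq.c hunks above convert the hotplug-storm re-enable timer into delayed work. The motivation is visible in the diff itself: the handler now calls intel_runtime_pm_get(), which may sleep, and a timer callback runs in softirq context where sleeping is forbidden. A minimal sketch of the same conversion, with hypothetical foo_* names:

    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    struct foo_priv {
        struct delayed_work reenable_work;  /* was: struct timer_list */
    };

    static void foo_reenable(struct work_struct *work)
    {
        /* a delayed_work embeds its work_struct in the .work member */
        struct foo_priv *priv =
            container_of(work, struct foo_priv, reenable_work.work);

        /* process context: sleeping (e.g. runtime-PM calls) is allowed */
        (void)priv;
    }

    static void foo_init(struct foo_priv *priv)
    {
        /* was: setup_timer(&priv->timer, foo_reenable, (unsigned long)priv) */
        INIT_DELAYED_WORK(&priv->reenable_work, foo_reenable);
    }

    static void foo_arm(struct foo_priv *priv)
    {
        /* note: mod_timer() takes an absolute jiffies value, while
         * mod_delayed_work() takes a relative timeout */
        mod_delayed_work(system_wq, &priv->reenable_work,
                 msecs_to_jiffies(100));
    }

The same property makes cancel_delayed_work_sync() usable from the new intel_hpd_cancel_work() on the suspend path.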
@@ -699,16 +699,21 @@ intel_crt_detect(struct drm_connector *connector, bool force)
         goto out;
     }
 
+    drm_modeset_acquire_init(&ctx, 0);
+
     /* for pre-945g platforms use load detect */
     if (intel_get_load_detect_pipe(connector, NULL, &tmp, &ctx)) {
         if (intel_crt_detect_ddc(connector))
             status = connector_status_connected;
         else
             status = intel_crt_load_detect(crt);
-        intel_release_load_detect_pipe(connector, &tmp, &ctx);
+        intel_release_load_detect_pipe(connector, &tmp);
     } else
         status = connector_status_unknown;
 
+    drm_modeset_drop_locks(&ctx);
+    drm_modeset_acquire_fini(&ctx);
+
 out:
     intel_display_power_put(dev_priv, power_domain);
     return status;
......
@@ -8462,8 +8462,6 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
               connector->base.id, connector->name,
               encoder->base.id, encoder->name);
 
-    drm_modeset_acquire_init(ctx, 0);
-
 retry:
     ret = drm_modeset_lock(&config->connection_mutex, ctx);
     if (ret)
@@ -8502,10 +8500,14 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
         i++;
         if (!(encoder->possible_crtcs & (1 << i)))
             continue;
-        if (!possible_crtc->enabled) {
-            crtc = possible_crtc;
-            break;
-        }
+        if (possible_crtc->enabled)
+            continue;
+        /* This can occur when applying the pipe A quirk on resume. */
+        if (to_intel_crtc(possible_crtc)->new_enabled)
+            continue;
+
+        crtc = possible_crtc;
+        break;
     }
 
     /*
@@ -8574,15 +8576,11 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
         goto retry;
     }
 
-    drm_modeset_drop_locks(ctx);
-    drm_modeset_acquire_fini(ctx);
-
     return false;
 }
 
 void intel_release_load_detect_pipe(struct drm_connector *connector,
-                    struct intel_load_detect_pipe *old,
-                    struct drm_modeset_acquire_ctx *ctx)
+                    struct intel_load_detect_pipe *old)
 {
     struct intel_encoder *intel_encoder =
         intel_attached_encoder(connector);
@@ -8606,17 +8604,12 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
             drm_framebuffer_unreference(old->release_fb);
         }
 
-        goto unlock;
+        return;
     }
 
     /* Switch crtc and encoder back off if necessary */
     if (old->dpms_mode != DRM_MODE_DPMS_ON)
         connector->funcs->dpms(connector, old->dpms_mode);
-
-unlock:
-    drm_modeset_drop_locks(ctx);
-    drm_modeset_acquire_fini(ctx);
 }
 
 static int i9xx_pll_refclk(struct drm_device *dev,
@@ -11700,8 +11693,8 @@ intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
     };
     const struct drm_rect clip = {
         /* integer pixels */
-        .x2 = intel_crtc->config.pipe_src_w,
-        .y2 = intel_crtc->config.pipe_src_h,
+        .x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
+        .y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
     };
     bool visible;
     int ret;
@@ -12659,7 +12652,7 @@ static void intel_enable_pipe_a(struct drm_device *dev)
     struct intel_connector *connector;
     struct drm_connector *crt = NULL;
     struct intel_load_detect_pipe load_detect_temp;
-    struct drm_modeset_acquire_ctx ctx;
+    struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;
 
     /* We can't just switch on the pipe A, we need to set things up with a
      * proper mode and output configuration. As a gross hack, enable pipe A
@@ -12676,10 +12669,8 @@ static void intel_enable_pipe_a(struct drm_device *dev)
     if (!crt)
         return;
 
-    if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, &ctx))
-        intel_release_load_detect_pipe(crt, &load_detect_temp, &ctx);
+    if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx))
+        intel_release_load_detect_pipe(crt, &load_detect_temp);
 }
 
 static bool
@@ -13112,7 +13103,7 @@ void intel_modeset_cleanup(struct drm_device *dev)
      * experience fancy races otherwise.
      */
     drm_irq_uninstall(dev);
-    cancel_work_sync(&dev_priv->hotplug_work);
+    intel_hpd_cancel_work(dev_priv);
     dev_priv->pm._irqs_disabled = true;
 
     /*
......
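Across intel_crt.c, intel_display.c and intel_tv.c, this series moves ownership of the drm_modeset_acquire_ctx out of intel_get_load_detect_pipe()/intel_release_load_detect_pipe() and into the callers. The resulting caller-side pattern, exactly as it appears in the intel_crt_detect() hunk:

    struct drm_modeset_acquire_ctx ctx;

    drm_modeset_acquire_init(&ctx, 0);

    if (intel_get_load_detect_pipe(connector, NULL, &tmp, &ctx))
        intel_release_load_detect_pipe(connector, &tmp);

    drm_modeset_drop_locks(&ctx);
    drm_modeset_acquire_fini(&ctx);

This is what lets intel_enable_pipe_a() reuse the acquire context its caller already holds (dev->mode_config.acquire_ctx) instead of nesting a second acquire/fini cycle inside the helper.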
@@ -3553,6 +3553,9 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
     if (WARN_ON(!intel_encoder->base.crtc))
         return;
 
+    if (!to_intel_crtc(intel_encoder->base.crtc)->active)
+        return;
+
     /* Try to read receiver status if the link appears to be up */
     if (!intel_dp_get_link_status(intel_dp, link_status)) {
         return;
@@ -4003,6 +4006,16 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
     kfree(intel_dig_port);
 }
 
+static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
+{
+    struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
+
+    if (!is_edp(intel_dp))
+        return;
+
+    edp_panel_vdd_off_sync(intel_dp);
+}
+
 static void intel_dp_encoder_reset(struct drm_encoder *encoder)
 {
     intel_edp_panel_vdd_sanitize(to_intel_encoder(encoder));
@@ -4037,15 +4050,21 @@ bool
 intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
 {
     struct intel_dp *intel_dp = &intel_dig_port->dp;
+    struct intel_encoder *intel_encoder = &intel_dig_port->base;
     struct drm_device *dev = intel_dig_port->base.base.dev;
     struct drm_i915_private *dev_priv = dev->dev_private;
-    int ret;
+    enum intel_display_power_domain power_domain;
+    bool ret = true;
 
     if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
         intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
 
     DRM_DEBUG_KMS("got hpd irq on port %d - %s\n", intel_dig_port->port,
               long_hpd ? "long" : "short");
 
+    power_domain = intel_display_port_power_domain(intel_encoder);
+    intel_display_power_get(dev_priv, power_domain);
+
     if (long_hpd) {
         if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
             goto mst_fail;
@@ -4061,8 +4080,7 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
     } else {
         if (intel_dp->is_mst) {
-            ret = intel_dp_check_mst_status(intel_dp);
-            if (ret == -EINVAL)
+            if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
                 goto mst_fail;
         }
@@ -4076,7 +4094,8 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
             drm_modeset_unlock(&dev->mode_config.connection_mutex);
         }
     }
-    return false;
+    ret = false;
+    goto put_power;
 mst_fail:
     /* if we were in MST mode, and device is not there get out of MST mode */
     if (intel_dp->is_mst) {
@@ -4084,7 +4103,10 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
         intel_dp->is_mst = false;
         drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
     }
-    return true;
+put_power:
+    intel_display_power_put(dev_priv, power_domain);
+
+    return ret;
 }
 
 /* Return which DP Port should be selected for Transcoder DP control */
@@ -4722,6 +4744,7 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
     intel_encoder->disable = intel_disable_dp;
     intel_encoder->get_hw_state = intel_dp_get_hw_state;
     intel_encoder->get_config = intel_dp_get_config;
+    intel_encoder->suspend = intel_dp_encoder_suspend;
     if (IS_CHERRYVIEW(dev)) {
         intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
         intel_encoder->pre_enable = chv_pre_enable_dp;
......
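The intel_dp_hpd_pulse() hunks wrap the whole handler in a display power reference so the digital-port and AUX registers stay powered while the hot-plug event is processed. Reduced to its control flow (do_detect() here is a hypothetical stand-in for the long/short pulse handling in the real function):

    power_domain = intel_display_port_power_domain(intel_encoder);
    intel_display_power_get(dev_priv, power_domain);

    if (do_detect() < 0)       /* any failure path */
        goto put_power;        /* no bare "return": it would leak the ref */

    ret = false;
    put_power:
        intel_display_power_put(dev_priv, power_domain);
        return ret;

Converting every early "return" into a "goto put_power" is what forced the int-to-bool change of ret in the same hunk.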
@@ -153,6 +153,12 @@ struct intel_encoder {
      * be set correctly before calling this function. */
     void (*get_config)(struct intel_encoder *,
                struct intel_crtc_config *pipe_config);
+    /*
+     * Called during system suspend after all pending requests for the
+     * encoder are flushed (for example for DP AUX transactions) and
+     * device interrupts are disabled.
+     */
+    void (*suspend)(struct intel_encoder *);
     int crtc_mask;
     enum hpd_pin hpd_pin;
 };
@@ -830,8 +836,7 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
                     struct intel_load_detect_pipe *old,
                     struct drm_modeset_acquire_ctx *ctx);
 void intel_release_load_detect_pipe(struct drm_connector *connector,
-                    struct intel_load_detect_pipe *old,
-                    struct drm_modeset_acquire_ctx *ctx);
+                    struct intel_load_detect_pipe *old);
 int intel_pin_and_fence_fb_obj(struct drm_device *dev,
                    struct drm_i915_gem_object *obj,
                    struct intel_engine_cs *pipelined);
......
@@ -1323,11 +1323,16 @@ intel_tv_detect(struct drm_connector *connector, bool force)
         struct intel_load_detect_pipe tmp;
         struct drm_modeset_acquire_ctx ctx;
 
+        drm_modeset_acquire_init(&ctx, 0);
+
         if (intel_get_load_detect_pipe(connector, &mode, &tmp, &ctx)) {
             type = intel_tv_detect_type(intel_tv, connector);
-            intel_release_load_detect_pipe(connector, &tmp, &ctx);
+            intel_release_load_detect_pipe(connector, &tmp);
         } else
             return connector_status_unknown;
+
+        drm_modeset_drop_locks(&ctx);
+        drm_modeset_acquire_fini(&ctx);
     } else
         return connector->status;
......
@@ -76,7 +76,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
     evergreen.o evergreen_cs.o evergreen_blit_shaders.o \
     evergreen_hdmi.o radeon_trace_points.o ni.o cayman_blit_shaders.o \
     atombios_encoders.o radeon_semaphore.o radeon_sa.o atombios_i2c.o si.o \
-    si_blit_shaders.o radeon_prime.o radeon_uvd.o cik.o cik_blit_shaders.o \
+    si_blit_shaders.o radeon_prime.o cik.o cik_blit_shaders.o \
     r600_dpm.o rs780_dpm.o rv6xx_dpm.o rv770_dpm.o rv730_dpm.o rv740_dpm.o \
     rv770_smc.o cypress_dpm.o btc_dpm.o sumo_dpm.o sumo_smc.o trinity_dpm.o \
     trinity_smc.o ni_dpm.o si_smc.o si_dpm.o kv_smc.o kv_dpm.o ci_smc.o \
......
@@ -869,6 +869,9 @@ static int ci_set_thermal_temperature_range(struct radeon_device *rdev,
     WREG32_SMC(CG_THERMAL_CTRL, tmp);
 #endif
 
+    rdev->pm.dpm.thermal.min_temp = low_temp;
+    rdev->pm.dpm.thermal.max_temp = high_temp;
+
     return 0;
 }
......
@@ -3483,7 +3483,7 @@ static void cik_gpu_init(struct radeon_device *rdev)
     u32 mc_shared_chmap, mc_arb_ramcfg;
     u32 hdp_host_path_cntl;
     u32 tmp;
-    int i, j, k;
+    int i, j;
 
     switch (rdev->family) {
     case CHIP_BONAIRE:
@@ -3544,6 +3544,7 @@ static void cik_gpu_init(struct radeon_device *rdev)
             (rdev->pdev->device == 0x130B) ||
             (rdev->pdev->device == 0x130E) ||
             (rdev->pdev->device == 0x1315) ||
+            (rdev->pdev->device == 0x1318) ||
             (rdev->pdev->device == 0x131B)) {
             rdev->config.cik.max_cu_per_sh = 4;
             rdev->config.cik.max_backends_per_se = 1;
@@ -3672,12 +3673,11 @@ static void cik_gpu_init(struct radeon_device *rdev)
              rdev->config.cik.max_sh_per_se,
              rdev->config.cik.max_backends_per_se);
 
+    rdev->config.cik.active_cus = 0;
     for (i = 0; i < rdev->config.cik.max_shader_engines; i++) {
         for (j = 0; j < rdev->config.cik.max_sh_per_se; j++) {
-            for (k = 0; k < rdev->config.cik.max_cu_per_sh; k++) {
-                rdev->config.cik.active_cus +=
-                    hweight32(cik_get_cu_active_bitmap(rdev, i, j));
-            }
+            rdev->config.cik.active_cus +=
+                hweight32(cik_get_cu_active_bitmap(rdev, i, j));
         }
     }
@@ -3801,7 +3801,7 @@ int cik_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
     radeon_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
     radeon_ring_write(ring, ((scratch - PACKET3_SET_UCONFIG_REG_START) >> 2));
     radeon_ring_write(ring, 0xDEADBEEF);
-    radeon_ring_unlock_commit(rdev, ring);
+    radeon_ring_unlock_commit(rdev, ring, false);
 
     for (i = 0; i < rdev->usec_timeout; i++) {
         tmp = RREG32(scratch);
@@ -3920,6 +3920,17 @@ void cik_fence_compute_ring_emit(struct radeon_device *rdev,
     radeon_ring_write(ring, 0);
 }
 
+/**
+ * cik_semaphore_ring_emit - emit a semaphore on the CP ring
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon ring buffer object
+ * @semaphore: radeon semaphore object
+ * @emit_wait: Is this a semaphore wait?
+ *
+ * Emits a semaphore signal/wait packet to the CP ring and prevents the PFP
+ * from running ahead of semaphore waits.
+ */
 bool cik_semaphore_ring_emit(struct radeon_device *rdev,
                  struct radeon_ring *ring,
                  struct radeon_semaphore *semaphore,
@@ -3932,6 +3943,12 @@ bool cik_semaphore_ring_emit(struct radeon_device *rdev,
     radeon_ring_write(ring, lower_32_bits(addr));
     radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel);
 
+    if (emit_wait && ring->idx == RADEON_RING_TYPE_GFX_INDEX) {
+        /* Prevent the PFP from running ahead of the semaphore wait */
+        radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
+        radeon_ring_write(ring, 0x0);
+    }
+
     return true;
 }
@@ -4004,7 +4021,7 @@ int cik_copy_cpdma(struct radeon_device *rdev,
         return r;
     }
 
-    radeon_ring_unlock_commit(rdev, ring);
+    radeon_ring_unlock_commit(rdev, ring, false);
 
     radeon_semaphore_free(rdev, &sem, *fence);
 
     return r;
@@ -4103,7 +4120,7 @@ int cik_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
     ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START) >> 2);
     ib.ptr[2] = 0xDEADBEEF;
     ib.length_dw = 3;
-    r = radeon_ib_schedule(rdev, &ib, NULL);
+    r = radeon_ib_schedule(rdev, &ib, NULL, false);
     if (r) {
         radeon_scratch_free(rdev, scratch);
         radeon_ib_free(rdev, &ib);
@@ -4324,7 +4341,7 @@ static int cik_cp_gfx_start(struct radeon_device *rdev)
     radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
     radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
 
-    radeon_ring_unlock_commit(rdev, ring);
+    radeon_ring_unlock_commit(rdev, ring, false);
 
     return 0;
 }
@@ -5958,14 +5975,14 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
     /* update SH_MEM_* regs */
     radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-    radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+    radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
                  WRITE_DATA_DST_SEL(0)));
     radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
     radeon_ring_write(ring, 0);
     radeon_ring_write(ring, VMID(vm->id));
 
     radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 6));
-    radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+    radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
                  WRITE_DATA_DST_SEL(0)));
     radeon_ring_write(ring, SH_MEM_BASES >> 2);
     radeon_ring_write(ring, 0);
@@ -5976,7 +5993,7 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
     radeon_ring_write(ring, 0); /* SH_MEM_APE1_LIMIT */
 
     radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-    radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+    radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
                  WRITE_DATA_DST_SEL(0)));
     radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
     radeon_ring_write(ring, 0);
@@ -5987,7 +6004,7 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
     /* bits 0-15 are the VM contexts0-15 */
     radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-    radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+    radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
                  WRITE_DATA_DST_SEL(0)));
     radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
     radeon_ring_write(ring, 0);
......
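The semaphore change above is the subtle one in this pull. The CP is split into a prefetch parser (PFP) that runs ahead of the micro engine (ME); a semaphore wait only stalls the ME, so the PFP could already have fetched indirect-buffer data that is not yet safe to read. The new packet makes the PFP hold until the ME catches up. Restated with comments (the r600.c hunk later in this diff adds the same packet guarded by rdev->family >= CHIP_RV770, since PFP_SYNC_ME only exists on R7xx and newer):

    /* only on the GFX ring, and only for the wait side of the semaphore */
    if (emit_wait && ring->idx == RADEON_RING_TYPE_GFX_INDEX) {
        /* stall the PFP until the ME has drained past this point */
        radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
        radeon_ring_write(ring, 0x0);  /* the packet carries a single zero dword */
    }

The WRITE_DATA_ENGINE_SEL(usepfp) changes in cik_vm_flush() are the companion fix: the VM flush writes must go through the same engine that executes the semaphore wait, or the ME and PFP can disagree about when the page tables changed.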
@@ -596,7 +596,7 @@ int cik_copy_dma(struct radeon_device *rdev,
         return r;
     }
 
-    radeon_ring_unlock_commit(rdev, ring);
+    radeon_ring_unlock_commit(rdev, ring, false);
 
     radeon_semaphore_free(rdev, &sem, *fence);
 
     return r;
@@ -638,7 +638,7 @@ int cik_sdma_ring_test(struct radeon_device *rdev,
     radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr));
     radeon_ring_write(ring, 1); /* number of DWs to follow */
     radeon_ring_write(ring, 0xDEADBEEF);
-    radeon_ring_unlock_commit(rdev, ring);
+    radeon_ring_unlock_commit(rdev, ring, false);
 
     for (i = 0; i < rdev->usec_timeout; i++) {
         tmp = readl(ptr);
@@ -695,7 +695,7 @@ int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
     ib.ptr[4] = 0xDEADBEEF;
     ib.length_dw = 5;
 
-    r = radeon_ib_schedule(rdev, &ib, NULL);
+    r = radeon_ib_schedule(rdev, &ib, NULL, false);
     if (r) {
         radeon_ib_free(rdev, &ib);
         DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
......
@@ -2869,7 +2869,7 @@ static int evergreen_cp_start(struct radeon_device *rdev)
     radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
     radeon_ring_write(ring, 0);
     radeon_ring_write(ring, 0);
-    radeon_ring_unlock_commit(rdev, ring);
+    radeon_ring_unlock_commit(rdev, ring, false);
 
     cp_me = 0xff;
     WREG32(CP_ME_CNTL, cp_me);
@@ -2912,7 +2912,7 @@ static int evergreen_cp_start(struct radeon_device *rdev)
     radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
     radeon_ring_write(ring, 0x00000010); /*  */
 
-    radeon_ring_unlock_commit(rdev, ring);
+    radeon_ring_unlock_commit(rdev, ring, false);
 
     return 0;
 }
......
@@ -155,7 +155,7 @@ int evergreen_copy_dma(struct radeon_device *rdev,
         return r;
     }
 
-    radeon_ring_unlock_commit(rdev, ring);
+    radeon_ring_unlock_commit(rdev, ring, false);
 
     radeon_semaphore_free(rdev, &sem, *fence);
 
     return r;
......
@@ -1438,14 +1438,14 @@ static int kv_update_uvd_dpm(struct radeon_device *rdev, bool gate)
     return kv_enable_uvd_dpm(rdev, !gate);
 }
 
-static u8 kv_get_vce_boot_level(struct radeon_device *rdev)
+static u8 kv_get_vce_boot_level(struct radeon_device *rdev, u32 evclk)
 {
     u8 i;
     struct radeon_vce_clock_voltage_dependency_table *table =
         &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
 
     for (i = 0; i < table->count; i++) {
-        if (table->entries[i].evclk >= 0) /* XXX */
+        if (table->entries[i].evclk >= evclk)
             break;
     }
 
@@ -1468,7 +1468,7 @@ static int kv_update_vce_dpm(struct radeon_device *rdev,
         if (pi->caps_stable_p_state)
             pi->vce_boot_level = table->count - 1;
         else
-            pi->vce_boot_level = kv_get_vce_boot_level(rdev);
+            pi->vce_boot_level = kv_get_vce_boot_level(rdev, radeon_new_state->evclk);
 
         ret = kv_copy_bytes_to_smc(rdev,
                        pi->dpm_table_start +
@@ -2726,7 +2726,10 @@ int kv_dpm_init(struct radeon_device *rdev)
     pi->caps_sclk_ds = true;
     pi->enable_auto_thermal_throttling = true;
     pi->disable_nb_ps3_in_battery = false;
-    pi->bapm_enable = true;
+    if (radeon_bapm == 0)
+        pi->bapm_enable = false;
+    else
+        pi->bapm_enable = true;
     pi->voltage_drop_t = 0;
     pi->caps_sclk_throttle_low_notification = false;
     pi->caps_fps = false; /* true? */
......
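The kv_dpm.c VCE fix replaces a stubbed comparison (evclk >= 0, which always matched the very first table entry) with the evclk actually requested by the new power state, so the VCE boot level scales with the requested clock. The search it implements, isolated from the hunk above:

    /* pick the first VCE dpm level able to satisfy the requested evclk;
     * if no entry matches, the loop falls through with i == table->count */
    for (i = 0; i < table->count; i++) {
        if (table->entries[i].evclk >= evclk)
            break;
    }
    return i;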
@@ -1505,7 +1505,7 @@ static int cayman_cp_start(struct radeon_device *rdev)
     radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
     radeon_ring_write(ring, 0);
     radeon_ring_write(ring, 0);
-    radeon_ring_unlock_commit(rdev, ring);
+    radeon_ring_unlock_commit(rdev, ring, false);
 
     cayman_cp_enable(rdev, true);
@@ -1547,7 +1547,7 @@ static int cayman_cp_start(struct radeon_device *rdev)
     radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
     radeon_ring_write(ring, 0x00000010); /*  */
 
-    radeon_ring_unlock_commit(rdev, ring);
+    radeon_ring_unlock_commit(rdev, ring, false);
 
     /* XXX init other rings */
......
@@ -925,7 +925,7 @@ int r100_copy_blit(struct radeon_device *rdev,
     if (fence) {
         r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
     }
-    radeon_ring_unlock_commit(rdev, ring);
+    radeon_ring_unlock_commit(rdev, ring, false);
     return r;
 }
@@ -958,7 +958,7 @@ void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
               RADEON_ISYNC_ANY3D_IDLE2D |
               RADEON_ISYNC_WAIT_IDLEGUI |
               RADEON_ISYNC_CPSCRATCH_IDLEGUI);
-    radeon_ring_unlock_commit(rdev, ring);
+    radeon_ring_unlock_commit(rdev, ring, false);
 }
@@ -3638,7 +3638,7 @@ int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
     }
     radeon_ring_write(ring, PACKET0(scratch, 0));
     radeon_ring_write(ring, 0xDEADBEEF);
-    radeon_ring_unlock_commit(rdev, ring);
+    radeon_ring_unlock_commit(rdev, ring, false);
     for (i = 0; i < rdev->usec_timeout; i++) {
         tmp = RREG32(scratch);
         if (tmp == 0xDEADBEEF) {
@@ -3700,7 +3700,7 @@ int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
     ib.ptr[6] = PACKET2(0);
     ib.ptr[7] = PACKET2(0);
     ib.length_dw = 8;
-    r = radeon_ib_schedule(rdev, &ib, NULL);
+    r = radeon_ib_schedule(rdev, &ib, NULL, false);
     if (r) {
         DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
         goto free_ib;
......
@@ -121,7 +121,7 @@ int r200_copy_dma(struct radeon_device *rdev,
     if (fence) {
         r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
     }
-    radeon_ring_unlock_commit(rdev, ring);
+    radeon_ring_unlock_commit(rdev, ring, false);
     return r;
 }
......
@@ -295,7 +295,7 @@ void r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
     radeon_ring_write(ring,
               R300_GEOMETRY_ROUND_NEAREST |
               R300_COLOR_ROUND_NEAREST);
-    radeon_ring_unlock_commit(rdev, ring);
+    radeon_ring_unlock_commit(rdev, ring, false);
 }
 
 static void r300_errata(struct radeon_device *rdev)
......
@@ -219,7 +219,7 @@ static void r420_cp_errata_init(struct radeon_device *rdev)
     radeon_ring_write(ring, PACKET0(R300_CP_RESYNC_ADDR, 1));
     radeon_ring_write(ring, rdev->config.r300.resync_scratch);
     radeon_ring_write(ring, 0xDEADBEEF);
-    radeon_ring_unlock_commit(rdev, ring);
+    radeon_ring_unlock_commit(rdev, ring, false);
 }
 
 static void r420_cp_errata_fini(struct radeon_device *rdev)
@@ -232,7 +232,7 @@ static void r420_cp_errata_fini(struct radeon_device *rdev)
     radeon_ring_lock(rdev, ring, 8);
     radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
     radeon_ring_write(ring, R300_RB3D_DC_FINISH);
-    radeon_ring_unlock_commit(rdev, ring);
+    radeon_ring_unlock_commit(rdev, ring, false);
     radeon_scratch_free(rdev, rdev->config.r300.resync_scratch);
 }
......
@@ -2547,7 +2547,7 @@ int r600_cp_start(struct radeon_device *rdev)
     radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
     radeon_ring_write(ring, 0);
     radeon_ring_write(ring, 0);
-    radeon_ring_unlock_commit(rdev, ring);
+    radeon_ring_unlock_commit(rdev, ring, false);
 
     cp_me = 0xff;
     WREG32(R_0086D8_CP_ME_CNTL, cp_me);
@@ -2683,7 +2683,7 @@ int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
     radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
     radeon_ring_write(ring, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
     radeon_ring_write(ring, 0xDEADBEEF);
-    radeon_ring_unlock_commit(rdev, ring);
+    radeon_ring_unlock_commit(rdev, ring, false);
     for (i = 0; i < rdev->usec_timeout; i++) {
         tmp = RREG32(scratch);
         if (tmp == 0xDEADBEEF)
@@ -2753,6 +2753,17 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
     }
 }
 
+/**
+ * r600_semaphore_ring_emit - emit a semaphore on the CP ring
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon ring buffer object
+ * @semaphore: radeon semaphore object
+ * @emit_wait: Is this a semaphore wait?
+ *
+ * Emits a semaphore signal/wait packet to the CP ring and prevents the PFP
+ * from running ahead of semaphore waits.
+ */
 bool r600_semaphore_ring_emit(struct radeon_device *rdev,
                   struct radeon_ring *ring,
                   struct radeon_semaphore *semaphore,
@@ -2768,6 +2779,13 @@ bool r600_semaphore_ring_emit(struct radeon_device *rdev,
     radeon_ring_write(ring, lower_32_bits(addr));
     radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
 
+    /* PFP_SYNC_ME packet only exists on 7xx+ */
+    if (emit_wait && (rdev->family >= CHIP_RV770)) {
+        /* Prevent the PFP from running ahead of the semaphore wait */
+        radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
+        radeon_ring_write(ring, 0x0);
+    }
+
     return true;
 }
@@ -2845,7 +2863,7 @@ int r600_copy_cpdma(struct radeon_device *rdev,
         return r;
     }
 
-    radeon_ring_unlock_commit(rdev, ring);
+    radeon_ring_unlock_commit(rdev, ring, false);
 
     radeon_semaphore_free(rdev, &sem, *fence);
 
     return r;
@@ -3165,7 +3183,7 @@ int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
     ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
     ib.ptr[2] = 0xDEADBEEF;
     ib.length_dw = 3;
-    r = radeon_ib_schedule(rdev, &ib, NULL);
+    r = radeon_ib_schedule(rdev, &ib, NULL, false);
     if (r) {
         DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
         goto free_ib;
......
@@ -261,7 +261,7 @@ int r600_dma_ring_test(struct radeon_device *rdev,
     radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
     radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff);
     radeon_ring_write(ring, 0xDEADBEEF);
-    radeon_ring_unlock_commit(rdev, ring);
+    radeon_ring_unlock_commit(rdev, ring, false);
 
     for (i = 0; i < rdev->usec_timeout; i++) {
         tmp = readl(ptr);
@@ -368,7 +368,7 @@ int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
     ib.ptr[3] = 0xDEADBEEF;
     ib.length_dw = 4;
 
-    r = radeon_ib_schedule(rdev, &ib, NULL);
+    r = radeon_ib_schedule(rdev, &ib, NULL, false);
     if (r) {
         radeon_ib_free(rdev, &ib);
         DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
@@ -493,7 +493,7 @@ int r600_copy_dma(struct radeon_device *rdev,
         return r;
     }
 
-    radeon_ring_unlock_commit(rdev, ring);
+    radeon_ring_unlock_commit(rdev, ring, false);
 
     radeon_semaphore_free(rdev, &sem, *fence);
 
     return r;
......
@@ -1597,6 +1597,7 @@
  */
 #              define PACKET3_CP_DMA_CMD_SAIC        (1 << 28)
 #              define PACKET3_CP_DMA_CMD_DAIC        (1 << 29)
+#define PACKET3_PFP_SYNC_ME                          0x42 /* r7xx+ only */
 #define PACKET3_SURFACE_SYNC                         0x43
 #              define PACKET3_CB0_DEST_BASE_ENA      (1 << 6)
 #              define PACKET3_FULL_CACHE_ENA         (1 << 20) /* r7xx+ only */
......
@@ -105,6 +105,7 @@ extern int radeon_vm_size;
 extern int radeon_vm_block_size;
 extern int radeon_deep_color;
 extern int radeon_use_pflipirq;
+extern int radeon_bapm;
 
 /*
  * Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -967,7 +968,7 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
           unsigned size);
 void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib);
 int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
-               struct radeon_ib *const_ib);
+               struct radeon_ib *const_ib, bool hdp_flush);
 int radeon_ib_pool_init(struct radeon_device *rdev);
 void radeon_ib_pool_fini(struct radeon_device *rdev);
 int radeon_ib_ring_tests(struct radeon_device *rdev);
@@ -977,8 +978,10 @@ bool radeon_ring_supports_scratch_reg(struct radeon_device *rdev,
 void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *cp);
 int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
 int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
-void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp);
-void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp);
+void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp,
+            bool hdp_flush);
+void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp,
+                   bool hdp_flush);
 void radeon_ring_undo(struct radeon_ring *ring);
 void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *cp);
 int radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
......
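radeon.h threads the new hdp_flush flag through the ring-commit and IB-schedule paths. The rule applied consistently throughout the rest of this diff ("Only flush HDP cache for indirect buffers from userspace"): kernel-internal submissions such as ring tests, IB tests and CP init pass false, while user command streams keep flushing the HDP cache so the GPU sees IB contents written by the CPU. Schematically:

    /* kernel-internal ring writes: no HDP flush needed */
    radeon_ring_unlock_commit(rdev, ring, false);

    /* userspace command submission (radeon_cs.c below): flush HDP */
    r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);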
@@ -132,7 +132,8 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
          * the buffers used for read only, which doubles the range
          * to 0 to 31. 32 is reserved for the kernel driver.
          */
-        priority = (r->flags & 0xf) * 2 + !!r->write_domain;
+        priority = (r->flags & RADEON_RELOC_PRIO_MASK) * 2
+               + !!r->write_domain;
 
         /* the first reloc of an UVD job is the msg and that must be in
            VRAM, also but everything into VRAM on AGP cards to avoid
@@ -450,7 +451,7 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
         radeon_vce_note_usage(rdev);
 
     radeon_cs_sync_rings(parser);
-    r = radeon_ib_schedule(rdev, &parser->ib, NULL);
+    r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
     if (r) {
         DRM_ERROR("Failed to schedule IB !\n");
     }
@@ -541,9 +542,9 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
 
     if ((rdev->family >= CHIP_TAHITI) &&
         (parser->chunk_const_ib_idx != -1)) {
-        r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib);
+        r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib, true);
     } else {
-        r = radeon_ib_schedule(rdev, &parser->ib, NULL);
+        r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
     }
 
 out:
......
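The reloc priority hunk is purely a readability fix, replacing the magic 0xf with RADEON_RELOC_PRIO_MASK; the computed value is unchanged. Worked through, per the comment in the hunk:

    /* userspace supplies a 4-bit priority (0-15) in r->flags;
     * doubling it and adding the write bit yields 0-31, and 32
     * stays reserved for the kernel driver */
    priority = (r->flags & RADEON_RELOC_PRIO_MASK) * 2 + !!r->write_domain;
    /* e.g. flags = 0xf with a write domain set: 15 * 2 + 1 == 31 */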
@@ -1680,8 +1680,8 @@ int radeon_gpu_reset(struct radeon_device *rdev)
     radeon_save_bios_scratch_regs(rdev);
     /* block TTM */
     resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
-    radeon_pm_suspend(rdev);
     radeon_suspend(rdev);
+    radeon_hpd_fini(rdev);
 
     for (i = 0; i < RADEON_NUM_RINGS; ++i) {
         ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
@@ -1726,9 +1726,39 @@ int radeon_gpu_reset(struct radeon_device *rdev)
         }
     }
 
-    radeon_pm_resume(rdev);
+    if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
+        /* do dpm late init */
+        r = radeon_pm_late_init(rdev);
+        if (r) {
+            rdev->pm.dpm_enabled = false;
+            DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
+        }
+    } else {
+        /* resume old pm late */
+        radeon_pm_resume(rdev);
+    }
+
+    /* init dig PHYs, disp eng pll */
+    if (rdev->is_atom_bios) {
+        radeon_atom_encoder_init(rdev);
+        radeon_atom_disp_eng_pll_init(rdev);
+        /* turn on the BL */
+        if (rdev->mode_info.bl_encoder) {
+            u8 bl_level = radeon_get_backlight_level(rdev,
+                                 rdev->mode_info.bl_encoder);
+            radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
+                           bl_level);
+        }
+    }
+    /* reset hpd state */
+    radeon_hpd_init(rdev);
+
     drm_helper_resume_force_mode(rdev->ddev);
+
+    /* set the power state here in case we are a PX system or headless */
+    if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
+        radeon_pm_compute_clocks(rdev);
+
     ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
     if (r) {
         /* bad news, how to tell it to userspace ? */
......
...@@ -180,6 +180,7 @@ int radeon_vm_size = 8; ...@@ -180,6 +180,7 @@ int radeon_vm_size = 8;
int radeon_vm_block_size = -1; int radeon_vm_block_size = -1;
int radeon_deep_color = 0; int radeon_deep_color = 0;
int radeon_use_pflipirq = 2; int radeon_use_pflipirq = 2;
int radeon_bapm = -1;
MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers"); MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
module_param_named(no_wb, radeon_no_wb, int, 0444); module_param_named(no_wb, radeon_no_wb, int, 0444);
...@@ -259,6 +260,9 @@ module_param_named(deep_color, radeon_deep_color, int, 0444); ...@@ -259,6 +260,9 @@ module_param_named(deep_color, radeon_deep_color, int, 0444);
MODULE_PARM_DESC(use_pflipirq, "Pflip irqs for pageflip completion (0 = disable, 1 = as fallback, 2 = exclusive (default))"); MODULE_PARM_DESC(use_pflipirq, "Pflip irqs for pageflip completion (0 = disable, 1 = as fallback, 2 = exclusive (default))");
module_param_named(use_pflipirq, radeon_use_pflipirq, int, 0444); module_param_named(use_pflipirq, radeon_use_pflipirq, int, 0444);
MODULE_PARM_DESC(bapm, "BAPM support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(bapm, radeon_bapm, int, 0444);
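A usage note on the new parameter (the tri-state it feeds is consumed by trinity_dpm_init() further down in this series):

    /* Sketch of the assumed tri-state, mirroring the Trinity hunk below:
     *   radeon_bapm == -1  -> keep the per-board default (MSI quirk)
     *   radeon_bapm ==  0  -> force BAPM off
     *   anything else      -> force BAPM on
     */

From userspace this maps to e.g. booting with radeon.bapm=0.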
static struct pci_device_id pciidlist[] = { static struct pci_device_id pciidlist[] = {
radeon_PCI_IDS radeon_PCI_IDS
}; };
......
...@@ -107,6 +107,7 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib) ...@@ -107,6 +107,7 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
* @rdev: radeon_device pointer * @rdev: radeon_device pointer
* @ib: IB object to schedule * @ib: IB object to schedule
* @const_ib: Const IB to schedule (SI only) * @const_ib: Const IB to schedule (SI only)
* @hdp_flush: Whether or not to perform an HDP cache flush
* *
* Schedule an IB on the associated ring (all asics). * Schedule an IB on the associated ring (all asics).
* Returns 0 on success, error on failure. * Returns 0 on success, error on failure.
...@@ -122,7 +123,7 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib) ...@@ -122,7 +123,7 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
* to SI there was just a DE IB. * to SI there was just a DE IB.
*/ */
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib, int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
struct radeon_ib *const_ib) struct radeon_ib *const_ib, bool hdp_flush)
{ {
struct radeon_ring *ring = &rdev->ring[ib->ring]; struct radeon_ring *ring = &rdev->ring[ib->ring];
int r = 0; int r = 0;
...@@ -176,7 +177,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib, ...@@ -176,7 +177,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
if (ib->vm) if (ib->vm)
radeon_vm_fence(rdev, ib->vm, ib->fence); radeon_vm_fence(rdev, ib->vm, ib->fence);
radeon_ring_unlock_commit(rdev, ring); radeon_ring_unlock_commit(rdev, ring, hdp_flush);
return 0; return 0;
} }
......
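A minimal sketch of the updated calling convention (caller context assumed; the real call sites appear throughout this series): IBs submitted from userspace request the HDP flush, kernel-internal IBs skip it.

    /* Sketch: the new hdp_flush argument at the two kinds of call sites. */
    r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);  /* user CS: flush HDP */
    if (r)
        DRM_ERROR("Failed to schedule IB !\n");

    r = radeon_ib_schedule(rdev, &ib, NULL, false);         /* kernel IB: no flush */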
...@@ -460,10 +460,6 @@ static ssize_t radeon_get_dpm_state(struct device *dev, ...@@ -460,10 +460,6 @@ static ssize_t radeon_get_dpm_state(struct device *dev,
struct radeon_device *rdev = ddev->dev_private; struct radeon_device *rdev = ddev->dev_private;
enum radeon_pm_state_type pm = rdev->pm.dpm.user_state; enum radeon_pm_state_type pm = rdev->pm.dpm.user_state;
if ((rdev->flags & RADEON_IS_PX) &&
(ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return snprintf(buf, PAGE_SIZE, "off\n");
return snprintf(buf, PAGE_SIZE, "%s\n", return snprintf(buf, PAGE_SIZE, "%s\n",
(pm == POWER_STATE_TYPE_BATTERY) ? "battery" : (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
(pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance"); (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
...@@ -477,11 +473,6 @@ static ssize_t radeon_set_dpm_state(struct device *dev, ...@@ -477,11 +473,6 @@ static ssize_t radeon_set_dpm_state(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev); struct drm_device *ddev = dev_get_drvdata(dev);
struct radeon_device *rdev = ddev->dev_private; struct radeon_device *rdev = ddev->dev_private;
/* Can't set dpm state when the card is off */
if ((rdev->flags & RADEON_IS_PX) &&
(ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return -EINVAL;
mutex_lock(&rdev->pm.mutex); mutex_lock(&rdev->pm.mutex);
if (strncmp("battery", buf, strlen("battery")) == 0) if (strncmp("battery", buf, strlen("battery")) == 0)
rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY; rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY;
...@@ -495,7 +486,12 @@ static ssize_t radeon_set_dpm_state(struct device *dev, ...@@ -495,7 +486,12 @@ static ssize_t radeon_set_dpm_state(struct device *dev,
goto fail; goto fail;
} }
mutex_unlock(&rdev->pm.mutex); mutex_unlock(&rdev->pm.mutex);
radeon_pm_compute_clocks(rdev);
/* Can't set dpm state when the card is off */
if (!(rdev->flags & RADEON_IS_PX) ||
(ddev->switch_power_state == DRM_SWITCH_POWER_ON))
radeon_pm_compute_clocks(rdev);
fail: fail:
return count; return count;
} }
......
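For reference, the handlers above back the power_dpm_state sysfs attribute; assuming the standard layout, writing one of the three state strings, e.g. echo battery > /sys/class/drm/card0/device/power_dpm_state, now always records user_state, while the clock recompute is skipped only while a PX card is powered off.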
...@@ -177,16 +177,18 @@ int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsig ...@@ -177,16 +177,18 @@ int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsig
* *
* @rdev: radeon_device pointer * @rdev: radeon_device pointer
* @ring: radeon_ring structure holding ring information * @ring: radeon_ring structure holding ring information
* @hdp_flush: Whether or not to perform an HDP cache flush
* *
* Update the wptr (write pointer) to tell the GPU to * Update the wptr (write pointer) to tell the GPU to
* execute new commands on the ring buffer (all asics). * execute new commands on the ring buffer (all asics).
*/ */
void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring) void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring,
bool hdp_flush)
{ {
/* If we are emitting the HDP flush via the ring buffer, we need to /* If we are emitting the HDP flush via the ring buffer, we need to
* do it before padding. * do it before padding.
*/ */
if (rdev->asic->ring[ring->idx]->hdp_flush) if (hdp_flush && rdev->asic->ring[ring->idx]->hdp_flush)
rdev->asic->ring[ring->idx]->hdp_flush(rdev, ring); rdev->asic->ring[ring->idx]->hdp_flush(rdev, ring);
/* We pad to match fetch size */ /* We pad to match fetch size */
while (ring->wptr & ring->align_mask) { while (ring->wptr & ring->align_mask) {
...@@ -196,7 +198,7 @@ void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring) ...@@ -196,7 +198,7 @@ void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
/* If we are emitting the HDP flush via MMIO, we need to do it after /* If we are emitting the HDP flush via MMIO, we need to do it after
* all CPU writes to VRAM finished. * all CPU writes to VRAM finished.
*/ */
if (rdev->asic->mmio_hdp_flush) if (hdp_flush && rdev->asic->mmio_hdp_flush)
rdev->asic->mmio_hdp_flush(rdev); rdev->asic->mmio_hdp_flush(rdev);
radeon_ring_set_wptr(rdev, ring); radeon_ring_set_wptr(rdev, ring);
} }
...@@ -207,12 +209,14 @@ void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring) ...@@ -207,12 +209,14 @@ void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
* *
* @rdev: radeon_device pointer * @rdev: radeon_device pointer
* @ring: radeon_ring structure holding ring information * @ring: radeon_ring structure holding ring information
* @hdp_flush: Whether or not to perform an HDP cache flush
* *
* Call radeon_ring_commit() then unlock the ring (all asics). * Call radeon_ring_commit() then unlock the ring (all asics).
*/ */
void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring) void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring,
bool hdp_flush)
{ {
radeon_ring_commit(rdev, ring); radeon_ring_commit(rdev, ring, hdp_flush);
mutex_unlock(&rdev->ring_lock); mutex_unlock(&rdev->ring_lock);
} }
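Every committer in the driver is touched by this plumbing; a minimal sketch of the usual pattern with the new flag (assuming a caller whose writes need no HDP flush, as in the ring tests below):

    r = radeon_ring_lock(rdev, ring, 64);
    if (r)
        return r;
    radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
    radeon_ring_write(ring, 0xDEADBEEF);
    radeon_ring_unlock_commit(rdev, ring, false); /* no HDP flush needed */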
...@@ -372,7 +376,7 @@ int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring, ...@@ -372,7 +376,7 @@ int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
radeon_ring_write(ring, data[i]); radeon_ring_write(ring, data[i]);
} }
radeon_ring_unlock_commit(rdev, ring); radeon_ring_unlock_commit(rdev, ring, false);
kfree(data); kfree(data);
return 0; return 0;
} }
...@@ -400,9 +404,7 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsig ...@@ -400,9 +404,7 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsig
/* Allocate ring buffer */ /* Allocate ring buffer */
if (ring->ring_obj == NULL) { if (ring->ring_obj == NULL) {
r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true, r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_GTT, RADEON_GEM_DOMAIN_GTT, 0,
(rdev->flags & RADEON_IS_PCIE) ?
RADEON_GEM_GTT_WC : 0,
NULL, &ring->ring_obj); NULL, &ring->ring_obj);
if (r) { if (r) {
dev_err(rdev->dev, "(%d) ring create failed\n", r); dev_err(rdev->dev, "(%d) ring create failed\n", r);
......
...@@ -179,7 +179,7 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev, ...@@ -179,7 +179,7 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev,
continue; continue;
} }
radeon_ring_commit(rdev, &rdev->ring[i]); radeon_ring_commit(rdev, &rdev->ring[i], false);
radeon_fence_note_sync(fence, ring); radeon_fence_note_sync(fence, ring);
semaphore->gpu_addr += 8; semaphore->gpu_addr += 8;
......
...@@ -288,7 +288,7 @@ static int radeon_test_create_and_emit_fence(struct radeon_device *rdev, ...@@ -288,7 +288,7 @@ static int radeon_test_create_and_emit_fence(struct radeon_device *rdev,
return r; return r;
} }
radeon_fence_emit(rdev, fence, ring->idx); radeon_fence_emit(rdev, fence, ring->idx);
radeon_ring_unlock_commit(rdev, ring); radeon_ring_unlock_commit(rdev, ring, false);
} }
return 0; return 0;
} }
...@@ -313,7 +313,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev, ...@@ -313,7 +313,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
goto out_cleanup; goto out_cleanup;
} }
radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore); radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
radeon_ring_unlock_commit(rdev, ringA); radeon_ring_unlock_commit(rdev, ringA, false);
r = radeon_test_create_and_emit_fence(rdev, ringA, &fence1); r = radeon_test_create_and_emit_fence(rdev, ringA, &fence1);
if (r) if (r)
...@@ -325,7 +325,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev, ...@@ -325,7 +325,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
goto out_cleanup; goto out_cleanup;
} }
radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore); radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
radeon_ring_unlock_commit(rdev, ringA); radeon_ring_unlock_commit(rdev, ringA, false);
r = radeon_test_create_and_emit_fence(rdev, ringA, &fence2); r = radeon_test_create_and_emit_fence(rdev, ringA, &fence2);
if (r) if (r)
...@@ -344,7 +344,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev, ...@@ -344,7 +344,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
goto out_cleanup; goto out_cleanup;
} }
radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore); radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
radeon_ring_unlock_commit(rdev, ringB); radeon_ring_unlock_commit(rdev, ringB, false);
r = radeon_fence_wait(fence1, false); r = radeon_fence_wait(fence1, false);
if (r) { if (r) {
...@@ -365,7 +365,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev, ...@@ -365,7 +365,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
goto out_cleanup; goto out_cleanup;
} }
radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore); radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
radeon_ring_unlock_commit(rdev, ringB); radeon_ring_unlock_commit(rdev, ringB, false);
r = radeon_fence_wait(fence2, false); r = radeon_fence_wait(fence2, false);
if (r) { if (r) {
...@@ -408,7 +408,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev, ...@@ -408,7 +408,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev,
goto out_cleanup; goto out_cleanup;
} }
radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore); radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
radeon_ring_unlock_commit(rdev, ringA); radeon_ring_unlock_commit(rdev, ringA, false);
r = radeon_test_create_and_emit_fence(rdev, ringA, &fenceA); r = radeon_test_create_and_emit_fence(rdev, ringA, &fenceA);
if (r) if (r)
...@@ -420,7 +420,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev, ...@@ -420,7 +420,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev,
goto out_cleanup; goto out_cleanup;
} }
radeon_semaphore_emit_wait(rdev, ringB->idx, semaphore); radeon_semaphore_emit_wait(rdev, ringB->idx, semaphore);
radeon_ring_unlock_commit(rdev, ringB); radeon_ring_unlock_commit(rdev, ringB, false);
r = radeon_test_create_and_emit_fence(rdev, ringB, &fenceB); r = radeon_test_create_and_emit_fence(rdev, ringB, &fenceB);
if (r) if (r)
goto out_cleanup; goto out_cleanup;
...@@ -442,7 +442,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev, ...@@ -442,7 +442,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev,
goto out_cleanup; goto out_cleanup;
} }
radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore); radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
radeon_ring_unlock_commit(rdev, ringC); radeon_ring_unlock_commit(rdev, ringC, false);
for (i = 0; i < 30; ++i) { for (i = 0; i < 30; ++i) {
mdelay(100); mdelay(100);
...@@ -468,7 +468,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev, ...@@ -468,7 +468,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev,
goto out_cleanup; goto out_cleanup;
} }
radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore); radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
radeon_ring_unlock_commit(rdev, ringC); radeon_ring_unlock_commit(rdev, ringC, false);
mdelay(1000); mdelay(1000);
......
...@@ -646,7 +646,7 @@ static int radeon_uvd_send_msg(struct radeon_device *rdev, ...@@ -646,7 +646,7 @@ static int radeon_uvd_send_msg(struct radeon_device *rdev,
ib.ptr[i] = PACKET2(0); ib.ptr[i] = PACKET2(0);
ib.length_dw = 16; ib.length_dw = 16;
r = radeon_ib_schedule(rdev, &ib, NULL); r = radeon_ib_schedule(rdev, &ib, NULL, false);
if (r) if (r)
goto err; goto err;
ttm_eu_fence_buffer_objects(&ticket, &head, ib.fence); ttm_eu_fence_buffer_objects(&ticket, &head, ib.fence);
......
...@@ -368,7 +368,7 @@ int radeon_vce_get_create_msg(struct radeon_device *rdev, int ring, ...@@ -368,7 +368,7 @@ int radeon_vce_get_create_msg(struct radeon_device *rdev, int ring,
for (i = ib.length_dw; i < ib_size_dw; ++i) for (i = ib.length_dw; i < ib_size_dw; ++i)
ib.ptr[i] = 0x0; ib.ptr[i] = 0x0;
r = radeon_ib_schedule(rdev, &ib, NULL); r = radeon_ib_schedule(rdev, &ib, NULL, false);
if (r) { if (r) {
DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
} }
...@@ -425,7 +425,7 @@ int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring, ...@@ -425,7 +425,7 @@ int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,
for (i = ib.length_dw; i < ib_size_dw; ++i) for (i = ib.length_dw; i < ib_size_dw; ++i)
ib.ptr[i] = 0x0; ib.ptr[i] = 0x0;
r = radeon_ib_schedule(rdev, &ib, NULL); r = radeon_ib_schedule(rdev, &ib, NULL, false);
if (r) { if (r) {
DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
} }
...@@ -715,7 +715,7 @@ int radeon_vce_ring_test(struct radeon_device *rdev, struct radeon_ring *ring) ...@@ -715,7 +715,7 @@ int radeon_vce_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
return r; return r;
} }
radeon_ring_write(ring, VCE_CMD_END); radeon_ring_write(ring, VCE_CMD_END);
radeon_ring_unlock_commit(rdev, ring); radeon_ring_unlock_commit(rdev, ring, false);
for (i = 0; i < rdev->usec_timeout; i++) { for (i = 0; i < rdev->usec_timeout; i++) {
if (vce_v1_0_get_rptr(rdev, ring) != rptr) if (vce_v1_0_get_rptr(rdev, ring) != rptr)
......
...@@ -420,7 +420,7 @@ static int radeon_vm_clear_bo(struct radeon_device *rdev, ...@@ -420,7 +420,7 @@ static int radeon_vm_clear_bo(struct radeon_device *rdev,
radeon_asic_vm_pad_ib(rdev, &ib); radeon_asic_vm_pad_ib(rdev, &ib);
WARN_ON(ib.length_dw > 64); WARN_ON(ib.length_dw > 64);
r = radeon_ib_schedule(rdev, &ib, NULL); r = radeon_ib_schedule(rdev, &ib, NULL, false);
if (r) if (r)
goto error; goto error;
...@@ -483,6 +483,10 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev, ...@@ -483,6 +483,10 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
/* add a clone of the bo_va to clear the old address */ /* add a clone of the bo_va to clear the old address */
struct radeon_bo_va *tmp; struct radeon_bo_va *tmp;
tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL); tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
if (!tmp) {
mutex_unlock(&vm->mutex);
return -ENOMEM;
}
tmp->it.start = bo_va->it.start; tmp->it.start = bo_va->it.start;
tmp->it.last = bo_va->it.last; tmp->it.last = bo_va->it.last;
tmp->vm = vm; tmp->vm = vm;
...@@ -693,7 +697,7 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev, ...@@ -693,7 +697,7 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
radeon_semaphore_sync_to(ib.semaphore, pd->tbo.sync_obj); radeon_semaphore_sync_to(ib.semaphore, pd->tbo.sync_obj);
radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use); radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use);
WARN_ON(ib.length_dw > ndw); WARN_ON(ib.length_dw > ndw);
r = radeon_ib_schedule(rdev, &ib, NULL); r = radeon_ib_schedule(rdev, &ib, NULL, false);
if (r) { if (r) {
radeon_ib_free(rdev, &ib); radeon_ib_free(rdev, &ib);
return r; return r;
...@@ -957,7 +961,7 @@ int radeon_vm_bo_update(struct radeon_device *rdev, ...@@ -957,7 +961,7 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
WARN_ON(ib.length_dw > ndw); WARN_ON(ib.length_dw > ndw);
radeon_semaphore_sync_to(ib.semaphore, vm->fence); radeon_semaphore_sync_to(ib.semaphore, vm->fence);
r = radeon_ib_schedule(rdev, &ib, NULL); r = radeon_ib_schedule(rdev, &ib, NULL, false);
if (r) { if (r) {
radeon_ib_free(rdev, &ib); radeon_ib_free(rdev, &ib);
return r; return r;
......
...@@ -124,7 +124,7 @@ void rv515_ring_start(struct radeon_device *rdev, struct radeon_ring *ring) ...@@ -124,7 +124,7 @@ void rv515_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
radeon_ring_write(ring, GEOMETRY_ROUND_NEAREST | COLOR_ROUND_NEAREST); radeon_ring_write(ring, GEOMETRY_ROUND_NEAREST | COLOR_ROUND_NEAREST);
radeon_ring_write(ring, PACKET0(0x20C8, 0)); radeon_ring_write(ring, PACKET0(0x20C8, 0));
radeon_ring_write(ring, 0); radeon_ring_write(ring, 0);
radeon_ring_unlock_commit(rdev, ring); radeon_ring_unlock_commit(rdev, ring, false);
} }
int rv515_mc_wait_for_idle(struct radeon_device *rdev) int rv515_mc_wait_for_idle(struct radeon_device *rdev)
......
...@@ -90,7 +90,7 @@ int rv770_copy_dma(struct radeon_device *rdev, ...@@ -90,7 +90,7 @@ int rv770_copy_dma(struct radeon_device *rdev,
return r; return r;
} }
radeon_ring_unlock_commit(rdev, ring); radeon_ring_unlock_commit(rdev, ring, false);
radeon_semaphore_free(rdev, &sem, *fence); radeon_semaphore_free(rdev, &sem, *fence);
return r; return r;
......
...@@ -3057,7 +3057,7 @@ static void si_gpu_init(struct radeon_device *rdev) ...@@ -3057,7 +3057,7 @@ static void si_gpu_init(struct radeon_device *rdev)
u32 sx_debug_1; u32 sx_debug_1;
u32 hdp_host_path_cntl; u32 hdp_host_path_cntl;
u32 tmp; u32 tmp;
int i, j, k; int i, j;
switch (rdev->family) { switch (rdev->family) {
case CHIP_TAHITI: case CHIP_TAHITI:
...@@ -3255,12 +3255,11 @@ static void si_gpu_init(struct radeon_device *rdev) ...@@ -3255,12 +3255,11 @@ static void si_gpu_init(struct radeon_device *rdev)
rdev->config.si.max_sh_per_se, rdev->config.si.max_sh_per_se,
rdev->config.si.max_cu_per_sh); rdev->config.si.max_cu_per_sh);
rdev->config.si.active_cus = 0;
for (i = 0; i < rdev->config.si.max_shader_engines; i++) { for (i = 0; i < rdev->config.si.max_shader_engines; i++) {
for (j = 0; j < rdev->config.si.max_sh_per_se; j++) { for (j = 0; j < rdev->config.si.max_sh_per_se; j++) {
for (k = 0; k < rdev->config.si.max_cu_per_sh; k++) { rdev->config.si.active_cus +=
rdev->config.si.active_cus += hweight32(si_get_cu_active_bitmap(rdev, i, j));
hweight32(si_get_cu_active_bitmap(rdev, i, j));
}
} }
} }
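The fix drops the inner k loop: hweight32() already counts every set bit in the per-SH bitmap, so e.g. an active-CU mask of 0x3ff from si_get_cu_active_bitmap() contributes 10 CUs exactly once; the old code added that count max_cu_per_sh times, inflating active_cus.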
...@@ -3541,7 +3540,7 @@ static int si_cp_start(struct radeon_device *rdev) ...@@ -3541,7 +3540,7 @@ static int si_cp_start(struct radeon_device *rdev)
radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE)); radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
radeon_ring_write(ring, 0xc000); radeon_ring_write(ring, 0xc000);
radeon_ring_write(ring, 0xe000); radeon_ring_write(ring, 0xe000);
radeon_ring_unlock_commit(rdev, ring); radeon_ring_unlock_commit(rdev, ring, false);
si_cp_enable(rdev, true); si_cp_enable(rdev, true);
...@@ -3570,7 +3569,7 @@ static int si_cp_start(struct radeon_device *rdev) ...@@ -3570,7 +3569,7 @@ static int si_cp_start(struct radeon_device *rdev)
radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */ radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */ radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
radeon_ring_unlock_commit(rdev, ring); radeon_ring_unlock_commit(rdev, ring, false);
for (i = RADEON_RING_TYPE_GFX_INDEX; i <= CAYMAN_RING_TYPE_CP2_INDEX; ++i) { for (i = RADEON_RING_TYPE_GFX_INDEX; i <= CAYMAN_RING_TYPE_CP2_INDEX; ++i) {
ring = &rdev->ring[i]; ring = &rdev->ring[i];
...@@ -3580,7 +3579,7 @@ static int si_cp_start(struct radeon_device *rdev) ...@@ -3580,7 +3579,7 @@ static int si_cp_start(struct radeon_device *rdev)
radeon_ring_write(ring, PACKET3_COMPUTE(PACKET3_CLEAR_STATE, 0)); radeon_ring_write(ring, PACKET3_COMPUTE(PACKET3_CLEAR_STATE, 0));
radeon_ring_write(ring, 0); radeon_ring_write(ring, 0);
radeon_ring_unlock_commit(rdev, ring); radeon_ring_unlock_commit(rdev, ring, false);
} }
return 0; return 0;
...@@ -5028,7 +5027,7 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) ...@@ -5028,7 +5027,7 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
/* flush hdp cache */ /* flush hdp cache */
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
WRITE_DATA_DST_SEL(0))); WRITE_DATA_DST_SEL(0)));
radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2); radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
radeon_ring_write(ring, 0); radeon_ring_write(ring, 0);
...@@ -5036,7 +5035,7 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) ...@@ -5036,7 +5035,7 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
/* bits 0-15 are the VM contexts0-15 */ /* bits 0-15 are the VM contexts0-15 */
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
WRITE_DATA_DST_SEL(0))); WRITE_DATA_DST_SEL(0)));
radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2); radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
radeon_ring_write(ring, 0); radeon_ring_write(ring, 0);
......
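The functional change in these two hunks is the WRITE_DATA engine select flipping from 0 to 1. Assuming the usual sid.h encoding comment (0 = ME, 1 = PFP, 2 = CE), the HDP flush and VM invalidate writes are now executed by the PFP rather than the ME, keeping the PFP in step after CP semaphore waits (see the 'Sync ME and PFP' commit in this pull).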
...@@ -275,7 +275,7 @@ int si_copy_dma(struct radeon_device *rdev, ...@@ -275,7 +275,7 @@ int si_copy_dma(struct radeon_device *rdev,
return r; return r;
} }
radeon_ring_unlock_commit(rdev, ring); radeon_ring_unlock_commit(rdev, ring, false);
radeon_semaphore_free(rdev, &sem, *fence); radeon_semaphore_free(rdev, &sem, *fence);
return r; return r;
......
...@@ -1874,16 +1874,22 @@ int trinity_dpm_init(struct radeon_device *rdev) ...@@ -1874,16 +1874,22 @@ int trinity_dpm_init(struct radeon_device *rdev)
for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
pi->at[i] = TRINITY_AT_DFLT; pi->at[i] = TRINITY_AT_DFLT;
/* There are stability issues reported on with if (radeon_bapm == -1) {
* bapm enabled when switching between AC and battery /* There are stability issues reported on with
* power. At the same time, some MSI boards hang * bapm enabled when switching between AC and battery
* if it's not enabled and dpm is enabled. Just enable * power. At the same time, some MSI boards hang
* it for MSI boards right now. * if it's not enabled and dpm is enabled. Just enable
*/ * it for MSI boards right now.
if (rdev->pdev->subsystem_vendor == 0x1462) */
pi->enable_bapm = true; if (rdev->pdev->subsystem_vendor == 0x1462)
else pi->enable_bapm = true;
else
pi->enable_bapm = false;
} else if (radeon_bapm == 0) {
pi->enable_bapm = false; pi->enable_bapm = false;
} else {
pi->enable_bapm = true;
}
pi->enable_nbps_policy = true; pi->enable_nbps_policy = true;
pi->enable_sclk_ds = true; pi->enable_sclk_ds = true;
pi->enable_gfx_power_gating = true; pi->enable_gfx_power_gating = true;
......
...@@ -124,7 +124,7 @@ int uvd_v1_0_init(struct radeon_device *rdev) ...@@ -124,7 +124,7 @@ int uvd_v1_0_init(struct radeon_device *rdev)
radeon_ring_write(ring, PACKET0(UVD_SEMA_CNTL, 0)); radeon_ring_write(ring, PACKET0(UVD_SEMA_CNTL, 0));
radeon_ring_write(ring, 3); radeon_ring_write(ring, 3);
radeon_ring_unlock_commit(rdev, ring); radeon_ring_unlock_commit(rdev, ring, false);
done: done:
/* lower clocks again */ /* lower clocks again */
...@@ -331,7 +331,7 @@ int uvd_v1_0_ring_test(struct radeon_device *rdev, struct radeon_ring *ring) ...@@ -331,7 +331,7 @@ int uvd_v1_0_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
} }
radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0)); radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
radeon_ring_write(ring, 0xDEADBEEF); radeon_ring_write(ring, 0xDEADBEEF);
radeon_ring_unlock_commit(rdev, ring); radeon_ring_unlock_commit(rdev, ring, false);
for (i = 0; i < rdev->usec_timeout; i++) { for (i = 0; i < rdev->usec_timeout; i++) {
tmp = RREG32(UVD_CONTEXT_ID); tmp = RREG32(UVD_CONTEXT_ID);
if (tmp == 0xDEADBEEF) if (tmp == 0xDEADBEEF)
......
...@@ -17,6 +17,7 @@ ...@@ -17,6 +17,7 @@
{0x1002, 0x1315, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x1315, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x1316, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x1316, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x1317, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x1317, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x1318, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x131B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x131B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x131C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x131C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x131D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x131D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
...@@ -164,8 +165,11 @@ ...@@ -164,8 +165,11 @@
{0x1002, 0x6601, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6601, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6602, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6602, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6603, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6603, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6604, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6605, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6606, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6606, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6607, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6607, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6608, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
...@@ -175,6 +179,8 @@ ...@@ -175,6 +179,8 @@
{0x1002, 0x6631, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6631, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6640, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6640, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6641, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6641, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6650, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6650, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
...@@ -297,6 +303,7 @@ ...@@ -297,6 +303,7 @@
{0x1002, 0x6829, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6829, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
{0x1002, 0x682A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x682A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x682B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x682B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x682C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
{0x1002, 0x682D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x682D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x682F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x682F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
......
...@@ -944,6 +944,7 @@ struct drm_radeon_cs_chunk { ...@@ -944,6 +944,7 @@ struct drm_radeon_cs_chunk {
}; };
/* drm_radeon_cs_reloc.flags */ /* drm_radeon_cs_reloc.flags */
#define RADEON_RELOC_PRIO_MASK (0xf << 0)
struct drm_radeon_cs_reloc { struct drm_radeon_cs_reloc {
uint32_t handle; uint32_t handle;
......