Commit 6bb340c7 authored by Linus Torvalds

Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
 "Just regular fixes, bunch from intel, quieting some of the over
  zealous power warnings, and the rest just misc.

  I've got another pull with the remaining dma-buf bits, since the vmap
  bits are in your tree now.  I'll send tomorrow just to space things
  out a bit."

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux: (22 commits)
  drm/edid/quirks: ViewSonic VA2026w
  drm/udl: remove unused variables.
  drm/radeon: fix XFX quirk
  drm: Use stdint types for consistency
  drm: Constify params to format_check() and framebuffer_checks()
  drm/radeon: fix typo in trinity tiling setup
  drm/udl: unlock before returning in udl_gem_mmap()
  radeon: make radeon_cs_update_pages static.
  drm/i915: tune down the noise of the RP irq limit fail
  drm/i915: Remove the error message for unbinding pinned buffers
  drm/i915: Limit page allocations to lowmem (dma32) for i965
  drm/i915: always use RPNSWREQ for turbo change requests
  drm/i915: reject doubleclocked cea modes on dp
  drm/i915: Adding TV Out Missing modes.
  drm/i915: wait for a vblank to pass after tv detect
  drm/i915: no lvds quirk for HP t5740e Thin Client
  drm/i915: enable vdd when switching off the eDP panel
  drm/i915: Fix PCH PLL assertions to not assume CRTC:PLL relationship
  drm/i915: Always update RPS interrupts thresholds along with frequency
  drm/i915: properly handle interlaced bit for sdvo dtd conversion
  ...
@@ -2116,7 +2116,7 @@ int drm_mode_addfb(struct drm_device *dev,
         return ret;
 }
 
-static int format_check(struct drm_mode_fb_cmd2 *r)
+static int format_check(const struct drm_mode_fb_cmd2 *r)
 {
         uint32_t format = r->pixel_format & ~DRM_FORMAT_BIG_ENDIAN;
@@ -2185,7 +2185,7 @@ static int format_check(struct drm_mode_fb_cmd2 *r)
         }
 }
 
-static int framebuffer_check(struct drm_mode_fb_cmd2 *r)
+static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
 {
         int ret, hsub, vsub, num_planes, i;
@@ -3126,7 +3126,7 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
 EXPORT_SYMBOL(drm_mode_connector_update_edid_property);
 
 static bool drm_property_change_is_valid(struct drm_property *property,
-                                         __u64 value)
+                                         uint64_t value)
 {
         if (property->flags & DRM_MODE_PROP_IMMUTABLE)
                 return false;
@@ -3136,7 +3136,7 @@ static bool drm_property_change_is_valid(struct drm_property *property,
                 return true;
         } else if (property->flags & DRM_MODE_PROP_BITMASK) {
                 int i;
-                __u64 valid_mask = 0;
+                uint64_t valid_mask = 0;
 
                 for (i = 0; i < property->num_values; i++)
                         valid_mask |= (1ULL << property->values[i]);
                 return !(value & ~valid_mask);
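
A note on the bitmask check above: drm_property_change_is_valid() folds the property's legal bit positions into a single mask and accepts a value only if it sets no bit outside that mask. A minimal standalone sketch of the same check (the list of permitted bits is made up; this is not the DRM API):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the property's list of permitted bit positions. */
static const uint64_t valid_bits[] = { 0, 1, 4 };

static int bitmask_value_is_valid(uint64_t value)
{
        uint64_t valid_mask = 0;
        unsigned int i;

        for (i = 0; i < sizeof(valid_bits) / sizeof(valid_bits[0]); i++)
                valid_mask |= 1ULL << valid_bits[i];

        /* Reject any value that sets a bit outside the permitted mask. */
        return !(value & ~valid_mask);
}

int main(void)
{
        printf("%d\n", bitmask_value_is_valid(0x03)); /* 1: bits 0 and 1 allowed */
        printf("%d\n", bitmask_value_is_valid(0x08)); /* 0: bit 3 not in the list */
        return 0;
}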
......
@@ -66,6 +66,8 @@
 #define EDID_QUIRK_FIRST_DETAILED_PREFERRED (1 << 5)
 /* use +hsync +vsync for detailed mode */
 #define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6)
+/* Force reduced-blanking timings for detailed modes */
+#define EDID_QUIRK_FORCE_REDUCED_BLANKING (1 << 7)
 
 struct detailed_mode_closure {
         struct drm_connector *connector;
@@ -120,6 +122,9 @@ static struct edid_quirk {
         /* Samsung SyncMaster 22[5-6]BW */
         { "SAM", 596, EDID_QUIRK_PREFER_LARGE_60 },
         { "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 },
+
+        /* ViewSonic VA2026w */
+        { "VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING },
 };
 
 /*** DDC fetch and block validation ***/
@@ -885,12 +890,19 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
                           "Wrong Hsync/Vsync pulse width\n");
                 return NULL;
         }
+
+        if (quirks & EDID_QUIRK_FORCE_REDUCED_BLANKING) {
+                mode = drm_cvt_mode(dev, hactive, vactive, 60, true, false, false);
+                if (!mode)
+                        return NULL;
+
+                goto set_size;
+        }
+
         mode = drm_mode_create(dev);
         if (!mode)
                 return NULL;
 
-        mode->type = DRM_MODE_TYPE_DRIVER;
-
         if (quirks & EDID_QUIRK_135_CLOCK_TOO_HIGH)
                 timing->pixel_clock = cpu_to_le16(1088);
@@ -914,8 +926,6 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
 
         drm_mode_do_interlace_quirk(mode, pt);
 
-        drm_mode_set_name(mode);
-
         if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
                 pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;
         }
@@ -925,6 +935,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
         mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ?
                 DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
 
+set_size:
         mode->width_mm = pt->width_mm_lo | (pt->width_height_mm_hi & 0xf0) << 4;
         mode->height_mm = pt->height_mm_lo | (pt->width_height_mm_hi & 0xf) << 8;
@@ -938,6 +949,9 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
                 mode->height_mm = edid->height_cm * 10;
         }
 
+        mode->type = DRM_MODE_TYPE_DRIVER;
+        drm_mode_set_name(mode);
+
         return mode;
 }
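
For context, edid_quirk entries are matched against the EDID vendor string and product code, e.g. { "VSC", 5020, ... } above. The three-letter vendor ID is packed into two EDID bytes as 5-bit letters; a freestanding decoder (not the kernel helper) illustrates the packing:

#include <stdint.h>
#include <stdio.h>

/* Decode the PNP vendor ID packed into EDID bytes 8 and 9:
 * bits 14-10, 9-5 and 4-0 hold three letters, 1 = 'A'.
 * (The product code, e.g. 5020 above, follows in bytes 10-11,
 * stored least-significant byte first.) */
static void edid_vendor(const uint8_t *edid, char out[4])
{
        uint16_t id = (uint16_t)((edid[8] << 8) | edid[9]);

        out[0] = '@' + ((id >> 10) & 0x1f);
        out[1] = '@' + ((id >> 5) & 0x1f);
        out[2] = '@' + (id & 0x1f);
        out[3] = '\0';
}

int main(void)
{
        /* 0x5a63 encodes (22, 19, 3), i.e. "VSC" (ViewSonic). */
        uint8_t edid[16] = { 0 };
        char vendor[4];

        edid[8] = 0x5a;
        edid[9] = 0x63;
        edid_vendor(edid, vendor);
        printf("%s\n", vendor); /* prints VSC */
        return 0;
}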
......
@@ -2032,6 +2032,8 @@ void i915_debugfs_cleanup(struct drm_minor *minor)
                                  1, minor);
         drm_debugfs_remove_files((struct drm_info_list *) &i915_ring_stop_fops,
                                  1, minor);
+        drm_debugfs_remove_files((struct drm_info_list *) &i915_error_state_fops,
+                                 1, minor);
 }
 
 #endif /* CONFIG_DEBUG_FS */
@@ -2063,10 +2063,8 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
         if (obj->gtt_space == NULL)
                 return 0;
 
-        if (obj->pin_count != 0) {
-                DRM_ERROR("Attempting to unbind pinned buffer\n");
-                return -EINVAL;
-        }
+        if (obj->pin_count)
+                return -EBUSY;
 
         ret = i915_gem_object_finish_gpu(obj);
         if (ret)
@@ -3293,6 +3291,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
         struct drm_i915_private *dev_priv = dev->dev_private;
         struct drm_i915_gem_object *obj;
         struct address_space *mapping;
+        u32 mask;
 
         obj = kzalloc(sizeof(*obj), GFP_KERNEL);
         if (obj == NULL)
@@ -3303,8 +3302,15 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
                 return NULL;
         }
 
+        mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
+        if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
+                /* 965gm cannot relocate objects above 4GiB. */
+                mask &= ~__GFP_HIGHMEM;
+                mask |= __GFP_DMA32;
+        }
+
         mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
-        mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
+        mapping_set_gfp_mask(mapping, mask);
 
         i915_gem_info_add_obj(dev_priv, size);
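
The i965 fix above steers every backing page of a GEM object below 4GiB by rewriting the mapping's allocation mask: the highmem zone modifier is cleared and the DMA32 zone is requested instead. The same clear-then-set pattern on a flag word as a tiny sketch (the flag values below are invented for illustration, not the kernel's gfp bits):

#include <stdint.h>
#include <stdio.h>

/* Invented stand-ins for the zone bits; not the kernel's gfp flags. */
#define FAKE_GFP_HIGHMEM (1u << 0)
#define FAKE_GFP_DMA32   (1u << 1)
#define FAKE_GFP_USER    (1u << 2)

int main(void)
{
        uint32_t mask = FAKE_GFP_USER | FAKE_GFP_HIGHMEM;
        int needs_dma32 = 1; /* e.g. a chip that cannot address above 4GiB */

        if (needs_dma32) {
                mask &= ~FAKE_GFP_HIGHMEM; /* stop allowing highmem pages */
                mask |= FAKE_GFP_DMA32;    /* require the low 4GiB zone */
        }

        printf("mask=0x%x\n", (unsigned)mask); /* prints mask=0x6 */
        return 0;
}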
......
@@ -350,8 +350,8 @@ static void gen6_pm_rps_work(struct work_struct *work)
 {
         drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
                                                     rps_work);
-        u8 new_delay = dev_priv->cur_delay;
         u32 pm_iir, pm_imr;
+        u8 new_delay;
 
         spin_lock_irq(&dev_priv->rps_lock);
         pm_iir = dev_priv->pm_iir;
@@ -360,41 +360,18 @@ static void gen6_pm_rps_work(struct work_struct *work)
         I915_WRITE(GEN6_PMIMR, 0);
         spin_unlock_irq(&dev_priv->rps_lock);
 
-        if (!pm_iir)
+        if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
                 return;
 
         mutex_lock(&dev_priv->dev->struct_mutex);
-        if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
-                if (dev_priv->cur_delay != dev_priv->max_delay)
-                        new_delay = dev_priv->cur_delay + 1;
-                if (new_delay > dev_priv->max_delay)
-                        new_delay = dev_priv->max_delay;
-        } else if (pm_iir & (GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT)) {
-                gen6_gt_force_wake_get(dev_priv);
-                if (dev_priv->cur_delay != dev_priv->min_delay)
-                        new_delay = dev_priv->cur_delay - 1;
-                if (new_delay < dev_priv->min_delay) {
-                        new_delay = dev_priv->min_delay;
-                        I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
-                                   I915_READ(GEN6_RP_INTERRUPT_LIMITS) |
-                                   ((new_delay << 16) & 0x3f0000));
-                } else {
-                        /* Make sure we continue to get down interrupts
-                         * until we hit the minimum frequency */
-                        I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
-                                   I915_READ(GEN6_RP_INTERRUPT_LIMITS) & ~0x3f0000);
-                }
-                gen6_gt_force_wake_put(dev_priv);
-        }
-        gen6_set_rps(dev_priv->dev, new_delay);
-        dev_priv->cur_delay = new_delay;
 
-        /*
-         * rps_lock not held here because clearing is non-destructive. There is
-         * an *extremely* unlikely race with gen6_rps_enable() that is prevented
-         * by holding struct_mutex for the duration of the write.
-         */
+        if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
+                new_delay = dev_priv->cur_delay + 1;
+        else
+                new_delay = dev_priv->cur_delay - 1;
+
+        gen6_set_rps(dev_priv->dev, new_delay);
+
         mutex_unlock(&dev_priv->dev->struct_mutex);
 }
......
@@ -910,9 +910,10 @@ static void assert_pll(struct drm_i915_private *dev_priv,
 /* For ILK+ */
 static void assert_pch_pll(struct drm_i915_private *dev_priv,
-                           struct intel_crtc *intel_crtc, bool state)
+                           struct intel_pch_pll *pll,
+                           struct intel_crtc *crtc,
+                           bool state)
 {
-        int reg;
         u32 val;
         bool cur_state;
@@ -921,30 +922,37 @@ static void assert_pch_pll(struct drm_i915_private *dev_priv,
                 return;
         }
 
-        if (!intel_crtc->pch_pll) {
-                WARN(1, "asserting PCH PLL enabled with no PLL\n");
+        if (WARN (!pll,
+                  "asserting PCH PLL %s with no PLL\n", state_string(state)))
                 return;
-        }
 
-        if (HAS_PCH_CPT(dev_priv->dev)) {
+        val = I915_READ(pll->pll_reg);
+        cur_state = !!(val & DPLL_VCO_ENABLE);
+        WARN(cur_state != state,
+             "PCH PLL state for reg %x assertion failure (expected %s, current %s), val=%08x\n",
+             pll->pll_reg, state_string(state), state_string(cur_state), val);
+
+        /* Make sure the selected PLL is correctly attached to the transcoder */
+        if (crtc && HAS_PCH_CPT(dev_priv->dev)) {
                 u32 pch_dpll;
 
                 pch_dpll = I915_READ(PCH_DPLL_SEL);
-
-                /* Make sure the selected PLL is enabled to the transcoder */
-                WARN(!((pch_dpll >> (4 * intel_crtc->pipe)) & 8),
-                     "transcoder %d PLL not enabled\n", intel_crtc->pipe);
+                cur_state = pll->pll_reg == _PCH_DPLL_B;
+                if (!WARN(((pch_dpll >> (4 * crtc->pipe)) & 1) != cur_state,
+                          "PLL[%d] not attached to this transcoder %d: %08x\n",
+                          cur_state, crtc->pipe, pch_dpll)) {
+                        cur_state = !!(val >> (4*crtc->pipe + 3));
+                        WARN(cur_state != state,
+                             "PLL[%d] not %s on this transcoder %d: %08x\n",
+                             pll->pll_reg == _PCH_DPLL_B,
+                             state_string(state),
+                             crtc->pipe,
+                             val);
+                }
         }
-
-        reg = intel_crtc->pch_pll->pll_reg;
-        val = I915_READ(reg);
-        cur_state = !!(val & DPLL_VCO_ENABLE);
-        WARN(cur_state != state,
-             "PCH PLL state assertion failure (expected %s, current %s)\n",
-             state_string(state), state_string(cur_state));
 }
 
-#define assert_pch_pll_enabled(d, p) assert_pch_pll(d, p, true)
-#define assert_pch_pll_disabled(d, p) assert_pch_pll(d, p, false)
+#define assert_pch_pll_enabled(d, p, c) assert_pch_pll(d, p, c, true)
+#define assert_pch_pll_disabled(d, p, c) assert_pch_pll(d, p, c, false)
 
 static void assert_fdi_tx(struct drm_i915_private *dev_priv,
                           enum pipe pipe, bool state)
@@ -1424,7 +1432,7 @@ static void intel_enable_pch_pll(struct intel_crtc *intel_crtc)
         assert_pch_refclk_enabled(dev_priv);
 
         if (pll->active++ && pll->on) {
-                assert_pch_pll_enabled(dev_priv, intel_crtc);
+                assert_pch_pll_enabled(dev_priv, pll, NULL);
                 return;
         }
@@ -1460,12 +1468,12 @@ static void intel_disable_pch_pll(struct intel_crtc *intel_crtc)
                       intel_crtc->base.base.id);
 
         if (WARN_ON(pll->active == 0)) {
-                assert_pch_pll_disabled(dev_priv, intel_crtc);
+                assert_pch_pll_disabled(dev_priv, pll, NULL);
                 return;
         }
 
         if (--pll->active) {
-                assert_pch_pll_enabled(dev_priv, intel_crtc);
+                assert_pch_pll_enabled(dev_priv, pll, NULL);
                 return;
         }
@@ -1495,7 +1503,9 @@ static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
         BUG_ON(dev_priv->info->gen < 5);
 
         /* Make sure PCH DPLL is enabled */
-        assert_pch_pll_enabled(dev_priv, to_intel_crtc(crtc));
+        assert_pch_pll_enabled(dev_priv,
+                               to_intel_crtc(crtc)->pch_pll,
+                               to_intel_crtc(crtc));
 
         /* FDI must be feeding us bits for PCH ports */
         assert_fdi_tx_enabled(dev_priv, pipe);
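
The reworked assertion reads PCH_DPLL_SEL as one 4-bit field per transcoder: bit 0 of the field says which DPLL is attached (0 = A, 1 = B) and bit 3 says the PLL is enabled for that transcoder; the old code only checked bit 3. A freestanding sketch of that decoding (made-up register value, layout as used in the diff):

#include <stdint.h>
#include <stdio.h>

/* Per-transcoder nibble in PCH_DPLL_SEL: bit 0 = DPLL B attached
 * (0 means DPLL A), bit 3 = PLL enabled for this transcoder. */
static void decode_pch_dpll_sel(uint32_t val, int pipe)
{
        uint32_t field = (val >> (4 * pipe)) & 0xf;

        printf("pipe %d: DPLL %c, %s\n", pipe,
               (field & 1) ? 'B' : 'A',
               (field & 8) ? "enabled" : "disabled");
}

int main(void)
{
        uint32_t val = 0x98; /* pipe 0: DPLL A enabled, pipe 1: DPLL B enabled */

        decode_pch_dpll_sel(val, 0);
        decode_pch_dpll_sel(val, 1);
        return 0;
}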
......
@@ -266,6 +266,9 @@ intel_dp_mode_valid(struct drm_connector *connector,
         if (mode->clock < 10000)
                 return MODE_CLOCK_LOW;
 
+        if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+                return MODE_H_ILLEGAL;
+
         return MODE_OK;
 }
@@ -702,6 +705,9 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
                 mode->clock = intel_dp->panel_fixed_mode->clock;
         }
 
+        if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+                return false;
+
         DRM_DEBUG_KMS("DP link computation with max lane count %i "
                       "max bw %02x pixel clock %iKHz\n",
                       max_lane_count, bws[max_clock], mode->clock);
@@ -1154,11 +1160,10 @@ static void ironlake_edp_panel_off(struct intel_dp *intel_dp)
 
         DRM_DEBUG_KMS("Turn eDP power off\n");
 
-        WARN(intel_dp->want_panel_vdd, "Cannot turn power off while VDD is on\n");
-        ironlake_panel_vdd_off_sync(intel_dp); /* finish any pending work */
+        WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
 
         pp = ironlake_get_pp_control(dev_priv);
-        pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
+        pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_BLC_ENABLE);
         I915_WRITE(PCH_PP_CONTROL, pp);
         POSTING_READ(PCH_PP_CONTROL);
@@ -1266,18 +1271,16 @@ static void intel_dp_prepare(struct drm_encoder *encoder)
 {
         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
+        /* Make sure the panel is off before trying to change the mode. But also
+         * ensure that we have vdd while we switch off the panel. */
+        ironlake_edp_panel_vdd_on(intel_dp);
         ironlake_edp_backlight_off(intel_dp);
         ironlake_edp_panel_off(intel_dp);
 
         /* Wake up the sink first */
         ironlake_edp_panel_vdd_on(intel_dp);
         intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
         intel_dp_link_down(intel_dp);
         ironlake_edp_panel_vdd_off(intel_dp, false);
-
-        /* Make sure the panel is off before trying to
-         * change the mode
-         */
 }
@@ -1309,10 +1312,11 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
         uint32_t dp_reg = I915_READ(intel_dp->output_reg);
 
         if (mode != DRM_MODE_DPMS_ON) {
+                /* Switching the panel off requires vdd. */
+                ironlake_edp_panel_vdd_on(intel_dp);
                 ironlake_edp_backlight_off(intel_dp);
                 ironlake_edp_panel_off(intel_dp);
 
-                ironlake_edp_panel_vdd_on(intel_dp);
                 intel_dp_sink_dpms(intel_dp, mode);
                 intel_dp_link_down(intel_dp);
                 ironlake_edp_panel_vdd_off(intel_dp, false);
......
@@ -396,11 +396,22 @@ gmbus_xfer(struct i2c_adapter *adapter,
          * Wait for bus to IDLE before clearing NAK.
          * If we clear the NAK while bus is still active, then it will stay
          * active and the next transaction may fail.
+         *
+         * If no ACK is received during the address phase of a transaction, the
+         * adapter must report -ENXIO. It is not clear what to return if no ACK
+         * is received at other times. But we have to be careful to not return
+         * spurious -ENXIO because that will prevent i2c and drm edid functions
+         * from retrying. So return -ENXIO only when gmbus properly quiescents -
+         * timing out seems to happen when there _is_ a ddc chip present, but
+         * it's slow responding and only answers on the 2nd retry.
          */
+        ret = -ENXIO;
         if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0,
-                     10))
+                     10)) {
                 DRM_DEBUG_KMS("GMBUS [%s] timed out after NAK\n",
                               adapter->name);
+                ret = -ETIMEDOUT;
+        }
 
         /* Toggle the Software Clear Interrupt bit. This has the effect
          * of resetting the GMBUS controller and so clearing the
@@ -414,14 +425,6 @@ gmbus_xfer(struct i2c_adapter *adapter,
                          adapter->name, msgs[i].addr,
                          (msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len);
 
-        /*
-         * If no ACK is received during the address phase of a transaction,
-         * the adapter must report -ENXIO.
-         * It is not clear what to return if no ACK is received at other times.
-         * So, we always return -ENXIO in all NAK cases, to ensure we send
-         * it at least during the one case that is specified.
-         */
-        ret = -ENXIO;
         goto out;
 
 timeout:
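
The error-code split matters to callers: the i2c core and the EDID code retry transient failures, but treat -ENXIO as a definitive address NAK and give up. A sketch of such a caller-side retry policy in plain C (illustrative only, not the kernel's i2c core):

#include <errno.h>
#include <stdio.h>

/* Pretend transfer: times out twice, then succeeds. A slow DDC
 * chip that only answers on a later attempt behaves like this. */
static int fake_xfer(void)
{
        static int calls;

        return (++calls < 3) ? -ETIMEDOUT : 0;
}

static int xfer_with_retries(int retries)
{
        int i, ret = -EIO;

        for (i = 0; i <= retries; i++) {
                ret = fake_xfer();
                if (ret != -ETIMEDOUT) /* success or a hard error such as -ENXIO */
                        break;
        }
        return ret;
}

int main(void)
{
        printf("result: %d\n", xfer_with_retries(3)); /* 0 after two timeouts */
        return 0;
}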
......
@@ -745,6 +745,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
                         DMI_MATCH(DMI_BOARD_NAME, "AT5NM10T-I"),
                 },
         },
+        {
+                .callback = intel_no_lvds_dmi_callback,
+                .ident = "Hewlett-Packard HP t5740e Thin Client",
+                .matches = {
+                        DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+                        DMI_MATCH(DMI_PRODUCT_NAME, "HP t5740e Thin Client"),
+                },
+        },
         {
                 .callback = intel_no_lvds_dmi_callback,
                 .ident = "Hewlett-Packard t5745",
......
@@ -2270,10 +2270,33 @@ void ironlake_disable_drps(struct drm_device *dev)
 void gen6_set_rps(struct drm_device *dev, u8 val)
 {
         struct drm_i915_private *dev_priv = dev->dev_private;
-        u32 swreq;
+        u32 limits;
 
-        swreq = (val & 0x3ff) << 25;
-        I915_WRITE(GEN6_RPNSWREQ, swreq);
+        limits = 0;
+        if (val >= dev_priv->max_delay)
+                val = dev_priv->max_delay;
+        else
+                limits |= dev_priv->max_delay << 24;
+
+        if (val <= dev_priv->min_delay)
+                val = dev_priv->min_delay;
+        else
+                limits |= dev_priv->min_delay << 16;
+
+        if (val == dev_priv->cur_delay)
+                return;
+
+        I915_WRITE(GEN6_RPNSWREQ,
+                   GEN6_FREQUENCY(val) |
+                   GEN6_OFFSET(0) |
+                   GEN6_AGGRESSIVE_TURBO);
+
+        /* Make sure we continue to get interrupts
+         * until we hit the minimum or maximum frequencies.
+         */
+        I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
+
+        dev_priv->cur_delay = val;
 }
 
 void gen6_disable_rps(struct drm_device *dev)
@@ -2327,11 +2350,10 @@ int intel_enable_rc6(const struct drm_device *dev)
 void gen6_enable_rps(struct drm_i915_private *dev_priv)
 {
         struct intel_ring_buffer *ring;
-        u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
-        u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
+        u32 rp_state_cap;
+        u32 gt_perf_status;
         u32 pcu_mbox, rc6_mask = 0;
         u32 gtfifodbg;
-        int cur_freq, min_freq, max_freq;
         int rc6_mode;
         int i;
@@ -2352,6 +2374,14 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
 
         gen6_gt_force_wake_get(dev_priv);
 
+        rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+        gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
+
+        /* In units of 100MHz */
+        dev_priv->max_delay = rp_state_cap & 0xff;
+        dev_priv->min_delay = (rp_state_cap & 0xff0000) >> 16;
+        dev_priv->cur_delay = 0;
+
         /* disable the counters and set deterministic thresholds */
         I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -2399,8 +2429,8 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
 
         I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
         I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
-                   18 << 24 |
-                   6 << 16);
+                   dev_priv->max_delay << 24 |
+                   dev_priv->min_delay << 16);
         I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
         I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
         I915_WRITE(GEN6_RP_UP_EI, 100000);
@@ -2408,7 +2438,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
         I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
         I915_WRITE(GEN6_RP_CONTROL,
                    GEN6_RP_MEDIA_TURBO |
-                   GEN6_RP_MEDIA_HW_MODE |
+                   GEN6_RP_MEDIA_HW_NORMAL_MODE |
                    GEN6_RP_MEDIA_IS_GFX |
                    GEN6_RP_ENABLE |
                    GEN6_RP_UP_BUSY_AVG |
@@ -2426,10 +2456,6 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
                      500))
                 DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
 
-        min_freq = (rp_state_cap & 0xff0000) >> 16;
-        max_freq = rp_state_cap & 0xff;
-        cur_freq = (gt_perf_status & 0xff00) >> 8;
-
         /* Check for overclock support */
         if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
                      500))
@@ -2440,14 +2466,11 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
                      500))
                 DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
         if (pcu_mbox & (1<<31)) { /* OC supported */
-                max_freq = pcu_mbox & 0xff;
+                dev_priv->max_delay = pcu_mbox & 0xff;
                 DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
         }
 
-        /* In units of 100MHz */
-        dev_priv->max_delay = max_freq;
-        dev_priv->min_delay = min_freq;
-        dev_priv->cur_delay = cur_freq;
+        gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8);
 
         /* requires MSI enabled */
         I915_WRITE(GEN6_PMIER,
@@ -3580,8 +3603,9 @@ static void gen6_sanitize_pm(struct drm_device *dev)
         limits |= (dev_priv->min_delay & 0x3f) << 16;
 
         if (old != limits) {
-                DRM_ERROR("Power management discrepancy: GEN6_RP_INTERRUPT_LIMITS expected %08x, was %08x\n",
-                          limits, old);
+                /* Note that the known failure case is to read back 0. */
+                DRM_DEBUG_DRIVER("Power management discrepancy: GEN6_RP_INTERRUPT_LIMITS "
+                                 "expected %08x, was %08x\n", limits, old);
                 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
         }
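
The clamp and the interrupt-limits word in gen6_set_rps() work as a pair: the requested step is clamped into [min_delay, max_delay], and a bound is programmed only while there is still headroom in that direction, so up/down interrupts keep arriving until an endpoint is reached. The same arithmetic as a freestanding sketch (field positions 24 and 16 as in the diff; the delay values are invented):

#include <stdint.h>
#include <stdio.h>

static uint8_t min_delay = 6, max_delay = 18;

/* Clamp the requested step and build the interrupt-limits word:
 * max bound in bits 31:24, min bound in bits 23:16, each side set
 * only while the value can still move in that direction. */
static uint32_t rps_limits(uint8_t *val)
{
        uint32_t limits = 0;

        if (*val >= max_delay)
                *val = max_delay;
        else
                limits |= (uint32_t)max_delay << 24;

        if (*val <= min_delay)
                *val = min_delay;
        else
                limits |= (uint32_t)min_delay << 16;

        return limits;
}

int main(void)
{
        uint8_t val = 11; /* e.g. cur_delay + 1 after an up interrupt */
        uint32_t limits = rps_limits(&val);

        printf("val=%u limits=0x%08x\n", (unsigned)val, (unsigned)limits);
        /* prints val=11 limits=0x12060000 */
        return 0;
}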
......
@@ -783,10 +783,12 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
                 ((v_sync_len & 0x30) >> 4);
 
         dtd->part2.dtd_flags = 0x18;
+        if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+                dtd->part2.dtd_flags |= DTD_FLAG_INTERLACE;
         if (mode->flags & DRM_MODE_FLAG_PHSYNC)
-                dtd->part2.dtd_flags |= 0x2;
+                dtd->part2.dtd_flags |= DTD_FLAG_HSYNC_POSITIVE;
         if (mode->flags & DRM_MODE_FLAG_PVSYNC)
-                dtd->part2.dtd_flags |= 0x4;
+                dtd->part2.dtd_flags |= DTD_FLAG_VSYNC_POSITIVE;
 
         dtd->part2.sdvo_flags = 0;
         dtd->part2.v_sync_off_high = v_sync_offset & 0xc0;
@@ -820,9 +822,11 @@ static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode,
         mode->clock = dtd->part1.clock * 10;
 
         mode->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
-        if (dtd->part2.dtd_flags & 0x2)
+        if (dtd->part2.dtd_flags & DTD_FLAG_INTERLACE)
                 mode->flags |= DRM_MODE_FLAG_INTERLACE;
+        if (dtd->part2.dtd_flags & DTD_FLAG_HSYNC_POSITIVE)
+                mode->flags |= DRM_MODE_FLAG_PHSYNC;
-        if (dtd->part2.dtd_flags & 0x4)
+        if (dtd->part2.dtd_flags & DTD_FLAG_VSYNC_POSITIVE)
                 mode->flags |= DRM_MODE_FLAG_PVSYNC;
 }
......
@@ -61,6 +61,11 @@ struct intel_sdvo_caps {
         u16 output_flags;
 } __attribute__((packed));
 
+/* Note: SDVO detailed timing flags match EDID misc flags. */
+#define DTD_FLAG_HSYNC_POSITIVE (1 << 1)
+#define DTD_FLAG_VSYNC_POSITIVE (1 << 2)
+#define DTD_FLAG_INTERLACE (1 << 7)
+
 /** This matches the EDID DTD structure, more or less */
 struct intel_sdvo_dtd {
         struct {
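
The bug fixed above: the old decode path tested 0x2, which is the hsync-polarity bit, when deciding whether a DTD described an interlaced mode. With the named flags the mapping is explicit. A freestanding sketch of the encode direction (DTD flag values from the header above; the mode-flag values are stand-ins, not the real DRM constants):

#include <stdint.h>
#include <stdio.h>

/* DTD flag values from the header above. */
#define DTD_FLAG_HSYNC_POSITIVE (1 << 1)
#define DTD_FLAG_VSYNC_POSITIVE (1 << 2)
#define DTD_FLAG_INTERLACE      (1 << 7)

/* Stand-ins for the DRM mode flags; not the real constants. */
#define MODE_FLAG_PHSYNC    (1 << 0)
#define MODE_FLAG_PVSYNC    (1 << 1)
#define MODE_FLAG_INTERLACE (1 << 2)

static uint8_t dtd_flags_from_mode(uint32_t mode_flags)
{
        uint8_t dtd = 0x18; /* base value, as in the driver */

        if (mode_flags & MODE_FLAG_INTERLACE)
                dtd |= DTD_FLAG_INTERLACE;
        if (mode_flags & MODE_FLAG_PHSYNC)
                dtd |= DTD_FLAG_HSYNC_POSITIVE;
        if (mode_flags & MODE_FLAG_PVSYNC)
                dtd |= DTD_FLAG_VSYNC_POSITIVE;
        return dtd;
}

int main(void)
{
        /* interlaced + positive hsync: 0x18 | 0x80 | 0x02 = 0x9a */
        printf("0x%02x\n",
               (unsigned)dtd_flags_from_mode(MODE_FLAG_INTERLACE | MODE_FLAG_PHSYNC));
        return 0;
}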
......
@@ -673,6 +673,54 @@ static const struct tv_mode tv_modes[] = {
                 .filter_table = filter_table,
         },
+        {
+                .name = "480p",
+                .clock = 107520,
+                .refresh = 59940,
+                .oversample = TV_OVERSAMPLE_4X,
+                .component_only = 1,
+
+                .hsync_end = 64, .hblank_end = 122,
+                .hblank_start = 842, .htotal = 857,
+
+                .progressive = true, .trilevel_sync = false,
+
+                .vsync_start_f1 = 12, .vsync_start_f2 = 12,
+                .vsync_len = 12,
+
+                .veq_ena = false,
+
+                .vi_end_f1 = 44, .vi_end_f2 = 44,
+                .nbr_end = 479,
+
+                .burst_ena = false,
+
+                .filter_table = filter_table,
+        },
+        {
+                .name = "576p",
+                .clock = 107520,
+                .refresh = 50000,
+                .oversample = TV_OVERSAMPLE_4X,
+                .component_only = 1,
+
+                .hsync_end = 64, .hblank_end = 139,
+                .hblank_start = 859, .htotal = 863,
+
+                .progressive = true, .trilevel_sync = false,
+
+                .vsync_start_f1 = 10, .vsync_start_f2 = 10,
+                .vsync_len = 10,
+
+                .veq_ena = false,
+
+                .vi_end_f1 = 48, .vi_end_f2 = 48,
+                .nbr_end = 575,
+
+                .burst_ena = false,
+
+                .filter_table = filter_table,
+        },
         {
                 .name = "720p@60Hz",
                 .clock = 148800,
@@ -1194,6 +1242,11 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
         I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
         I915_WRITE(TV_CTL, save_tv_ctl);
         POSTING_READ(TV_CTL);
+
+        /* For unknown reasons the hw barfs if we don't do this vblank wait. */
+        intel_wait_for_vblank(intel_tv->base.base.dev,
+                              to_intel_crtc(intel_tv->base.base.crtc)->pipe);
+
         /* Restore interrupt config */
         if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
......
@@ -865,7 +865,7 @@ static void cayman_gpu_init(struct radeon_device *rdev)
 
         /* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
         if (rdev->flags & RADEON_IS_IGP)
-                rdev->config.evergreen.tile_config |= 1 << 4;
+                rdev->config.cayman.tile_config |= 1 << 4;
         else
                 rdev->config.cayman.tile_config |=
                         ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
......
@@ -848,7 +848,6 @@ struct radeon_cs_parser {
         s32 priority;
 };
 
-extern int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx);
 extern int radeon_cs_finish_pages(struct radeon_cs_parser *p);
 extern u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx);
......
@@ -444,7 +444,9 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
          */
         if ((dev->pdev->device == 0x9498) &&
             (dev->pdev->subsystem_vendor == 0x1682) &&
-            (dev->pdev->subsystem_device == 0x2452)) {
+            (dev->pdev->subsystem_device == 0x2452) &&
+            (i2c_bus->valid == false) &&
+            !(supported_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))) {
                 struct radeon_device *rdev = dev->dev_private;
                 *i2c_bus = radeon_lookup_i2c_gpio(rdev, 0x93);
         }
......
@@ -580,7 +580,7 @@ int radeon_cs_finish_pages(struct radeon_cs_parser *p)
         return 0;
 }
 
-int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
+static int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
 {
         int new_page;
         struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
@@ -623,3 +623,28 @@ int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
 
         return new_page;
 }
+
+u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
+{
+        struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
+        u32 pg_idx, pg_offset;
+        u32 idx_value = 0;
+        int new_page;
+
+        pg_idx = (idx * 4) / PAGE_SIZE;
+        pg_offset = (idx * 4) % PAGE_SIZE;
+
+        if (ibc->kpage_idx[0] == pg_idx)
+                return ibc->kpage[0][pg_offset/4];
+        if (ibc->kpage_idx[1] == pg_idx)
+                return ibc->kpage[1][pg_offset/4];
+
+        new_page = radeon_cs_update_pages(p, pg_idx);
+        if (new_page < 0) {
+                p->parser_error = new_page;
+                return 0;
+        }
+
+        idx_value = ibc->kpage[new_page][pg_offset/4];
+        return idx_value;
+}
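
radeon_get_ib_value() is a small read-through cache: two kernel-mapped pages of the indirect buffer stay resident, and a miss calls radeon_cs_update_pages() (now static, hence the header change above) to map a replacement before the dword is fetched. The same two-slot lookup over an in-memory buffer as a freestanding sketch (plain arrays instead of the parser state):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096
#define NPAGES 4

static uint32_t ib[NPAGES * PAGE_SIZE / 4];  /* backing store ("the IB") */
static uint32_t kpage[2][PAGE_SIZE / 4];     /* two cached pages */
static int kpage_idx[2] = { -1, -1 };
static int last_slot;

/* Miss path: copy the wanted page into the slot not filled last. */
static int update_pages(uint32_t pg_idx)
{
        int slot = last_slot ^ 1;

        memcpy(kpage[slot], &ib[pg_idx * (PAGE_SIZE / 4)], PAGE_SIZE);
        kpage_idx[slot] = (int)pg_idx;
        last_slot = slot;
        return slot;
}

static uint32_t get_ib_value(uint32_t idx)
{
        uint32_t pg_idx = (idx * 4) / PAGE_SIZE;
        uint32_t pg_offset = (idx * 4) % PAGE_SIZE;

        if (kpage_idx[0] == (int)pg_idx)
                return kpage[0][pg_offset / 4];
        if (kpage_idx[1] == (int)pg_idx)
                return kpage[1][pg_offset / 4];

        return kpage[update_pages(pg_idx)][pg_offset / 4];
}

int main(void)
{
        uint32_t i;

        for (i = 0; i < NPAGES * PAGE_SIZE / 4; i++)
                ib[i] = i;

        /* two misses, then a hit in the slot that still holds page 0 */
        printf("%u %u %u\n", (unsigned)get_ib_value(10),
               (unsigned)get_ib_value(2000), (unsigned)get_ib_value(10));
        return 0;
}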
@@ -39,31 +39,6 @@
  */
 int radeon_debugfs_sa_init(struct radeon_device *rdev);
 
-u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
-{
-        struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
-        u32 pg_idx, pg_offset;
-        u32 idx_value = 0;
-        int new_page;
-
-        pg_idx = (idx * 4) / PAGE_SIZE;
-        pg_offset = (idx * 4) % PAGE_SIZE;
-
-        if (ibc->kpage_idx[0] == pg_idx)
-                return ibc->kpage[0][pg_offset/4];
-        if (ibc->kpage_idx[1] == pg_idx)
-                return ibc->kpage[1][pg_offset/4];
-
-        new_page = radeon_cs_update_pages(p, pg_idx);
-        if (new_page < 0) {
-                p->parser_error = new_page;
-                return 0;
-        }
-
-        idx_value = ibc->kpage[new_page][pg_offset/4];
-        return idx_value;
-}
-
 int radeon_ib_get(struct radeon_device *rdev, int ring,
                   struct radeon_ib *ib, unsigned size)
 {
......
@@ -234,7 +234,7 @@ int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
 
         ret = udl_gem_get_pages(gobj, GFP_KERNEL);
         if (ret)
-                return ret;
+                goto out;
 
         if (!gobj->base.map_list.map) {
                 ret = drm_gem_create_mmap_offset(obj);
                 if (ret)
@@ -257,8 +257,6 @@ static int udl_prime_create(struct drm_device *dev,
 {
         struct udl_gem_object *obj;
         int npages;
-        int i;
-        struct scatterlist *iter;
 
         npages = size / PAGE_SIZE;
......