diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 062cbda1bf4a6c76acb4a0086209161d2b5d5bf7..f4af1ca0fb62d82827213d51461d9642f0cf33cf 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -123,10 +123,10 @@ module_param_named(preliminary_hw_support, i915_preliminary_hw_support, int, 060
 MODULE_PARM_DESC(preliminary_hw_support,
 		"Enable preliminary hardware support. (default: false)");
 
-int i915_disable_power_well __read_mostly = 0;
+int i915_disable_power_well __read_mostly = 1;
 module_param_named(disable_power_well, i915_disable_power_well, int, 0600);
 MODULE_PARM_DESC(disable_power_well,
-		 "Disable the power well when possible (default: false)");
+		 "Disable the power well when possible (default: true)");
 
 int i915_enable_ips __read_mostly = 1;
 module_param_named(enable_ips, i915_enable_ips, int, 0600);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 4200c32407ecfef57557517b36c7c03b389d6930..97afd2639fb63a1e240fce2c6f97f53fd9f82e98 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1880,6 +1880,10 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
 	u32 seqno = intel_ring_get_seqno(ring);
 
 	BUG_ON(ring == NULL);
+	if (obj->ring != ring && obj->last_write_seqno) {
+		/* Keep the seqno relative to the current ring */
+		obj->last_write_seqno = seqno;
+	}
 	obj->ring = ring;
 
 	/* Add a reference if we're newly entering the active list. */
@@ -2653,7 +2657,6 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	int fence_reg;
 	int fence_pitch_shift;
-	uint64_t val;
 
 	if (INTEL_INFO(dev)->gen >= 6) {
 		fence_reg = FENCE_REG_SANDYBRIDGE_0;
@@ -2663,8 +2666,23 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
 		fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
 	}
 
+	fence_reg += reg * 8;
+
+	/* To w/a incoherency with non-atomic 64-bit register updates,
+	 * we split the 64-bit update into two 32-bit writes. In order
+	 * for a partial fence not to be evaluated between writes, we
+	 * precede the update with write to turn off the fence register,
+	 * and only enable the fence as the last step.
+	 *
+	 * For extra levels of paranoia, we make sure each step lands
+	 * before applying the next step.
+	 */
+	I915_WRITE(fence_reg, 0);
+	POSTING_READ(fence_reg);
+
 	if (obj) {
 		u32 size = obj->gtt_space->size;
+		uint64_t val;
 
 		val = (uint64_t)((obj->gtt_offset + size - 4096) &
 				 0xfffff000) << 32;
@@ -2673,12 +2691,16 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
 		if (obj->tiling_mode == I915_TILING_Y)
 			val |= 1 << I965_FENCE_TILING_Y_SHIFT;
 		val |= I965_FENCE_REG_VALID;
-	} else
-		val = 0;
 
-	fence_reg += reg * 8;
-	I915_WRITE64(fence_reg, val);
-	POSTING_READ(fence_reg);
+		I915_WRITE(fence_reg + 4, val >> 32);
+		POSTING_READ(fence_reg + 4);
+
+		I915_WRITE(fence_reg + 0, val);
+		POSTING_READ(fence_reg);
+	} else {
+		I915_WRITE(fence_reg + 4, 0);
+		POSTING_READ(fence_reg + 4);
+	}
 }
 
 static void i915_write_fence_reg(struct drm_device *dev, int reg,
@@ -2796,56 +2818,17 @@ static inline int fence_number(struct drm_i915_private *dev_priv,
 	return fence - dev_priv->fence_regs;
 }
 
-struct write_fence {
-	struct drm_device *dev;
-	struct drm_i915_gem_object *obj;
-	int fence;
-};
-
-static void i915_gem_write_fence__ipi(void *data)
-{
-	struct write_fence *args = data;
-
-	/* Required for SNB+ with LLC */
-	wbinvd();
-
-	/* Required for VLV */
-	i915_gem_write_fence(args->dev, args->fence, args->obj);
-}
-
 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
 					 struct drm_i915_fence_reg *fence,
 					 bool enable)
 {
 	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-	struct write_fence args = {
-		.dev = obj->base.dev,
-		.fence = fence_number(dev_priv, fence),
-		.obj = enable ? obj : NULL,
-	};
-
-	/* In order to fully serialize access to the fenced region and
-	 * the update to the fence register we need to take extreme
-	 * measures on SNB+. In theory, the write to the fence register
-	 * flushes all memory transactions before, and coupled with the
-	 * mb() placed around the register write we serialise all memory
-	 * operations with respect to the changes in the tiler. Yet, on
-	 * SNB+ we need to take a step further and emit an explicit wbinvd()
-	 * on each processor in order to manually flush all memory
-	 * transactions before updating the fence register.
-	 *
-	 * However, Valleyview complicates matter. There the wbinvd is
-	 * insufficient and unlike SNB/IVB requires the serialising
-	 * register write. (Note that that register write by itself is
-	 * conversely not sufficient for SNB+.) To compromise, we do both.
-	 */
-	if (INTEL_INFO(args.dev)->gen >= 6)
-		on_each_cpu(i915_gem_write_fence__ipi, &args, 1);
-	else
-		i915_gem_write_fence(args.dev, args.fence, args.obj);
+	int reg = fence_number(dev_priv, fence);
+
+	i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
 
 	if (enable) {
-		obj->fence_reg = args.fence;
+		obj->fence_reg = reg;
 		fence->obj = obj;
 		list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
 	} else {
@@ -4611,7 +4594,7 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
 		list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
 			if (obj->pages_pin_count == 0)
 				cnt += obj->base.size >> PAGE_SHIFT;
-		list_for_each_entry(obj, &dev_priv->mm.inactive_list, global_list)
+		list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list)
 			if (obj->pin_count == 0 && obj->pages_pin_count == 0)
 				cnt += obj->base.size >> PAGE_SHIFT;
 
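Note on the i965_write_fence_reg() rework above: a single 64-bit I915_WRITE64() is not performed atomically by the hardware interface, so another agent can observe a half-updated fence between the two halves. A minimal standalone sketch of the write ordering the hunk establishes, with mmio_write32()/mmio_read32() as hypothetical stand-ins for I915_WRITE/POSTING_READ:

#include <stdint.h>

/* Hypothetical MMIO accessors standing in for I915_WRITE/POSTING_READ. */
extern void mmio_write32(uint32_t reg, uint32_t val);
extern uint32_t mmio_read32(uint32_t reg);

/* Same ordering as the hunk above: disable the fence, flush, write the
 * high dword, flush, then write the low dword last. The low dword
 * carries I965_FENCE_REG_VALID, so the fence only becomes live once
 * the whole 64-bit value is coherent.
 */
static void fence_write64_split(uint32_t fence_reg, uint64_t val)
{
        mmio_write32(fence_reg, 0);             /* turn the fence off */
        (void)mmio_read32(fence_reg);           /* make the disable land */

        mmio_write32(fence_reg + 4, (uint32_t)(val >> 32));
        (void)mmio_read32(fence_reg + 4);

        mmio_write32(fence_reg, (uint32_t)val); /* valid bit lands last */
        (void)mmio_read32(fence_reg);
}

The posting read after each step is what makes the sequence robust: it forces the previous write to land before the next one is issued.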
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index b739712340131dae2e35f56613fae22f2ca819f0..26e162bb3a5158d5da3f1b5e9b2614c7515ede6b 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -75,7 +75,12 @@ intel_dp_max_link_bw(struct intel_dp *intel_dp)
 	case DP_LINK_BW_1_62:
 	case DP_LINK_BW_2_7:
 		break;
+	case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */
+		max_link_bw = DP_LINK_BW_2_7;
+		break;
 	default:
+		WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
+		     max_link_bw);
 		max_link_bw = DP_LINK_BW_1_62;
 		break;
 	}
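The new case folds the DP 1.2 HBR2 rate (5.4Gbps), which this driver cannot drive yet, down to 2.7Gbps instead of hitting the WARN in the default branch. The codes express the link rate in units of 0.27Gbps, so 0x06, 0x0a and 0x14 correspond to 1.62, 2.7 and 5.4Gbps. A standalone sketch of the clamping (the enum and function name are illustrative; values follow the DisplayPort spec):

/* Clamp a sink's max link-rate code to what the driver supports:
 * HBR2 (5.4Gbps) falls back to 2.7Gbps, unknown codes to 1.62Gbps.
 */
enum { LINK_BW_1_62 = 0x06, LINK_BW_2_7 = 0x0a, LINK_BW_5_4 = 0x14 };

static int clamp_max_link_bw(int bw)
{
        switch (bw) {
        case LINK_BW_1_62:
        case LINK_BW_2_7:
                return bw;
        case LINK_BW_5_4:       /* DP 1.2 sink advertising HBR2 */
                return LINK_BW_2_7;
        default:                /* out-of-spec value from the sink */
                return LINK_BW_1_62;
        }
}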
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index ccbdd83f5220e2811a1c6f86cb2068c7aac493e3..d10e6735771fe55bd738bc3ee2d840f384d04471 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -5500,9 +5500,38 @@ void intel_gt_init(struct drm_device *dev)
 	if (IS_VALLEYVIEW(dev)) {
 		dev_priv->gt.force_wake_get = vlv_force_wake_get;
 		dev_priv->gt.force_wake_put = vlv_force_wake_put;
-	} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
+	} else if (IS_HASWELL(dev)) {
 		dev_priv->gt.force_wake_get = __gen6_gt_force_wake_mt_get;
 		dev_priv->gt.force_wake_put = __gen6_gt_force_wake_mt_put;
+	} else if (IS_IVYBRIDGE(dev)) {
+		u32 ecobus;
+
+		/* IVB configs may use multi-threaded forcewake */
+
+		/* A small trick here - if the bios hasn't configured
+		 * MT forcewake, and if the device is in RC6, then
+		 * force_wake_mt_get will not wake the device and the
+		 * ECOBUS read will return zero. Which will be
+		 * (correctly) interpreted by the test below as MT
+		 * forcewake being disabled.
+		 */
+		mutex_lock(&dev->struct_mutex);
+		__gen6_gt_force_wake_mt_get(dev_priv);
+		ecobus = I915_READ_NOTRACE(ECOBUS);
+		__gen6_gt_force_wake_mt_put(dev_priv);
+		mutex_unlock(&dev->struct_mutex);
+
+		if (ecobus & FORCEWAKE_MT_ENABLE) {
+			dev_priv->gt.force_wake_get =
+						__gen6_gt_force_wake_mt_get;
+			dev_priv->gt.force_wake_put =
+						__gen6_gt_force_wake_mt_put;
+		} else {
+			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
+			DRM_INFO("when using vblank-synced partial screen updates.\n");
+			dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
+			dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
+		}
 	} else if (IS_GEN6(dev)) {
 		dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
 		dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
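The Ivybridge branch added above probes whether the BIOS actually enabled multi-threaded forcewake instead of assuming it from the PCI ID. The trick documented in the hunk's comment can be restated as a standalone sketch (helper names are hypothetical; ECOBUS and FORCEWAKE_MT_ENABLE are the real register and bit used by the driver, with an illustrative bit value here):

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical stand-ins for the locked MT forcewake get/put and the
 * ECOBUS register read performed in the hunk above.
 */
extern void mt_forcewake_get(void);
extern void mt_forcewake_put(void);
extern uint32_t read_ecobus(void);
#define MT_ENABLE_BIT (1u << 5) /* illustrative; driver uses FORCEWAKE_MT_ENABLE */

/* If the BIOS never configured MT forcewake and the GPU sits in RC6,
 * the MT wake request is simply ignored, the ECOBUS read returns zero,
 * and the test (correctly) reports MT forcewake as unavailable.
 */
static bool has_mt_forcewake(void)
{
        uint32_t ecobus;

        mt_forcewake_get();     /* only wakes the GPU if MT fw is set up */
        ecobus = read_ecobus();
        mt_forcewake_put();

        return (ecobus & MT_ENABLE_BIT) != 0;
}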
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index e51ab552046c9ef943ec7d7bb1de5b5e24467154..664118d8c1d6426353ed97bb61b1113369a7678a 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -379,6 +379,17 @@ u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
 	return I915_READ(acthd_reg);
 }
 
+static void ring_setup_phys_status_page(struct intel_ring_buffer *ring)
+{
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	u32 addr;
+
+	addr = dev_priv->status_page_dmah->busaddr;
+	if (INTEL_INFO(ring->dev)->gen >= 4)
+		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
+	I915_WRITE(HWS_PGA, addr);
+}
+
 static int init_ring_common(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
@@ -390,6 +401,11 @@ static int init_ring_common(struct intel_ring_buffer *ring)
 	if (HAS_FORCE_WAKE(dev))
 		gen6_gt_force_wake_get(dev_priv);
 
+	if (I915_NEED_GFX_HWS(dev))
+		intel_ring_setup_status_page(ring);
+	else
+		ring_setup_phys_status_page(ring);
+
 	/* Stop the ring if it's running. */
 	I915_WRITE_CTL(ring, 0);
 	I915_WRITE_HEAD(ring, 0);
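ring_setup_phys_status_page() is factored out above and called from init_ring_common(), so the hardware status page address is rewritten whenever the ring is (re)initialised rather than only once at allocation. The address massaging packs a potentially wider-than-32-bit bus address into the 32-bit HWS_PGA register: on gen4+ the page-aligned low 32 bits keep their position while physical address bits 35:32 are folded into register bits 7:4. A worked example (helper name is illustrative):

#include <stdint.h>

/* For busaddr = 0x234567000 (a 36-bit, page-aligned address):
 *   low 32 bits        -> 0x34567000
 *   busaddr >> 28      -> 0x23
 *   ... & 0xf0         -> 0x20   (bits 35:32 now sit at bits 7:4)
 *   encoded HWS_PGA    -> 0x34567020
 */
static uint32_t hws_pga_encode(uint64_t busaddr, int gen)
{
        uint32_t addr = (uint32_t)busaddr;

        if (gen >= 4)
                addr |= (uint32_t)((busaddr >> 28) & 0xf0);
        return addr;
}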
@@ -518,9 +534,6 @@ cleanup_pipe_control(struct intel_ring_buffer *ring)
 	struct pipe_control *pc = ring->private;
 	struct drm_i915_gem_object *obj;
 
-	if (!ring->private)
-		return;
-
 	obj = pc->obj;
 	kunmap(sg_page(obj->pages->sgl));
@@ -528,7 +541,6 @@ cleanup_pipe_control(struct intel_ring_buffer *ring)
 	drm_gem_object_unreference(&obj->base);
 
 	kfree(pc);
-	ring->private = NULL;
 }
 
 static int init_render_ring(struct intel_ring_buffer *ring)
@@ -601,7 +613,10 @@ static void render_ring_cleanup(struct intel_ring_buffer *ring)
 	if (HAS_BROKEN_CS_TLB(dev))
 		drm_gem_object_unreference(to_gem_object(ring->private));
 
-	cleanup_pipe_control(ring);
+	if (INTEL_INFO(dev)->gen >= 5)
+		cleanup_pipe_control(ring);
+
+	ring->private = NULL;
 }
 
 static void
@@ -1223,7 +1238,6 @@ static int init_status_page(struct intel_ring_buffer *ring)
 	ring->status_page.obj = obj;
 	memset(ring->status_page.page_addr, 0, PAGE_SIZE);
 
-	intel_ring_setup_status_page(ring);
 	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
 			ring->name, ring->status_page.gfx_addr);
 
@@ -1237,10 +1251,9 @@ static int init_status_page(struct intel_ring_buffer *ring)
 	return ret;
 }
 
-static int init_phys_hws_pga(struct intel_ring_buffer *ring)
+static int init_phys_status_page(struct intel_ring_buffer *ring)
 {
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
-	u32 addr;
 
 	if (!dev_priv->status_page_dmah) {
 		dev_priv->status_page_dmah =
@@ -1249,11 +1262,6 @@ static int init_phys_hws_pga(struct intel_ring_buffer *ring)
 			return -ENOMEM;
 	}
 
-	addr = dev_priv->status_page_dmah->busaddr;
-	if (INTEL_INFO(ring->dev)->gen >= 4)
-		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
-	I915_WRITE(HWS_PGA, addr);
-
 	ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
 	memset(ring->status_page.page_addr, 0, PAGE_SIZE);
 
@@ -1281,7 +1289,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 		return ret;
 	} else {
 		BUG_ON(ring->id != RCS);
-		ret = init_phys_hws_pga(ring);
+		ret = init_phys_status_page(ring);
 		if (ret)
 			return ret;
 	}
@@ -1893,7 +1901,7 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
 	}
 
 	if (!I915_NEED_GFX_HWS(dev)) {
-		ret = init_phys_hws_pga(ring);
+		ret = init_phys_status_page(ring);
 		if (ret)
 			return ret;
 	}
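One subtlety in the cleanup hunks above: cleanup_pipe_control() loses its internal NULL guard, so it now assumes ring->private holds a valid pipe_control, and render_ring_cleanup() only calls it on gen5+ (the generations that set one up; on gen2 with the broken CS TLB, ring->private instead holds the workaround object released just before). Clearing ring->private moves to the caller so it happens exactly once in either case. A condensed sketch of the resulting contract, with abbreviated hypothetical types:

/* Condensed shape of the cleanup path after this patch: the callee
 * assumes ring->private is valid; the caller decides when that holds.
 */
struct ring { void *private; };
extern int gen; /* device generation, illustrative */

static void cleanup_pipe_control(struct ring *ring)
{
        /* ...kunmap, unpin, unreference, kfree(ring->private)... */
}

static void render_ring_cleanup(struct ring *ring)
{
        if (gen >= 5)           /* pipe control exists only on gen5+ */
                cleanup_pipe_control(ring);

        ring->private = NULL;   /* reset regardless of generation */
}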