Commit aa9664ff authored by Mahesh Kumar, committed by Rodrigo Vivi

drm/i915/icl: Enable 2nd DBuf slice only when needed

ICL has two DBuf slices, each 1024 blocks in size.
We should not always enable slice 2: it should be enabled only if the
total bandwidth required by the display exceeds 12 GB/s or more than
one pipe is enabled.

Changes since V1:
 - typecast total_data_rate to u64 before the multiplication to avoid
   any possible overflow (Rodrigo)
 - fix skl_wm_get_hw_state, which was memsetting the whole ddb and
   thereby zeroing enabled_slices
 - Fix the logic of calculating ddb_size
Changes since V2:
 - If no CRTC is part of the commit, required_slices will be 0;
   don't try to disable the DBuf slice in that case.
Changes since V3:
 - Create a generic helper to enable/disable slice
 - don't return early if total_data_rate is 0; it may be a cursor-only
   commit or an atomic modeset without any plane.
Changes since V4:
 - Solve checkpatch warnings
 - use kernel types u8/u64 instead of uint8_t/uint64_t
Changes since V5:
 - Rebase
Signed-off-by: Mahesh Kumar <mahesh1.kumar@intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180426142517.16643-3-mahesh1.kumar@intel.com
Parent 74bd8004
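
The rule in the commit message reduces to a small predicate. Below is a minimal standalone sketch of it in C (the function name and the bytes-per-frame reading of total_data_rate are illustrative assumptions, not the kernel's API):

    #include <stdbool.h>
    #include <stdint.h>

    #define GBPS(x) ((uint64_t)(x) * 1000 * 1000 * 1000)

    /* Hypothetical model of the policy: the second DBuf slice is needed
     * when total display bandwidth exceeds the 12 GB/s a single slice
     * can sustain, or when more than one pipe is active. */
    bool needs_second_dbuf_slice(uint64_t bytes_per_frame,
                                 unsigned int vrefresh_hz,
                                 int num_active_pipes)
    {
        uint64_t total_data_bw = bytes_per_frame * vrefresh_hz;

        return total_data_bw >= GBPS(12) || num_active_pipes > 1;
    }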
@@ -12258,6 +12258,8 @@ static void skl_update_crtcs(struct drm_atomic_state *state)
 	bool progress;
 	enum pipe pipe;
 	int i;
+	u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
+	u8 required_slices = intel_state->wm_results.ddb.enabled_slices;
 	const struct skl_ddb_entry *entries[I915_MAX_PIPES] = {};
@@ -12266,6 +12268,10 @@ static void skl_update_crtcs(struct drm_atomic_state *state)
 		if (new_crtc_state->active)
 			entries[i] = &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb;
 
+	/* If 2nd DBuf slice required, enable it here */
+	if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices)
+		icl_dbuf_slices_update(dev_priv, required_slices);
+
 	/*
 	 * Whenever the number of active pipes changes, we need to make sure we
 	 * update the pipes in the right order so that their ddb allocations
@@ -12316,6 +12322,10 @@ static void skl_update_crtcs(struct drm_atomic_state *state)
 			progress = true;
 		}
 	} while (progress);
+
+	/* If 2nd DBuf slice is no more required disable it */
+	if (INTEL_GEN(dev_priv) >= 11 && required_slices < hw_enabled_slices)
+		icl_dbuf_slices_update(dev_priv, required_slices);
 }
 
 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
...
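
Note the ordering in skl_update_crtcs() above: the second slice is powered up before any pipe update starts and powered down only after every pipe has moved to its new DDB allocation, so no pipe ever runs with less DBuf than its allocation assumes. A minimal sketch of that grow-early/shrink-late pattern, with hypothetical helper names rather than i915 functions:

    /* Stand-ins for "resize the shared resource" and "move the
     * consumers to their new allocations". */
    void set_resource_units(unsigned int units);
    void apply_consumer_updates(void);

    void commit_with_resized_resource(unsigned int old_units,
                                      unsigned int new_units)
    {
        if (new_units > old_units)      /* grow before consumers move */
            set_resource_units(new_units);

        apply_consumer_updates();       /* capacity >= max(old, new) here */

        if (new_units < old_units)      /* shrink only after they are done */
            set_resource_units(new_units);
    }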
@@ -144,6 +144,10 @@
 #define KHz(x) (1000 * (x))
 #define MHz(x) KHz(1000 * (x))
 
+#define KBps(x) (1000 * (x))
+#define MBps(x) KBps(1000 * (x))
+#define GBps(x) ((u64)1000 * MBps((x)))
+
 /*
  * Display related stuff
  */
@@ -1931,6 +1935,8 @@ bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
 					enum intel_display_power_domain domain);
 void intel_display_power_put(struct drm_i915_private *dev_priv,
 			     enum intel_display_power_domain domain);
+void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
+			    u8 req_slices);
 
 static inline void
 assert_rpm_device_not_suspended(struct drm_i915_private *dev_priv)
...
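
The u64 cast in GBps() is load-bearing: 12 x 10^9 does not fit in a 32-bit int (max ~2.1 x 10^9), and the same reasoning is behind the V1 changelog item that casts total_data_rate to u64 before the multiplication. A standalone illustration, with uint64_t standing in for the kernel's u64:

    #include <stdint.h>
    #include <stdio.h>

    #define KBps(x) (1000 * (x))
    #define MBps(x) KBps(1000 * (x))
    /* Without the cast the product is computed in 32-bit int and
     * 12,000,000,000 wraps (signed overflow); with the cast, all
     * the arithmetic is done in 64 bits. */
    #define GBps(x) ((uint64_t)1000 * MBps((x)))

    int main(void)
    {
        printf("GBps(12) = %llu\n", (unsigned long long)GBps(12));
        /* prints 12000000000, a value that needs 34 bits */
        return 0;
    }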
@@ -3771,9 +3771,42 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state)
 	return true;
 }
 
+static unsigned int intel_get_ddb_size(struct drm_i915_private *dev_priv,
+				       const struct intel_crtc_state *cstate,
+				       const unsigned int total_data_rate,
+				       const int num_active,
+				       struct skl_ddb_allocation *ddb)
+{
+	const struct drm_display_mode *adjusted_mode;
+	u64 total_data_bw;
+	u16 ddb_size = INTEL_INFO(dev_priv)->ddb_size;
+
+	WARN_ON(ddb_size == 0);
+
+	if (INTEL_GEN(dev_priv) < 11)
+		return ddb_size - 4; /* 4 blocks for bypass path allocation */
+
+	adjusted_mode = &cstate->base.adjusted_mode;
+	total_data_bw = (u64)total_data_rate * drm_mode_vrefresh(adjusted_mode);
+
+	/*
+	 * 12GB/s is maximum BW supported by single DBuf slice.
+	 */
+	if (total_data_bw >= GBps(12) || num_active > 1) {
+		ddb->enabled_slices = 2;
+	} else {
+		ddb->enabled_slices = 1;
+		ddb_size /= 2;
+	}
+
+	return ddb_size;
+}
+
 static void
 skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
 				   const struct intel_crtc_state *cstate,
+				   const unsigned int total_data_rate,
+				   struct skl_ddb_allocation *ddb,
 				   struct skl_ddb_entry *alloc, /* out */
 				   int *num_active /* out */)
 {
@@ -3796,11 +3829,8 @@ skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
 	else
 		*num_active = hweight32(dev_priv->active_crtcs);
 
-	ddb_size = INTEL_INFO(dev_priv)->ddb_size;
-	WARN_ON(ddb_size == 0);
-
-	if (INTEL_GEN(dev_priv) < 11)
-		ddb_size -= 4; /* 4 blocks for bypass path allocation */
+	ddb_size = intel_get_ddb_size(dev_priv, cstate, total_data_rate,
+				      *num_active, ddb);
 
 	/*
 	 * If the state doesn't change the active CRTC's, then there's
@@ -4261,7 +4291,11 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 		return 0;
 	}
 
-	skl_ddb_get_pipe_allocation_limits(dev, cstate, alloc, &num_active);
+	total_data_rate = skl_get_total_relative_data_rate(cstate,
+							   plane_data_rate,
+							   uv_plane_data_rate);
+	skl_ddb_get_pipe_allocation_limits(dev, cstate, total_data_rate, ddb,
+					   alloc, &num_active);
 	alloc_size = skl_ddb_entry_size(alloc);
 	if (alloc_size == 0)
 		return 0;
@@ -4296,9 +4330,6 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 	 *
 	 * FIXME: we may not allocate every single block here.
 	 */
-	total_data_rate = skl_get_total_relative_data_rate(cstate,
-							   plane_data_rate,
-							   uv_plane_data_rate);
 	if (total_data_rate == 0)
 		return 0;
@@ -5492,8 +5523,12 @@ void skl_wm_get_hw_state(struct drm_device *dev)
 		/* Fully recompute DDB on first atomic commit */
 		dev_priv->wm.distrust_bios_wm = true;
 	} else {
-		/* Easy/common case; just sanitize DDB now if everything off */
-		memset(ddb, 0, sizeof(*ddb));
+		/*
+		 * Easy/common case; just sanitize DDB now if everything off
+		 * Keep dbuf slice info intact
+		 */
+		memset(ddb->plane, 0, sizeof(ddb->plane));
+		memset(ddb->uv_plane, 0, sizeof(ddb->uv_plane));
 	}
 }
...
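
For a feel of the threshold in intel_get_ddb_size(): treating total_data_rate as roughly bytes per frame (an approximation; the kernel's value is a relative rate adjusted for scaling), a single 4K ARGB8888 plane at 60 Hz stays far below 12 GB/s, so one active pipe keeps slice 2 off:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* One 3840x2160 plane at 4 bytes per pixel. */
        uint64_t bytes_per_frame = 3840ull * 2160 * 4; /* ~33.2 MB */
        uint64_t bw = bytes_per_frame * 60;            /* ~2.0 GB/s */

        /* ~1.99e9 B/s, well under the 12 GB/s a single slice can
         * sustain, so only num_active > 1 would force both slices. */
        printf("%llu bytes/s\n", (unsigned long long)bw);
        return 0;
    }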
@@ -2619,32 +2619,69 @@ static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
 	mutex_unlock(&power_domains->lock);
 }
 
+static inline
+bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
+			  i915_reg_t reg, bool enable)
+{
+	u32 val, status;
+
+	val = I915_READ(reg);
+	val = enable ? (val | DBUF_POWER_REQUEST) : (val & ~DBUF_POWER_REQUEST);
+	I915_WRITE(reg, val);
+	POSTING_READ(reg);
+	udelay(10);
+
+	status = I915_READ(reg) & DBUF_POWER_STATE;
+	if ((enable && !status) || (!enable && status)) {
+		DRM_ERROR("DBus power %s timeout!\n",
+			  enable ? "enable" : "disable");
+		return false;
+	}
+	return true;
+}
+
 static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
 {
-	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
-	POSTING_READ(DBUF_CTL);
-
-	udelay(10);
-
-	if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
-		DRM_ERROR("DBuf power enable timeout\n");
+	intel_dbuf_slice_set(dev_priv, DBUF_CTL, true);
 }
 
 static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
 {
-	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
-	POSTING_READ(DBUF_CTL);
-
-	udelay(10);
-
-	if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
-		DRM_ERROR("DBuf power disable timeout!\n");
+	intel_dbuf_slice_set(dev_priv, DBUF_CTL, false);
+}
+
+static u8 intel_dbuf_max_slices(struct drm_i915_private *dev_priv)
+{
+	if (INTEL_GEN(dev_priv) < 11)
+		return 1;
+	return 2;
+}
+
+void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
+			    u8 req_slices)
+{
+	u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
+	u32 val;
+	bool ret;
+
+	if (req_slices > intel_dbuf_max_slices(dev_priv)) {
+		DRM_ERROR("Invalid number of dbuf slices requested\n");
+		return;
+	}
+
+	if (req_slices == hw_enabled_slices || req_slices == 0)
+		return;
+
+	val = I915_READ(DBUF_CTL_S2);
+
+	if (req_slices > hw_enabled_slices)
+		ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, true);
+	else
+		ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, false);
+
+	if (ret)
+		dev_priv->wm.skl_hw.ddb.enabled_slices = req_slices;
 }
 
+/*
+ * TODO: we shouldn't always enable DBUF_CTL_S2, we should only enable it when
+ * needed and keep it disabled as much as possible.
+ */
 static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
 {
 	I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) | DBUF_POWER_REQUEST);
...
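
intel_dbuf_slice_set() above follows a common request/ack handshake: set a REQUEST bit, give the hardware a moment to ramp the power well, then confirm a STATE bit reflects the requested direction. A minimal sketch of the pattern (bit layout and accessors are stand-ins, not the i915 MMIO API):

    #include <stdbool.h>
    #include <stdint.h>

    #define POWER_REQUEST (1u << 31)
    #define POWER_STATE   (1u << 30)

    /* Stand-in MMIO accessors. */
    uint32_t mmio_read(uint32_t reg);
    void mmio_write(uint32_t reg, uint32_t val);
    void delay_us(unsigned int us);

    bool power_well_set(uint32_t reg, bool enable)
    {
        uint32_t val = mmio_read(reg);

        val = enable ? (val | POWER_REQUEST) : (val & ~POWER_REQUEST);
        mmio_write(reg, val);

        delay_us(10); /* give the well time to change state */

        /* Ack: the STATE bit must now match the requested direction. */
        return !!(mmio_read(reg) & POWER_STATE) == enable;
    }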