Commit 8ad2bc97 authored by Dave Airlie

Merge branch 'drm-intel-next' of git://git.freedesktop.org/git/drm-intel into drm-next

- fine-grained display power domains for byt (Imre)
- runtime pm prep patches for !hsw from Paulo
- WiZ hashing flag updates from Ville
- ppgtt setup cleanup and enabling of full 4G range on bdw (Ben)
- fixes from Jesse for the inherited initial config code
- gpu reset code improvements from Mika
- per-pipe num_planes refactoring from Damien
- stability fixes around bdw forcewake handling and other bdw w/a from Mika and Ken
- and as usual a pile of smaller fixes all over

* 'drm-intel-next' of git://git.freedesktop.org/git/drm-intel: (107 commits)
  drm/i915: Go OCD on the Makefile
  drm/i915: Implement command buffer parsing logic
  drm/i915: Refactor shmem pread setup
  drm/i915: Avoid div by zero when pixel clock is large
  drm/i915: power domains: add vlv power wells
  drm/i915: factor out intel_set_cpu_fifo_underrun_reporting_nolock
  drm/i915: vlv: factor out valleyview_display_irq_install
  drm/i915: sanity check power well sw state against hw state
  drm/i915: factor out reset_vblank_counter
  drm/i915: sanitize PUNIT register macro definitions
  drm/i915: vlv: keep first level vblank IRQs masked
  drm/i915: check pipe power domain when reading its hw state
  drm/i915: check port power domain when reading the encoder hw state
  drm/i915: get port power domain in connector detect handlers
  drm/i915: add port power domains
  drm/i915: add noop power well handlers instead of NULL checking them
  drm/i915: split power well 'set' handler to separate enable/disable/sync_hw
  drm/i915: add init power domain to always-on power wells
  drm/i915: move power domain macros to intel_pm.c
  drm/i915: Disable full ppgtt by default
  ...
Parents: e40d6410 e19b9137
......@@ -3,58 +3,69 @@
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
ccflags-y := -Iinclude/drm
i915-y := i915_drv.o i915_dma.o i915_irq.o \
i915_gpu_error.o \
# Please keep these build lists sorted!
# core driver code
i915-y := i915_drv.o \
i915_params.o \
i915_suspend.o \
i915_gem.o \
i915_sysfs.o \
intel_pm.o
i915-$(CONFIG_COMPAT) += i915_ioc32.o
i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o
# GEM code
i915-y += i915_cmd_parser.o \
i915_gem_context.o \
i915_gem_debug.o \
i915_gem_dmabuf.o \
i915_gem_evict.o \
i915_gem_execbuffer.o \
i915_gem_gtt.o \
i915_gem.o \
i915_gem_stolen.o \
i915_gem_tiling.o \
i915_params.o \
i915_sysfs.o \
i915_gpu_error.o \
i915_irq.o \
i915_trace_points.o \
i915_ums.o \
intel_ringbuffer.o \
intel_uncore.o
# modesetting core code
i915-y += intel_bios.o \
intel_display.o \
intel_crt.o \
intel_lvds.o \
intel_dsi.o \
intel_dsi_cmd.o \
intel_dsi_pll.o \
intel_bios.o \
intel_ddi.o \
intel_dp.o \
intel_hdmi.o \
intel_sdvo.o \
intel_modes.o \
intel_panel.o \
intel_pm.o \
intel_i2c.o \
intel_tv.o \
intel_dvo.o \
intel_ringbuffer.o \
intel_overlay.o \
intel_sprite.o \
intel_sideband.o \
intel_uncore.o \
intel_sprite.o
i915-$(CONFIG_ACPI) += intel_acpi.o intel_opregion.o
i915-$(CONFIG_DRM_I915_FBDEV) += intel_fbdev.o
# modesetting output/encoder code
i915-y += dvo_ch7017.o \
dvo_ch7xxx.o \
dvo_ch7017.o \
dvo_ivch.o \
dvo_tfp410.o \
dvo_sil164.o \
dvo_ns2501.o \
i915_gem_dmabuf.o
i915-$(CONFIG_COMPAT) += i915_ioc32.o
i915-$(CONFIG_ACPI) += intel_acpi.o intel_opregion.o
i915-$(CONFIG_DRM_I915_FBDEV) += intel_fbdev.o
dvo_sil164.o \
dvo_tfp410.o \
intel_crt.o \
intel_ddi.o \
intel_dp.o \
intel_dsi_cmd.o \
intel_dsi.o \
intel_dsi_pll.o \
intel_dvo.o \
intel_hdmi.o \
intel_i2c.o \
intel_lvds.o \
intel_panel.o \
intel_sdvo.o \
intel_tv.o
i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o
# legacy horrors
i915-y += i915_dma.o \
i915_ums.o
obj-$(CONFIG_DRM_I915) += i915.o
......
/*
* Copyright © 2013 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Brad Volkin <bradley.d.volkin@intel.com>
*
*/
#include "i915_drv.h"
/**
* DOC: i915 batch buffer command parser
*
* Motivation:
* Certain OpenGL features (e.g. transform feedback, performance monitoring)
* require userspace code to submit batches containing commands such as
* MI_LOAD_REGISTER_IMM to access various registers. Unfortunately, some
* generations of the hardware will noop these commands in "unsecure" batches
* (which includes all userspace batches submitted via i915) even though the
* commands may be safe and represent the intended programming model of the
* device.
*
* The software command parser is similar in operation to the command parsing
* done in hardware for unsecure batches. However, the software parser allows
* some operations that would be noop'd by hardware, if the parser determines
* the operation is safe, and submits the batch as "secure" to prevent hardware
* parsing.
*
* Threats:
* At a high level, the hardware (and software) checks attempt to prevent
* granting userspace undue privileges. There are three categories of privilege.
*
* First, commands which are explicitly defined as privileged or which should
* only be used by the kernel driver. The parser generally rejects such
* commands, though it may allow some from the drm master process.
*
* Second, commands which access registers. To support correct/enhanced
* userspace functionality, particularly certain OpenGL extensions, the parser
* provides a whitelist of registers which userspace may safely access (for both
* normal and drm master processes).
*
* Third, commands which access privileged memory (i.e. GGTT, HWS page, etc).
* The parser always rejects such commands.
*
* The majority of the problematic commands fall in the MI_* range, with only a
* few specific commands on each ring (e.g. PIPE_CONTROL and MI_FLUSH_DW).
*
* Implementation:
* Each ring maintains tables of commands and registers which the parser uses in
* scanning batch buffers submitted to that ring.
*
* Since the set of commands that the parser must check for is significantly
* smaller than the number of commands supported, the parser tables contain only
* those commands required by the parser. This generally works because command
* opcode ranges have standard command length encodings. So for commands that
* the parser does not need to check, it can easily skip them. This is
* implemented via a per-ring length decoding vfunc.
*
* Unfortunately, there are a number of commands that do not follow the standard
* length encoding for their opcode range, primarily amongst the MI_* commands.
* To handle this, the parser provides a way to define explicit "skip" entries
* in the per-ring command tables.
*
* Other command table entries map fairly directly to high level categories
* mentioned above: rejected, master-only, register whitelist. The parser
* implements a number of checks, including the privileged memory checks, via a
* general bitmasking mechanism.
*/
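/*
 * A worked example of the length decoding described above (hypothetical
 * header value, not part of this patch): for MI-client commands the mask
 * below is 0x3F, so a header of 0x11000003 (MI_LOAD_REGISTER_IMM with a
 * DWord-count field of 3) advances the parser by
 * (0x11000003 & 0x3F) + LENGTH_BIAS = 3 + 2 = 5 DWords.
 */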
static u32 gen7_render_get_cmd_length_mask(u32 cmd_header)
{
u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
u32 subclient =
(cmd_header & INSTR_SUBCLIENT_MASK) >> INSTR_SUBCLIENT_SHIFT;
if (client == INSTR_MI_CLIENT)
return 0x3F;
else if (client == INSTR_RC_CLIENT) {
if (subclient == INSTR_MEDIA_SUBCLIENT)
return 0xFFFF;
else
return 0xFF;
}
DRM_DEBUG_DRIVER("CMD: Abnormal rcs cmd length! 0x%08X\n", cmd_header);
return 0;
}
static u32 gen7_bsd_get_cmd_length_mask(u32 cmd_header)
{
u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
u32 subclient =
(cmd_header & INSTR_SUBCLIENT_MASK) >> INSTR_SUBCLIENT_SHIFT;
if (client == INSTR_MI_CLIENT)
return 0x3F;
else if (client == INSTR_RC_CLIENT) {
if (subclient == INSTR_MEDIA_SUBCLIENT)
return 0xFFF;
else
return 0xFF;
}
DRM_DEBUG_DRIVER("CMD: Abnormal bsd cmd length! 0x%08X\n", cmd_header);
return 0;
}
static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header)
{
u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
if (client == INSTR_MI_CLIENT)
return 0x3F;
else if (client == INSTR_BC_CLIENT)
return 0xFF;
DRM_DEBUG_DRIVER("CMD: Abnormal blt cmd length! 0x%08X\n", cmd_header);
return 0;
}
static void validate_cmds_sorted(struct intel_ring_buffer *ring)
{
int i;
if (!ring->cmd_tables || ring->cmd_table_count == 0)
return;
for (i = 0; i < ring->cmd_table_count; i++) {
const struct drm_i915_cmd_table *table = &ring->cmd_tables[i];
u32 previous = 0;
int j;
for (j = 0; j < table->count; j++) {
const struct drm_i915_cmd_descriptor *desc =
&table->table[j];
u32 curr = desc->cmd.value & desc->cmd.mask;
if (curr < previous)
DRM_ERROR("CMD: table not sorted ring=%d table=%d entry=%d cmd=0x%08X prev=0x%08X\n",
ring->id, i, j, curr, previous);
previous = curr;
}
}
}
static void check_sorted(int ring_id, const u32 *reg_table, int reg_count)
{
int i;
u32 previous = 0;
for (i = 0; i < reg_count; i++) {
u32 curr = reg_table[i];
if (curr < previous)
DRM_ERROR("CMD: table not sorted ring=%d entry=%d reg=0x%08X prev=0x%08X\n",
ring_id, i, curr, previous);
previous = curr;
}
}
static void validate_regs_sorted(struct intel_ring_buffer *ring)
{
check_sorted(ring->id, ring->reg_table, ring->reg_count);
check_sorted(ring->id, ring->master_reg_table, ring->master_reg_count);
}
/**
* i915_cmd_parser_init_ring() - set cmd parser related fields for a ringbuffer
* @ring: the ringbuffer to initialize
*
* Optionally initializes fields related to batch buffer command parsing in the
* struct intel_ring_buffer based on whether the platform requires software
* command parsing.
*/
void i915_cmd_parser_init_ring(struct intel_ring_buffer *ring)
{
if (!IS_GEN7(ring->dev))
return;
switch (ring->id) {
case RCS:
ring->get_cmd_length_mask = gen7_render_get_cmd_length_mask;
break;
case VCS:
ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
break;
case BCS:
ring->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
break;
case VECS:
/* VECS can use the same length_mask function as VCS */
ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
break;
default:
DRM_ERROR("CMD: cmd_parser_init with unknown ring: %d\n",
ring->id);
BUG();
}
validate_cmds_sorted(ring);
validate_regs_sorted(ring);
}
static const struct drm_i915_cmd_descriptor*
find_cmd_in_table(const struct drm_i915_cmd_table *table,
u32 cmd_header)
{
int i;
for (i = 0; i < table->count; i++) {
const struct drm_i915_cmd_descriptor *desc = &table->table[i];
u32 masked_cmd = desc->cmd.mask & cmd_header;
u32 masked_value = desc->cmd.value & desc->cmd.mask;
if (masked_cmd == masked_value)
return desc;
}
return NULL;
}
/*
* Returns a pointer to a descriptor for the command specified by cmd_header.
*
* The caller must supply space for a default descriptor via the default_desc
* parameter. If no descriptor for the specified command exists in the ring's
* command parser tables, this function fills in default_desc based on the
* ring's default length encoding and returns default_desc.
*/
static const struct drm_i915_cmd_descriptor*
find_cmd(struct intel_ring_buffer *ring,
u32 cmd_header,
struct drm_i915_cmd_descriptor *default_desc)
{
u32 mask;
int i;
for (i = 0; i < ring->cmd_table_count; i++) {
const struct drm_i915_cmd_descriptor *desc;
desc = find_cmd_in_table(&ring->cmd_tables[i], cmd_header);
if (desc)
return desc;
}
mask = ring->get_cmd_length_mask(cmd_header);
if (!mask)
return NULL;
BUG_ON(!default_desc);
default_desc->flags = CMD_DESC_SKIP;
default_desc->length.mask = mask;
return default_desc;
}
static bool valid_reg(const u32 *table, int count, u32 addr)
{
if (table && count != 0) {
int i;
for (i = 0; i < count; i++) {
if (table[i] == addr)
return true;
}
}
return false;
}
static u32 *vmap_batch(struct drm_i915_gem_object *obj)
{
int i;
void *addr = NULL;
struct sg_page_iter sg_iter;
struct page **pages;
pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
if (pages == NULL) {
DRM_DEBUG_DRIVER("Failed to get space for pages\n");
goto finish;
}
i = 0;
for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
pages[i] = sg_page_iter_page(&sg_iter);
i++;
}
addr = vmap(pages, i, 0, PAGE_KERNEL);
if (addr == NULL) {
DRM_DEBUG_DRIVER("Failed to vmap pages\n");
goto finish;
}
finish:
if (pages)
drm_free_large(pages);
return (u32*)addr;
}
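/*
 * Usage sketch for the helper above (illustrative only, hypothetical
 * variable names): vmap() gives one linear CPU view of the object's
 * scattered backing pages, so callers can walk the batch with plain
 * pointer arithmetic, and must balance the mapping with vunmap() as
 * i915_parse_cmds() does below:
 *
 *	u32 *vaddr = vmap_batch(obj);
 *	if (vaddr) {
 *		u32 first_dword = vaddr[0];
 *		vunmap(vaddr);
 *	}
 */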
/**
* i915_needs_cmd_parser() - should a given ring use software command parsing?
* @ring: the ring in question
*
* Only certain platforms require software batch buffer command parsing, and
* only when enabled via module parameter.
*
* Return: true if the ring requires software command parsing
*/
bool i915_needs_cmd_parser(struct intel_ring_buffer *ring)
{
/* No command tables indicates a platform without parsing */
if (!ring->cmd_tables)
return false;
return (i915.enable_cmd_parser == 1);
}
#define LENGTH_BIAS 2
/**
* i915_parse_cmds() - parse a submitted batch buffer for privilege violations
* @ring: the ring on which the batch is to execute
* @batch_obj: the batch buffer in question
* @batch_start_offset: byte offset in the batch at which execution starts
* @is_master: is the submitting process the drm master?
*
* Parses the specified batch buffer looking for privilege violations as
* described in the overview.
*
* Return: non-zero if the parser finds violations or otherwise fails
*/
int i915_parse_cmds(struct intel_ring_buffer *ring,
struct drm_i915_gem_object *batch_obj,
u32 batch_start_offset,
bool is_master)
{
int ret = 0;
u32 *cmd, *batch_base, *batch_end;
struct drm_i915_cmd_descriptor default_desc = { 0 };
int needs_clflush = 0;
ret = i915_gem_obj_prepare_shmem_read(batch_obj, &needs_clflush);
if (ret) {
DRM_DEBUG_DRIVER("CMD: failed to prep read\n");
return ret;
}
batch_base = vmap_batch(batch_obj);
if (!batch_base) {
DRM_DEBUG_DRIVER("CMD: Failed to vmap batch\n");
i915_gem_object_unpin_pages(batch_obj);
return -ENOMEM;
}
if (needs_clflush)
drm_clflush_virt_range((char *)batch_base, batch_obj->base.size);
cmd = batch_base + (batch_start_offset / sizeof(*cmd));
batch_end = cmd + (batch_obj->base.size / sizeof(*batch_end));
while (cmd < batch_end) {
const struct drm_i915_cmd_descriptor *desc;
u32 length;
if (*cmd == MI_BATCH_BUFFER_END)
break;
desc = find_cmd(ring, *cmd, &default_desc);
if (!desc) {
DRM_DEBUG_DRIVER("CMD: Unrecognized command: 0x%08X\n",
*cmd);
ret = -EINVAL;
break;
}
if (desc->flags & CMD_DESC_FIXED)
length = desc->length.fixed;
else
length = ((*cmd & desc->length.mask) + LENGTH_BIAS);
if ((batch_end - cmd) < length) {
DRM_DEBUG_DRIVER("CMD: Command length exceeds batch length: 0x%08X length=%d batchlen=%ld\n",
*cmd,
length,
batch_end - cmd);
ret = -EINVAL;
break;
}
if (desc->flags & CMD_DESC_REJECT) {
DRM_DEBUG_DRIVER("CMD: Rejected command: 0x%08X\n", *cmd);
ret = -EINVAL;
break;
}
if ((desc->flags & CMD_DESC_MASTER) && !is_master) {
DRM_DEBUG_DRIVER("CMD: Rejected master-only command: 0x%08X\n",
*cmd);
ret = -EINVAL;
break;
}
if (desc->flags & CMD_DESC_REGISTER) {
u32 reg_addr = cmd[desc->reg.offset] & desc->reg.mask;
if (!valid_reg(ring->reg_table,
ring->reg_count, reg_addr)) {
if (!is_master ||
!valid_reg(ring->master_reg_table,
ring->master_reg_count,
reg_addr)) {
DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (ring=%d)\n",
reg_addr,
*cmd,
ring->id);
ret = -EINVAL;
break;
}
}
}
if (desc->flags & CMD_DESC_BITMASK) {
int i;
for (i = 0; i < MAX_CMD_DESC_BITMASKS; i++) {
u32 dword;
if (desc->bits[i].mask == 0)
break;
dword = cmd[desc->bits[i].offset] &
desc->bits[i].mask;
if (dword != desc->bits[i].expected) {
DRM_DEBUG_DRIVER("CMD: Rejected command 0x%08X for bitmask 0x%08X (exp=0x%08X act=0x%08X) (ring=%d)\n",
*cmd,
desc->bits[i].mask,
desc->bits[i].expected,
dword, ring->id);
ret = -EINVAL;
break;
}
}
if (ret)
break;
}
cmd += length;
}
if (cmd >= batch_end) {
DRM_DEBUG_DRIVER("CMD: Got to the end of the buffer w/o a BBE cmd!\n");
ret = -EINVAL;
}
vunmap(batch_base);
i915_gem_object_unpin_pages(batch_obj);
return ret;
}
......@@ -602,7 +602,6 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
intel_runtime_pm_get(dev_priv);
if (INTEL_INFO(dev)->gen >= 8) {
int i;
seq_printf(m, "Master Interrupt Control:\t%08x\n",
I915_READ(GEN8_MASTER_IRQ));
......@@ -615,16 +614,16 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
i, I915_READ(GEN8_GT_IER(i)));
}
for_each_pipe(i) {
for_each_pipe(pipe) {
seq_printf(m, "Pipe %c IMR:\t%08x\n",
pipe_name(i),
I915_READ(GEN8_DE_PIPE_IMR(i)));
pipe_name(pipe),
I915_READ(GEN8_DE_PIPE_IMR(pipe)));
seq_printf(m, "Pipe %c IIR:\t%08x\n",
pipe_name(i),
I915_READ(GEN8_DE_PIPE_IIR(i)));
pipe_name(pipe),
I915_READ(GEN8_DE_PIPE_IIR(pipe)));
seq_printf(m, "Pipe %c IER:\t%08x\n",
pipe_name(i),
I915_READ(GEN8_DE_PIPE_IER(i)));
pipe_name(pipe),
I915_READ(GEN8_DE_PIPE_IER(pipe)));
}
seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
......@@ -1348,6 +1347,8 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
return 0;
}
intel_runtime_pm_get(dev_priv);
if (intel_fbc_enabled(dev)) {
seq_puts(m, "FBC enabled\n");
} else {
......@@ -1391,6 +1392,9 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
}
seq_putc(m, '\n');
}
intel_runtime_pm_put(dev_priv);
return 0;
}
......@@ -1405,11 +1409,15 @@ static int i915_ips_status(struct seq_file *m, void *unused)
return 0;
}
intel_runtime_pm_get(dev_priv);
if (IS_BROADWELL(dev) || I915_READ(IPS_CTL) & IPS_ENABLE)
seq_puts(m, "enabled\n");
else
seq_puts(m, "disabled\n");
intel_runtime_pm_put(dev_priv);
return 0;
}
......@@ -1420,6 +1428,8 @@ static int i915_sr_status(struct seq_file *m, void *unused)
drm_i915_private_t *dev_priv = dev->dev_private;
bool sr_enabled = false;
intel_runtime_pm_get(dev_priv);
if (HAS_PCH_SPLIT(dev))
sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
......@@ -1429,6 +1439,8 @@ static int i915_sr_status(struct seq_file *m, void *unused)
else if (IS_PINEVIEW(dev))
sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
intel_runtime_pm_put(dev_priv);
seq_printf(m, "self-refresh: %s\n",
sr_enabled ? "enabled" : "disabled");
......@@ -1468,7 +1480,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
int ret = 0;
int gpu_freq, ia_freq;
if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
......@@ -1476,12 +1488,13 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
return 0;
}
intel_runtime_pm_get(dev_priv);
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
if (ret)
return ret;
intel_runtime_pm_get(dev_priv);
goto out;
seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
......@@ -1498,10 +1511,11 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
((ia_freq >> 8) & 0xff) * 100);
}
intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev_priv->rps.hw_lock);
return 0;
out:
intel_runtime_pm_put(dev_priv);
return ret;
}
static int i915_gfxec(struct seq_file *m, void *unused)
......@@ -1757,7 +1771,7 @@ static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
return;
seq_printf(m, "Page directories: %d\n", ppgtt->num_pd_pages);
seq_printf(m, "Page tables: %d\n", ppgtt->num_pt_pages);
seq_printf(m, "Page tables: %d\n", ppgtt->num_pd_entries);
for_each_ring(ring, dev_priv, unused) {
seq_printf(m, "%s\n", ring->name);
for (i = 0; i < 4; i++) {
......@@ -1972,12 +1986,16 @@ static int i915_energy_uJ(struct seq_file *m, void *data)
if (INTEL_INFO(dev)->gen < 6)
return -ENODEV;
intel_runtime_pm_get(dev_priv);
rdmsrl(MSR_RAPL_POWER_UNIT, power);
power = (power & 0x1f00) >> 8;
units = 1000000 / (1 << power); /* convert to uJ */
power = I915_READ(MCH_SECP_NRG_STTS);
power *= units;
intel_runtime_pm_put(dev_priv);
seq_printf(m, "%llu", (long long unsigned)power);
return 0;
......@@ -1997,7 +2015,7 @@ static int i915_pc8_status(struct seq_file *m, void *unused)
mutex_lock(&dev_priv->pc8.lock);
seq_printf(m, "Requirements met: %s\n",
yesno(dev_priv->pc8.requirements_met));
seq_printf(m, "GPU idle: %s\n", yesno(dev_priv->pc8.gpu_idle));
seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy));
seq_printf(m, "Disable count: %d\n", dev_priv->pc8.disable_count);
seq_printf(m, "IRQs disabled: %s\n",
yesno(dev_priv->pc8.irqs_disabled));
......@@ -2030,6 +2048,28 @@ static const char *power_domain_str(enum intel_display_power_domain domain)
return "TRANSCODER_C";
case POWER_DOMAIN_TRANSCODER_EDP:
return "TRANSCODER_EDP";
case POWER_DOMAIN_PORT_DDI_A_2_LANES:
return "PORT_DDI_A_2_LANES";
case POWER_DOMAIN_PORT_DDI_A_4_LANES:
return "PORT_DDI_A_4_LANES";
case POWER_DOMAIN_PORT_DDI_B_2_LANES:
return "PORT_DDI_B_2_LANES";
case POWER_DOMAIN_PORT_DDI_B_4_LANES:
return "PORT_DDI_B_4_LANES";
case POWER_DOMAIN_PORT_DDI_C_2_LANES:
return "PORT_DDI_C_2_LANES";
case POWER_DOMAIN_PORT_DDI_C_4_LANES:
return "PORT_DDI_C_4_LANES";
case POWER_DOMAIN_PORT_DDI_D_2_LANES:
return "PORT_DDI_D_2_LANES";
case POWER_DOMAIN_PORT_DDI_D_4_LANES:
return "PORT_DDI_D_4_LANES";
case POWER_DOMAIN_PORT_DSI:
return "PORT_DSI";
case POWER_DOMAIN_PORT_CRT:
return "PORT_CRT";
case POWER_DOMAIN_PORT_OTHER:
return "PORT_OTHER";
case POWER_DOMAIN_VGA:
return "VGA";
case POWER_DOMAIN_AUDIO:
......@@ -2180,6 +2220,7 @@ static void intel_connector_info(struct seq_file *m,
{
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_encoder *intel_encoder = intel_connector->encoder;
struct drm_display_mode *mode;
seq_printf(m, "connector %d: type %s, status: %s\n",
connector->base.id, drm_get_connector_name(connector),
......@@ -2202,6 +2243,9 @@ static void intel_connector_info(struct seq_file *m,
else if (intel_encoder->type == INTEL_OUTPUT_LVDS)
intel_lvds_info(m, intel_connector);
seq_printf(m, "\tmodes:\n");
list_for_each_entry(mode, &connector->modes, head)
intel_seq_print_mode(m, 2, mode);
}
static int i915_display_info(struct seq_file *m, void *unused)
......@@ -3167,9 +3211,8 @@ i915_wedged_set(void *data, u64 val)
{
struct drm_device *dev = data;
DRM_INFO("Manually setting wedged to %llu\n", val);
i915_handle_error(dev, val);
i915_handle_error(dev, val,
"Manually setting wedged to %llu", val);
return 0;
}
......
......@@ -1321,12 +1321,12 @@ static int i915_load_modeset_init(struct drm_device *dev)
if (ret)
goto cleanup_vga_switcheroo;
intel_power_domains_init_hw(dev_priv);
ret = drm_irq_install(dev);
if (ret)
goto cleanup_gem_stolen;
intel_power_domains_init_hw(dev);
/* Important: The output setup functions called by modeset_init need
* working irqs for e.g. gmbus and dp aux transfers. */
intel_modeset_init(dev);
......@@ -1343,7 +1343,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
/* FIXME: do pre/post-mode set stuff in core KMS code */
dev->vblank_disable_allowed = true;
if (INTEL_INFO(dev)->num_pipes == 0) {
intel_display_power_put(dev, POWER_DOMAIN_VGA);
intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
return 0;
}
......@@ -1381,7 +1381,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
WARN_ON(dev_priv->mm.aliasing_ppgtt);
drm_mm_takedown(&dev_priv->gtt.base.mm);
cleanup_power:
intel_display_power_put(dev, POWER_DOMAIN_VGA);
intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
drm_irq_uninstall(dev);
cleanup_gem_stolen:
i915_gem_cleanup_stolen(dev);
......@@ -1480,12 +1480,16 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_device_info *info;
enum pipe pipe;
info = (struct intel_device_info *)&dev_priv->info;
info->num_sprites = 1;
if (IS_VALLEYVIEW(dev))
info->num_sprites = 2;
for_each_pipe(pipe)
info->num_sprites[pipe] = 2;
else
for_each_pipe(pipe)
info->num_sprites[pipe] = 1;
if (i915.disable_display) {
DRM_INFO("Display disabled (module parameter)\n");
......@@ -1702,7 +1706,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto out_gem_unload;
}
intel_power_domains_init(dev);
intel_power_domains_init(dev_priv);
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
ret = i915_load_modeset_init(dev);
......@@ -1731,7 +1735,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
return 0;
out_power_well:
intel_power_domains_remove(dev);
intel_power_domains_remove(dev_priv);
drm_vblank_cleanup(dev);
out_gem_unload:
if (dev_priv->mm.inactive_shrinker.scan_objects)
......@@ -1781,8 +1785,8 @@ int i915_driver_unload(struct drm_device *dev)
/* The i915.ko module is still not prepared to be loaded when
* the power well is not enabled, so just enable it in case
* we're going to unload/reload. */
intel_display_set_init_power(dev, true);
intel_power_domains_remove(dev);
intel_display_set_init_power(dev_priv, true);
intel_power_domains_remove(dev_priv);
i915_teardown_sysfs(dev);
......
......@@ -265,6 +265,7 @@ static const struct intel_device_info intel_broadwell_d_info = {
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
.has_llc = 1,
.has_ddi = 1,
.has_fbc = 1,
GEN_DEFAULT_PIPEOFFSETS,
};
......@@ -274,6 +275,7 @@ static const struct intel_device_info intel_broadwell_m_info = {
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
.has_llc = 1,
.has_ddi = 1,
.has_fbc = 1,
GEN_DEFAULT_PIPEOFFSETS,
};
......@@ -401,15 +403,13 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
if (INTEL_INFO(dev)->gen < 6)
return false;
/* Until we get further testing... */
if (IS_GEN8(dev)) {
WARN_ON(!i915.preliminary_hw_support);
return false;
}
if (i915.semaphores >= 0)
return i915.semaphores;
/* Until we get further testing... */
if (IS_GEN8(dev))
return false;
#ifdef CONFIG_INTEL_IOMMU
/* Enable semaphores on SNB when IO remapping is off */
if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
......@@ -434,7 +434,7 @@ static int i915_drm_freeze(struct drm_device *dev)
/* We do a lot of poking in a lot of registers, make sure they work
* properly. */
hsw_disable_package_c8(dev_priv);
intel_display_set_init_power(dev, true);
intel_display_set_init_power(dev_priv, true);
drm_kms_helper_poll_disable(dev);
......@@ -477,6 +477,8 @@ static int i915_drm_freeze(struct drm_device *dev)
intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED);
console_unlock();
dev_priv->suspend_count++;
return 0;
}
......@@ -556,7 +558,7 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
mutex_unlock(&dev->struct_mutex);
}
intel_power_domains_init_hw(dev);
intel_power_domains_init_hw(dev_priv);
i915_restore_state(dev);
intel_opregion_setup(dev);
......@@ -847,6 +849,7 @@ static int i915_runtime_suspend(struct device *device)
struct drm_i915_private *dev_priv = dev->dev_private;
WARN_ON(!HAS_RUNTIME_PM(dev));
assert_force_wake_inactive(dev_priv);
DRM_DEBUG_KMS("Suspending device\n");
......
......@@ -79,7 +79,7 @@ enum plane {
};
#define plane_name(p) ((p) + 'A')
#define sprite_name(p, s) ((p) * INTEL_INFO(dev)->num_sprites + (s) + 'A')
#define sprite_name(p, s) ((p) * INTEL_INFO(dev)->num_sprites[(p)] + (s) + 'A')
enum port {
PORT_A = 0,
......@@ -114,6 +114,17 @@ enum intel_display_power_domain {
POWER_DOMAIN_TRANSCODER_B,
POWER_DOMAIN_TRANSCODER_C,
POWER_DOMAIN_TRANSCODER_EDP,
POWER_DOMAIN_PORT_DDI_A_2_LANES,
POWER_DOMAIN_PORT_DDI_A_4_LANES,
POWER_DOMAIN_PORT_DDI_B_2_LANES,
POWER_DOMAIN_PORT_DDI_B_4_LANES,
POWER_DOMAIN_PORT_DDI_C_2_LANES,
POWER_DOMAIN_PORT_DDI_C_4_LANES,
POWER_DOMAIN_PORT_DDI_D_2_LANES,
POWER_DOMAIN_PORT_DDI_D_4_LANES,
POWER_DOMAIN_PORT_DSI,
POWER_DOMAIN_PORT_CRT,
POWER_DOMAIN_PORT_OTHER,
POWER_DOMAIN_VGA,
POWER_DOMAIN_AUDIO,
POWER_DOMAIN_INIT,
......@@ -121,8 +132,6 @@ enum intel_display_power_domain {
POWER_DOMAIN_NUM,
};
#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
......@@ -130,14 +139,6 @@ enum intel_display_power_domain {
((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
(tran) + POWER_DOMAIN_TRANSCODER_A)
#define HSW_ALWAYS_ON_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PIPE_A) | \
BIT(POWER_DOMAIN_TRANSCODER_EDP))
#define BDW_ALWAYS_ON_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PIPE_A) | \
BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
enum hpd_pin {
HPD_NONE = 0,
HPD_PORT_A = HPD_NONE, /* PORT_A is internal */
......@@ -159,6 +160,7 @@ enum hpd_pin {
I915_GEM_DOMAIN_VERTEX)
#define for_each_pipe(p) for ((p) = 0; (p) < INTEL_INFO(dev)->num_pipes; (p)++)
#define for_each_sprite(p, s) for ((s) = 0; (s) < INTEL_INFO(dev)->num_sprites[(p)]; (s)++)
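/*
 * Sketch of the intended iteration pattern for the new per-pipe sprite
 * counts (illustrative only; assumes dev and a hypothetical setup_sprite()
 * in scope):
 *
 *	for_each_pipe(pipe)
 *		for_each_sprite(pipe, sprite)
 *			setup_sprite(dev_priv, pipe, sprite);
 */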
#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
......@@ -303,6 +305,10 @@ struct drm_i915_error_state {
struct kref ref;
struct timeval time;
char error_msg[128];
u32 reset_count;
u32 suspend_count;
/* Generic register state */
u32 eir;
u32 pgtbl_er;
......@@ -360,7 +366,7 @@ struct drm_i915_error_state {
int page_count;
u32 gtt_offset;
u32 *pages[0];
} *ringbuffer, *batchbuffer, *ctx, *hws_page;
} *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;
struct drm_i915_error_request {
long jiffies;
......@@ -375,6 +381,9 @@ struct drm_i915_error_state {
u32 pp_dir_base;
};
} vm_info;
pid_t pid;
char comm[TASK_COMM_LEN];
} ring[I915_NUM_RINGS];
struct drm_i915_error_buffer {
u32 size;
......@@ -499,7 +508,7 @@ struct intel_uncore {
unsigned fw_rendercount;
unsigned fw_mediacount;
struct delayed_work force_wake_work;
struct timer_list force_wake_timer;
};
#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
......@@ -534,7 +543,7 @@ struct intel_uncore {
struct intel_device_info {
u32 display_mmio_offset;
u8 num_pipes:3;
u8 num_sprites:2;
u8 num_sprites[I915_MAX_PIPES];
u8 gen;
u8 ring_mask; /* Rings supported by the HW */
DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
......@@ -652,12 +661,12 @@ struct i915_address_space {
enum i915_cache_level level,
bool valid); /* Create a valid PTE */
void (*clear_range)(struct i915_address_space *vm,
unsigned int first_entry,
unsigned int num_entries,
uint64_t start,
uint64_t length,
bool use_scratch);
void (*insert_entries)(struct i915_address_space *vm,
struct sg_table *st,
unsigned int first_entry,
uint64_t start,
enum i915_cache_level cache_level);
void (*cleanup)(struct i915_address_space *vm);
};
......@@ -691,21 +700,21 @@ struct i915_gtt {
};
#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)
#define GEN8_LEGACY_PDPS 4
struct i915_hw_ppgtt {
struct i915_address_space base;
struct kref ref;
struct drm_mm_node node;
unsigned num_pd_entries;
unsigned num_pd_pages; /* gen8+ */
union {
struct page **pt_pages;
struct page *gen8_pt_pages;
struct page **gen8_pt_pages[GEN8_LEGACY_PDPS];
};
struct page *pd_pages;
int num_pd_pages;
int num_pt_pages;
union {
uint32_t pd_offset;
dma_addr_t pd_dma_addr[4];
dma_addr_t pd_dma_addr[GEN8_LEGACY_PDPS];
};
union {
dma_addr_t *pt_dma_addr;
......@@ -1016,6 +1025,36 @@ struct intel_ilk_power_mgmt {
struct drm_i915_gem_object *renderctx;
};
struct drm_i915_private;
struct i915_power_well;
struct i915_power_well_ops {
/*
* Synchronize the well's hw state to match the current sw state, for
* example enable/disable it based on the current refcount. Called
* during driver init and resume time, possibly after first calling
* the enable/disable handlers.
*/
void (*sync_hw)(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well);
/*
* Enable the well and resources that depend on it (for example
* interrupts located on the well). Called after the 0->1 refcount
* transition.
*/
void (*enable)(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well);
/*
* Disable the well and resources that depend on it. Called after
* the 1->0 refcount transition.
*/
void (*disable)(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well);
/* Returns the hw enabled state. */
bool (*is_enabled)(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well);
};
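/*
 * Minimal sketch of an ops implementation (hypothetical names; the real
 * equivalents are added by the "add noop power well handlers" patch in the
 * shortlog above): an always-on well plugs in no-op handlers plus a
 * trivial is_enabled.
 */
static void example_power_well_noop(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
}

static bool example_always_on_enabled(struct drm_i915_private *dev_priv,
				      struct i915_power_well *power_well)
{
	return true;
}

static const struct i915_power_well_ops example_always_on_ops = {
	.sync_hw = example_power_well_noop,
	.enable = example_power_well_noop,
	.disable = example_power_well_noop,
	.is_enabled = example_always_on_enabled,
};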
/* Power well structure for haswell */
struct i915_power_well {
const char *name;
......@@ -1023,11 +1062,8 @@ struct i915_power_well {
/* power well enable/disable usage count */
int count;
unsigned long domains;
void *data;
void (*set)(struct drm_device *dev, struct i915_power_well *power_well,
bool enable);
bool (*is_enabled)(struct drm_device *dev,
struct i915_power_well *power_well);
unsigned long data;
const struct i915_power_well_ops *ops;
};
struct i915_power_domains {
......@@ -1124,6 +1160,14 @@ struct i915_gem_mm {
*/
bool interruptible;
/**
* Is the GPU currently considered idle, or busy executing userspace
* requests? Whilst idle, we attempt to power down the hardware and
* display clocks. In order to reduce the effect on performance, there
* is a slight delay before we do so.
*/
bool busy;
/** Bit 6 swizzling required for X tiling */
uint32_t bit_6_swizzle_x;
/** Bit 6 swizzling required for Y tiling */
......@@ -1313,11 +1357,10 @@ struct ilk_wm_values {
* Ideally every piece of our code that needs PC8+ disabled would call
* hsw_disable_package_c8, which would increment disable_count and prevent the
* system from reaching PC8+. But we don't have a symmetric way to do this for
* everything, so we have the requirements_met and gpu_idle variables. When we
* switch requirements_met or gpu_idle to true we decrease disable_count, and
* increase it in the opposite case. The requirements_met variable is true when
* all the CRTCs, encoders and the power well are disabled. The gpu_idle
* variable is true when the GPU is idle.
* everything, so we have the requirements_met variable. When we switch
* requirements_met to true we decrease disable_count, and increase it in the
* opposite case. The requirements_met variable is true when all the CRTCs,
* encoders and the power well are disabled.
*
* In addition to everything, we only actually enable PC8+ if disable_count
* stays at zero for at least some seconds. This is implemented with the
......@@ -1340,7 +1383,6 @@ struct ilk_wm_values {
*/
struct i915_package_c8 {
bool requirements_met;
bool gpu_idle;
bool irqs_disabled;
/* Only true after the delayed work task actually enables it. */
bool enabled;
......@@ -1427,6 +1469,8 @@ typedef struct drm_i915_private {
/* protects the irq masks */
spinlock_t irq_lock;
bool display_irqs_enabled;
/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
struct pm_qos_request pm_qos;
......@@ -1594,6 +1638,8 @@ typedef struct drm_i915_private {
struct i915_dri1_state dri1;
/* Old ums support infrastructure, same warning applies. */
struct i915_ums_state ums;
u32 suspend_count;
} drm_i915_private_t;
static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
......@@ -1745,7 +1791,6 @@ struct drm_i915_gem_object {
/** for phy allocated objects */
struct drm_i915_gem_phys_object *phys_obj;
};
#define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base)
#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
......@@ -1791,6 +1836,7 @@ struct drm_i915_gem_request {
struct drm_i915_file_private {
struct drm_i915_private *dev_priv;
struct drm_file *file;
struct {
spinlock_t lock;
......@@ -1803,6 +1849,90 @@ struct drm_i915_file_private {
atomic_t rps_wait_boost;
};
/*
* A command that requires special handling by the command parser.
*/
struct drm_i915_cmd_descriptor {
/*
* Flags describing how the command parser processes the command.
*
* CMD_DESC_FIXED: The command has a fixed length if this is set,
* a length mask if not set
* CMD_DESC_SKIP: The command is allowed but does not follow the
* standard length encoding for the opcode range in
* which it falls
* CMD_DESC_REJECT: The command is never allowed
* CMD_DESC_REGISTER: The command should be checked against the
* register whitelist for the appropriate ring
* CMD_DESC_MASTER: The command is allowed if the submitting process
* is the DRM master
*/
u32 flags;
#define CMD_DESC_FIXED (1<<0)
#define CMD_DESC_SKIP (1<<1)
#define CMD_DESC_REJECT (1<<2)
#define CMD_DESC_REGISTER (1<<3)
#define CMD_DESC_BITMASK (1<<4)
#define CMD_DESC_MASTER (1<<5)
/*
* The command's unique identification bits and the bitmask to get them.
* This isn't strictly the opcode field as defined in the spec and may
* also include type, subtype, and/or subop fields.
*/
struct {
u32 value;
u32 mask;
} cmd;
/*
* The command's length. The command is either fixed length (i.e. does
* not include a length field) or has a length field mask. The flag
* CMD_DESC_FIXED indicates a fixed length. Otherwise, the command has
* a length mask. All command entries in a command table must include
* length information.
*/
union {
u32 fixed;
u32 mask;
} length;
/*
* Describes where to find a register address in the command to check
* against the ring's register whitelist. Only valid if flags has the
* CMD_DESC_REGISTER bit set.
*/
struct {
u32 offset;
u32 mask;
} reg;
#define MAX_CMD_DESC_BITMASKS 3
/*
* Describes command checks where a particular dword is masked and
* compared against an expected value. If the command does not match
* the expected value, the parser rejects it. Only valid if flags has
* the CMD_DESC_BITMASK bit set. Only entries where mask is non-zero
* are valid.
*/
struct {
u32 offset;
u32 mask;
u32 expected;
} bits[MAX_CMD_DESC_BITMASKS];
};
/*
* A table of commands requiring special handling by the command parser.
*
* Each ring has an array of tables. Each table consists of an array of command
* descriptors, which must be sorted with command opcodes in ascending order.
*/
struct drm_i915_cmd_table {
const struct drm_i915_cmd_descriptor *table;
int count;
};
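/*
 * Hypothetical example (not from this patch) of how a descriptor and a
 * table fit together; the real tables live in i915_cmd_parser.c and must
 * be sorted by opcode as noted above:
 */
static const struct drm_i915_cmd_descriptor example_mi_cmds[] = {
	{
		.flags = CMD_DESC_REJECT,	/* never allowed */
		.cmd = { .value = 0x14 << 23,	/* hypothetical MI opcode */
			 .mask = 0x3f << 23 },	/* match only the opcode field */
		.length = { .mask = 0x3f },	/* standard MI length mask */
	},
};

static const struct drm_i915_cmd_table example_cmd_table = {
	.table = example_mi_cmds,
	.count = ARRAY_SIZE(example_mi_cmds),
};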
#define INTEL_INFO(dev) (&to_i915(dev)->info)
#define IS_I830(dev) ((dev)->pdev->device == 0x3577)
......@@ -1965,6 +2095,7 @@ struct i915_params {
int enable_pc8;
int pc8_timeout;
int invert_brightness;
int enable_cmd_parser;
/* leave bools at the end to not create holes */
bool enable_hangcheck;
bool fastboot;
......@@ -2004,7 +2135,9 @@ extern void intel_console_resume(struct work_struct *work);
/* i915_irq.c */
void i915_queue_hangcheck(struct drm_device *dev);
void i915_handle_error(struct drm_device *dev, bool wedged);
__printf(3, 4)
void i915_handle_error(struct drm_device *dev, bool wedged,
const char *fmt, ...);
void gen6_set_pm_mask(struct drm_i915_private *dev_priv, u32 pm_iir,
int new_delay);
......@@ -2025,6 +2158,9 @@ void
i915_disable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe,
u32 status_mask);
void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv);
void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv);
/* i915_gem.c */
int i915_gem_init_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
......@@ -2097,6 +2233,9 @@ void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
int *needs_clflush);
int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
{
......@@ -2163,8 +2302,10 @@ i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
}
}
struct drm_i915_gem_request *
i915_gem_find_active_request(struct intel_ring_buffer *ring);
bool i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
bool interruptible);
static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
......@@ -2365,63 +2506,7 @@ static inline void i915_gem_chipset_flush(struct drm_device *dev)
intel_gtt_chipset_flush();
}
int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
static inline bool intel_enable_ppgtt(struct drm_device *dev, bool full)
{
if (i915.enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev))
return false;
if (i915.enable_ppgtt == 1 && full)
return false;
#ifdef CONFIG_INTEL_IOMMU
/* Disable ppgtt on SNB if VT-d is on. */
if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) {
DRM_INFO("Disabling PPGTT because VT-d is on\n");
return false;
}
#endif
if (full)
return HAS_PPGTT(dev);
else
return HAS_ALIASING_PPGTT(dev);
}
static inline void ppgtt_release(struct kref *kref)
{
struct i915_hw_ppgtt *ppgtt = container_of(kref, struct i915_hw_ppgtt, ref);
struct drm_device *dev = ppgtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_address_space *vm = &ppgtt->base;
if (ppgtt == dev_priv->mm.aliasing_ppgtt ||
(list_empty(&vm->active_list) && list_empty(&vm->inactive_list))) {
ppgtt->base.cleanup(&ppgtt->base);
return;
}
/*
* Make sure vmas are unbound before we take down the drm_mm
*
* FIXME: Proper refcounting should take care of this, this shouldn't be
* needed at all.
*/
if (!list_empty(&vm->active_list)) {
struct i915_vma *vma;
list_for_each_entry(vma, &vm->active_list, mm_list)
if (WARN_ON(list_empty(&vma->vma_link) ||
list_is_singular(&vma->vma_link)))
break;
i915_gem_evict_vm(&ppgtt->base, true);
} else {
i915_gem_retire_requests(dev);
i915_gem_evict_vm(&ppgtt->base, false);
}
ppgtt->base.cleanup(&ppgtt->base);
}
bool intel_enable_ppgtt(struct drm_device *dev, bool full);
/* i915_gem_stolen.c */
int i915_gem_init_stolen(struct drm_device *dev);
......@@ -2478,7 +2563,8 @@ static inline void i915_error_state_buf_release(
{
kfree(eb->buf);
}
void i915_capture_error_state(struct drm_device *dev);
void i915_capture_error_state(struct drm_device *dev, bool wedge,
const char *error_msg);
void i915_error_state_get(struct drm_device *dev,
struct i915_error_state_file_priv *error_priv);
void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
......@@ -2487,6 +2573,14 @@ void i915_destroy_error_state(struct drm_device *dev);
void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
const char *i915_cache_level_str(int type);
/* i915_cmd_parser.c */
void i915_cmd_parser_init_ring(struct intel_ring_buffer *ring);
bool i915_needs_cmd_parser(struct intel_ring_buffer *ring);
int i915_parse_cmds(struct intel_ring_buffer *ring,
struct drm_i915_gem_object *batch_obj,
u32 batch_start_offset,
bool is_master);
/* i915_suspend.c */
extern int i915_save_state(struct drm_device *dev);
extern int i915_restore_state(struct drm_device *dev);
......@@ -2565,6 +2659,7 @@ extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
extern void intel_modeset_setup_hw_state(struct drm_device *dev,
bool force_restore);
extern void i915_redisable_vga(struct drm_device *dev);
extern void i915_redisable_vga_power_on(struct drm_device *dev);
extern bool intel_fbc_enabled(struct drm_device *dev);
extern void intel_disable_fbc(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
......@@ -2599,6 +2694,7 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
*/
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine);
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine);
void assert_force_wake_inactive(struct drm_i915_private *dev_priv);
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
......
......@@ -61,6 +61,7 @@ static unsigned long i915_gem_inactive_scan(struct shrinker *shrinker,
static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
static void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
static bool cpu_cache_is_coherent(struct drm_device *dev,
enum i915_cache_level level)
......@@ -326,6 +327,42 @@ __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
return 0;
}
/*
* Pins the specified object's pages and synchronizes the object with
* GPU accesses. Sets needs_clflush to non-zero if the caller should
* flush the object from the CPU cache.
*/
int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
int *needs_clflush)
{
int ret;
*needs_clflush = 0;
if (!obj->base.filp)
return -EINVAL;
if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
/* If we're not in the cpu read domain, set ourself into the gtt
* read domain and manually flush cachelines (if required). This
* optimizes for the case when the gpu will dirty the data
* anyway again before the next pread happens. */
*needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
obj->cache_level);
ret = i915_gem_object_wait_rendering(obj, true);
if (ret)
return ret;
}
ret = i915_gem_object_get_pages(obj);
if (ret)
return ret;
i915_gem_object_pin_pages(obj);
return ret;
}
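/*
 * Usage sketch (mirrors i915_parse_cmds() and the pread path below): every
 * successful call pins the pages and must be balanced with
 * i915_gem_object_unpin_pages():
 *
 *	int needs_clflush = 0;
 *
 *	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
 *	if (ret == 0) {
 *		... read the pages, clflushing first if needs_clflush ...
 *		i915_gem_object_unpin_pages(obj);
 *	}
 */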
/* Per-page copy function for the shmem pread fastpath.
* Flushes invalid cachelines before reading the target if
* needs_clflush is set. */
......@@ -423,23 +460,10 @@ i915_gem_shmem_pread(struct drm_device *dev,
obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
/* If we're not in the cpu read domain, set ourself into the gtt
* read domain and manually flush cachelines (if required). This
* optimizes for the case when the gpu will dirty the data
* anyway again before the next pread happens. */
needs_clflush = !cpu_cache_is_coherent(dev, obj->cache_level);
ret = i915_gem_object_wait_rendering(obj, true);
if (ret)
return ret;
}
ret = i915_gem_object_get_pages(obj);
ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
if (ret)
return ret;
i915_gem_object_pin_pages(obj);
offset = args->offset;
for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
......@@ -2148,7 +2172,6 @@ int __i915_add_request(struct intel_ring_buffer *ring,
drm_i915_private_t *dev_priv = ring->dev->dev_private;
struct drm_i915_gem_request *request;
u32 request_ring_position, request_start;
int was_empty;
int ret;
request_start = intel_ring_get_tail(ring);
......@@ -2199,7 +2222,6 @@ int __i915_add_request(struct intel_ring_buffer *ring,
i915_gem_context_reference(request->ctx);
request->emitted_jiffies = jiffies;
was_empty = list_empty(&ring->request_list);
list_add_tail(&request->list, &ring->request_list);
request->file_priv = NULL;
......@@ -2220,13 +2242,11 @@ int __i915_add_request(struct intel_ring_buffer *ring,
if (!dev_priv->ums.mm_suspended) {
i915_queue_hangcheck(ring->dev);
if (was_empty) {
cancel_delayed_work_sync(&dev_priv->mm.idle_work);
queue_delayed_work(dev_priv->wq,
&dev_priv->mm.retire_work,
round_jiffies_up_relative(HZ));
intel_mark_busy(dev_priv->dev);
}
cancel_delayed_work_sync(&dev_priv->mm.idle_work);
queue_delayed_work(dev_priv->wq,
&dev_priv->mm.retire_work,
round_jiffies_up_relative(HZ));
intel_mark_busy(dev_priv->dev);
}
if (out_seqno)
......@@ -2259,14 +2279,13 @@ static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
return true;
if (elapsed <= DRM_I915_CTX_BAN_PERIOD) {
if (dev_priv->gpu_error.stop_rings == 0 &&
i915_gem_context_is_default(ctx)) {
DRM_ERROR("gpu hanging too fast, banning!\n");
} else {
if (!i915_gem_context_is_default(ctx)) {
DRM_DEBUG("context hanging too fast, banning!\n");
return true;
} else if (dev_priv->gpu_error.stop_rings == 0) {
DRM_ERROR("gpu hanging too fast, banning!\n");
return true;
}
return true;
}
return false;
......@@ -2303,11 +2322,13 @@ static void i915_gem_free_request(struct drm_i915_gem_request *request)
kfree(request);
}
static struct drm_i915_gem_request *
i915_gem_find_first_non_complete(struct intel_ring_buffer *ring)
struct drm_i915_gem_request *
i915_gem_find_active_request(struct intel_ring_buffer *ring)
{
struct drm_i915_gem_request *request;
const u32 completed_seqno = ring->get_seqno(ring, false);
u32 completed_seqno;
completed_seqno = ring->get_seqno(ring, false);
list_for_each_entry(request, &ring->request_list, list) {
if (i915_seqno_passed(completed_seqno, request->seqno))
......@@ -2325,7 +2346,7 @@ static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
struct drm_i915_gem_request *request;
bool ring_hung;
request = i915_gem_find_first_non_complete(ring);
request = i915_gem_find_active_request(ring);
if (request == NULL)
return;
......@@ -2417,7 +2438,7 @@ void i915_gem_reset(struct drm_device *dev)
/**
* This function clears the request list as sequence numbers are passed.
*/
void
static void
i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
{
uint32_t seqno;
......@@ -2744,7 +2765,7 @@ int i915_vma_unbind(struct i915_vma *vma)
i915_gem_gtt_finish_object(obj);
list_del(&vma->mm_list);
list_del_init(&vma->mm_list);
/* Avoid an unnecessary call to unbind on rebind. */
if (i915_is_ggtt(vma->vm))
obj->map_and_fenceable = true;
......@@ -4860,6 +4881,7 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file)
file->driver_priv = file_priv;
file_priv->dev_priv = dev->dev_private;
file_priv->file = file;
spin_lock_init(&file_priv->mm.lock);
INIT_LIST_HEAD(&file_priv->mm.request_list);
......
......@@ -99,6 +99,50 @@
static int do_switch(struct intel_ring_buffer *ring,
struct i915_hw_context *to);
static void do_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
{
struct drm_device *dev = ppgtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_address_space *vm = &ppgtt->base;
if (ppgtt == dev_priv->mm.aliasing_ppgtt ||
(list_empty(&vm->active_list) && list_empty(&vm->inactive_list))) {
ppgtt->base.cleanup(&ppgtt->base);
return;
}
/*
* Make sure vmas are unbound before we take down the drm_mm
*
* FIXME: Proper refcounting should take care of this, this shouldn't be
* needed at all.
*/
if (!list_empty(&vm->active_list)) {
struct i915_vma *vma;
list_for_each_entry(vma, &vm->active_list, mm_list)
if (WARN_ON(list_empty(&vma->vma_link) ||
list_is_singular(&vma->vma_link)))
break;
i915_gem_evict_vm(&ppgtt->base, true);
} else {
i915_gem_retire_requests(dev);
i915_gem_evict_vm(&ppgtt->base, false);
}
ppgtt->base.cleanup(&ppgtt->base);
}
static void ppgtt_release(struct kref *kref)
{
struct i915_hw_ppgtt *ppgtt =
container_of(kref, struct i915_hw_ppgtt, ref);
do_ppgtt_cleanup(ppgtt);
kfree(ppgtt);
}
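/*
 * Calling-convention sketch: references are managed with the standard kref
 * helpers, and kref_put() invokes ppgtt_release() once the last reference
 * is dropped:
 *
 *	kref_get(&ppgtt->ref);
 *	...
 *	kref_put(&ppgtt->ref, ppgtt_release);
 */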
static size_t get_context_alignment(struct drm_device *dev)
{
if (IS_GEN6(dev))
......@@ -714,7 +758,7 @@ static int do_switch(struct intel_ring_buffer *ring,
* i915_switch_context() - perform a GPU context switch.
* @ring: ring for which we'll execute the context switch
* @file_priv: file_priv associated with the context, may be NULL
* @id: context id number
* @to: the context to switch to
*
* The context life cycle is simple. The context refcount is incremented and
* decremented by 1 and create and destroy. If the context is in use by the GPU,
......@@ -746,9 +790,6 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
struct i915_hw_context *ctx;
int ret;
if (!(dev->driver->driver_features & DRIVER_GEM))
return -ENODEV;
if (!HAS_HW_CONTEXTS(dev))
return -ENODEV;
......@@ -775,9 +816,6 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
struct i915_hw_context *ctx;
int ret;
if (!(dev->driver->driver_features & DRIVER_GEM))
return -ENODEV;
if (args->ctx_id == DEFAULT_CONTEXT_ID)
return -ENOENT;
......
......@@ -1182,6 +1182,24 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
if (i915_needs_cmd_parser(ring)) {
ret = i915_parse_cmds(ring,
batch_obj,
args->batch_start_offset,
file->is_master);
if (ret)
goto err;
/*
* XXX: Actually do this when enabling batch copy...
*
* Set the DISPATCH_SECURE bit to remove the NON_SECURE bit
* from MI_BATCH_BUFFER_START commands issued in the
* dispatch_execbuffer implementations. We specifically don't
* want that set when the command parser is enabled.
*/
}
/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
* batch" bit. Hence we need to pin secure batches into the global gtt.
* hsw should have this fixed, but bdw mucks it up again. */
......
This diff is collapsed.
......@@ -304,22 +304,54 @@ void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
va_end(args);
}
static void print_error_obj(struct drm_i915_error_state_buf *m,
struct drm_i915_error_object *obj)
{
int page, offset, elt;
for (page = offset = 0; page < obj->page_count; page++) {
for (elt = 0; elt < PAGE_SIZE/4; elt++) {
err_printf(m, "%08x : %08x\n", offset,
obj->pages[page][elt]);
offset += 4;
}
}
}
int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
const struct i915_error_state_file_priv *error_priv)
{
struct drm_device *dev = error_priv->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_error_state *error = error_priv->error;
int i, j, page, offset, elt;
int i, j, offset, elt;
int max_hangcheck_score;
if (!error) {
err_printf(m, "no error state collected\n");
goto out;
}
err_printf(m, "%s\n", error->error_msg);
err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
error->time.tv_usec);
err_printf(m, "Kernel: " UTS_RELEASE "\n");
max_hangcheck_score = 0;
for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
if (error->ring[i].hangcheck_score > max_hangcheck_score)
max_hangcheck_score = error->ring[i].hangcheck_score;
}
for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
if (error->ring[i].hangcheck_score == max_hangcheck_score &&
error->ring[i].pid != -1) {
err_printf(m, "Active process (on ring %s): %s [%d]\n",
ring_str(i),
error->ring[i].comm,
error->ring[i].pid);
}
}
err_printf(m, "Reset count: %u\n", error->reset_count);
err_printf(m, "Suspend count: %u\n", error->suspend_count);
err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
err_printf(m, "EIR: 0x%08x\n", error->eir);
err_printf(m, "IER: 0x%08x\n", error->ier);
......@@ -362,18 +394,23 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
struct drm_i915_error_object *obj;
if ((obj = error->ring[i].batchbuffer)) {
err_printf(m, "%s --- gtt_offset = 0x%08x\n",
dev_priv->ring[i].name,
obj = error->ring[i].batchbuffer;
if (obj) {
err_puts(m, dev_priv->ring[i].name);
if (error->ring[i].pid != -1)
err_printf(m, " (submitted by %s [%d])",
error->ring[i].comm,
error->ring[i].pid);
err_printf(m, " --- gtt_offset = 0x%08x\n",
obj->gtt_offset);
offset = 0;
for (page = 0; page < obj->page_count; page++) {
for (elt = 0; elt < PAGE_SIZE/4; elt++) {
err_printf(m, "%08x : %08x\n", offset,
obj->pages[page][elt]);
offset += 4;
}
}
print_error_obj(m, obj);
}
obj = error->ring[i].wa_batchbuffer;
if (obj) {
err_printf(m, "%s (w/a) --- gtt_offset = 0x%08x\n",
dev_priv->ring[i].name, obj->gtt_offset);
print_error_obj(m, obj);
}
if (error->ring[i].num_requests) {
......@@ -392,15 +429,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
err_printf(m, "%s --- ringbuffer = 0x%08x\n",
dev_priv->ring[i].name,
obj->gtt_offset);
offset = 0;
for (page = 0; page < obj->page_count; page++) {
for (elt = 0; elt < PAGE_SIZE/4; elt++) {
err_printf(m, "%08x : %08x\n",
offset,
obj->pages[page][elt]);
offset += 4;
}
}
print_error_obj(m, obj);
}
if ((obj = error->ring[i].hws_page)) {
......@@ -666,7 +695,8 @@ static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
* It's only a small step better than a random number in its current form.
*/
static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error)
struct drm_i915_error_state *error,
int *ring_id)
{
uint32_t error_code = 0;
int i;
......@@ -676,9 +706,14 @@ static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
* synchronization commands which almost always appear in cases that are
* strictly a client bug. Use instdone to differentiate some of those.
*/
for (i = 0; i < I915_NUM_RINGS; i++)
if (error->ring[i].hangcheck_action == HANGCHECK_HUNG)
for (i = 0; i < I915_NUM_RINGS; i++) {
if (error->ring[i].hangcheck_action == HANGCHECK_HUNG) {
if (ring_id)
*ring_id = i;
return error->ring[i].ipehr ^ error->ring[i].instdone;
}
}
return error_code;
}
......@@ -716,87 +751,6 @@ static void i915_gem_record_fences(struct drm_device *dev,
}
}
/* This assumes all batchbuffers are executed from the PPGTT. It might have to
* change in the future. */
static bool is_active_vm(struct i915_address_space *vm,
struct intel_ring_buffer *ring)
{
struct drm_device *dev = vm->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_hw_ppgtt *ppgtt;
if (INTEL_INFO(dev)->gen < 7)
return i915_is_ggtt(vm);
/* FIXME: This ignores that the global gtt vm is also on this list. */
ppgtt = container_of(vm, struct i915_hw_ppgtt, base);
if (INTEL_INFO(dev)->gen >= 8) {
u64 pdp0 = (u64)I915_READ(GEN8_RING_PDP_UDW(ring, 0)) << 32;
pdp0 |= I915_READ(GEN8_RING_PDP_LDW(ring, 0));
return pdp0 == ppgtt->pd_dma_addr[0];
} else {
u32 pp_db;
pp_db = I915_READ(RING_PP_DIR_BASE(ring));
return (pp_db >> 10) == ppgtt->pd_offset;
}
}
static struct drm_i915_error_object *
i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
struct intel_ring_buffer *ring)
{
struct i915_address_space *vm;
struct i915_vma *vma;
struct drm_i915_gem_object *obj;
bool found_active = false;
u32 seqno;
if (!ring->get_seqno)
return NULL;
if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
u32 acthd = I915_READ(ACTHD);
if (WARN_ON(ring->id != RCS))
return NULL;
obj = ring->scratch.obj;
if (obj != NULL &&
acthd >= i915_gem_obj_ggtt_offset(obj) &&
acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size)
return i915_error_ggtt_object_create(dev_priv, obj);
}
seqno = ring->get_seqno(ring, false);
list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
if (!is_active_vm(vm, ring))
continue;
found_active = true;
list_for_each_entry(vma, &vm->active_list, mm_list) {
obj = vma->obj;
if (obj->ring != ring)
continue;
if (i915_seqno_passed(seqno, obj->last_read_seqno))
continue;
if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
continue;
/* We need to copy these to an anonymous buffer as the simplest
* method to avoid being overwritten by userspace.
*/
return i915_error_object_create(dev_priv, obj, vm);
}
}
WARN_ON(!found_active);
return NULL;
}
static void i915_record_ring_state(struct drm_device *dev,
struct intel_ring_buffer *ring,
struct drm_i915_error_ring *ering)
......@@ -945,8 +899,39 @@ static void i915_gem_record_rings(struct drm_device *dev,
i915_record_ring_state(dev, ring, &error->ring[i]);
error->ring[i].batchbuffer =
i915_error_first_batchbuffer(dev_priv, ring);
error->ring[i].pid = -1;
request = i915_gem_find_active_request(ring);
if (request) {
/* We need to copy these to an anonymous buffer
* as the simplest method to avoid being overwritten
* by userspace.
*/
error->ring[i].batchbuffer =
i915_error_object_create(dev_priv,
request->batch_obj,
request->ctx ?
request->ctx->vm :
&dev_priv->gtt.base);
if (HAS_BROKEN_CS_TLB(dev_priv->dev) &&
ring->scratch.obj)
error->ring[i].wa_batchbuffer =
i915_error_ggtt_object_create(dev_priv,
ring->scratch.obj);
if (request->file_priv) {
struct task_struct *task;
rcu_read_lock();
task = pid_task(request->file_priv->file->pid,
PIDTYPE_PID);
if (task) {
strcpy(error->ring[i].comm, task->comm);
error->ring[i].pid = task->pid;
}
rcu_read_unlock();
}
}
error->ring[i].ringbuffer =
i915_error_ggtt_object_create(dev_priv, ring->obj);
......@@ -1113,6 +1098,40 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
i915_get_extra_instdone(dev, error->extra_instdone);
}
static void i915_error_capture_msg(struct drm_device *dev,
struct drm_i915_error_state *error,
bool wedged,
const char *error_msg)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 ecode;
int ring_id = -1, len;
ecode = i915_error_generate_code(dev_priv, error, &ring_id);
len = scnprintf(error->error_msg, sizeof(error->error_msg),
"GPU HANG: ecode %d:0x%08x", ring_id, ecode);
if (ring_id != -1 && error->ring[ring_id].pid != -1)
len += scnprintf(error->error_msg + len,
sizeof(error->error_msg) - len,
", in %s [%d]",
error->ring[ring_id].comm,
error->ring[ring_id].pid);
scnprintf(error->error_msg + len, sizeof(error->error_msg) - len,
", reason: %s, action: %s",
error_msg,
wedged ? "reset" : "continue");
}
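Put together, the captured one-liner takes the form below (all values invented for illustration):

GPU HANG: ecode 0:0x84df7cff, in Xorg [701], reason: Ring hung, action: reset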
static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error)
{
error->reset_count = i915_reset_count(&dev_priv->gpu_error);
error->suspend_count = dev_priv->suspend_count;
}
/**
* i915_capture_error_state - capture an error record for later analysis
* @dev: drm device
......@@ -1122,19 +1141,13 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
* out a structure which becomes available in debugfs for user level tools
* to pick up.
*/
void i915_capture_error_state(struct drm_device *dev)
void i915_capture_error_state(struct drm_device *dev, bool wedged,
const char *error_msg)
{
static bool warned;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_error_state *error;
unsigned long flags;
uint32_t ecode;
spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
error = dev_priv->gpu_error.first_error;
spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
if (error)
return;
/* Account for pipe specific data like PIPE*STAT */
error = kzalloc(sizeof(*error), GFP_ATOMIC);
......@@ -1143,30 +1156,22 @@ void i915_capture_error_state(struct drm_device *dev)
return;
}
DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n",
dev->primary->index);
kref_init(&error->ref);
i915_capture_gen_state(dev_priv, error);
i915_capture_reg_state(dev_priv, error);
i915_gem_capture_buffers(dev_priv, error);
i915_gem_record_fences(dev, error);
i915_gem_record_rings(dev, error);
ecode = i915_error_generate_code(dev_priv, error);
if (!warned) {
DRM_INFO("GPU HANG [%x]\n", ecode);
DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
warned = true;
}
do_gettimeofday(&error->time);
error->overlay = intel_overlay_capture_error_state(dev);
error->display = intel_display_capture_error_state(dev);
i915_error_capture_msg(dev, error, wedged, error_msg);
DRM_INFO("%s\n", error->error_msg);
spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
if (dev_priv->gpu_error.first_error == NULL) {
dev_priv->gpu_error.first_error = error;
......@@ -1174,8 +1179,19 @@ void i915_capture_error_state(struct drm_device *dev)
}
spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
if (error)
if (error) {
i915_error_state_free(&error->ref);
return;
}
if (!warned) {
DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", dev->primary->index);
warned = true;
}
}
void i915_error_state_get(struct drm_device *dev,
......
......@@ -387,16 +387,15 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
*
* Returns the previous state of underrun reporting.
*/
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe, bool enable)
bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe, bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
unsigned long flags;
bool ret;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
assert_spin_locked(&dev_priv->irq_lock);
ret = !intel_crtc->cpu_fifo_underrun_disabled;
......@@ -415,7 +414,20 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
broadwell_set_fifo_underrun_reporting(dev, pipe, enable);
done:
return ret;
}
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe, bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
bool ret;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
ret = __intel_set_cpu_fifo_underrun_reporting(dev, pipe, enable);
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
return ret;
}
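The split follows the usual kernel __-prefix convention: the double-underscore variant asserts that irq_lock is already held, so callers that batch several IRQ operations under one critical section can use it directly, while the plain name stays a drop-in locking wrapper. A generic sketch of the pattern, with invented names:

static bool __foo_set_enabled(struct foo *f, bool enable)
{
	bool old;

	assert_spin_locked(&f->lock);	/* caller must hold f->lock */
	old = f->enabled;
	f->enabled = enable;
	return old;
}

static bool foo_set_enabled(struct foo *f, bool enable)
{
	unsigned long flags;
	bool old;

	spin_lock_irqsave(&f->lock, flags);
	old = __foo_set_enabled(f, enable);
	spin_unlock_irqrestore(&f->lock, flags);
	return old;
}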
......@@ -482,7 +494,7 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
}
void
static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
u32 enable_mask, u32 status_mask)
{
......@@ -506,7 +518,7 @@ __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
POSTING_READ(reg);
}
void
static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
u32 enable_mask, u32 status_mask)
{
......@@ -1296,8 +1308,8 @@ static void snb_gt_irq_handler(struct drm_device *dev,
if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
GT_BSD_CS_ERROR_INTERRUPT |
GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
i915_handle_error(dev, false);
i915_handle_error(dev, false, "GT error interrupt 0x%08x",
gt_iir);
}
if (gt_iir & GT_PARITY_ERROR(dev))
......@@ -1544,8 +1556,9 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
i915_handle_error(dev_priv->dev, false);
i915_handle_error(dev_priv->dev, false,
"VEBOX CS error interrupt 0x%08x",
pm_iir);
}
}
}
......@@ -1865,7 +1878,7 @@ static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe i;
enum pipe pipe;
if (de_iir & DE_ERR_INT_IVB)
ivb_err_int_handler(dev);
......@@ -1876,14 +1889,14 @@ static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
if (de_iir & DE_GSE_IVB)
intel_opregion_asle_intr(dev);
for_each_pipe(i) {
if (de_iir & (DE_PIPE_VBLANK_IVB(i)))
drm_handle_vblank(dev, i);
for_each_pipe(pipe) {
if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
drm_handle_vblank(dev, pipe);
/* plane/pipes map 1:1 on ilk+ */
if (de_iir & DE_PLANE_FLIP_DONE_IVB(i)) {
intel_prepare_page_flip(dev, i);
intel_finish_page_flip_plane(dev, i);
if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
intel_prepare_page_flip(dev, pipe);
intel_finish_page_flip_plane(dev, pipe);
}
}
......@@ -2277,11 +2290,18 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
* so userspace knows something bad happened (should trigger collection
* of a ring dump etc.).
*/
void i915_handle_error(struct drm_device *dev, bool wedged)
void i915_handle_error(struct drm_device *dev, bool wedged,
const char *fmt, ...)
{
struct drm_i915_private *dev_priv = dev->dev_private;
va_list args;
char error_msg[80];
va_start(args, fmt);
vscnprintf(error_msg, sizeof(error_msg), fmt, args);
va_end(args);
i915_capture_error_state(dev);
i915_capture_error_state(dev, wedged, error_msg);
i915_report_and_clear_eir(dev);
if (wedged) {
......@@ -2584,9 +2604,9 @@ ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
*/
tmp = I915_READ_CTL(ring);
if (tmp & RING_WAIT) {
DRM_ERROR("Kicking stuck wait on %s\n",
ring->name);
i915_handle_error(dev, false);
i915_handle_error(dev, false,
"Kicking stuck wait on %s",
ring->name);
I915_WRITE_CTL(ring, tmp);
return HANGCHECK_KICK;
}
......@@ -2596,9 +2616,9 @@ ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
default:
return HANGCHECK_HUNG;
case 1:
DRM_ERROR("Kicking stuck semaphore on %s\n",
ring->name);
i915_handle_error(dev, false);
i915_handle_error(dev, false,
"Kicking stuck semaphore on %s",
ring->name);
I915_WRITE_CTL(ring, tmp);
return HANGCHECK_KICK;
case 0:
......@@ -2720,7 +2740,7 @@ static void i915_hangcheck_elapsed(unsigned long data)
}
if (rings_hung)
return i915_handle_error(dev, true);
return i915_handle_error(dev, true, "Ring hung");
if (busy_count)
/* Reset timer in case chip hangs without another request
......@@ -3016,44 +3036,113 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
return 0;
}
static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
{
u32 pipestat_mask;
u32 iir_mask;
pipestat_mask = PIPESTAT_INT_STATUS_MASK |
PIPE_FIFO_UNDERRUN_STATUS;
I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
POSTING_READ(PIPESTAT(PIPE_A));
pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
PIPE_CRC_DONE_INTERRUPT_STATUS;
i915_enable_pipestat(dev_priv, PIPE_A, pipestat_mask |
PIPE_GMBUS_INTERRUPT_STATUS);
i915_enable_pipestat(dev_priv, PIPE_B, pipestat_mask);
iir_mask = I915_DISPLAY_PORT_INTERRUPT |
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
dev_priv->irq_mask &= ~iir_mask;
I915_WRITE(VLV_IIR, iir_mask);
I915_WRITE(VLV_IIR, iir_mask);
I915_WRITE(VLV_IMR, dev_priv->irq_mask);
I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
POSTING_READ(VLV_IER);
}
static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
{
u32 pipestat_mask;
u32 iir_mask;
iir_mask = I915_DISPLAY_PORT_INTERRUPT |
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
dev_priv->irq_mask |= iir_mask;
I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
I915_WRITE(VLV_IMR, dev_priv->irq_mask);
I915_WRITE(VLV_IIR, iir_mask);
I915_WRITE(VLV_IIR, iir_mask);
POSTING_READ(VLV_IIR);
pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
PIPE_CRC_DONE_INTERRUPT_STATUS;
i915_disable_pipestat(dev_priv, PIPE_A, pipestat_mask |
PIPE_GMBUS_INTERRUPT_STATUS);
i915_disable_pipestat(dev_priv, PIPE_B, pipestat_mask);
pipestat_mask = PIPESTAT_INT_STATUS_MASK |
PIPE_FIFO_UNDERRUN_STATUS;
I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
POSTING_READ(PIPESTAT(PIPE_A));
}
void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
{
assert_spin_locked(&dev_priv->irq_lock);
if (dev_priv->display_irqs_enabled)
return;
dev_priv->display_irqs_enabled = true;
if (dev_priv->dev->irq_enabled)
valleyview_display_irqs_install(dev_priv);
}
void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
{
assert_spin_locked(&dev_priv->irq_lock);
if (!dev_priv->display_irqs_enabled)
return;
dev_priv->display_irqs_enabled = false;
if (dev_priv->dev->irq_enabled)
valleyview_display_irqs_uninstall(dev_priv);
}
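Both entry points assert irq_lock and only touch the hardware once dev->irq_enabled says an interrupt handler is installed; display_irqs_enabled just records the requested state for the postinstall/uninstall paths. A hedged sketch of a caller — assuming the vlv display power well code takes the lock around the toggle:

spin_lock_irq(&dev_priv->irq_lock);
valleyview_enable_display_irqs(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);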
static int valleyview_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 enable_mask;
u32 pipestat_enable = PLANE_FLIP_DONE_INT_STATUS_VLV |
PIPE_CRC_DONE_INTERRUPT_STATUS;
unsigned long irqflags;
enable_mask = I915_DISPLAY_PORT_INTERRUPT;
enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
/*
* Leave vblank interrupts masked initially. enable/disable will
* toggle them based on usage.
*/
dev_priv->irq_mask = (~enable_mask) |
I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
dev_priv->irq_mask = ~0;
I915_WRITE(PORT_HOTPLUG_EN, 0);
POSTING_READ(PORT_HOTPLUG_EN);
I915_WRITE(VLV_IMR, dev_priv->irq_mask);
I915_WRITE(VLV_IER, enable_mask);
I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
I915_WRITE(VLV_IIR, 0xffffffff);
I915_WRITE(PIPESTAT(0), 0xffff);
I915_WRITE(PIPESTAT(1), 0xffff);
POSTING_READ(VLV_IER);
/* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked check happy. */
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
i915_enable_pipestat(dev_priv, PIPE_A, pipestat_enable);
i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
i915_enable_pipestat(dev_priv, PIPE_B, pipestat_enable);
if (dev_priv->display_irqs_enabled)
valleyview_display_irqs_install(dev_priv);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
I915_WRITE(VLV_IIR, 0xffffffff);
......@@ -3184,6 +3273,7 @@ static void gen8_irq_uninstall(struct drm_device *dev)
static void valleyview_irq_uninstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
int pipe;
if (!dev_priv)
......@@ -3197,8 +3287,14 @@ static void valleyview_irq_uninstall(struct drm_device *dev)
I915_WRITE(HWSTAM, 0xffffffff);
I915_WRITE(PORT_HOTPLUG_EN, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
for_each_pipe(pipe)
I915_WRITE(PIPESTAT(pipe), 0xffff);
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (dev_priv->display_irqs_enabled)
valleyview_display_irqs_uninstall(dev_priv);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
dev_priv->irq_mask = 0;
I915_WRITE(VLV_IIR, 0xffffffff);
I915_WRITE(VLV_IMR, 0xffffffff);
I915_WRITE(VLV_IER, 0x0);
......@@ -3337,7 +3433,9 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
*/
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
i915_handle_error(dev, false);
i915_handle_error(dev, false,
"Command parser error, iir 0x%08x",
iir);
for_each_pipe(pipe) {
int reg = PIPESTAT(pipe);
......@@ -3519,7 +3617,9 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
*/
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
i915_handle_error(dev, false);
i915_handle_error(dev, false,
"Command parser error, iir 0x%08x",
iir);
for_each_pipe(pipe) {
int reg = PIPESTAT(pipe);
......@@ -3756,7 +3856,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
*/
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
i915_handle_error(dev, false);
i915_handle_error(dev, false,
"Command parser error, iir 0x%08x",
iir);
for_each_pipe(pipe) {
int reg = PIPESTAT(pipe);
......
......@@ -48,6 +48,7 @@ struct i915_params i915 __read_mostly = {
.reset = true,
.invert_brightness = 0,
.disable_display = 0,
.enable_cmd_parser = 0,
};
module_param_named(modeset, i915.modeset, int, 0400);
......@@ -157,3 +158,7 @@ MODULE_PARM_DESC(invert_brightness,
module_param_named(disable_display, i915.disable_display, bool, 0600);
MODULE_PARM_DESC(disable_display, "Disable display (default: false)");
module_param_named(enable_cmd_parser, i915.enable_cmd_parser, int, 0600);
MODULE_PARM_DESC(enable_cmd_parser,
"Enable command parsing (1=enabled, 0=disabled [default])");
......@@ -174,6 +174,18 @@
#define VGA_CR_INDEX_CGA 0x3d4
#define VGA_CR_DATA_CGA 0x3d5
/*
* Instruction field definitions used by the command parser
*/
#define INSTR_CLIENT_SHIFT 29
#define INSTR_CLIENT_MASK 0xE0000000
#define INSTR_MI_CLIENT 0x0
#define INSTR_BC_CLIENT 0x2
#define INSTR_RC_CLIENT 0x3
#define INSTR_SUBCLIENT_SHIFT 27
#define INSTR_SUBCLIENT_MASK 0x18000000
#define INSTR_MEDIA_SUBCLIENT 0x2
/*
* Memory interface instructions used by the kernel
*/
......@@ -377,14 +389,30 @@
#define DSPFREQSTAT_MASK (0x3 << DSPFREQSTAT_SHIFT)
#define DSPFREQGUAR_SHIFT 14
#define DSPFREQGUAR_MASK (0x3 << DSPFREQGUAR_SHIFT)
/* See the PUNIT HAS v0.8 for the below bits */
enum punit_power_well {
PUNIT_POWER_WELL_RENDER = 0,
PUNIT_POWER_WELL_MEDIA = 1,
PUNIT_POWER_WELL_DISP2D = 3,
PUNIT_POWER_WELL_DPIO_CMN_BC = 5,
PUNIT_POWER_WELL_DPIO_TX_B_LANES_01 = 6,
PUNIT_POWER_WELL_DPIO_TX_B_LANES_23 = 7,
PUNIT_POWER_WELL_DPIO_TX_C_LANES_01 = 8,
PUNIT_POWER_WELL_DPIO_TX_C_LANES_23 = 9,
PUNIT_POWER_WELL_DPIO_RX0 = 10,
PUNIT_POWER_WELL_DPIO_RX1 = 11,
PUNIT_POWER_WELL_NUM,
};
#define PUNIT_REG_PWRGT_CTRL 0x60
#define PUNIT_REG_PWRGT_STATUS 0x61
#define PUNIT_CLK_GATE 1
#define PUNIT_PWR_RESET 2
#define PUNIT_PWR_GATE 3
#define RENDER_PWRGT (PUNIT_PWR_GATE << 0)
#define MEDIA_PWRGT (PUNIT_PWR_GATE << 2)
#define DISP2D_PWRGT (PUNIT_PWR_GATE << 6)
#define PUNIT_PWRGT_MASK(power_well) (3 << ((power_well) * 2))
#define PUNIT_PWRGT_PWR_ON(power_well) (0 << ((power_well) * 2))
#define PUNIT_PWRGT_CLK_GATE(power_well) (1 << ((power_well) * 2))
#define PUNIT_PWRGT_RESET(power_well) (2 << ((power_well) * 2))
#define PUNIT_PWRGT_PWR_GATE(power_well) (3 << ((power_well) * 2))
#define PUNIT_REG_GPU_LFM 0xd3
#define PUNIT_REG_GPU_FREQ_REQ 0xd4
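The parameterized macros encode the same two-bits-per-well layout the old fixed-shift defines hard-coded. Taking DISP2D (well 3) as a worked example:

PUNIT_PWRGT_MASK(PUNIT_POWER_WELL_DISP2D)	/* 3 << (3 * 2) == 0x3 << 6 == 0xc0 */
PUNIT_PWRGT_PWR_GATE(PUNIT_POWER_WELL_DISP2D)	/* 3 << 6, the value the old
						   DISP2D_PWRGT (PUNIT_PWR_GATE << 6)
						   spelled out by hand */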
......@@ -798,7 +826,12 @@
# define ASYNC_FLIP_PERF_DISABLE (1 << 14)
#define GEN6_GT_MODE 0x20d0
#define GEN6_GT_MODE_HI (1 << 9)
#define GEN7_GT_MODE 0x7008
#define GEN6_WIZ_HASHING(hi, lo) (((hi) << 9) | ((lo) << 7))
#define GEN6_WIZ_HASHING_8x8 GEN6_WIZ_HASHING(0, 0)
#define GEN6_WIZ_HASHING_8x4 GEN6_WIZ_HASHING(0, 1)
#define GEN6_WIZ_HASHING_16x4 GEN6_WIZ_HASHING(1, 0)
#define GEN6_WIZ_HASHING_MASK (GEN6_WIZ_HASHING(1, 1) << 16)
#define GEN6_TD_FOUR_ROW_DISPATCH_DISABLE (1 << 5)
#define GFX_MODE 0x02520
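GEN6_WIZ_HASHING packs the two mode bits at positions 9 and 7, and GEN6_WIZ_HASHING_MASK shifts both up by 16 because the GT_MODE registers are masked: the high word of a write selects which low-word bits actually change. The WiZ w/a patches in this series program it along these lines (a sketch, not a quoted hunk):

/* 16x4 hashing: unmask bits 9 and 7, set bit 9, leave bit 7 clear. */
I915_WRITE(GEN7_GT_MODE,
	   GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);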
......@@ -944,6 +977,9 @@
#define GEN6_BLITTER_LOCK_SHIFT 16
#define GEN6_BLITTER_FBC_NOTIFY (1<<3)
#define GEN6_RC_SLEEP_PSMI_CONTROL 0x2050
#define GEN8_RC_SEMA_IDLE_MSG_DISABLE (1 << 12)
#define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050
#define GEN6_BSD_SLEEP_MSG_DISABLE (1 << 0)
#define GEN6_BSD_SLEEP_FLUSH_DISABLE (1 << 2)
......@@ -1121,13 +1157,6 @@
#define FBC_REND_NUKE (1<<2)
#define FBC_REND_CACHE_CLEAN (1<<1)
#define _HSW_PIPE_SLICE_CHICKEN_1_A 0x420B0
#define _HSW_PIPE_SLICE_CHICKEN_1_B 0x420B4
#define HSW_BYPASS_FBC_QUEUE (1<<22)
#define HSW_PIPE_SLICE_CHICKEN_1(pipe) _PIPE(pipe, + \
_HSW_PIPE_SLICE_CHICKEN_1_A, + \
_HSW_PIPE_SLICE_CHICKEN_1_B)
/*
* GPIO regs
*/
......@@ -4140,7 +4169,8 @@
#define _CHICKEN_PIPESL_1_A 0x420b0
#define _CHICKEN_PIPESL_1_B 0x420b4
#define DPRS_MASK_VBLANK_SRD (1 << 0)
#define HSW_FBCQ_DIS (1 << 22)
#define BDW_DPRS_MASK_VBLANK_SRD (1 << 0)
#define CHICKEN_PIPESL_1(pipe) _PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B)
#define DISP_ARB_CTL 0x45000
......@@ -4164,7 +4194,7 @@
#define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000
#define GEN7_L3CNTLREG1 0xB01C
#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C4FFF8C
#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C
#define GEN7_L3AGDIS (1<<19)
#define GEN7_L3_CHICKEN_MODE_REGISTER 0xB030
......@@ -4898,6 +4928,9 @@
#define GEN7_UCGCTL4 0x940c
#define GEN7_L3BANK2X_CLOCK_GATE_DISABLE (1<<25)
#define GEN8_UCGCTL6 0x9430
#define GEN8_SDEUNIT_CLOCK_GATE_DISABLE (1<<14)
#define GEN6_RPNSWREQ 0xA008
#define GEN6_TURBO_DISABLE (1<<31)
#define GEN6_FREQUENCY(x) ((x)<<25)
......@@ -5043,6 +5076,10 @@
#define GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE (1<<10)
#define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3)
#define GEN8_ROW_CHICKEN 0xe4f0
#define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE (1<<8)
#define STALL_DOP_GATING_DISABLE (1<<5)
#define GEN7_ROW_CHICKEN2 0xe4f4
#define GEN7_ROW_CHICKEN2_GT2 0xf4f4
#define DOP_CLOCK_GATING_DISABLE (1<<0)
......
......@@ -599,14 +599,14 @@ parse_mipi(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
{
struct bdb_mipi *mipi;
mipi = find_section(bdb, BDB_MIPI);
mipi = find_section(bdb, BDB_MIPI_CONFIG);
if (!mipi) {
DRM_DEBUG_KMS("No MIPI BDB found");
return;
}
/* XXX: add more info */
dev_priv->vbt.dsi.panel_id = mipi->panel_id;
dev_priv->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID;
}
static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
......
......@@ -104,7 +104,8 @@ struct vbios_data {
#define BDB_LVDS_LFP_DATA 42
#define BDB_LVDS_BACKLIGHT 43
#define BDB_LVDS_POWER 44
#define BDB_MIPI 50
#define BDB_MIPI_CONFIG 52
#define BDB_MIPI_SEQUENCE 53
#define BDB_SKIP 254 /* VBIOS private block, ignore */
struct bdb_general_features {
......@@ -711,44 +712,159 @@ int intel_parse_bios(struct drm_device *dev);
#define DVO_PORT_DPD 9
#define DVO_PORT_DPA 10
/* MIPI DSI panel info */
struct bdb_mipi {
u16 panel_id;
u16 bridge_revision;
/* General params */
u32 dithering:1;
u32 bpp_pixel_format:1;
u32 rsvd1:1;
u32 dphy_valid:1;
u32 resvd2:28;
/* Block 52 contains MIPI Panel info
* 6 such entries will be there. The index into the correct
* entry is based on the panel_index in #40 LFP
*/
#define MAX_MIPI_CONFIGURATIONS 6
u16 port_info;
u16 rsvd3:2;
u16 num_lanes:2;
u16 rsvd4:12;
#define MIPI_DSI_UNDEFINED_PANEL_ID 0
#define MIPI_DSI_GENERIC_PANEL_ID 1
/* DSI config */
u16 virt_ch_num:2;
u16 vtm:2;
u16 rsvd5:12;
struct mipi_config {
u16 panel_id;
u32 dsi_clock;
/* General Params */
u32 enable_dithering:1;
u32 rsvd1:1;
u32 is_bridge:1;
u32 panel_arch_type:2;
u32 is_cmd_mode:1;
#define NON_BURST_SYNC_PULSE 0x1
#define NON_BURST_SYNC_EVENTS 0x2
#define BURST_MODE 0x3
u32 video_transfer_mode:2;
u32 cabc_supported:1;
u32 pwm_blc:1;
/* Bit 13:10 */
#define PIXEL_FORMAT_RGB565 0x1
#define PIXEL_FORMAT_RGB666 0x2
#define PIXEL_FORMAT_RGB666_LOOSELY_PACKED 0x3
#define PIXEL_FORMAT_RGB888 0x4
u32 videomode_color_format:4;
/* Bit 15:14 */
#define ENABLE_ROTATION_0 0x0
#define ENABLE_ROTATION_90 0x1
#define ENABLE_ROTATION_180 0x2
#define ENABLE_ROTATION_270 0x3
u32 rotation:2;
u32 bta_enabled:1;
u32 rsvd2:15;
/* 2 byte Port Description */
#define DUAL_LINK_NOT_SUPPORTED 0
#define DUAL_LINK_FRONT_BACK 1
#define DUAL_LINK_PIXEL_ALT 2
u16 dual_link:2;
u16 lane_cnt:2;
u16 rsvd3:12;
u16 rsvd4;
u8 rsvd5[5];
u32 dsi_ddr_clk;
u32 bridge_ref_clk;
u16 rsvd_pwr;
/* Dphy Params */
u32 prepare_cnt:5;
u32 rsvd6:3;
#define BYTE_CLK_SEL_20MHZ 0
#define BYTE_CLK_SEL_10MHZ 1
#define BYTE_CLK_SEL_5MHZ 2
u8 byte_clk_sel:2;
u8 rsvd6:6;
/* DPHY Flags */
u16 dphy_param_valid:1;
u16 eot_pkt_disabled:1;
u16 enable_clk_stop:1;
u16 rsvd7:13;
u32 hs_tx_timeout;
u32 lp_rx_timeout;
u32 turn_around_timeout;
u32 device_reset_timer;
u32 master_init_timer;
u32 dbi_bw_timer;
u32 lp_byte_clk_val;
/* 4 byte Dphy Params */
u32 prepare_cnt:6;
u32 rsvd8:2;
u32 clk_zero_cnt:8;
u32 trail_cnt:5;
u32 rsvd7:3;
u32 rsvd9:3;
u32 exit_zero_cnt:6;
u32 rsvd8:2;
u32 rsvd10:2;
u32 hl_switch_cnt;
u32 lp_byte_clk;
u32 clk_lane_switch_cnt;
u32 hl_switch_cnt;
u32 rsvd11[6];
/* timings based on dphy spec */
u8 tclk_miss;
u8 tclk_post;
u8 rsvd12;
u8 tclk_pre;
u8 tclk_prepare;
u8 tclk_settle;
u8 tclk_term_enable;
u8 tclk_trail;
u16 tclk_prepare_clkzero;
u8 rsvd13;
u8 td_term_enable;
u8 teot;
u8 ths_exit;
u8 ths_prepare;
u16 ths_prepare_hszero;
u8 rsvd14;
u8 ths_settle;
u8 ths_skip;
u8 ths_trail;
u8 tinit;
u8 tlpx;
u8 rsvd15[3];
/* GPIOs */
u8 panel_enable;
u8 bl_enable;
u8 pwm_enable;
u8 reset_r_n;
u8 pwr_down_r;
u8 stdby_r_n;
} __packed;
/* Block 52 contains MIPI configuration block
* 6 * bdb_mipi_config, followed by 6 pps data
* block below
*
* all delays have a unit of 100us
*/
struct mipi_pps_data {
u16 panel_on_delay;
u16 bl_enable_delay;
u16 bl_disable_delay;
u16 panel_off_delay;
u16 panel_power_cycle_delay;
};
struct bdb_mipi_config {
struct mipi_config config[MAX_MIPI_CONFIGURATIONS];
struct mipi_pps_data pps[MAX_MIPI_CONFIGURATIONS];
};
/* Block 53 contains MIPI sequences as needed by the panel
* for enabling it. This block can be variable in size and
* can contain a maximum of 6 blocks
*/
struct bdb_mipi_sequence {
u8 version;
u8 data[0];
};
#endif /* _I830_BIOS_H_ */
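bdb_mipi_sequence ends in a zero-length array, so the variable-sized sequence data sits directly behind the version byte. A hedged sketch of how such a block might be walked — the one-byte-id/two-byte-size element framing is an assumption made for illustration, not the documented VBT layout:

static void walk_mipi_sequence(const struct bdb_mipi_sequence *seq, size_t len)
{
	const u8 *p = seq->data;
	const u8 *end = (const u8 *)seq + len;

	while (p + 3 <= end) {
		u8 id = p[0];
		u16 size = p[1] | (p[2] << 8);	/* assumed little-endian size */

		p += 3;
		if (size > end - p)
			break;			/* malformed block, stop */
		/* hand <id, p, size> to the panel setup code here */
		p += size;
	}
}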
......@@ -68,8 +68,13 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crt *crt = intel_encoder_to_crt(encoder);
enum intel_display_power_domain power_domain;
u32 tmp;
power_domain = intel_display_port_power_domain(encoder);
if (!intel_display_power_enabled(dev_priv, power_domain))
return false;
tmp = I915_READ(crt->adpa_reg);
if (!(tmp & ADPA_DAC_ENABLE))
......@@ -262,6 +267,10 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
if (HAS_PCH_LPT(dev))
pipe_config->pipe_bpp = 24;
/* FDI must always be 2.7 GHz */
if (HAS_DDI(dev))
pipe_config->port_clock = 135000 * 2;
return true;
}
......@@ -630,14 +639,22 @@ static enum drm_connector_status
intel_crt_detect(struct drm_connector *connector, bool force)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crt *crt = intel_attached_crt(connector);
struct intel_encoder *intel_encoder = &crt->base;
enum intel_display_power_domain power_domain;
enum drm_connector_status status;
struct intel_load_detect_pipe tmp;
intel_runtime_pm_get(dev_priv);
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n",
connector->base.id, drm_get_connector_name(connector),
force);
power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
if (I915_HAS_HOTPLUG(dev)) {
/* We cannot rely on the HPD pin always being correctly wired
* up; for example, many KVMs do not pass it through, and so
......@@ -645,23 +662,30 @@ intel_crt_detect(struct drm_connector *connector, bool force)
*/
if (intel_crt_detect_hotplug(connector)) {
DRM_DEBUG_KMS("CRT detected via hotplug\n");
return connector_status_connected;
status = connector_status_connected;
goto out;
} else
DRM_DEBUG_KMS("CRT not detected via hotplug\n");
}
if (intel_crt_detect_ddc(connector))
return connector_status_connected;
if (intel_crt_detect_ddc(connector)) {
status = connector_status_connected;
goto out;
}
/* Load detection is broken on HPD capable machines. Whoever wants a
* broken monitor (without edid) to work behind a broken kvm (that fails
* to have the right resistors for HP detection) needs to fix this up.
* For now just bail out. */
if (I915_HAS_HOTPLUG(dev))
return connector_status_disconnected;
if (I915_HAS_HOTPLUG(dev)) {
status = connector_status_disconnected;
goto out;
}
if (!force)
return connector->status;
if (!force) {
status = connector->status;
goto out;
}
/* for pre-945g platforms use load detect */
if (intel_get_load_detect_pipe(connector, NULL, &tmp)) {
......@@ -673,6 +697,10 @@ intel_crt_detect(struct drm_connector *connector, bool force)
} else
status = connector_status_unknown;
out:
intel_display_power_put(dev_priv, power_domain);
intel_runtime_pm_put(dev_priv);
return status;
}
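The same bracketing is applied to every connector type in this series (DP and DSI follow below): resolve the port's power domain, hold a reference across the hardware accesses, drop it on every exit path. Condensed, the shape is:

power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
/* ... probe the hardware: hotplug, DDC, load detect ... */
intel_display_power_put(dev_priv, power_domain);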
......@@ -686,17 +714,28 @@ static int intel_crt_get_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crt *crt = intel_attached_crt(connector);
struct intel_encoder *intel_encoder = &crt->base;
enum intel_display_power_domain power_domain;
int ret;
struct i2c_adapter *i2c;
power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->vbt.crt_ddc_pin);
ret = intel_crt_ddc_get_modes(connector, i2c);
if (ret || !IS_G4X(dev))
return ret;
goto out;
/* Try to probe digital port for output in DVI-I -> VGA mode. */
i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB);
return intel_crt_ddc_get_modes(connector, i2c);
ret = intel_crt_ddc_get_modes(connector, i2c);
out:
intel_display_power_put(dev_priv, power_domain);
return ret;
}
static int intel_crt_set_property(struct drm_connector *connector,
......
......@@ -1145,9 +1145,14 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum port port = intel_ddi_get_encoder_port(encoder);
enum intel_display_power_domain power_domain;
u32 tmp;
int i;
power_domain = intel_display_port_power_domain(encoder);
if (!intel_display_power_enabled(dev_priv, power_domain))
return false;
tmp = I915_READ(DDI_BUF_CTL(port));
if (!(tmp & DDI_BUF_CTL_ENABLE))
......
......@@ -1122,7 +1122,7 @@ void assert_pipe(struct drm_i915_private *dev_priv,
if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
state = true;
if (!intel_display_power_enabled(dev_priv->dev,
if (!intel_display_power_enabled(dev_priv,
POWER_DOMAIN_TRANSCODER(cpu_transcoder))) {
cur_state = false;
} else {
......@@ -1188,16 +1188,16 @@ static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
struct drm_device *dev = dev_priv->dev;
int reg, i;
int reg, sprite;
u32 val;
if (IS_VALLEYVIEW(dev)) {
for (i = 0; i < INTEL_INFO(dev)->num_sprites; i++) {
reg = SPCNTR(pipe, i);
for_each_sprite(pipe, sprite) {
reg = SPCNTR(pipe, sprite);
val = I915_READ(reg);
WARN((val & SP_ENABLE),
"sprite %c assertion failure, should be off on pipe %c but is still active\n",
sprite_name(pipe, i), pipe_name(pipe));
sprite_name(pipe, sprite), pipe_name(pipe));
}
} else if (INTEL_INFO(dev)->gen >= 7) {
reg = SPRCTL(pipe);
......@@ -2321,6 +2321,25 @@ intel_finish_fb(struct drm_framebuffer *old_fb)
return ret;
}
static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
unsigned long flags;
bool pending;
if (i915_reset_in_progress(&dev_priv->gpu_error) ||
intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
return false;
spin_lock_irqsave(&dev->event_lock, flags);
pending = to_intel_crtc(crtc)->unpin_work != NULL;
spin_unlock_irqrestore(&dev->event_lock, flags);
return pending;
}
static int
intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *fb)
......@@ -2331,6 +2350,11 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb;
int ret;
if (intel_crtc_has_pending_flip(crtc)) {
DRM_ERROR("pipe is still busy with an old pageflip\n");
return -EBUSY;
}
/* no fb bound */
if (!fb) {
DRM_ERROR("No FB bound\n");
......@@ -2956,25 +2980,6 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
udelay(100);
}
static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
unsigned long flags;
bool pending;
if (i915_reset_in_progress(&dev_priv->gpu_error) ||
intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
return false;
spin_lock_irqsave(&dev->event_lock, flags);
pending = to_intel_crtc(crtc)->unpin_work != NULL;
spin_unlock_irqrestore(&dev->event_lock, flags);
return pending;
}
bool intel_has_pending_fb_unpin(struct drm_device *dev)
{
struct intel_crtc *crtc;
......@@ -3953,6 +3958,117 @@ static void i9xx_pfit_enable(struct intel_crtc *crtc)
I915_WRITE(BCLRPAT(crtc->pipe), 0);
}
#define for_each_power_domain(domain, mask) \
for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
if ((1 << (domain)) & (mask))
enum intel_display_power_domain
intel_display_port_power_domain(struct intel_encoder *intel_encoder)
{
struct drm_device *dev = intel_encoder->base.dev;
struct intel_digital_port *intel_dig_port;
switch (intel_encoder->type) {
case INTEL_OUTPUT_UNKNOWN:
/* Only DDI platforms should ever use this output type */
WARN_ON_ONCE(!HAS_DDI(dev));
case INTEL_OUTPUT_DISPLAYPORT:
case INTEL_OUTPUT_HDMI:
case INTEL_OUTPUT_EDP:
intel_dig_port = enc_to_dig_port(&intel_encoder->base);
switch (intel_dig_port->port) {
case PORT_A:
return POWER_DOMAIN_PORT_DDI_A_4_LANES;
case PORT_B:
return POWER_DOMAIN_PORT_DDI_B_4_LANES;
case PORT_C:
return POWER_DOMAIN_PORT_DDI_C_4_LANES;
case PORT_D:
return POWER_DOMAIN_PORT_DDI_D_4_LANES;
default:
WARN_ON_ONCE(1);
return POWER_DOMAIN_PORT_OTHER;
}
case INTEL_OUTPUT_ANALOG:
return POWER_DOMAIN_PORT_CRT;
case INTEL_OUTPUT_DSI:
return POWER_DOMAIN_PORT_DSI;
default:
return POWER_DOMAIN_PORT_OTHER;
}
}
static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct intel_encoder *intel_encoder;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
bool pfit_enabled = intel_crtc->config.pch_pfit.enabled;
unsigned long mask;
enum transcoder transcoder;
transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe);
mask = BIT(POWER_DOMAIN_PIPE(pipe));
mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
if (pfit_enabled)
mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
for_each_encoder_on_crtc(dev, crtc, intel_encoder)
mask |= BIT(intel_display_port_power_domain(intel_encoder));
return mask;
}
void intel_display_set_init_power(struct drm_i915_private *dev_priv,
bool enable)
{
if (dev_priv->power_domains.init_power_on == enable)
return;
if (enable)
intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
else
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
dev_priv->power_domains.init_power_on = enable;
}
static void modeset_update_crtc_power_domains(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long pipe_domains[I915_MAX_PIPES] = { 0, };
struct intel_crtc *crtc;
/*
* First get all needed power domains, then put all unneeded, to avoid
* any unnecessary toggling of the power wells.
*/
list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
enum intel_display_power_domain domain;
if (!crtc->base.enabled)
continue;
pipe_domains[crtc->pipe] = get_crtc_power_domains(&crtc->base);
for_each_power_domain(domain, pipe_domains[crtc->pipe])
intel_display_power_get(dev_priv, domain);
}
list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
enum intel_display_power_domain domain;
for_each_power_domain(domain, crtc->enabled_power_domains)
intel_display_power_put(dev_priv, domain);
crtc->enabled_power_domains = pipe_domains[crtc->pipe];
}
intel_display_set_init_power(dev_priv, false);
}
int valleyview_get_vco(struct drm_i915_private *dev_priv)
{
int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
......@@ -4113,6 +4229,7 @@ static void valleyview_modeset_global_resources(struct drm_device *dev)
if (req_cdclk != cur_cdclk)
valleyview_set_cdclk(dev, req_cdclk);
modeset_update_crtc_power_domains(dev);
}
static void valleyview_crtc_enable(struct drm_crtc *crtc)
......@@ -5495,6 +5612,10 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t tmp;
if (!intel_display_power_enabled(dev_priv,
POWER_DOMAIN_PIPE(crtc->pipe)))
return false;
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = DPLL_ID_PRIVATE;
......@@ -6156,7 +6277,7 @@ int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
* is 2.5%; use 5% for safety's sake.
*/
u32 bps = target_clock * bpp * 21 / 20;
return bps / (link_bw * 8) + 1;
return DIV_ROUND_UP(bps, link_bw * 8);
}
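The old expression tacked on one extra lane unconditionally, so it over-allocated whenever bps divided evenly; DIV_ROUND_UP only rounds when there is a remainder. With illustrative numbers where link_bw * 8 == 8:

16 / 8 + 1;		/* old: 3 lanes, one more than needed */
DIV_ROUND_UP(16, 8);	/* new: (16 + 8 - 1) / 8 == 2 lanes */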
static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
......@@ -6812,105 +6933,9 @@ static void hsw_update_package_c8(struct drm_device *dev)
mutex_unlock(&dev_priv->pc8.lock);
}
static void hsw_package_c8_gpu_idle(struct drm_i915_private *dev_priv)
{
if (!HAS_PC8(dev_priv->dev))
return;
mutex_lock(&dev_priv->pc8.lock);
if (!dev_priv->pc8.gpu_idle) {
dev_priv->pc8.gpu_idle = true;
__hsw_enable_package_c8(dev_priv);
}
mutex_unlock(&dev_priv->pc8.lock);
}
static void hsw_package_c8_gpu_busy(struct drm_i915_private *dev_priv)
{
if (!HAS_PC8(dev_priv->dev))
return;
mutex_lock(&dev_priv->pc8.lock);
if (dev_priv->pc8.gpu_idle) {
dev_priv->pc8.gpu_idle = false;
__hsw_disable_package_c8(dev_priv);
}
mutex_unlock(&dev_priv->pc8.lock);
}
#define for_each_power_domain(domain, mask) \
for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
if ((1 << (domain)) & (mask))
static unsigned long get_pipe_power_domains(struct drm_device *dev,
enum pipe pipe, bool pfit_enabled)
{
unsigned long mask;
enum transcoder transcoder;
transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe);
mask = BIT(POWER_DOMAIN_PIPE(pipe));
mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
if (pfit_enabled)
mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
return mask;
}
void intel_display_set_init_power(struct drm_device *dev, bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (dev_priv->power_domains.init_power_on == enable)
return;
if (enable)
intel_display_power_get(dev, POWER_DOMAIN_INIT);
else
intel_display_power_put(dev, POWER_DOMAIN_INIT);
dev_priv->power_domains.init_power_on = enable;
}
static void modeset_update_power_wells(struct drm_device *dev)
{
unsigned long pipe_domains[I915_MAX_PIPES] = { 0, };
struct intel_crtc *crtc;
/*
* First get all needed power domains, then put all unneeded, to avoid
* any unnecessary toggling of the power wells.
*/
list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
enum intel_display_power_domain domain;
if (!crtc->base.enabled)
continue;
pipe_domains[crtc->pipe] = get_pipe_power_domains(dev,
crtc->pipe,
crtc->config.pch_pfit.enabled);
for_each_power_domain(domain, pipe_domains[crtc->pipe])
intel_display_power_get(dev, domain);
}
list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
enum intel_display_power_domain domain;
for_each_power_domain(domain, crtc->enabled_power_domains)
intel_display_power_put(dev, domain);
crtc->enabled_power_domains = pipe_domains[crtc->pipe];
}
intel_display_set_init_power(dev, false);
}
static void haswell_modeset_global_resources(struct drm_device *dev)
{
modeset_update_power_wells(dev);
modeset_update_crtc_power_domains(dev);
hsw_update_package_c8(dev);
}
......@@ -6961,6 +6986,10 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
enum intel_display_power_domain pfit_domain;
uint32_t tmp;
if (!intel_display_power_enabled(dev_priv,
POWER_DOMAIN_PIPE(crtc->pipe)))
return false;
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = DPLL_ID_PRIVATE;
......@@ -6986,7 +7015,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
pipe_config->cpu_transcoder = TRANSCODER_EDP;
}
if (!intel_display_power_enabled(dev,
if (!intel_display_power_enabled(dev_priv,
POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
return false;
......@@ -7014,7 +7043,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
intel_get_pipe_timings(crtc, pipe_config);
pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
if (intel_display_power_enabled(dev, pfit_domain))
if (intel_display_power_enabled(dev_priv, pfit_domain))
ironlake_get_pfit_config(crtc, pipe_config);
if (IS_HASWELL(dev))
......@@ -7549,7 +7578,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
return -ENOENT;
if (obj->base.size < width * height * 4) {
DRM_ERROR("buffer is to small\n");
DRM_DEBUG_KMS("buffer is to small\n");
ret = -ENOMEM;
goto fail;
}
......@@ -7560,7 +7589,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
unsigned alignment;
if (obj->tiling_mode) {
DRM_ERROR("cursor cannot be tiled\n");
DRM_DEBUG_KMS("cursor cannot be tiled\n");
ret = -EINVAL;
goto fail_locked;
}
......@@ -7576,13 +7605,13 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
ret = i915_gem_object_pin_to_display_plane(obj, alignment, NULL);
if (ret) {
DRM_ERROR("failed to move cursor bo into the GTT\n");
DRM_DEBUG_KMS("failed to move cursor bo into the GTT\n");
goto fail_locked;
}
ret = i915_gem_object_put_fence(obj);
if (ret) {
DRM_ERROR("failed to release fence for cursor");
DRM_DEBUG_KMS("failed to release fence for cursor");
goto fail_unpin;
}
......@@ -7593,7 +7622,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
(intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
align);
if (ret) {
DRM_ERROR("failed to attach phys object\n");
DRM_DEBUG_KMS("failed to attach phys object\n");
goto fail_locked;
}
addr = obj->phys_obj->handle->busaddr;
......@@ -7692,7 +7721,7 @@ __intel_framebuffer_create(struct drm_device *dev,
return ERR_PTR(ret);
}
struct drm_framebuffer *
static struct drm_framebuffer *
intel_framebuffer_create(struct drm_device *dev,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_i915_gem_object *obj)
......@@ -8192,8 +8221,12 @@ void intel_mark_busy(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
hsw_package_c8_gpu_busy(dev_priv);
if (dev_priv->mm.busy)
return;
hsw_disable_package_c8(dev_priv);
i915_update_gfx_val(dev_priv);
dev_priv->mm.busy = true;
}
void intel_mark_idle(struct drm_device *dev)
......@@ -8201,10 +8234,13 @@ void intel_mark_idle(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
hsw_package_c8_gpu_idle(dev_priv);
if (!dev_priv->mm.busy)
return;
dev_priv->mm.busy = false;
if (!i915.powersave)
return;
goto out;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (!crtc->fb)
......@@ -8215,6 +8251,9 @@ void intel_mark_idle(struct drm_device *dev)
if (INTEL_INFO(dev)->gen >= 6)
gen6_rps_idle(dev->dev_private);
out:
hsw_enable_package_c8(dev_priv);
}
void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
......@@ -8678,6 +8717,9 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
fb->pitches[0] != crtc->fb->pitches[0]))
return -EINVAL;
if (i915_terminally_wedged(&dev_priv->gpu_error))
goto out_hang;
work = kzalloc(sizeof(*work), GFP_KERNEL);
if (work == NULL)
return -ENOMEM;
......@@ -8752,6 +8794,13 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
free_work:
kfree(work);
if (ret == -EIO) {
out_hang:
intel_crtc_wait_for_pending_flips(crtc);
ret = intel_pipe_set_base(crtc, crtc->x, crtc->y, fb);
if (ret == 0 && event)
drm_send_vblank_event(dev, intel_crtc->pipe, event);
}
return ret;
}
......@@ -10555,10 +10604,10 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = {
.create_handle = intel_user_framebuffer_create_handle,
};
int intel_framebuffer_init(struct drm_device *dev,
struct intel_framebuffer *intel_fb,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_i915_gem_object *obj)
static int intel_framebuffer_init(struct drm_device *dev,
struct intel_framebuffer *intel_fb,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_i915_gem_object *obj)
{
int aligned_height;
int pitch_limit;
......@@ -10996,7 +11045,8 @@ void intel_modeset_suspend_hw(struct drm_device *dev)
void intel_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i, j, ret;
int sprite, ret;
enum pipe pipe;
drm_mode_config_init(dev);
......@@ -11033,13 +11083,13 @@ void intel_modeset_init(struct drm_device *dev)
INTEL_INFO(dev)->num_pipes,
INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");
for_each_pipe(i) {
intel_crtc_init(dev, i);
for (j = 0; j < INTEL_INFO(dev)->num_sprites; j++) {
ret = intel_plane_init(dev, i, j);
for_each_pipe(pipe) {
intel_crtc_init(dev, pipe);
for_each_sprite(pipe, sprite) {
ret = intel_plane_init(dev, pipe, sprite);
if (ret)
DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
pipe_name(i), sprite_name(i, j), ret);
pipe_name(pipe), sprite_name(pipe, sprite), ret);
}
}
......@@ -11056,7 +11106,9 @@ void intel_modeset_init(struct drm_device *dev)
/* Just in case the BIOS is doing something questionable. */
intel_disable_fbc(dev);
mutex_lock(&dev->mode_config.mutex);
intel_modeset_setup_hw_state(dev, false);
mutex_unlock(&dev->mode_config.mutex);
}
static void
......@@ -11239,11 +11291,21 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
* the crtc fixup. */
}
void i915_redisable_vga(struct drm_device *dev)
void i915_redisable_vga_power_on(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 vga_reg = i915_vgacntrl_reg(dev);
if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
i915_disable_vga(dev);
}
}
void i915_redisable_vga(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
/* This function can be called either from intel_modeset_setup_hw_state or
* at a very early point in our resume sequence, where the power well
* structures are not yet restored. Since this function is at a very
......@@ -11251,14 +11313,10 @@ void i915_redisable_vga(struct drm_device *dev)
* level, just check if the power well is enabled instead of trying to
* follow the "don't touch the power well if we don't need it" policy
* the rest of the driver uses. */
if ((IS_HASWELL(dev) || IS_BROADWELL(dev)) &&
(I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_STATE_ENABLED) == 0)
if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_VGA))
return;
if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
i915_disable_vga(dev);
}
i915_redisable_vga_power_on(dev);
}
static void intel_modeset_readout_hw_state(struct drm_device *dev)
......@@ -11602,7 +11660,8 @@ intel_display_capture_error_state(struct drm_device *dev)
for_each_pipe(i) {
error->pipe[i].power_domain_on =
intel_display_power_enabled_sw(dev, POWER_DOMAIN_PIPE(i));
intel_display_power_enabled_sw(dev_priv,
POWER_DOMAIN_PIPE(i));
if (!error->pipe[i].power_domain_on)
continue;
......@@ -11640,7 +11699,7 @@ intel_display_capture_error_state(struct drm_device *dev)
enum transcoder cpu_transcoder = transcoders[i];
error->transcoder[i].power_domain_on =
intel_display_power_enabled_sw(dev,
intel_display_power_enabled_sw(dev_priv,
POWER_DOMAIN_TRANSCODER(cpu_transcoder));
if (!error->transcoder[i].power_domain_on)
continue;
......
......@@ -916,8 +916,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
bpp);
for (clock = 0; clock <= max_clock; clock++) {
for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
for (clock = 0; clock <= max_clock; clock++) {
link_clock = drm_dp_bw_code_to_link_rate(bws[clock]);
link_avail = intel_dp_max_data_rate(link_clock,
lane_count);
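Swapping the nesting makes lane count the outer, slow-moving axis: the search now exhausts every link rate at one lane before trying two, i.e. it prefers fewer lanes at a higher clock over more lanes at a lower clock. With two bandwidth codes and up to four lanes, the candidates are visited as:

old: (clock 0, 1 lane), (clock 0, 2 lanes), (clock 0, 4 lanes), (clock 1, 1 lane), ...
new: (1 lane, clock 0), (1 lane, clock 1), (2 lanes, clock 0), ...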
......@@ -1333,7 +1333,8 @@ void intel_edp_panel_off(struct intel_dp *intel_dp)
pp = ironlake_get_pp_control(intel_dp);
/* We need to switch off panel power _and_ force vdd, for otherwise some
* panels get very unhappy and cease to work. */
pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_BLC_ENABLE);
pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
EDP_BLC_ENABLE);
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
......@@ -1485,7 +1486,14 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
enum port port = dp_to_dig_port(intel_dp)->port;
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 tmp = I915_READ(intel_dp->output_reg);
enum intel_display_power_domain power_domain;
u32 tmp;
power_domain = intel_display_port_power_domain(encoder);
if (!intel_display_power_enabled(dev_priv, power_domain))
return false;
tmp = I915_READ(intel_dp->output_reg);
if (!(tmp & DP_PORT_EN))
return false;
......@@ -1868,9 +1876,11 @@ static void intel_disable_dp(struct intel_encoder *encoder)
/* Make sure the panel is off before trying to change the mode. But also
* ensure that we have vdd while we switch off the panel. */
edp_panel_vdd_on(intel_dp);
intel_edp_backlight_off(intel_dp);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
intel_edp_panel_off(intel_dp);
edp_panel_vdd_off(intel_dp, true);
/* cpu edp may only be disabled _after_ the cpu pipe/plane is disabled. */
if (!(port == PORT_A || IS_VALLEYVIEW(dev)))
......@@ -3224,10 +3234,14 @@ intel_dp_detect(struct drm_connector *connector, bool force)
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum drm_connector_status status;
enum intel_display_power_domain power_domain;
struct edid *edid = NULL;
intel_runtime_pm_get(dev_priv);
power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, drm_get_connector_name(connector));
......@@ -3258,21 +3272,32 @@ intel_dp_detect(struct drm_connector *connector, bool force)
status = connector_status_connected;
out:
intel_display_power_put(dev_priv, power_domain);
intel_runtime_pm_put(dev_priv);
return status;
}
static int intel_dp_get_modes(struct drm_connector *connector)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum intel_display_power_domain power_domain;
int ret;
/* We should parse the EDID data and find out if it has an audio sink
*/
power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter);
intel_display_power_put(dev_priv, power_domain);
if (ret)
return ret;
......@@ -3293,15 +3318,25 @@ static bool
intel_dp_detect_audio(struct drm_connector *connector)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum intel_display_power_domain power_domain;
struct edid *edid;
bool has_audio = false;
power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
edid = intel_dp_get_edid(connector, &intel_dp->adapter);
if (edid) {
has_audio = drm_detect_monitor_audio(edid);
kfree(edid);
}
intel_display_power_put(dev_priv, power_domain);
return has_audio;
}
......
......@@ -609,6 +609,8 @@ hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
/* i915_irq.c */
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe, bool enable);
bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe, bool enable);
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
enum transcoder pch_transcoder,
bool enable);
......@@ -732,7 +734,9 @@ ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
bool intel_crtc_active(struct drm_crtc *crtc);
void hsw_enable_ips(struct intel_crtc *crtc);
void hsw_disable_ips(struct intel_crtc *crtc);
void intel_display_set_init_power(struct drm_device *dev, bool enable);
void intel_display_set_init_power(struct drm_i915_private *dev, bool enable);
enum intel_display_power_domain
intel_display_port_power_domain(struct intel_encoder *intel_encoder);
int valleyview_get_vco(struct drm_i915_private *dev_priv);
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
struct intel_crtc_config *pipe_config);
......@@ -871,18 +875,17 @@ bool intel_fbc_enabled(struct drm_device *dev);
void intel_update_fbc(struct drm_device *dev);
void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
void intel_gpu_ips_teardown(void);
int intel_power_domains_init(struct drm_device *dev);
void intel_power_domains_remove(struct drm_device *dev);
bool intel_display_power_enabled(struct drm_device *dev,
int intel_power_domains_init(struct drm_i915_private *);
void intel_power_domains_remove(struct drm_i915_private *);
bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
bool intel_display_power_enabled_sw(struct drm_device *dev,
bool intel_display_power_enabled_sw(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
void intel_display_power_get(struct drm_device *dev,
void intel_display_power_get(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
void intel_display_power_put(struct drm_device *dev,
void intel_display_power_put(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
void intel_power_domains_init_hw(struct drm_device *dev);
void intel_set_power_well(struct drm_device *dev, bool enable);
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv);
void intel_enable_gt_powersave(struct drm_device *dev);
void intel_disable_gt_powersave(struct drm_device *dev);
void ironlake_teardown_rc6(struct drm_device *dev);
......
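The intel_drv.h hunk above also converts the whole power-domain API from struct drm_device * to struct drm_i915_private *, so each helper no longer has to chase dev->dev_private itself. Callers resolve dev_priv once and pass it down; a sketch of the resulting call-site shape (some_caller is hypothetical; POWER_DOMAIN_INIT is the init domain added elsewhere in this series):

static void some_caller(struct drm_device *dev)
{
	/* Resolve the private pointer once instead of in every helper. */
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
	/* ... register access that needs the always-on wells ... */
	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
}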
@@ -243,11 +243,16 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
enum intel_display_power_domain power_domain;
u32 port, func;
enum pipe p;
DRM_DEBUG_KMS("\n");
power_domain = intel_display_port_power_domain(encoder);
if (!intel_display_power_enabled(dev_priv, power_domain))
return false;
/* XXX: this only works for one DSI output */
for (p = PIPE_A; p <= PIPE_B; p++) {
port = I915_READ(MIPI_PORT_CTRL(p));
@@ -488,8 +493,19 @@ static enum drm_connector_status
intel_dsi_detect(struct drm_connector *connector, bool force)
{
struct intel_dsi *intel_dsi = intel_attached_dsi(connector);
struct intel_encoder *intel_encoder = &intel_dsi->base;
enum intel_display_power_domain power_domain;
enum drm_connector_status connector_status;
struct drm_i915_private *dev_priv = intel_encoder->base.dev->dev_private;
DRM_DEBUG_KMS("\n");
return intel_dsi->dev.dev_ops->detect(&intel_dsi->dev);
power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
connector_status = intel_dsi->dev.dev_ops->detect(&intel_dsi->dev);
intel_display_power_put(dev_priv, power_domain);
return connector_status;
}
static int intel_dsi_get_modes(struct drm_connector *connector)
......
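Note the asymmetry between the two kinds of hooks: ->detect and ->get_modes take a power reference for the duration of the probe, while ->get_hw_state (as in the intel_dsi_get_hw_state hunk above, and intel_hdmi_get_hw_state below) only checks whether the domain is already powered and reports "not active" otherwise, since state readout must not power wells up behind the modeset code's back. A condensed sketch of that guard (example_get_hw_state is a placeholder, not code from the commit):

static bool example_get_hw_state(struct intel_encoder *encoder,
				 enum pipe *pipe)
{
	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
	enum intel_display_power_domain power_domain;

	power_domain = intel_display_port_power_domain(encoder);
	/* If the well is down the encoder cannot be active; don't wake it. */
	if (!intel_display_power_enabled(dev_priv, power_domain))
		return false;

	/* ... only now is it safe to read the port registers ... */
	return false;	/* placeholder */
}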
@@ -289,7 +289,27 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
struct drm_device *dev = fb_helper->dev;
int i, j;
bool *save_enabled;
bool any_enabled = false;
bool fallback = true;
int num_connectors_enabled = 0;
int num_connectors_detected = 0;
/*
* If the user specified any force options, just bail here
* and use that config.
*/
for (i = 0; i < fb_helper->connector_count; i++) {
struct drm_fb_helper_connector *fb_conn;
struct drm_connector *connector;
fb_conn = fb_helper->connector_info[i];
connector = fb_conn->connector;
if (!enabled[i])
continue;
if (connector->force != DRM_FORCE_UNSPECIFIED)
return false;
}
save_enabled = kcalloc(dev->mode_config.num_connector, sizeof(bool),
GFP_KERNEL);
@@ -306,6 +326,10 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
fb_conn = fb_helper->connector_info[i];
connector = fb_conn->connector;
if (connector->status == connector_status_connected)
num_connectors_detected++;
if (!enabled[i]) {
DRM_DEBUG_KMS("connector %d not enabled, skipping\n",
connector->base.id);
@@ -320,6 +344,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
continue;
}
num_connectors_enabled++;
new_crtc = intel_fb_helper_crtc(fb_helper, encoder->crtc);
/*
@@ -329,7 +355,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
*/
for (j = 0; j < fb_helper->connector_count; j++) {
if (crtcs[j] == new_crtc) {
any_enabled = false;
DRM_DEBUG_KMS("fallback: cloned configuration\n");
fallback = true;
goto out;
}
}
@@ -372,11 +399,25 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
encoder->crtc->base.id,
modes[i]->name);
any_enabled = true;
fallback = false;
}
/*
* If the BIOS didn't enable everything it could, fall back to the
* fbdev helper's behaviour of lighting up as many outputs as
* possible.
*/
if (num_connectors_enabled != num_connectors_detected &&
num_connectors_enabled < INTEL_INFO(dev)->num_pipes) {
DRM_DEBUG_KMS("fallback: Not all outputs enabled\n");
DRM_DEBUG_KMS("Enabled: %i, detected: %i\n", num_connectors_enabled,
num_connectors_detected);
fallback = true;
}
out:
if (!any_enabled) {
if (fallback) {
DRM_DEBUG_KMS("Not using firmware configuration\n");
memcpy(enabled, save_enabled, dev->mode_config.num_connector);
kfree(save_enabled);
return false;
......
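The intel_fbdev.c rework replaces the single any_enabled flag with explicit counters and an inverted fallback flag: the inherited BIOS configuration is kept only if it is not cloned and does not leave usable outputs dark. The decision rule, condensed into a standalone predicate (use_bios_config is a hypothetical helper, not in the commit; num_pipes bounds how many outputs the hardware can drive at once):

static bool use_bios_config(int num_enabled, int num_detected,
			    int num_pipes, bool cloned)
{
	if (cloned)
		return false;		/* two connectors share a CRTC */
	if (num_enabled != num_detected && num_enabled < num_pipes)
		return false;		/* BIOS left usable outputs dark */
	return num_enabled > 0;		/* need at least one lit output */
}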
@@ -667,8 +667,13 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
enum intel_display_power_domain power_domain;
u32 tmp;
power_domain = intel_display_port_power_domain(encoder);
if (!intel_display_power_enabled(dev_priv, power_domain))
return false;
tmp = I915_READ(intel_hdmi->hdmi_reg);
if (!(tmp & SDVO_ENABLE))
@@ -909,11 +914,15 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_i915_private *dev_priv = dev->dev_private;
struct edid *edid;
enum intel_display_power_domain power_domain;
enum drm_connector_status status = connector_status_disconnected;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, drm_get_connector_name(connector));
power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
intel_hdmi->has_hdmi_sink = false;
intel_hdmi->has_audio = false;
intel_hdmi->rgb_quant_range_selectable = false;
@@ -941,31 +950,48 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
intel_encoder->type = INTEL_OUTPUT_HDMI;
}
intel_display_power_put(dev_priv, power_domain);
return status;
}
static int intel_hdmi_get_modes(struct drm_connector *connector)
{
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
struct drm_i915_private *dev_priv = connector->dev->dev_private;
enum intel_display_power_domain power_domain;
int ret;
/* We should parse the EDID data and find out if it's an HDMI sink so
* we can send audio to it.
*/
return intel_ddc_get_modes(connector,
power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
ret = intel_ddc_get_modes(connector,
intel_gmbus_get_adapter(dev_priv,
intel_hdmi->ddc_bus));
intel_display_power_put(dev_priv, power_domain);
return ret;
}
static bool
intel_hdmi_detect_audio(struct drm_connector *connector)
{
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
struct drm_i915_private *dev_priv = connector->dev->dev_private;
enum intel_display_power_domain power_domain;
struct edid *edid;
bool has_audio = false;
power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
edid = drm_get_edid(connector,
intel_gmbus_get_adapter(dev_priv,
intel_hdmi->ddc_bus));
@@ -975,6 +1001,8 @@ intel_hdmi_detect_audio(struct drm_connector *connector)
kfree(edid);
}
intel_display_power_put(dev_priv, power_domain);
return has_audio;
}
......
@@ -1076,7 +1076,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
mutex_lock(&dev->struct_mutex);
if (new_bo->tiling_mode) {
DRM_ERROR("buffer used for overlay image can not be tiled\n");
DRM_DEBUG_KMS("buffer used for overlay image can not be tiled\n");
ret = -EINVAL;
goto out_unlock;
}
......
This diff has been collapsed.
@@ -571,7 +571,7 @@ static int init_render_ring(struct intel_ring_buffer *ring)
* to use MI_WAIT_FOR_EVENT within the CS. It should already be
* programmed to '1' on all products.
*
* WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
* WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw
*/
if (INTEL_INFO(dev)->gen >= 6)
I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
@@ -1388,6 +1388,8 @@ static int intel_init_ring_buffer(struct drm_device *dev,
if (IS_I830(ring->dev) || IS_845G(ring->dev))
ring->effective_size -= 128;
i915_cmd_parser_init_ring(ring);
return 0;
err_unmap:
......
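MI_MODE, written above with _MASKED_BIT_ENABLE(), is one of the "masked" registers on these GPUs: the top 16 bits of the written value select which of the bottom 16 bits actually change, so a single bit can be flipped without a read-modify-write. The helpers expand roughly as follows (see i915_reg.h for the canonical definitions):

#define _MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))	/* unmask bit a, set it */
#define _MASKED_BIT_DISABLE(a)	((a) << 16)		/* unmask bit a, clear it */

/*
 * I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE))
 * therefore sets ASYNC_FLIP_PERF_DISABLE and leaves every other
 * MI_MODE bit untouched.
 */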
@@ -164,6 +164,38 @@ struct intel_ring_buffer {
u32 gtt_offset;
volatile u32 *cpu_page;
} scratch;
/*
* Tables of commands the command parser needs to know about
* for this ring.
*/
const struct drm_i915_cmd_table *cmd_tables;
int cmd_table_count;
/*
* Table of registers allowed in commands that read/write registers.
*/
const u32 *reg_table;
int reg_count;
/*
* Table of registers allowed in commands that read/write registers, but
* only from the DRM master.
*/
const u32 *master_reg_table;
int master_reg_count;
/*
* Returns the bitmask for the length field of the specified command.
* Return 0 for an unrecognized/invalid command.
*
* If the command parser finds an entry for a command in the ring's
* cmd_tables, it gets the command's length based on the table entry.
* If not, it calls this function to determine the per-ring length field
* encoding for the command (i.e. certain opcode ranges use certain bits
* to encode the command length in the header).
*/
u32 (*get_cmd_length_mask)(u32 cmd_header);
};
static inline bool
......
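The get_cmd_length_mask comment above describes the command parser's fallback path: when a command header has no entry in the ring's cmd_tables, the per-ring hook derives the length mask from the opcode range encoded in the header. A hedged sketch of what such a hook can look like (the client values and masks here are illustrative, not the actual per-ring encodings from this series):

static u32 example_get_cmd_length_mask(u32 cmd_header)
{
	u32 client = cmd_header >> 29;	/* bits 31:29 select the client */

	if (client == 0x0)		/* MI client (illustrative) */
		return 0x3F;		/* length lives in bits 5:0 */
	else if (client == 0x3)		/* 3D/GFXPIPE client (illustrative) */
		return 0xFF;		/* length lives in bits 7:0 */

	return 0;			/* unrecognized: parser rejects it */
}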
This diff has been collapsed.