Commit 85bd5ac3 authored by Dave Airlie

Merge tag 'drm-intel-next-2016-03-30' of git://anongit.freedesktop.org/drm-intel into drm-next

- VBT code refactor for a clean split between parsing & using of firmware
  information (Jani)
- untangle the PLL computation code and split up the monster
  i9xx_crtc_compute_clocks (Ander)
- DSI support for BXT (Jani, Shashank Sharma and others)
- color manager (i.e. de-gamma, color conversion matrix & gamma support) from
  Lionel Landwerlin (see the userspace sketch after this list)
- Vulkan HSW support in the command parser (Jordan Justen)
- large-scale renaming of intel_engine_cs variables/parameters to avoid the epic
  ring vs. engine confusion introduced in gen8 (Tvrtko Ursulin)
- a few atomic patches from Maarten & Matt, the big one being two-stage
  watermark programming on ilk-bdw
- refactor driver load and add infrastructure to inject load failures for
  testing, from Imre
- various small things all over
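The color manager work exposes the DEGAMMA_LUT / CTM / GAMMA_LUT CRTC properties to userspace. Below is a rough, untested sketch of how a client might drive the gamma LUT with libdrm; the helper names, include paths and the use of the legacy SetProperty path instead of an atomic commit are assumptions of this example, and the 512-entry LUT size merely mirrors the BDW_COLORS define further down in this diff (real code should query GAMMA_LUT_SIZE).

#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <xf86drmMode.h>
#include <drm/drm_mode.h>	/* struct drm_color_lut; include path is an assumption */

#define LUT_SIZE 512	/* matches BDW_COLORS below; query GAMMA_LUT_SIZE in real code */

/* Look up a CRTC property id by name (helper invented for this sketch). */
static uint32_t find_crtc_prop(int fd, uint32_t crtc_id, const char *name)
{
	drmModeObjectProperties *props;
	uint32_t id = 0;

	props = drmModeObjectGetProperties(fd, crtc_id, DRM_MODE_OBJECT_CRTC);
	if (!props)
		return 0;

	for (uint32_t i = 0; i < props->count_props && !id; i++) {
		drmModePropertyRes *p = drmModeGetProperty(fd, props->props[i]);

		if (!p)
			continue;
		if (!strcmp(p->name, name))
			id = p->prop_id;
		drmModeFreeProperty(p);
	}
	drmModeFreeObjectProperties(props);
	return id;
}

/* Upload an identity gamma ramp through the new GAMMA_LUT property. */
static int set_identity_gamma(int fd, uint32_t crtc_id)
{
	struct drm_color_lut lut[LUT_SIZE];
	uint32_t blob_id, prop_id;

	for (uint32_t i = 0; i < LUT_SIZE; i++) {
		uint16_t v = (uint16_t)((i * 0xffffu) / (LUT_SIZE - 1));

		lut[i].red = lut[i].green = lut[i].blue = v;
		lut[i].reserved = 0;
	}

	prop_id = find_crtc_prop(fd, crtc_id, "GAMMA_LUT");
	if (!prop_id)
		return -ENOENT;

	if (drmModeCreatePropertyBlob(fd, lut, sizeof(lut), &blob_id))
		return -errno;

	return drmModeObjectSetProperty(fd, crtc_id, DRM_MODE_OBJECT_CRTC,
					prop_id, blob_id);
}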

* tag 'drm-intel-next-2016-03-30' of git://anongit.freedesktop.org/drm-intel: (179 commits)
  drm/i915: Update DRIVER_DATE to 20160330
  drm/i915: Call intel_dp_mst_resume() before resuming displays
  drm/i915: Fix races on fbdev
  drm/i915: remove unused dev_priv->render_reclock_avail
  drm/i915: move sdvo mappings to vbt data
  drm/i915: move edp low vswing config to vbt data
  drm/i915: use a substruct in vbt data for edp
  drm/i915: replace for_each_engine()
  drm/i915: introduce for_each_engine_id()
  drm/i915/bxt: Fix DSI HW state readout
  drm/i915: Remove vblank wait from hsw_enable_ips, v2.
  drm/i915: Tidy aliasing_gtt_bind_vma()
  drm/i915: Split PNV version of crtc_compute_clock()
  drm/i915: Split g4x_crtc_compute_clock()
  drm/i915: Split i8xx_crtc_compute_clock()
  drm/i915: Split CHV and VLV specific crtc_compute_clock() hooks
  drm/i915: Merge ironlake_compute_clocks() and ironlake_crtc_compute_clock()
  drm/i915: Move fp divisor calculation into ironlake_compute_dpll()
  drm/i915: Pass crtc_state->dpll directly to ->find_dpll()
  drm/i915: Simplify ironlake_crtc_compute_clock() CPU eDP case
  ...
......@@ -2153,7 +2153,11 @@ void intel_crt_init(struct drm_device *dev)
<td valign="top" >ENUM</td>
<td valign="top" >{ "Automatic", "Full", "Limited 16:235" }</td>
<td valign="top" >Connector</td>
<td valign="top" >TBD</td>
<td valign="top" >When this property is set to Limited 16:235
and CTM is set, the hardware will be programmed with the
result of the multiplication of CTM by the limited range
matrix to ensure the pixels normaly in the range 0..1.0 are
remapped to the range 16/255..235/255.</td>
</tr>
<tr>
<td valign="top" >“audio”</td>
......@@ -3334,7 +3338,7 @@ int num_ioctls;</synopsis>
<title>Video BIOS Table (VBT)</title>
!Pdrivers/gpu/drm/i915/intel_bios.c Video BIOS Table (VBT)
!Idrivers/gpu/drm/i915/intel_bios.c
!Idrivers/gpu/drm/i915/intel_bios.h
!Idrivers/gpu/drm/i915/intel_vbt_defs.h
</sect2>
</sect1>
......
......@@ -56,3 +56,9 @@ config DRM_I915_USERPTR
selected to enable full userptr support.
If in doubt, say "Y".
menu "drm/i915 Debugging"
depends on DRM_I915
depends on EXPERT
source drivers/gpu/drm/i915/Kconfig.debug
endmenu
config DRM_I915_DEBUG
bool "Enable additional driver debugging"
depends on DRM_I915
default n
help
Choose this option to turn on extra driver debugging that may affect
performance but will catch some internal issues.
Recommended for driver developers only.
If in doubt, say "N".
......@@ -55,7 +55,9 @@ i915-y += intel_audio.o \
intel_atomic.o \
intel_atomic_plane.o \
intel_bios.o \
intel_color.o \
intel_display.o \
intel_dpll_mgr.o \
intel_fbc.o \
intel_fifo_underrun.o \
intel_frontbuffer.o \
......
......@@ -444,6 +444,7 @@ static const struct drm_i915_reg_descriptor gen7_render_regs[] = {
REG64(CL_PRIMITIVES_COUNT),
REG64(PS_INVOCATION_COUNT),
REG64(PS_DEPTH_COUNT),
REG64_IDX(RING_TIMESTAMP, RENDER_RING_BASE),
REG32(OACONTROL), /* Only allowed for LRI and SRM. See below. */
REG64(MI_PREDICATE_SRC0),
REG64(MI_PREDICATE_SRC1),
......@@ -471,6 +472,25 @@ static const struct drm_i915_reg_descriptor gen7_render_regs[] = {
REG32(GEN7_L3SQCREG1),
REG32(GEN7_L3CNTLREG2),
REG32(GEN7_L3CNTLREG3),
};
static const struct drm_i915_reg_descriptor hsw_render_regs[] = {
REG64_IDX(HSW_CS_GPR, 0),
REG64_IDX(HSW_CS_GPR, 1),
REG64_IDX(HSW_CS_GPR, 2),
REG64_IDX(HSW_CS_GPR, 3),
REG64_IDX(HSW_CS_GPR, 4),
REG64_IDX(HSW_CS_GPR, 5),
REG64_IDX(HSW_CS_GPR, 6),
REG64_IDX(HSW_CS_GPR, 7),
REG64_IDX(HSW_CS_GPR, 8),
REG64_IDX(HSW_CS_GPR, 9),
REG64_IDX(HSW_CS_GPR, 10),
REG64_IDX(HSW_CS_GPR, 11),
REG64_IDX(HSW_CS_GPR, 12),
REG64_IDX(HSW_CS_GPR, 13),
REG64_IDX(HSW_CS_GPR, 14),
REG64_IDX(HSW_CS_GPR, 15),
REG32(HSW_SCRATCH1,
.mask = ~HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE,
.value = 0),
......@@ -500,6 +520,33 @@ static const struct drm_i915_reg_descriptor hsw_master_regs[] = {
#undef REG64
#undef REG32
struct drm_i915_reg_table {
const struct drm_i915_reg_descriptor *regs;
int num_regs;
bool master;
};
static const struct drm_i915_reg_table ivb_render_reg_tables[] = {
{ gen7_render_regs, ARRAY_SIZE(gen7_render_regs), false },
{ ivb_master_regs, ARRAY_SIZE(ivb_master_regs), true },
};
static const struct drm_i915_reg_table ivb_blt_reg_tables[] = {
{ gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs), false },
{ ivb_master_regs, ARRAY_SIZE(ivb_master_regs), true },
};
static const struct drm_i915_reg_table hsw_render_reg_tables[] = {
{ gen7_render_regs, ARRAY_SIZE(gen7_render_regs), false },
{ hsw_render_regs, ARRAY_SIZE(hsw_render_regs), false },
{ hsw_master_regs, ARRAY_SIZE(hsw_master_regs), true },
};
static const struct drm_i915_reg_table hsw_blt_reg_tables[] = {
{ gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs), false },
{ hsw_master_regs, ARRAY_SIZE(hsw_master_regs), true },
};
static u32 gen7_render_get_cmd_length_mask(u32 cmd_header)
{
u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
......@@ -555,7 +602,7 @@ static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header)
return 0;
}
static bool validate_cmds_sorted(struct intel_engine_cs *ring,
static bool validate_cmds_sorted(struct intel_engine_cs *engine,
const struct drm_i915_cmd_table *cmd_tables,
int cmd_table_count)
{
......@@ -577,7 +624,7 @@ static bool validate_cmds_sorted(struct intel_engine_cs *ring,
if (curr < previous) {
DRM_ERROR("CMD: table not sorted ring=%d table=%d entry=%d cmd=0x%08X prev=0x%08X\n",
ring->id, i, j, curr, previous);
engine->id, i, j, curr, previous);
ret = false;
}
......@@ -611,11 +658,18 @@ static bool check_sorted(int ring_id,
return ret;
}
static bool validate_regs_sorted(struct intel_engine_cs *ring)
static bool validate_regs_sorted(struct intel_engine_cs *engine)
{
return check_sorted(ring->id, ring->reg_table, ring->reg_count) &&
check_sorted(ring->id, ring->master_reg_table,
ring->master_reg_count);
int i;
const struct drm_i915_reg_table *table;
for (i = 0; i < engine->reg_table_count; i++) {
table = &engine->reg_tables[i];
if (!check_sorted(engine->id, table->regs, table->num_regs))
return false;
}
return true;
}
struct cmd_node {
......@@ -639,13 +693,13 @@ struct cmd_node {
*/
#define CMD_HASH_MASK STD_MI_OPCODE_MASK
static int init_hash_table(struct intel_engine_cs *ring,
static int init_hash_table(struct intel_engine_cs *engine,
const struct drm_i915_cmd_table *cmd_tables,
int cmd_table_count)
{
int i, j;
hash_init(ring->cmd_hash);
hash_init(engine->cmd_hash);
for (i = 0; i < cmd_table_count; i++) {
const struct drm_i915_cmd_table *table = &cmd_tables[i];
......@@ -660,7 +714,7 @@ static int init_hash_table(struct intel_engine_cs *ring,
return -ENOMEM;
desc_node->desc = desc;
hash_add(ring->cmd_hash, &desc_node->node,
hash_add(engine->cmd_hash, &desc_node->node,
desc->cmd.value & CMD_HASH_MASK);
}
}
......@@ -668,13 +722,13 @@ static int init_hash_table(struct intel_engine_cs *ring,
return 0;
}
static void fini_hash_table(struct intel_engine_cs *ring)
static void fini_hash_table(struct intel_engine_cs *engine)
{
struct hlist_node *tmp;
struct cmd_node *desc_node;
int i;
hash_for_each_safe(ring->cmd_hash, i, tmp, desc_node, node) {
hash_for_each_safe(engine->cmd_hash, i, tmp, desc_node, node) {
hash_del(&desc_node->node);
kfree(desc_node);
}
......@@ -690,18 +744,18 @@ static void fini_hash_table(struct intel_engine_cs *ring)
*
* Return: non-zero if initialization fails
*/
int i915_cmd_parser_init_ring(struct intel_engine_cs *ring)
int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
{
const struct drm_i915_cmd_table *cmd_tables;
int cmd_table_count;
int ret;
if (!IS_GEN7(ring->dev))
if (!IS_GEN7(engine->dev))
return 0;
switch (ring->id) {
switch (engine->id) {
case RCS:
if (IS_HASWELL(ring->dev)) {
if (IS_HASWELL(engine->dev)) {
cmd_tables = hsw_render_ring_cmds;
cmd_table_count =
ARRAY_SIZE(hsw_render_ring_cmds);
......@@ -710,26 +764,23 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *ring)
cmd_table_count = ARRAY_SIZE(gen7_render_cmds);
}
ring->reg_table = gen7_render_regs;
ring->reg_count = ARRAY_SIZE(gen7_render_regs);
if (IS_HASWELL(ring->dev)) {
ring->master_reg_table = hsw_master_regs;
ring->master_reg_count = ARRAY_SIZE(hsw_master_regs);
if (IS_HASWELL(engine->dev)) {
engine->reg_tables = hsw_render_reg_tables;
engine->reg_table_count = ARRAY_SIZE(hsw_render_reg_tables);
} else {
ring->master_reg_table = ivb_master_regs;
ring->master_reg_count = ARRAY_SIZE(ivb_master_regs);
engine->reg_tables = ivb_render_reg_tables;
engine->reg_table_count = ARRAY_SIZE(ivb_render_reg_tables);
}
ring->get_cmd_length_mask = gen7_render_get_cmd_length_mask;
engine->get_cmd_length_mask = gen7_render_get_cmd_length_mask;
break;
case VCS:
cmd_tables = gen7_video_cmds;
cmd_table_count = ARRAY_SIZE(gen7_video_cmds);
ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
break;
case BCS:
if (IS_HASWELL(ring->dev)) {
if (IS_HASWELL(engine->dev)) {
cmd_tables = hsw_blt_ring_cmds;
cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds);
} else {
......@@ -737,44 +788,41 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *ring)
cmd_table_count = ARRAY_SIZE(gen7_blt_cmds);
}
ring->reg_table = gen7_blt_regs;
ring->reg_count = ARRAY_SIZE(gen7_blt_regs);
if (IS_HASWELL(ring->dev)) {
ring->master_reg_table = hsw_master_regs;
ring->master_reg_count = ARRAY_SIZE(hsw_master_regs);
if (IS_HASWELL(engine->dev)) {
engine->reg_tables = hsw_blt_reg_tables;
engine->reg_table_count = ARRAY_SIZE(hsw_blt_reg_tables);
} else {
ring->master_reg_table = ivb_master_regs;
ring->master_reg_count = ARRAY_SIZE(ivb_master_regs);
engine->reg_tables = ivb_blt_reg_tables;
engine->reg_table_count = ARRAY_SIZE(ivb_blt_reg_tables);
}
ring->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
engine->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
break;
case VECS:
cmd_tables = hsw_vebox_cmds;
cmd_table_count = ARRAY_SIZE(hsw_vebox_cmds);
/* VECS can use the same length_mask function as VCS */
ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
break;
default:
DRM_ERROR("CMD: cmd_parser_init with unknown ring: %d\n",
ring->id);
engine->id);
BUG();
}
BUG_ON(!validate_cmds_sorted(ring, cmd_tables, cmd_table_count));
BUG_ON(!validate_regs_sorted(ring));
BUG_ON(!validate_cmds_sorted(engine, cmd_tables, cmd_table_count));
BUG_ON(!validate_regs_sorted(engine));
WARN_ON(!hash_empty(ring->cmd_hash));
WARN_ON(!hash_empty(engine->cmd_hash));
ret = init_hash_table(ring, cmd_tables, cmd_table_count);
ret = init_hash_table(engine, cmd_tables, cmd_table_count);
if (ret) {
DRM_ERROR("CMD: cmd_parser_init failed!\n");
fini_hash_table(ring);
fini_hash_table(engine);
return ret;
}
ring->needs_cmd_parser = true;
engine->needs_cmd_parser = true;
return 0;
}
......@@ -786,21 +834,21 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *ring)
* Releases any resources related to command parsing that may have been
* initialized for the specified ring.
*/
void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring)
void i915_cmd_parser_fini_ring(struct intel_engine_cs *engine)
{
if (!ring->needs_cmd_parser)
if (!engine->needs_cmd_parser)
return;
fini_hash_table(ring);
fini_hash_table(engine);
}
static const struct drm_i915_cmd_descriptor*
find_cmd_in_table(struct intel_engine_cs *ring,
find_cmd_in_table(struct intel_engine_cs *engine,
u32 cmd_header)
{
struct cmd_node *desc_node;
hash_for_each_possible(ring->cmd_hash, desc_node, node,
hash_for_each_possible(engine->cmd_hash, desc_node, node,
cmd_header & CMD_HASH_MASK) {
const struct drm_i915_cmd_descriptor *desc = desc_node->desc;
u32 masked_cmd = desc->cmd.mask & cmd_header;
......@@ -822,18 +870,18 @@ find_cmd_in_table(struct intel_engine_cs *ring,
* ring's default length encoding and returns default_desc.
*/
static const struct drm_i915_cmd_descriptor*
find_cmd(struct intel_engine_cs *ring,
find_cmd(struct intel_engine_cs *engine,
u32 cmd_header,
struct drm_i915_cmd_descriptor *default_desc)
{
const struct drm_i915_cmd_descriptor *desc;
u32 mask;
desc = find_cmd_in_table(ring, cmd_header);
desc = find_cmd_in_table(engine, cmd_header);
if (desc)
return desc;
mask = ring->get_cmd_length_mask(cmd_header);
mask = engine->get_cmd_length_mask(cmd_header);
if (!mask)
return NULL;
......@@ -848,12 +896,31 @@ static const struct drm_i915_reg_descriptor *
find_reg(const struct drm_i915_reg_descriptor *table,
int count, u32 addr)
{
if (table) {
int i;
int i;
for (i = 0; i < count; i++) {
if (i915_mmio_reg_offset(table[i].addr) == addr)
return &table[i];
}
for (i = 0; i < count; i++) {
if (i915_mmio_reg_offset(table[i].addr) == addr)
return &table[i];
return NULL;
}
static const struct drm_i915_reg_descriptor *
find_reg_in_tables(const struct drm_i915_reg_table *tables,
int count, bool is_master, u32 addr)
{
int i;
const struct drm_i915_reg_table *table;
const struct drm_i915_reg_descriptor *reg;
for (i = 0; i < count; i++) {
table = &tables[i];
if (!table->master || is_master) {
reg = find_reg(table->regs, table->num_regs,
addr);
if (reg != NULL)
return reg;
}
}
......@@ -963,18 +1030,18 @@ static u32 *copy_batch(struct drm_i915_gem_object *dest_obj,
*
* Return: true if the ring requires software command parsing
*/
bool i915_needs_cmd_parser(struct intel_engine_cs *ring)
bool i915_needs_cmd_parser(struct intel_engine_cs *engine)
{
if (!ring->needs_cmd_parser)
if (!engine->needs_cmd_parser)
return false;
if (!USES_PPGTT(ring->dev))
if (!USES_PPGTT(engine->dev))
return false;
return (i915.enable_cmd_parser == 1);
}
static bool check_cmd(const struct intel_engine_cs *ring,
static bool check_cmd(const struct intel_engine_cs *engine,
const struct drm_i915_cmd_descriptor *desc,
const u32 *cmd, u32 length,
const bool is_master,
......@@ -1004,17 +1071,14 @@ static bool check_cmd(const struct intel_engine_cs *ring,
offset += step) {
const u32 reg_addr = cmd[offset] & desc->reg.mask;
const struct drm_i915_reg_descriptor *reg =
find_reg(ring->reg_table, ring->reg_count,
reg_addr);
if (!reg && is_master)
reg = find_reg(ring->master_reg_table,
ring->master_reg_count,
reg_addr);
find_reg_in_tables(engine->reg_tables,
engine->reg_table_count,
is_master,
reg_addr);
if (!reg) {
DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (ring=%d)\n",
reg_addr, *cmd, ring->id);
reg_addr, *cmd, engine->id);
return false;
}
......@@ -1087,7 +1151,7 @@ static bool check_cmd(const struct intel_engine_cs *ring,
*cmd,
desc->bits[i].mask,
desc->bits[i].expected,
dword, ring->id);
dword, engine->id);
return false;
}
}
......@@ -1113,7 +1177,7 @@ static bool check_cmd(const struct intel_engine_cs *ring,
* Return: non-zero if the parser finds violations or otherwise fails; -EACCES
* if the batch appears legal but should use hardware parsing
*/
int i915_parse_cmds(struct intel_engine_cs *ring,
int i915_parse_cmds(struct intel_engine_cs *engine,
struct drm_i915_gem_object *batch_obj,
struct drm_i915_gem_object *shadow_batch_obj,
u32 batch_start_offset,
......@@ -1147,7 +1211,7 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
if (*cmd == MI_BATCH_BUFFER_END)
break;
desc = find_cmd(ring, *cmd, &default_desc);
desc = find_cmd(engine, *cmd, &default_desc);
if (!desc) {
DRM_DEBUG_DRIVER("CMD: Unrecognized command: 0x%08X\n",
*cmd);
......@@ -1179,7 +1243,7 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
break;
}
if (!check_cmd(ring, desc, cmd, length, is_master,
if (!check_cmd(engine, desc, cmd, length, is_master,
&oacontrol_set)) {
ret = -EINVAL;
break;
......@@ -1223,6 +1287,7 @@ int i915_cmd_parser_get_version(void)
* 3. Allow access to the GPGPU_THREADS_DISPATCHED register.
* 4. L3 atomic chicken bits of HSW_SCRATCH1 and HSW_ROW_CHICKEN3.
* 5. GPGPU dispatch compute indirect registers.
* 6. TIMESTAMP register and Haswell CS GPR registers
*/
return 5;
return 6;
}
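Userspace discovers these whitelist additions through the command parser version, so a client that wants the newly allowed TIMESTAMP and CS GPR registers would check for version >= 6. A minimal sketch using the existing GETPARAM ioctl follows; the include path and the negative-errno return convention are assumptions of the example.

#include <errno.h>
#include <xf86drm.h>
#include <i915_drm.h>	/* libdrm's copy of the uapi header; path is an assumption */

/* Return the kernel's command parser version, or -errno on failure. */
static int i915_cmd_parser_version(int drm_fd)
{
	int value = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_CMD_PARSER_VERSION,
		.value = &value,
	};

	if (drmIoctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return -errno;

	return value;	/* >= 6: TIMESTAMP and HSW CS GPRs are whitelisted */
}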
(This file's diff has been collapsed.)
......@@ -50,6 +50,66 @@
#include <linux/pm_runtime.h>
#include <linux/oom.h>
static unsigned int i915_load_fail_count;
bool __i915_inject_load_failure(const char *func, int line)
{
if (i915_load_fail_count >= i915.inject_load_failure)
return false;
if (++i915_load_fail_count == i915.inject_load_failure) {
DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n",
i915.inject_load_failure, func, line);
return true;
}
return false;
}
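The counter above pairs with the i915.inject_load_failure module parameter referenced in this function: every init step calls the checkpoint helper, and the step whose ordinal matches the parameter pretends to fail so the error-unwind paths can be exercised. A hypothetical init step (the function name is invented for illustration) follows the same pattern as the real ones later in this patch:

/* Hypothetical init step: booting with i915.inject_load_failure=N makes
 * the N-th such checkpoint return -ENODEV, exercising the unwind path. */
static int i915_driver_init_example(struct drm_i915_private *dev_priv)
{
	if (i915_inject_load_failure())
		return -ENODEV;

	/* ... real initialization would go here ... */
	return 0;
}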
#define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI"
#define FDO_BUG_MSG "Please file a bug at " FDO_BUG_URL " against DRM/Intel " \
"providing the dmesg log by booting with drm.debug=0xf"
void
__i915_printk(struct drm_i915_private *dev_priv, const char *level,
const char *fmt, ...)
{
static bool shown_bug_once;
struct device *dev = dev_priv->dev->dev;
bool is_error = level[1] <= KERN_ERR[1];
bool is_debug = level[1] == KERN_DEBUG[1];
struct va_format vaf;
va_list args;
if (is_debug && !(drm_debug & DRM_UT_DRIVER))
return;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
dev_printk(level, dev, "[" DRM_NAME ":%ps] %pV",
__builtin_return_address(0), &vaf);
if (is_error && !shown_bug_once) {
dev_notice(dev, "%s", FDO_BUG_MSG);
shown_bug_once = true;
}
va_end(args);
}
static bool i915_error_injected(struct drm_i915_private *dev_priv)
{
return i915.inject_load_failure &&
i915_load_fail_count == i915.inject_load_failure;
}
#define i915_load_error(dev_priv, fmt, ...) \
__i915_printk(dev_priv, \
i915_error_injected(dev_priv) ? KERN_DEBUG : KERN_ERR, \
fmt, ##__VA_ARGS__)
static int i915_getparam(struct drm_device *dev, void *data,
struct drm_file *file_priv)
......@@ -87,16 +147,16 @@ static int i915_getparam(struct drm_device *dev, void *data,
value = 1;
break;
case I915_PARAM_HAS_BSD:
value = intel_ring_initialized(&dev_priv->ring[VCS]);
value = intel_engine_initialized(&dev_priv->engine[VCS]);
break;
case I915_PARAM_HAS_BLT:
value = intel_ring_initialized(&dev_priv->ring[BCS]);
value = intel_engine_initialized(&dev_priv->engine[BCS]);
break;
case I915_PARAM_HAS_VEBOX:
value = intel_ring_initialized(&dev_priv->ring[VECS]);
value = intel_engine_initialized(&dev_priv->engine[VECS]);
break;
case I915_PARAM_HAS_BSD2:
value = intel_ring_initialized(&dev_priv->ring[VCS2]);
value = intel_engine_initialized(&dev_priv->engine[VCS2]);
break;
case I915_PARAM_HAS_RELAXED_FENCING:
value = 1;
......@@ -370,6 +430,9 @@ static int i915_load_modeset_init(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
if (i915_inject_load_failure())
return -ENODEV;
ret = intel_bios_init(dev_priv);
if (ret)
DRM_INFO("failed to find VBIOS tables\n");
......@@ -430,11 +493,9 @@ static int i915_load_modeset_init(struct drm_device *dev)
* Some ports require correctly set-up hpd registers for detection to
* work properly (leading to ghost connected connector status), e.g. VGA
* on gm45. Hence we can only set up the initial fbdev config after hpd
* irqs are fully enabled. Now we should scan for the initial config
* only once hotplug handling is enabled, but due to screwed-up locking
* around kms/fbdev init we can't protect the fdbev initial config
* scanning against hotplug events. Hence do this first and ignore the
* tiny window where we will loose hotplug notifactions.
* irqs are fully enabled. We protect the fbdev initial config scanning
* against hotplug events by waiting in intel_fbdev_output_poll_changed
* until the asynchronous thread has finished.
*/
intel_fbdev_initial_config_async(dev);
......@@ -444,7 +505,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
cleanup_gem:
mutex_lock(&dev->struct_mutex);
i915_gem_cleanup_ringbuffer(dev);
i915_gem_cleanup_engines(dev);
i915_gem_context_fini(dev);
mutex_unlock(&dev->struct_mutex);
cleanup_irq:
......@@ -453,6 +514,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
intel_teardown_gmbus(dev);
cleanup_csr:
intel_csr_ucode_fini(dev_priv);
intel_power_domains_fini(dev_priv);
vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
vga_client_register(dev->pdev, NULL, NULL, NULL);
......@@ -472,8 +534,8 @@ static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
if (!ap)
return -ENOMEM;
ap->ranges[0].base = dev_priv->gtt.mappable_base;
ap->ranges[0].size = dev_priv->gtt.mappable_end;
ap->ranges[0].base = dev_priv->ggtt.mappable_base;
ap->ranges[0].size = dev_priv->ggtt.mappable_end;
primary =
pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
......@@ -853,6 +915,10 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
else if (INTEL_INFO(dev)->gen >= 9)
gen9_sseu_info_init(dev);
/* Snooping is broken on BXT A stepping. */
info->has_snoop = !info->has_llc;
info->has_snoop &= !IS_BXT_REVID(dev, 0, BXT_REVID_A1);
DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total);
DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total);
DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice);
......@@ -929,6 +995,84 @@ static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
destroy_workqueue(dev_priv->wq);
}
/**
* i915_driver_init_early - setup state not requiring device access
* @dev_priv: device private
*
* Initialize everything that is a "SW-only" state, that is state not
* requiring accessing the device or exposing the driver via kernel internal
* or userspace interfaces. Example steps belonging here: lock initialization,
* system memory allocation, setting up device specific attributes and
* function hooks not requiring accessing the device.
*/
static int i915_driver_init_early(struct drm_i915_private *dev_priv,
struct drm_device *dev,
struct intel_device_info *info)
{
struct intel_device_info *device_info;
int ret = 0;
if (i915_inject_load_failure())
return -ENODEV;
/* Setup the write-once "constant" device info */
device_info = (struct intel_device_info *)&dev_priv->info;
memcpy(device_info, info, sizeof(dev_priv->info));
device_info->device_id = dev->pdev->device;
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->gpu_error.lock);
mutex_init(&dev_priv->backlight_lock);
spin_lock_init(&dev_priv->uncore.lock);
spin_lock_init(&dev_priv->mm.object_stat_lock);
spin_lock_init(&dev_priv->mmio_flip_lock);
mutex_init(&dev_priv->sb_lock);
mutex_init(&dev_priv->modeset_restore_lock);
mutex_init(&dev_priv->av_mutex);
mutex_init(&dev_priv->wm.wm_mutex);
mutex_init(&dev_priv->pps_mutex);
ret = i915_workqueues_init(dev_priv);
if (ret < 0)
return ret;
/* This must be called before any calls to HAS_PCH_* */
intel_detect_pch(dev);
intel_pm_setup(dev);
intel_init_dpio(dev_priv);
intel_power_domains_init(dev_priv);
intel_irq_init(dev_priv);
intel_init_display_hooks(dev_priv);
intel_init_clock_gating_hooks(dev_priv);
intel_init_audio_hooks(dev_priv);
i915_gem_load_init(dev);
intel_display_crc_init(dev);
i915_dump_device_info(dev_priv);
/* Not all pre-production machines fall into this category, only the
* very first ones. Almost everything should work, except for maybe
* suspend/resume. And we don't implement workarounds that affect only
* pre-production machines. */
if (IS_HSW_EARLY_SDV(dev))
DRM_INFO("This is an early pre-production Haswell machine. "
"It may not be fully functional.\n");
return 0;
}
/**
* i915_driver_cleanup_early - cleanup the setup done in i915_driver_init_early()
* @dev_priv: device private
*/
static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
{
i915_gem_load_cleanup(dev_priv->dev);
i915_workqueues_cleanup(dev_priv);
}
static int i915_mmio_setup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
......@@ -970,84 +1114,73 @@ static void i915_mmio_cleanup(struct drm_device *dev)
}
/**
* i915_driver_load - setup chip and create an initial config
* @dev: DRM device
* @flags: startup flags
* i915_driver_init_mmio - setup device MMIO
* @dev_priv: device private
*
* The driver load routine has to do several things:
* - drive output discovery via intel_modeset_init()
* - initialize the memory manager
* - allocate initial config memory
* - setup the DRM framebuffer with the allocated memory
* Setup minimal device state necessary for MMIO accesses later in the
* initialization sequence. The setup here should avoid any other device-wide
* side effects or exposing the driver via kernel internal or user space
* interfaces.
*/
int i915_driver_load(struct drm_device *dev, unsigned long flags)
static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
{
struct drm_i915_private *dev_priv;
struct intel_device_info *info, *device_info;
int ret = 0;
uint32_t aperture_size;
info = (struct intel_device_info *) flags;
dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
if (dev_priv == NULL)
return -ENOMEM;
dev->dev_private = dev_priv;
dev_priv->dev = dev;
struct drm_device *dev = dev_priv->dev;
int ret;
/* Setup the write-once "constant" device info */
device_info = (struct intel_device_info *)&dev_priv->info;
memcpy(device_info, info, sizeof(dev_priv->info));
device_info->device_id = dev->pdev->device;
if (i915_inject_load_failure())
return -ENODEV;
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->gpu_error.lock);
mutex_init(&dev_priv->backlight_lock);
spin_lock_init(&dev_priv->uncore.lock);
spin_lock_init(&dev_priv->mm.object_stat_lock);
spin_lock_init(&dev_priv->mmio_flip_lock);
mutex_init(&dev_priv->sb_lock);
mutex_init(&dev_priv->modeset_restore_lock);
mutex_init(&dev_priv->av_mutex);
if (i915_get_bridge_dev(dev))
return -EIO;
ret = i915_workqueues_init(dev_priv);
ret = i915_mmio_setup(dev);
if (ret < 0)
goto out_free_priv;
goto put_bridge;
intel_pm_setup(dev);
intel_uncore_init(dev);
intel_runtime_pm_get(dev_priv);
return 0;
intel_display_crc_init(dev);
put_bridge:
pci_dev_put(dev_priv->bridge_dev);
i915_dump_device_info(dev_priv);
return ret;
}
/* Not all pre-production machines fall into this category, only the
* very first ones. Almost everything should work, except for maybe
* suspend/resume. And we don't implement workarounds that affect only
* pre-production machines. */
if (IS_HSW_EARLY_SDV(dev))
DRM_INFO("This is an early pre-production Haswell machine. "
"It may not be fully functional.\n");
/**
* i915_driver_cleanup_mmio - cleanup the setup done in i915_driver_init_mmio()
* @dev_priv: device private
*/
static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
if (i915_get_bridge_dev(dev)) {
ret = -EIO;
goto out_runtime_pm_put;
}
intel_uncore_fini(dev);
i915_mmio_cleanup(dev);
pci_dev_put(dev_priv->bridge_dev);
}
ret = i915_mmio_setup(dev);
if (ret < 0)
goto put_bridge;
/**
* i915_driver_init_hw - setup state requiring device access
* @dev_priv: device private
*
* Setup state that requires accessing the device, but doesn't require
* exposing the driver via kernel internal or userspace interfaces.
*/
static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
uint32_t aperture_size;
int ret;
/* This must be called before any calls to HAS_PCH_* */
intel_detect_pch(dev);
if (i915_inject_load_failure())
return -ENODEV;
intel_uncore_init(dev);
intel_device_info_runtime_init(dev);
ret = i915_gem_gtt_init(dev);
if (ret)
goto out_uncore_fini;
return ret;
/* WARNING: Apparently we must kick fbdev drivers before vgacon,
* otherwise the vga fbdev driver falls over. */
......@@ -1080,26 +1213,27 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
aperture_size = dev_priv->gtt.mappable_end;
aperture_size = dev_priv->ggtt.mappable_end;
dev_priv->gtt.mappable =
io_mapping_create_wc(dev_priv->gtt.mappable_base,
dev_priv->ggtt.mappable =
io_mapping_create_wc(dev_priv->ggtt.mappable_base,
aperture_size);
if (dev_priv->gtt.mappable == NULL) {
if (dev_priv->ggtt.mappable == NULL) {
ret = -EIO;
goto out_gtt;
}
dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
dev_priv->ggtt.mtrr = arch_phys_wc_add(dev_priv->ggtt.mappable_base,
aperture_size);
intel_irq_init(dev_priv);
pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
PM_QOS_DEFAULT_VALUE);
intel_uncore_sanitize(dev);
intel_opregion_setup(dev);
i915_gem_load_init(dev);
i915_gem_shrinker_init(dev_priv);
i915_gem_load_init_fences(dev_priv);
/* On the 945G/GM, the chipset reports the MSI capability on the
* integrated graphics even though the support isn't actually there
......@@ -1117,24 +1251,43 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
DRM_DEBUG_DRIVER("can't enable MSI");
}
intel_device_info_runtime_init(dev);
return 0;
intel_init_dpio(dev_priv);
out_gtt:
i915_global_gtt_cleanup(dev);
if (INTEL_INFO(dev)->num_pipes) {
ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
if (ret)
goto out_gem_unload;
}
return ret;
}
intel_power_domains_init(dev_priv);
/**
* i915_driver_cleanup_hw - cleanup the setup done in i915_driver_init_hw()
* @dev_priv: device private
*/
static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
ret = i915_load_modeset_init(dev);
if (ret < 0) {
DRM_ERROR("failed to init modeset\n");
goto out_power_well;
}
if (dev->pdev->msi_enabled)
pci_disable_msi(dev->pdev);
pm_qos_remove_request(&dev_priv->pm_qos);
arch_phys_wc_del(dev_priv->ggtt.mtrr);
io_mapping_free(dev_priv->ggtt.mappable);
i915_global_gtt_cleanup(dev);
}
/**
* i915_driver_register - register the driver with the rest of the system
* @dev_priv: device private
*
* Perform any steps necessary to make the driver available via kernel
* internal or userspace interfaces.
*/
static void i915_driver_register(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
i915_gem_shrinker_init(dev_priv);
/*
* Notify a valid surface after modesetting,
* when running inside a VM.
......@@ -1144,48 +1297,107 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
i915_setup_sysfs(dev);
if (INTEL_INFO(dev)->num_pipes) {
if (INTEL_INFO(dev_priv)->num_pipes) {
/* Must be done after probing outputs */
intel_opregion_init(dev);
acpi_video_register();
}
if (IS_GEN5(dev))
if (IS_GEN5(dev_priv))
intel_gpu_ips_init(dev_priv);
intel_runtime_pm_enable(dev_priv);
i915_audio_component_init(dev_priv);
}
/**
* i915_driver_unregister - cleanup the registration done in i915_driver_register()
* @dev_priv: device private
*/
static void i915_driver_unregister(struct drm_i915_private *dev_priv)
{
i915_audio_component_cleanup(dev_priv);
intel_gpu_ips_teardown();
acpi_video_unregister();
intel_opregion_fini(dev_priv->dev);
i915_teardown_sysfs(dev_priv->dev);
i915_gem_shrinker_cleanup(dev_priv);
}
/**
* i915_driver_load - setup chip and create an initial config
* @dev: DRM device
* @flags: startup flags
*
* The driver load routine has to do several things:
* - drive output discovery via intel_modeset_init()
* - initialize the memory manager
* - allocate initial config memory
* - setup the DRM framebuffer with the allocated memory
*/
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
struct drm_i915_private *dev_priv;
int ret = 0;
dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
if (dev_priv == NULL)
return -ENOMEM;
dev->dev_private = dev_priv;
/* Must be set before calling __i915_printk */
dev_priv->dev = dev;
ret = i915_driver_init_early(dev_priv, dev,
(struct intel_device_info *)flags);
if (ret < 0)
goto out_free_priv;
intel_runtime_pm_get(dev_priv);
ret = i915_driver_init_mmio(dev_priv);
if (ret < 0)
goto out_runtime_pm_put;
ret = i915_driver_init_hw(dev_priv);
if (ret < 0)
goto out_cleanup_mmio;
/*
* TODO: move the vblank init and parts of modeset init steps into one
* of the i915_driver_init_/i915_driver_register functions according
* to the role/effect of the given init step.
*/
if (INTEL_INFO(dev)->num_pipes) {
ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
if (ret)
goto out_cleanup_hw;
}
ret = i915_load_modeset_init(dev);
if (ret < 0)
goto out_cleanup_vblank;
i915_driver_register(dev_priv);
intel_runtime_pm_enable(dev_priv);
intel_runtime_pm_put(dev_priv);
return 0;
out_power_well:
intel_power_domains_fini(dev_priv);
out_cleanup_vblank:
drm_vblank_cleanup(dev);
out_gem_unload:
i915_gem_shrinker_cleanup(dev_priv);
if (dev->pdev->msi_enabled)
pci_disable_msi(dev->pdev);
intel_teardown_mchbar(dev);
pm_qos_remove_request(&dev_priv->pm_qos);
arch_phys_wc_del(dev_priv->gtt.mtrr);
io_mapping_free(dev_priv->gtt.mappable);
out_gtt:
i915_global_gtt_cleanup(dev);
out_uncore_fini:
intel_uncore_fini(dev);
i915_mmio_cleanup(dev);
put_bridge:
pci_dev_put(dev_priv->bridge_dev);
i915_gem_load_cleanup(dev);
out_cleanup_hw:
i915_driver_cleanup_hw(dev_priv);
out_cleanup_mmio:
i915_driver_cleanup_mmio(dev_priv);
out_runtime_pm_put:
intel_runtime_pm_put(dev_priv);
i915_workqueues_cleanup(dev_priv);
i915_driver_cleanup_early(dev_priv);
out_free_priv:
i915_load_error(dev_priv, "Device initialization failed (%d)\n", ret);
kfree(dev_priv);
return ret;
......@@ -1198,26 +1410,15 @@ int i915_driver_unload(struct drm_device *dev)
intel_fbdev_fini(dev);
i915_audio_component_cleanup(dev_priv);
ret = i915_gem_suspend(dev);
if (ret) {
DRM_ERROR("failed to idle hardware: %d\n", ret);
return ret;
}
intel_power_domains_fini(dev_priv);
intel_gpu_ips_teardown();
i915_teardown_sysfs(dev);
i915_gem_shrinker_cleanup(dev_priv);
intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
io_mapping_free(dev_priv->gtt.mappable);
arch_phys_wc_del(dev_priv->gtt.mtrr);
acpi_video_unregister();
i915_driver_unregister(dev_priv);
drm_vblank_cleanup(dev);
......@@ -1246,31 +1447,24 @@ int i915_driver_unload(struct drm_device *dev)
cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
i915_destroy_error_state(dev);
if (dev->pdev->msi_enabled)
pci_disable_msi(dev->pdev);
intel_opregion_fini(dev);
/* Flush any outstanding unpin_work. */
flush_workqueue(dev_priv->wq);
intel_guc_ucode_fini(dev);
mutex_lock(&dev->struct_mutex);
i915_gem_cleanup_ringbuffer(dev);
i915_gem_cleanup_engines(dev);
i915_gem_context_fini(dev);
mutex_unlock(&dev->struct_mutex);
intel_fbc_cleanup_cfb(dev_priv);
pm_qos_remove_request(&dev_priv->pm_qos);
intel_power_domains_fini(dev_priv);
i915_global_gtt_cleanup(dev);
i915_driver_cleanup_hw(dev_priv);
i915_driver_cleanup_mmio(dev_priv);
intel_uncore_fini(dev);
i915_mmio_cleanup(dev);
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
i915_gem_load_cleanup(dev);
pci_dev_put(dev_priv->bridge_dev);
i915_workqueues_cleanup(dev_priv);
i915_driver_cleanup_early(dev_priv);
kfree(dev_priv);
return 0;
......
......@@ -66,6 +66,11 @@ static struct drm_driver driver;
#define IVB_CURSOR_OFFSETS \
.cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }
#define BDW_COLORS \
.color = { .degamma_lut_size = 512, .gamma_lut_size = 512 }
#define CHV_COLORS \
.color = { .degamma_lut_size = 65, .gamma_lut_size = 257 }
static const struct intel_device_info intel_i830_info = {
.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
.has_overlay = 1, .overlay_needs_physical = 1,
......@@ -288,24 +293,28 @@ static const struct intel_device_info intel_haswell_m_info = {
.is_mobile = 1,
};
#define BDW_FEATURES \
HSW_FEATURES, \
BDW_COLORS
static const struct intel_device_info intel_broadwell_d_info = {
HSW_FEATURES,
BDW_FEATURES,
.gen = 8,
};
static const struct intel_device_info intel_broadwell_m_info = {
HSW_FEATURES,
BDW_FEATURES,
.gen = 8, .is_mobile = 1,
};
static const struct intel_device_info intel_broadwell_gt3d_info = {
HSW_FEATURES,
BDW_FEATURES,
.gen = 8,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};
static const struct intel_device_info intel_broadwell_gt3m_info = {
HSW_FEATURES,
BDW_FEATURES,
.gen = 8, .is_mobile = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};
......@@ -318,16 +327,17 @@ static const struct intel_device_info intel_cherryview_info = {
.display_mmio_offset = VLV_DISPLAY_BASE,
GEN_CHV_PIPEOFFSETS,
CURSOR_OFFSETS,
CHV_COLORS,
};
static const struct intel_device_info intel_skylake_info = {
HSW_FEATURES,
BDW_FEATURES,
.is_skylake = 1,
.gen = 9,
};
static const struct intel_device_info intel_skylake_gt3_info = {
HSW_FEATURES,
BDW_FEATURES,
.is_skylake = 1,
.gen = 9,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
......@@ -345,17 +355,18 @@ static const struct intel_device_info intel_broxton_info = {
.has_fbc = 1,
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
BDW_COLORS,
};
static const struct intel_device_info intel_kabylake_info = {
HSW_FEATURES,
BDW_FEATURES,
.is_preliminary = 1,
.is_kabylake = 1,
.gen = 9,
};
static const struct intel_device_info intel_kabylake_gt3_info = {
HSW_FEATURES,
BDW_FEATURES,
.is_preliminary = 1,
.is_kabylake = 1,
.gen = 9,
......@@ -504,6 +515,7 @@ void intel_detect_pch(struct drm_device *dev)
WARN_ON(!IS_SKYLAKE(dev) &&
!IS_KABYLAKE(dev));
} else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
(id == INTEL_PCH_P3X_DEVICE_ID_TYPE) ||
((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
pch->subsystem_vendor == 0x1af4 &&
pch->subsystem_device == 0x1100)) {
......@@ -758,10 +770,10 @@ static int i915_drm_resume(struct drm_device *dev)
dev_priv->display.hpd_irq_setup(dev);
spin_unlock_irq(&dev_priv->irq_lock);
intel_display_resume(dev);
intel_dp_mst_resume(dev);
intel_display_resume(dev);
/*
* ... but also need to make sure that hotplug processing
* doesn't cause havoc. Like in the driver load code we don't
......@@ -881,7 +893,7 @@ int i915_reset(struct drm_device *dev)
simulated = dev_priv->gpu_error.stop_rings != 0;
ret = intel_gpu_reset(dev);
ret = intel_gpu_reset(dev, ALL_ENGINES);
/* Also reset the gpu hangman. */
if (simulated) {
......
(The diffs for two more files have been collapsed.)
......@@ -345,12 +345,12 @@ void i915_gem_context_reset(struct drm_device *dev)
intel_lr_context_reset(dev, ctx);
}
for (i = 0; i < I915_NUM_RINGS; i++) {
struct intel_engine_cs *ring = &dev_priv->ring[i];
for (i = 0; i < I915_NUM_ENGINES; i++) {
struct intel_engine_cs *engine = &dev_priv->engine[i];
if (ring->last_context) {
i915_gem_context_unpin(ring->last_context, ring);
ring->last_context = NULL;
if (engine->last_context) {
i915_gem_context_unpin(engine->last_context, engine);
engine->last_context = NULL;
}
}
......@@ -413,7 +413,7 @@ void i915_gem_context_fini(struct drm_device *dev)
/* The only known way to stop the gpu from accessing the hw context is
* to reset it. Do this as the very last operation to avoid confusing
* other code, leading to spurious errors. */
intel_gpu_reset(dev);
intel_gpu_reset(dev, ALL_ENGINES);
/* When default context is created and switched to, base object refcount
* will be 2 (+1 from object creation and +1 from do_switch()).
......@@ -421,17 +421,17 @@ void i915_gem_context_fini(struct drm_device *dev)
* to default context. So we need to unreference the base object once
* to offset the do_switch part, so that i915_gem_context_unreference()
* can then free the base object correctly. */
WARN_ON(!dev_priv->ring[RCS].last_context);
WARN_ON(!dev_priv->engine[RCS].last_context);
i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
}
for (i = I915_NUM_RINGS; --i >= 0;) {
struct intel_engine_cs *ring = &dev_priv->ring[i];
for (i = I915_NUM_ENGINES; --i >= 0;) {
struct intel_engine_cs *engine = &dev_priv->engine[i];
if (ring->last_context) {
i915_gem_context_unpin(ring->last_context, ring);
ring->last_context = NULL;
if (engine->last_context) {
i915_gem_context_unpin(engine->last_context, engine);
engine->last_context = NULL;
}
}
......@@ -441,14 +441,14 @@ void i915_gem_context_fini(struct drm_device *dev)
int i915_gem_context_enable(struct drm_i915_gem_request *req)
{
struct intel_engine_cs *ring = req->ring;
struct intel_engine_cs *engine = req->engine;
int ret;
if (i915.enable_execlists) {
if (ring->init_context == NULL)
if (engine->init_context == NULL)
return 0;
ret = ring->init_context(req);
ret = engine->init_context(req);
} else
ret = i915_switch_context(req);
......@@ -510,35 +510,35 @@ i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
static inline int
mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
{
struct intel_engine_cs *ring = req->ring;
struct intel_engine_cs *engine = req->engine;
u32 flags = hw_flags | MI_MM_SPACE_GTT;
const int num_rings =
/* Use an extended w/a on ivb+ if signalling from other rings */
i915_semaphore_is_enabled(ring->dev) ?
hweight32(INTEL_INFO(ring->dev)->ring_mask) - 1 :
i915_semaphore_is_enabled(engine->dev) ?
hweight32(INTEL_INFO(engine->dev)->ring_mask) - 1 :
0;
int len, i, ret;
int len, ret;
/* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
* invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
* explicitly, so we rely on the value at ring init, stored in
* itlb_before_ctx_switch.
*/
if (IS_GEN6(ring->dev)) {
ret = ring->flush(req, I915_GEM_GPU_DOMAINS, 0);
if (IS_GEN6(engine->dev)) {
ret = engine->flush(req, I915_GEM_GPU_DOMAINS, 0);
if (ret)
return ret;
}
/* These flags are for resource streamer on HSW+ */
if (IS_HASWELL(ring->dev) || INTEL_INFO(ring->dev)->gen >= 8)
if (IS_HASWELL(engine->dev) || INTEL_INFO(engine->dev)->gen >= 8)
flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN);
else if (INTEL_INFO(ring->dev)->gen < 8)
else if (INTEL_INFO(engine->dev)->gen < 8)
flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);
len = 4;
if (INTEL_INFO(ring->dev)->gen >= 7)
if (INTEL_INFO(engine->dev)->gen >= 7)
len += 2 + (num_rings ? 4*num_rings + 2 : 0);
ret = intel_ring_begin(req, len);
......@@ -546,54 +546,61 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
return ret;
/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
if (INTEL_INFO(ring->dev)->gen >= 7) {
intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
if (INTEL_INFO(engine->dev)->gen >= 7) {
intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_DISABLE);
if (num_rings) {
struct intel_engine_cs *signaller;
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
for_each_ring(signaller, to_i915(ring->dev), i) {
if (signaller == ring)
intel_ring_emit(engine,
MI_LOAD_REGISTER_IMM(num_rings));
for_each_engine(signaller, to_i915(engine->dev)) {
if (signaller == engine)
continue;
intel_ring_emit_reg(ring, RING_PSMI_CTL(signaller->mmio_base));
intel_ring_emit(ring, _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
intel_ring_emit_reg(engine,
RING_PSMI_CTL(signaller->mmio_base));
intel_ring_emit(engine,
_MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
}
}
}
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_SET_CONTEXT);
intel_ring_emit(ring, i915_gem_obj_ggtt_offset(req->ctx->legacy_hw_ctx.rcs_state) |
intel_ring_emit(engine, MI_NOOP);
intel_ring_emit(engine, MI_SET_CONTEXT);
intel_ring_emit(engine,
i915_gem_obj_ggtt_offset(req->ctx->legacy_hw_ctx.rcs_state) |
flags);
/*
* w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
* WaMiSetContext_Hang:snb,ivb,vlv
*/
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(engine, MI_NOOP);
if (INTEL_INFO(ring->dev)->gen >= 7) {
if (INTEL_INFO(engine->dev)->gen >= 7) {
if (num_rings) {
struct intel_engine_cs *signaller;
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
for_each_ring(signaller, to_i915(ring->dev), i) {
if (signaller == ring)
intel_ring_emit(engine,
MI_LOAD_REGISTER_IMM(num_rings));
for_each_engine(signaller, to_i915(engine->dev)) {
if (signaller == engine)
continue;
intel_ring_emit_reg(ring, RING_PSMI_CTL(signaller->mmio_base));
intel_ring_emit(ring, _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
intel_ring_emit_reg(engine,
RING_PSMI_CTL(signaller->mmio_base));
intel_ring_emit(engine,
_MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
}
}
intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_ENABLE);
}
intel_ring_advance(ring);
intel_ring_advance(engine);
return ret;
}
static inline bool should_skip_switch(struct intel_engine_cs *ring,
static inline bool should_skip_switch(struct intel_engine_cs *engine,
struct intel_context *from,
struct intel_context *to)
{
......@@ -601,42 +608,42 @@ static inline bool should_skip_switch(struct intel_engine_cs *ring,
return false;
if (to->ppgtt && from == to &&
!(intel_ring_flag(ring) & to->ppgtt->pd_dirty_rings))
!(intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings))
return true;
return false;
}
static bool
needs_pd_load_pre(struct intel_engine_cs *ring, struct intel_context *to)
needs_pd_load_pre(struct intel_engine_cs *engine, struct intel_context *to)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct drm_i915_private *dev_priv = engine->dev->dev_private;
if (!to->ppgtt)
return false;
if (INTEL_INFO(ring->dev)->gen < 8)
if (INTEL_INFO(engine->dev)->gen < 8)
return true;
if (ring != &dev_priv->ring[RCS])
if (engine != &dev_priv->engine[RCS])
return true;
return false;
}
static bool
needs_pd_load_post(struct intel_engine_cs *ring, struct intel_context *to,
u32 hw_flags)
needs_pd_load_post(struct intel_engine_cs *engine, struct intel_context *to,
u32 hw_flags)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct drm_i915_private *dev_priv = engine->dev->dev_private;
if (!to->ppgtt)
return false;
if (!IS_GEN8(ring->dev))
if (!IS_GEN8(engine->dev))
return false;
if (ring != &dev_priv->ring[RCS])
if (engine != &dev_priv->engine[RCS])
return false;
if (hw_flags & MI_RESTORE_INHIBIT)
......@@ -648,25 +655,26 @@ needs_pd_load_post(struct intel_engine_cs *ring, struct intel_context *to,
static int do_switch(struct drm_i915_gem_request *req)
{
struct intel_context *to = req->ctx;
struct intel_engine_cs *ring = req->ring;
struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct intel_context *from = ring->last_context;
struct intel_engine_cs *engine = req->engine;
struct drm_i915_private *dev_priv = req->i915;
struct intel_context *from = engine->last_context;
u32 hw_flags = 0;
bool uninitialized = false;
int ret, i;
if (from != NULL && ring == &dev_priv->ring[RCS]) {
if (from != NULL && engine == &dev_priv->engine[RCS]) {
BUG_ON(from->legacy_hw_ctx.rcs_state == NULL);
BUG_ON(!i915_gem_obj_is_pinned(from->legacy_hw_ctx.rcs_state));
}
if (should_skip_switch(ring, from, to))
if (should_skip_switch(engine, from, to))
return 0;
/* Trying to pin first makes error handling easier. */
if (ring == &dev_priv->ring[RCS]) {
if (engine == &dev_priv->engine[RCS]) {
ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
get_context_alignment(ring->dev), 0);
get_context_alignment(engine->dev),
0);
if (ret)
return ret;
}
......@@ -676,23 +684,23 @@ static int do_switch(struct drm_i915_gem_request *req)
* evict_everything - as a last ditch gtt defrag effort that also
* switches to the default context. Hence we need to reload from here.
*/
from = ring->last_context;
from = engine->last_context;
if (needs_pd_load_pre(ring, to)) {
if (needs_pd_load_pre(engine, to)) {
/* Older GENs and non render rings still want the load first,
* "PP_DCLV followed by PP_DIR_BASE register through Load
* Register Immediate commands in Ring Buffer before submitting
* a context."*/
trace_switch_mm(ring, to);
trace_switch_mm(engine, to);
ret = to->ppgtt->switch_mm(to->ppgtt, req);
if (ret)
goto unpin_out;
/* Doing a PD load always reloads the page dirs */
to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(ring);
to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
}
if (ring != &dev_priv->ring[RCS]) {
if (engine != &dev_priv->engine[RCS]) {
if (from)
i915_gem_context_unreference(from);
goto done;
......@@ -717,14 +725,14 @@ static int do_switch(struct drm_i915_gem_request *req)
* space. This means we must enforce that a page table load
* occur when this occurs. */
} else if (to->ppgtt &&
(intel_ring_flag(ring) & to->ppgtt->pd_dirty_rings)) {
(intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings)) {
hw_flags |= MI_FORCE_RESTORE;
to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(ring);
to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
}
/* We should never emit switch_mm more than once */
WARN_ON(needs_pd_load_pre(ring, to) &&
needs_pd_load_post(ring, to, hw_flags));
WARN_ON(needs_pd_load_pre(engine, to) &&
needs_pd_load_post(engine, to, hw_flags));
ret = mi_set_context(req, hw_flags);
if (ret)
......@@ -733,8 +741,8 @@ static int do_switch(struct drm_i915_gem_request *req)
/* GEN8 does *not* require an explicit reload if the PDPs have been
* setup, and we do not wish to move them.
*/
if (needs_pd_load_post(ring, to, hw_flags)) {
trace_switch_mm(ring, to);
if (needs_pd_load_post(engine, to, hw_flags)) {
trace_switch_mm(engine, to);
ret = to->ppgtt->switch_mm(to->ppgtt, req);
/* The hardware context switch is emitted, but we haven't
* actually changed the state - so it's probably safe to bail
......@@ -787,11 +795,11 @@ static int do_switch(struct drm_i915_gem_request *req)
done:
i915_gem_context_reference(to);
ring->last_context = to;
engine->last_context = to;
if (uninitialized) {
if (ring->init_context) {
ret = ring->init_context(req);
if (engine->init_context) {
ret = engine->init_context(req);
if (ret)
DRM_ERROR("ring init context: %d\n", ret);
}
......@@ -800,7 +808,7 @@ static int do_switch(struct drm_i915_gem_request *req)
return 0;
unpin_out:
if (ring->id == RCS)
if (engine->id == RCS)
i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state);
return ret;
}
......@@ -820,18 +828,18 @@ static int do_switch(struct drm_i915_gem_request *req)
*/
int i915_switch_context(struct drm_i915_gem_request *req)
{
struct intel_engine_cs *ring = req->ring;
struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct intel_engine_cs *engine = req->engine;
struct drm_i915_private *dev_priv = req->i915;
WARN_ON(i915.enable_execlists);
WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
if (req->ctx->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
if (req->ctx != ring->last_context) {
if (req->ctx != engine->last_context) {
i915_gem_context_reference(req->ctx);
if (ring->last_context)
i915_gem_context_unreference(ring->last_context);
ring->last_context = req->ctx;
if (engine->last_context)
i915_gem_context_unreference(engine->last_context);
engine->last_context = req->ctx;
}
return 0;
}
......@@ -937,7 +945,7 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
else if (to_i915(dev)->mm.aliasing_ppgtt)
args->value = to_i915(dev)->mm.aliasing_ppgtt->base.total;
else
args->value = to_i915(dev)->gtt.base.total;
args->value = to_i915(dev)->ggtt.base.total;
break;
default:
ret = -EINVAL;
......
......@@ -36,29 +36,29 @@ i915_verify_lists(struct drm_device *dev)
static int warned;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj;
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
int err = 0;
int i;
if (warned)
return 0;
for_each_ring(ring, dev_priv, i) {
list_for_each_entry(obj, &ring->active_list, ring_list[ring->id]) {
for_each_engine(engine, dev_priv) {
list_for_each_entry(obj, &engine->active_list,
engine_list[engine->id]) {
if (obj->base.dev != dev ||
!atomic_read(&obj->base.refcount.refcount)) {
DRM_ERROR("%s: freed active obj %p\n",
ring->name, obj);
engine->name, obj);
err++;
break;
} else if (!obj->active ||
obj->last_read_req[ring->id] == NULL) {
obj->last_read_req[engine->id] == NULL) {
DRM_ERROR("%s: invalid active obj %p\n",
ring->name, obj);
engine->name, obj);
err++;
} else if (obj->base.write_domain) {
DRM_ERROR("%s: invalid write obj %p (w %x)\n",
ring->name,
engine->name,
obj, obj->base.write_domain);
err++;
}
......
......@@ -330,7 +330,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
/* Map the page containing the relocation we're going to perform. */
offset = i915_gem_obj_ggtt_offset(obj);
offset += reloc->offset;
reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
reloc_page = io_mapping_map_atomic_wc(dev_priv->ggtt.mappable,
offset & PAGE_MASK);
iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset));
......@@ -340,7 +340,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
if (offset_in_page(offset) == 0) {
io_mapping_unmap_atomic(reloc_page);
reloc_page =
io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
io_mapping_map_atomic_wc(dev_priv->ggtt.mappable,
offset);
}
......@@ -599,7 +599,7 @@ static bool only_mappable_for_reloc(unsigned int flags)
static int
i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
struct intel_engine_cs *ring,
struct intel_engine_cs *engine,
bool *need_reloc)
{
struct drm_i915_gem_object *obj = vma->obj;
......@@ -713,7 +713,7 @@ eb_vma_misplaced(struct i915_vma *vma)
}
static int
i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
struct list_head *vmas,
struct intel_context *ctx,
bool *need_relocs)
......@@ -723,10 +723,10 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
struct i915_address_space *vm;
struct list_head ordered_vmas;
struct list_head pinned_vmas;
bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
bool has_fenced_gpu_access = INTEL_INFO(engine->dev)->gen < 4;
int retry;
i915_gem_retire_requests_ring(ring);
i915_gem_retire_requests_ring(engine);
vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
......@@ -788,7 +788,9 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
if (eb_vma_misplaced(vma))
ret = i915_vma_unbind(vma);
else
ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
ret = i915_gem_execbuffer_reserve_vma(vma,
engine,
need_relocs);
if (ret)
goto err;
}
......@@ -798,7 +800,8 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
if (drm_mm_node_allocated(&vma->node))
continue;
ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
ret = i915_gem_execbuffer_reserve_vma(vma, engine,
need_relocs);
if (ret)
goto err;
}
......@@ -821,7 +824,7 @@ static int
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
struct drm_i915_gem_execbuffer2 *args,
struct drm_file *file,
struct intel_engine_cs *ring,
struct intel_engine_cs *engine,
struct eb_vmas *eb,
struct drm_i915_gem_exec_object2 *exec,
struct intel_context *ctx)
......@@ -910,7 +913,8 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
goto err;
need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, ctx, &need_relocs);
ret = i915_gem_execbuffer_reserve(engine, &eb->vmas, ctx,
&need_relocs);
if (ret)
goto err;
......@@ -938,7 +942,7 @@ static int
i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
struct list_head *vmas)
{
const unsigned other_rings = ~intel_ring_flag(req->ring);
const unsigned other_rings = ~intel_engine_flag(req->engine);
struct i915_vma *vma;
uint32_t flush_domains = 0;
bool flush_chipset = false;
......@@ -948,7 +952,7 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
struct drm_i915_gem_object *obj = vma->obj;
if (obj->active & other_rings) {
ret = i915_gem_object_sync(obj, req->ring, &req);
ret = i915_gem_object_sync(obj, req->engine, &req);
if (ret)
return ret;
}
......@@ -960,7 +964,7 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
}
if (flush_chipset)
i915_gem_chipset_flush(req->ring->dev);
i915_gem_chipset_flush(req->engine->dev);
if (flush_domains & I915_GEM_DOMAIN_GTT)
wmb();
......@@ -1062,12 +1066,12 @@ validate_exec_list(struct drm_device *dev,
static struct intel_context *
i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
struct intel_engine_cs *ring, const u32 ctx_id)
struct intel_engine_cs *engine, const u32 ctx_id)
{
struct intel_context *ctx = NULL;
struct i915_ctx_hang_stats *hs;
if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
if (engine->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
return ERR_PTR(-EINVAL);
ctx = i915_gem_context_get(file->driver_priv, ctx_id);
......@@ -1080,8 +1084,8 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
return ERR_PTR(-EIO);
}
if (i915.enable_execlists && !ctx->engine[ring->id].state) {
int ret = intel_lr_context_deferred_alloc(ctx, ring);
if (i915.enable_execlists && !ctx->engine[engine->id].state) {
int ret = intel_lr_context_deferred_alloc(ctx, engine);
if (ret) {
DRM_DEBUG("Could not create LRC %u: %d\n", ctx_id, ret);
return ERR_PTR(ret);
......@@ -1095,7 +1099,7 @@ void
i915_gem_execbuffer_move_to_active(struct list_head *vmas,
struct drm_i915_gem_request *req)
{
struct intel_engine_cs *ring = i915_gem_request_get_ring(req);
struct intel_engine_cs *engine = i915_gem_request_get_engine(req);
struct i915_vma *vma;
list_for_each_entry(vma, vmas, exec_list) {
......@@ -1122,7 +1126,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
i915_gem_request_assign(&obj->last_fenced_req, req);
if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
struct drm_i915_private *dev_priv = to_i915(ring->dev);
struct drm_i915_private *dev_priv = to_i915(engine->dev);
list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
&dev_priv->mm.fence_list);
}
......@@ -1136,7 +1140,7 @@ void
i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
{
/* Unconditionally force add_request to emit a full flush. */
params->ring->gpu_caches_dirty = true;
params->engine->gpu_caches_dirty = true;
/* Add a breadcrumb for the completion of the batch buffer */
__i915_add_request(params->request, params->batch_obj, true);
......@@ -1146,11 +1150,11 @@ static int
i915_reset_gen7_sol_offsets(struct drm_device *dev,
struct drm_i915_gem_request *req)
{
struct intel_engine_cs *ring = req->ring;
struct intel_engine_cs *engine = req->engine;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret, i;
if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS]) {
if (!IS_GEN7(dev) || engine != &dev_priv->engine[RCS]) {
DRM_DEBUG("sol reset is gen7/rcs only\n");
return -EINVAL;
}
......@@ -1160,18 +1164,18 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
return ret;
for (i = 0; i < 4; i++) {
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit_reg(ring, GEN7_SO_WRITE_OFFSET(i));
intel_ring_emit(ring, 0);
intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit_reg(engine, GEN7_SO_WRITE_OFFSET(i));
intel_ring_emit(engine, 0);
}
intel_ring_advance(ring);
intel_ring_advance(engine);
return 0;
}
static struct drm_i915_gem_object*
i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
i915_gem_execbuffer_parse(struct intel_engine_cs *engine,
struct drm_i915_gem_exec_object2 *shadow_exec_entry,
struct eb_vmas *eb,
struct drm_i915_gem_object *batch_obj,
......@@ -1183,12 +1187,12 @@ i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
struct i915_vma *vma;
int ret;
shadow_batch_obj = i915_gem_batch_pool_get(&ring->batch_pool,
shadow_batch_obj = i915_gem_batch_pool_get(&engine->batch_pool,
PAGE_ALIGN(batch_len));
if (IS_ERR(shadow_batch_obj))
return shadow_batch_obj;
ret = i915_parse_cmds(ring,
ret = i915_parse_cmds(engine,
batch_obj,
shadow_batch_obj,
batch_start_offset,
......@@ -1229,7 +1233,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
struct list_head *vmas)
{
struct drm_device *dev = params->dev;
struct intel_engine_cs *ring = params->ring;
struct intel_engine_cs *engine = params->engine;
struct drm_i915_private *dev_priv = dev->dev_private;
u64 exec_start, exec_len;
int instp_mode;
......@@ -1244,8 +1248,8 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
if (ret)
return ret;
WARN(params->ctx->ppgtt && params->ctx->ppgtt->pd_dirty_rings & (1<<ring->id),
"%s didn't clear reload\n", ring->name);
WARN(params->ctx->ppgtt && params->ctx->ppgtt->pd_dirty_rings & (1<<engine->id),
"%s didn't clear reload\n", engine->name);
instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
instp_mask = I915_EXEC_CONSTANTS_MASK;
......@@ -1253,7 +1257,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
case I915_EXEC_CONSTANTS_REL_GENERAL:
case I915_EXEC_CONSTANTS_ABSOLUTE:
case I915_EXEC_CONSTANTS_REL_SURFACE:
if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
if (instp_mode != 0 && engine != &dev_priv->engine[RCS]) {
DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
return -EINVAL;
}
......@@ -1280,17 +1284,17 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
return -EINVAL;
}
if (ring == &dev_priv->ring[RCS] &&
if (engine == &dev_priv->engine[RCS] &&
instp_mode != dev_priv->relative_constants_mode) {
ret = intel_ring_begin(params->request, 4);
if (ret)
return ret;
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit_reg(ring, INSTPM);
intel_ring_emit(ring, instp_mask << 16 | instp_mode);
intel_ring_advance(ring);
intel_ring_emit(engine, MI_NOOP);
intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit_reg(engine, INSTPM);
intel_ring_emit(engine, instp_mask << 16 | instp_mode);
intel_ring_advance(engine);
dev_priv->relative_constants_mode = instp_mode;
}
......@@ -1308,7 +1312,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
if (exec_len == 0)
exec_len = params->batch_obj->base.size;
ret = ring->dispatch_execbuffer(params->request,
ret = engine->dispatch_execbuffer(params->request,
exec_start, exec_len,
params->dispatch_flags);
if (ret)
......@@ -1365,7 +1369,7 @@ eb_get_batch(struct eb_vmas *eb)
#define I915_USER_RINGS (4)
static const enum intel_ring_id user_ring_map[I915_USER_RINGS + 1] = {
static const enum intel_engine_id user_ring_map[I915_USER_RINGS + 1] = {
[I915_EXEC_DEFAULT] = RCS,
[I915_EXEC_RENDER] = RCS,
[I915_EXEC_BLT] = BCS,
......@@ -1408,12 +1412,12 @@ eb_select_ring(struct drm_i915_private *dev_priv,
return -EINVAL;
}
*ring = &dev_priv->ring[_VCS(bsd_idx)];
*ring = &dev_priv->engine[_VCS(bsd_idx)];
} else {
*ring = &dev_priv->ring[user_ring_map[user_ring_id]];
*ring = &dev_priv->engine[user_ring_map[user_ring_id]];
}
if (!intel_ring_initialized(*ring)) {
if (!intel_engine_initialized(*ring)) {
DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id);
return -EINVAL;
}
......@@ -1432,7 +1436,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct eb_vmas *eb;
struct drm_i915_gem_object *batch_obj;
struct drm_i915_gem_exec_object2 shadow_exec_entry;
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
struct intel_context *ctx;
struct i915_address_space *vm;
struct i915_execbuffer_params params_master; /* XXX: will be removed later */
......@@ -1459,7 +1463,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (args->flags & I915_EXEC_IS_PINNED)
dispatch_flags |= I915_DISPATCH_PINNED;
ret = eb_select_ring(dev_priv, file, args, &ring);
ret = eb_select_ring(dev_priv, file, args, &engine);
if (ret)
return ret;
......@@ -1473,9 +1477,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
DRM_DEBUG("RS is only allowed for Haswell, Gen8 and above\n");
return -EINVAL;
}
if (ring->id != RCS) {
if (engine->id != RCS) {
DRM_DEBUG("RS is not available on %s\n",
ring->name);
engine->name);
return -EINVAL;
}
......@@ -1488,7 +1492,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (ret)
goto pre_mutex_err;
ctx = i915_gem_validate_context(dev, file, ring, ctx_id);
ctx = i915_gem_validate_context(dev, file, engine, ctx_id);
if (IS_ERR(ctx)) {
mutex_unlock(&dev->struct_mutex);
ret = PTR_ERR(ctx);
......@@ -1500,7 +1504,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (ctx->ppgtt)
vm = &ctx->ppgtt->base;
else
vm = &dev_priv->gtt.base;
vm = &dev_priv->ggtt.base;
memset(&params_master, 0x00, sizeof(params_master));
......@@ -1522,7 +1526,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
/* Move the objects en-masse into the GTT, evicting if necessary. */
need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, ctx, &need_relocs);
ret = i915_gem_execbuffer_reserve(engine, &eb->vmas, ctx,
&need_relocs);
if (ret)
goto err;
......@@ -1531,7 +1536,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
ret = i915_gem_execbuffer_relocate(eb);
if (ret) {
if (ret == -EFAULT) {
ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
ret = i915_gem_execbuffer_relocate_slow(dev, args, file,
engine,
eb, exec, ctx);
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
}
......@@ -1547,16 +1553,16 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
params->args_batch_start_offset = args->batch_start_offset;
if (i915_needs_cmd_parser(ring) && args->batch_len) {
if (i915_needs_cmd_parser(engine) && args->batch_len) {
struct drm_i915_gem_object *parsed_batch_obj;
parsed_batch_obj = i915_gem_execbuffer_parse(ring,
&shadow_exec_entry,
eb,
batch_obj,
args->batch_start_offset,
args->batch_len,
file->is_master);
parsed_batch_obj = i915_gem_execbuffer_parse(engine,
&shadow_exec_entry,
eb,
batch_obj,
args->batch_start_offset,
args->batch_len,
file->is_master);
if (IS_ERR(parsed_batch_obj)) {
ret = PTR_ERR(parsed_batch_obj);
goto err;
......@@ -1608,7 +1614,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
params->batch_obj_vm_offset = i915_gem_obj_offset(batch_obj, vm);
/* Allocate a request for this batch buffer nice and early. */
req = i915_gem_request_alloc(ring, ctx);
req = i915_gem_request_alloc(engine, ctx);
if (IS_ERR(req)) {
ret = PTR_ERR(req);
goto err_batch_unpin;
......@@ -1626,7 +1632,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
*/
params->dev = dev;
params->file = file;
params->ring = ring;
params->engine = engine;
params->dispatch_flags = dispatch_flags;
params->batch_obj = batch_obj;
params->ctx = ctx;
......
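The execbuffer hunks above are a mechanical rename of the `ring` locals and parameters to `engine`, together with the switch from `dev_priv->ring[]` to `dev_priv->engine[]`. Below is a minimal, self-contained sketch of the user-ring-to-engine selection pattern that `eb_select_ring()` implements; all struct layouts, enum values and the `select_engine()` helper are simplified stand-ins for illustration, not the driver's definitions.

```c
/* Simplified, self-contained model of the user-ring -> engine mapping
 * pattern shown in eb_select_ring() above. All types and constants are
 * stand-ins; the real driver uses struct intel_engine_cs and the
 * I915_EXEC_* uapi flags.
 */
#include <stdio.h>

enum intel_engine_id { RCS, BCS, VCS, VECS, NUM_ENGINES };

/* hypothetical uapi-style selectors, mirroring the table in the diff */
enum { EXEC_DEFAULT, EXEC_RENDER, EXEC_BSD, EXEC_BLT, EXEC_VEBOX, USER_RINGS };

static const enum intel_engine_id user_ring_map[USER_RINGS] = {
	[EXEC_DEFAULT] = RCS,
	[EXEC_RENDER]  = RCS,
	[EXEC_BSD]     = VCS,
	[EXEC_BLT]     = BCS,
	[EXEC_VEBOX]   = VECS,
};

struct engine { enum intel_engine_id id; int initialized; };
struct dev_priv { struct engine engine[NUM_ENGINES]; };

/* Mirrors the renamed lookup: index the engine[] array, then reject
 * engines that were never initialized on this platform. */
static struct engine *select_engine(struct dev_priv *i915, unsigned user_ring)
{
	struct engine *e;

	if (user_ring >= USER_RINGS)
		return NULL;
	e = &i915->engine[user_ring_map[user_ring]];
	return e->initialized ? e : NULL;
}

int main(void)
{
	struct dev_priv i915 = { .engine = {
		[RCS] = { RCS, 1 }, [BCS] = { BCS, 1 },
		[VCS] = { VCS, 1 }, [VECS] = { VECS, 0 } } };
	struct engine *e = select_engine(&i915, EXEC_BLT);

	printf("selected engine id: %d\n", e ? (int)e->id : -1);
	return 0;
}
```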
This diff is collapsed.
......@@ -135,16 +135,13 @@ enum i915_ggtt_view_type {
};
struct intel_rotation_info {
unsigned int height;
unsigned int pitch;
unsigned int uv_offset;
uint32_t pixel_format;
uint64_t fb_modifier;
unsigned int width_pages, height_pages;
uint64_t size;
unsigned int width_pages_uv, height_pages_uv;
uint64_t size_uv;
unsigned int uv_start_page;
struct {
/* tiles */
unsigned int width, height;
} plane[2];
};
struct i915_ggtt_view {
......@@ -342,13 +339,14 @@ struct i915_address_space {
* and correct (in cases like swizzling). That region is referred to as GMADR in
* the spec.
*/
struct i915_gtt {
struct i915_ggtt {
struct i915_address_space base;
size_t stolen_size; /* Total size of stolen memory */
size_t stolen_usable_size; /* Total size minus BIOS reserved */
size_t stolen_reserved_base;
size_t stolen_reserved_size;
size_t size; /* Total size of Global GTT */
u64 mappable_end; /* End offset that we can CPU map */
struct io_mapping *mappable; /* Mapping to our CPU mappable region */
phys_addr_t mappable_base; /* PA of our GMADR */
......@@ -360,10 +358,7 @@ struct i915_gtt {
int mtrr;
/* global gtt ops */
int (*gtt_probe)(struct drm_device *dev, u64 *gtt_total,
size_t *stolen, phys_addr_t *mappable_base,
u64 *mappable_end);
int (*probe)(struct i915_ggtt *ggtt);
};
struct i915_hw_ppgtt {
......
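The `i915_gtt` to `i915_ggtt` rename above also replaces the old `gtt_probe()` callback, which returned its results through several out-parameters, with a single `probe(struct i915_ggtt *)` hook that fills the structure directly. The sketch below models that change with toy types; the field set and the `gen6_probe()` name are assumptions for illustration, not the real implementation.

```c
/* Toy model of replacing a multi-out-parameter probe callback with one
 * that fills the GGTT structure directly, as in the hunk above.
 * All struct layouts here are simplified stand-ins.
 */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct toy_ggtt {
	uint64_t total;          /* total size of the global GTT */
	uint64_t mappable_end;   /* end of the CPU-mappable window */
	size_t   stolen_size;    /* stolen memory carved out by BIOS */
	int    (*probe)(struct toy_ggtt *ggtt);
};

/* One probe implementation per platform; it writes straight into the
 * structure instead of returning values through pointer arguments. */
static int gen6_probe(struct toy_ggtt *ggtt)
{
	ggtt->total = 2ull << 30;
	ggtt->mappable_end = 256ull << 20;
	ggtt->stolen_size = (size_t)32 << 20;
	return 0;
}

int main(void)
{
	struct toy_ggtt ggtt = { .probe = gen6_probe };

	if (ggtt.probe(&ggtt))
		return 1;
	printf("GGTT %llu MiB, mappable %llu MiB, stolen %zu MiB\n",
	       (unsigned long long)(ggtt.total >> 20),
	       (unsigned long long)(ggtt.mappable_end >> 20),
	       ggtt.stolen_size >> 20);
	return 0;
}
```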
......@@ -169,15 +169,15 @@ void i915_gem_render_state_fini(struct render_state *so)
drm_gem_object_unreference(&so->obj->base);
}
int i915_gem_render_state_prepare(struct intel_engine_cs *ring,
int i915_gem_render_state_prepare(struct intel_engine_cs *engine,
struct render_state *so)
{
int ret;
if (WARN_ON(ring->id != RCS))
if (WARN_ON(engine->id != RCS))
return -ENOENT;
ret = render_state_init(so, ring->dev);
ret = render_state_init(so, engine->dev);
if (ret)
return ret;
......@@ -198,21 +198,21 @@ int i915_gem_render_state_init(struct drm_i915_gem_request *req)
struct render_state so;
int ret;
ret = i915_gem_render_state_prepare(req->ring, &so);
ret = i915_gem_render_state_prepare(req->engine, &so);
if (ret)
return ret;
if (so.rodata == NULL)
return 0;
ret = req->ring->dispatch_execbuffer(req, so.ggtt_offset,
ret = req->engine->dispatch_execbuffer(req, so.ggtt_offset,
so.rodata->batch_items * 4,
I915_DISPATCH_SECURE);
if (ret)
goto out;
if (so.aux_batch_size > 8) {
ret = req->ring->dispatch_execbuffer(req,
ret = req->engine->dispatch_execbuffer(req,
(so.ggtt_offset +
so.aux_batch_offset),
so.aux_batch_size,
......
......@@ -43,7 +43,7 @@ struct render_state {
int i915_gem_render_state_init(struct drm_i915_gem_request *req);
void i915_gem_render_state_fini(struct render_state *so);
int i915_gem_render_state_prepare(struct intel_engine_cs *ring,
int i915_gem_render_state_prepare(struct intel_engine_cs *engine,
struct render_state *so);
#endif /* _I915_GEM_RENDER_STATE_H_ */
......@@ -74,7 +74,7 @@ int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
{
return i915_gem_stolen_insert_node_in_range(dev_priv, node, size,
alignment, 0,
dev_priv->gtt.stolen_usable_size);
dev_priv->ggtt.stolen_usable_size);
}
void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
......@@ -134,7 +134,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
I85X_DRB3, &tmp);
tom = tmp * MB(32);
base = tom - tseg_size - dev_priv->gtt.stolen_size;
base = tom - tseg_size - dev_priv->ggtt.stolen_size;
} else if (IS_845G(dev)) {
u32 tseg_size = 0;
u32 tom;
......@@ -158,7 +158,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
I830_DRB3, &tmp);
tom = tmp * MB(32);
base = tom - tseg_size - dev_priv->gtt.stolen_size;
base = tom - tseg_size - dev_priv->ggtt.stolen_size;
} else if (IS_I830(dev)) {
u32 tseg_size = 0;
u32 tom;
......@@ -178,7 +178,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
I830_DRB3, &tmp);
tom = tmp * MB(32);
base = tom - tseg_size - dev_priv->gtt.stolen_size;
base = tom - tseg_size - dev_priv->ggtt.stolen_size;
}
if (base == 0)
......@@ -189,8 +189,8 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
struct {
u32 start, end;
} stolen[2] = {
{ .start = base, .end = base + dev_priv->gtt.stolen_size, },
{ .start = base, .end = base + dev_priv->gtt.stolen_size, },
{ .start = base, .end = base + dev_priv->ggtt.stolen_size, },
{ .start = base, .end = base + dev_priv->ggtt.stolen_size, },
};
u64 gtt_start, gtt_end;
......@@ -200,7 +200,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
(gtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
else
gtt_start &= PGTBL_ADDRESS_LO_MASK;
gtt_end = gtt_start + gtt_total_entries(dev_priv->gtt) * 4;
gtt_end = gtt_start + gtt_total_entries(dev_priv->ggtt) * 4;
if (gtt_start >= stolen[0].start && gtt_start < stolen[0].end)
stolen[0].end = gtt_start;
......@@ -211,10 +211,10 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
if (stolen[0].end - stolen[0].start >
stolen[1].end - stolen[1].start) {
base = stolen[0].start;
dev_priv->gtt.stolen_size = stolen[0].end - stolen[0].start;
dev_priv->ggtt.stolen_size = stolen[0].end - stolen[0].start;
} else {
base = stolen[1].start;
dev_priv->gtt.stolen_size = stolen[1].end - stolen[1].start;
dev_priv->ggtt.stolen_size = stolen[1].end - stolen[1].start;
}
if (stolen[0].start != stolen[1].start ||
......@@ -223,7 +223,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
(unsigned long long) gtt_start,
(unsigned long long) gtt_end - 1);
DRM_DEBUG_KMS("Stolen memory adjusted to 0x%x-0x%x\n",
base, base + (u32) dev_priv->gtt.stolen_size - 1);
base, base + (u32) dev_priv->ggtt.stolen_size - 1);
}
}
......@@ -233,7 +233,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
* kernel. So if the region is already marked as busy, something
* is seriously wrong.
*/
r = devm_request_mem_region(dev->dev, base, dev_priv->gtt.stolen_size,
r = devm_request_mem_region(dev->dev, base, dev_priv->ggtt.stolen_size,
"Graphics Stolen Memory");
if (r == NULL) {
/*
......@@ -245,7 +245,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
* reservation starting from 1 instead of 0.
*/
r = devm_request_mem_region(dev->dev, base + 1,
dev_priv->gtt.stolen_size - 1,
dev_priv->ggtt.stolen_size - 1,
"Graphics Stolen Memory");
/*
* GEN3 firmware likes to smash pci bridges into the stolen
......@@ -253,7 +253,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
*/
if (r == NULL && !IS_GEN3(dev)) {
DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
base, base + (uint32_t)dev_priv->gtt.stolen_size);
base, base + (uint32_t)dev_priv->ggtt.stolen_size);
base = 0;
}
}
......@@ -278,7 +278,7 @@ static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
CTG_STOLEN_RESERVED :
ELK_STOLEN_RESERVED);
unsigned long stolen_top = dev_priv->mm.stolen_base +
dev_priv->gtt.stolen_size;
dev_priv->ggtt.stolen_size;
*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;
......@@ -372,7 +372,7 @@ static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
unsigned long stolen_top;
stolen_top = dev_priv->mm.stolen_base + dev_priv->gtt.stolen_size;
stolen_top = dev_priv->mm.stolen_base + dev_priv->ggtt.stolen_size;
*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
......@@ -401,14 +401,14 @@ int i915_gem_init_stolen(struct drm_device *dev)
}
#endif
if (dev_priv->gtt.stolen_size == 0)
if (dev_priv->ggtt.stolen_size == 0)
return 0;
dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
if (dev_priv->mm.stolen_base == 0)
return 0;
stolen_top = dev_priv->mm.stolen_base + dev_priv->gtt.stolen_size;
stolen_top = dev_priv->mm.stolen_base + dev_priv->ggtt.stolen_size;
switch (INTEL_INFO(dev_priv)->gen) {
case 2:
......@@ -458,18 +458,18 @@ int i915_gem_init_stolen(struct drm_device *dev)
return 0;
}
dev_priv->gtt.stolen_reserved_base = reserved_base;
dev_priv->gtt.stolen_reserved_size = reserved_size;
dev_priv->ggtt.stolen_reserved_base = reserved_base;
dev_priv->ggtt.stolen_reserved_size = reserved_size;
/* It is possible for the reserved area to end before the end of stolen
* memory, so just consider the start. */
reserved_total = stolen_top - reserved_base;
DRM_DEBUG_KMS("Memory reserved for graphics device: %zuK, usable: %luK\n",
dev_priv->gtt.stolen_size >> 10,
(dev_priv->gtt.stolen_size - reserved_total) >> 10);
dev_priv->ggtt.stolen_size >> 10,
(dev_priv->ggtt.stolen_size - reserved_total) >> 10);
dev_priv->gtt.stolen_usable_size = dev_priv->gtt.stolen_size -
dev_priv->ggtt.stolen_usable_size = dev_priv->ggtt.stolen_size -
reserved_total;
/*
......@@ -483,7 +483,7 @@ int i915_gem_init_stolen(struct drm_device *dev)
* i915_gem_stolen_insert_node_in_range(). We may want to fix the fbcon
* problem later.
*/
drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_usable_size);
drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->ggtt.stolen_usable_size);
return 0;
}
......@@ -497,7 +497,7 @@ i915_pages_create_for_stolen(struct drm_device *dev,
struct scatterlist *sg;
DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size);
BUG_ON(offset > dev_priv->gtt.stolen_size - size);
BUG_ON(offset > dev_priv->ggtt.stolen_size - size);
/* We hide that we have no struct page backing our stolen object
* by wrapping the contiguous physical allocation with a fake
......@@ -629,7 +629,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
u32 size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_address_space *ggtt = &dev_priv->gtt.base;
struct i915_address_space *ggtt = &dev_priv->ggtt.base;
struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
struct i915_vma *vma;
......
......@@ -758,6 +758,13 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
int ret;
u32 handle;
if (!HAS_LLC(dev) && !HAS_SNOOP(dev)) {
/* We cannot support coherent userptr objects on hw without
* LLC and broken snooping.
*/
return -ENODEV;
}
if (args->flags & ~(I915_USERPTR_READ_ONLY |
I915_USERPTR_UNSYNCHRONIZED))
return -EINVAL;
......
......@@ -377,11 +377,11 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
struct i915_guc_client *client)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
struct intel_context *ctx = client->owner;
struct guc_context_desc desc;
struct sg_table *sg;
int i;
enum intel_engine_id id;
memset(&desc, 0, sizeof(desc));
......@@ -390,8 +390,8 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
desc.priority = client->priority;
desc.db_id = client->doorbell_id;
for_each_ring(ring, dev_priv, i) {
struct guc_execlist_context *lrc = &desc.lrc[ring->guc_id];
for_each_engine_id(engine, dev_priv, id) {
struct guc_execlist_context *lrc = &desc.lrc[engine->guc_id];
struct drm_i915_gem_object *obj;
uint64_t ctx_desc;
......@@ -402,27 +402,27 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
* for now who owns a GuC client. But for future owner of GuC
* client, need to make sure lrc is pinned prior to enter here.
*/
obj = ctx->engine[i].state;
obj = ctx->engine[id].state;
if (!obj)
break; /* XXX: continue? */
ctx_desc = intel_lr_context_descriptor(ctx, ring);
ctx_desc = intel_lr_context_descriptor(ctx, engine);
lrc->context_desc = (u32)ctx_desc;
/* The state page is after PPHWSP */
lrc->ring_lcra = i915_gem_obj_ggtt_offset(obj) +
LRC_STATE_PN * PAGE_SIZE;
lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
(ring->guc_id << GUC_ELC_ENGINE_OFFSET);
(engine->guc_id << GUC_ELC_ENGINE_OFFSET);
obj = ctx->engine[i].ringbuf->obj;
obj = ctx->engine[id].ringbuf->obj;
lrc->ring_begin = i915_gem_obj_ggtt_offset(obj);
lrc->ring_end = lrc->ring_begin + obj->base.size - 1;
lrc->ring_next_free_location = lrc->ring_begin;
lrc->ring_current_tail_pointer_value = 0;
desc.engines_used |= (1 << ring->guc_id);
desc.engines_used |= (1 << engine->guc_id);
}
WARN_ON(desc.engines_used == 0);
......@@ -542,11 +542,12 @@ static int guc_add_workqueue_item(struct i915_guc_client *gc,
wq_len = sizeof(struct guc_wq_item) / sizeof(u32) - 1;
wqi->header = WQ_TYPE_INORDER |
(wq_len << WQ_LEN_SHIFT) |
(rq->ring->guc_id << WQ_TARGET_SHIFT) |
(rq->engine->guc_id << WQ_TARGET_SHIFT) |
WQ_NO_WCFLUSH_WAIT;
/* The GuC wants only the low-order word of the context descriptor */
wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx, rq->ring);
wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx,
rq->engine);
/* The GuC firmware wants the tail index in QWords, not bytes */
tail = rq->ringbuf->tail >> 3;
......@@ -569,7 +570,7 @@ int i915_guc_submit(struct i915_guc_client *client,
struct drm_i915_gem_request *rq)
{
struct intel_guc *guc = client->guc;
unsigned int engine_id = rq->ring->guc_id;
unsigned int engine_id = rq->engine->guc_id;
int q_ret, b_ret;
q_ret = guc_add_workqueue_item(client, rq);
......@@ -839,9 +840,9 @@ static void guc_create_ads(struct intel_guc *guc)
struct guc_ads *ads;
struct guc_policies *policies;
struct guc_mmio_reg_state *reg_state;
struct intel_engine_cs *ring;
struct intel_engine_cs *engine;
struct page *page;
u32 size, i;
u32 size;
/* The ads obj includes the struct itself and buffers passed to GuC */
size = sizeof(struct guc_ads) + sizeof(struct guc_policies) +
......@@ -867,11 +868,11 @@ static void guc_create_ads(struct intel_guc *guc)
* so its address won't change after we've told the GuC where
* to find it.
*/
ring = &dev_priv->ring[RCS];
ads->golden_context_lrca = ring->status_page.gfx_addr;
engine = &dev_priv->engine[RCS];
ads->golden_context_lrca = engine->status_page.gfx_addr;
for_each_ring(ring, dev_priv, i)
ads->eng_state_size[ring->guc_id] = intel_lr_context_size(ring);
for_each_engine(engine, dev_priv)
ads->eng_state_size[engine->guc_id] = intel_lr_context_size(engine);
/* GuC scheduling policies */
policies = (void *)ads + sizeof(struct guc_ads);
......@@ -883,12 +884,12 @@ static void guc_create_ads(struct intel_guc *guc)
/* MMIO reg state */
reg_state = (void *)policies + sizeof(struct guc_policies);
for_each_ring(ring, dev_priv, i) {
reg_state->mmio_white_list[ring->guc_id].mmio_start =
ring->mmio_base + GUC_MMIO_WHITE_LIST_START;
for_each_engine(engine, dev_priv) {
reg_state->mmio_white_list[engine->guc_id].mmio_start =
engine->mmio_base + GUC_MMIO_WHITE_LIST_START;
/* Nothing to be saved or restored for now. */
reg_state->mmio_white_list[ring->guc_id].count = 0;
reg_state->mmio_white_list[engine->guc_id].count = 0;
}
ads->reg_state_addr = ads->scheduler_policies +
......
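The GuC hunks above drop the explicit `int i` loop counter in favour of `for_each_engine()` and `for_each_engine_id()`. The macro bodies are not part of this diff, so the self-contained sketch below shows one plausible shape for such iterators; the definitions here are illustrative assumptions (the real macros also skip engines that are absent on the platform).

```c
/* Self-contained sketch of engine-iterator macros in the style of
 * for_each_engine()/for_each_engine_id() used above. The macro bodies
 * are assumptions for illustration, not the driver's definitions.
 */
#include <stdio.h>

enum engine_id { RCS, BCS, VCS, NUM_ENGINES };

struct engine { enum engine_id id; const char *name; };
struct drm_i915_private { struct engine engine[NUM_ENGINES]; };

/* iterate over every engine slot */
#define for_each_engine(e, i915) \
	for ((e) = &(i915)->engine[0]; (e) < &(i915)->engine[NUM_ENGINES]; (e)++)

/* same, but also expose the index as an enum engine_id */
#define for_each_engine_id(e, i915, id) \
	for ((id) = 0; (id) < NUM_ENGINES && ((e) = &(i915)->engine[(id)], 1); (id)++)

int main(void)
{
	struct drm_i915_private i915 = { .engine = {
		[RCS] = { RCS, "rcs" }, [BCS] = { BCS, "bcs" },
		[VCS] = { VCS, "vcs" } } };
	struct engine *e;
	enum engine_id id;

	for_each_engine(e, &i915)
		printf("engine %s\n", e->name);
	for_each_engine_id(e, &i915, id)
		printf("engine[%d] = %s\n", (int)id, e->name);
	return 0;
}
```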
This diff is collapsed.
......@@ -56,6 +56,8 @@ struct i915_params i915 __read_mostly = {
.edp_vswing = 0,
.enable_guc_submission = false,
.guc_log_level = -1,
.enable_dp_mst = true,
.inject_load_failure = 0,
};
module_param_named(modeset, i915.modeset, int, 0400);
......@@ -201,3 +203,10 @@ MODULE_PARM_DESC(enable_guc_submission, "Enable GuC submission (default:false)")
module_param_named(guc_log_level, i915.guc_log_level, int, 0400);
MODULE_PARM_DESC(guc_log_level,
"GuC firmware logging level (-1:disabled (default), 0-3:enabled)");
module_param_named_unsafe(enable_dp_mst, i915.enable_dp_mst, bool, 0600);
MODULE_PARM_DESC(enable_dp_mst,
"Enable multi-stream transport (MST) for new DisplayPort sinks. (default: true)");
module_param_named_unsafe(inject_load_failure, i915.inject_load_failure, uint, 0400);
MODULE_PARM_DESC(inject_load_failure,
"Force an error after a number of failure check points (0:disabled (default), N:force failure at the Nth failure check point)");
......@@ -49,6 +49,7 @@ struct i915_params {
int use_mmio_flip;
int mmio_debug;
int edp_vswing;
unsigned int inject_load_failure;
/* leave bools at the end to not create holes */
bool enable_hangcheck;
bool fastboot;
......@@ -59,6 +60,7 @@ struct i915_params {
bool enable_guc_submission;
bool verbose_state_checks;
bool nuclear_pageflip;
bool enable_dp_mst;
};
extern struct i915_params i915 __read_mostly;
......
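Two module parameters are added above: `enable_dp_mst` and `inject_load_failure`, the latter forcing an error at the Nth failure check point during driver load. The driver-side check points are not shown in this diff, so the sketch below is a hypothetical model of how such a counter-driven injection helper could work; `inject_failure_here()` and the counter are invented names for illustration only.

```c
/* Hypothetical sketch of a failure-injection check point driven by a
 * module parameter like i915.inject_load_failure. Only the parameter
 * itself appears in the diff above; everything else here is a stand-in.
 */
#include <stdio.h>

static unsigned int inject_load_failure; /* 0 = disabled, N = fail at Nth check */
static unsigned int load_fail_count;

static int inject_failure_here(const char *what)
{
	if (inject_load_failure == 0)
		return 0;
	if (++load_fail_count < inject_load_failure)
		return 0;
	fprintf(stderr, "injecting failure at check point %u (%s)\n",
		load_fail_count, what);
	return -1;
}

static int driver_load(void)
{
	if (inject_failure_here("mmio setup"))
		return -1;
	if (inject_failure_here("irq install"))
		return -1;
	if (inject_failure_here("modeset init"))
		return -1;
	return 0;
}

int main(void)
{
	inject_load_failure = 2; /* pretend the user asked for a failure at point 2 */
	printf("driver_load() = %d\n", driver_load());
	return 0;
}
```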
This diff is collapsed.
......@@ -370,6 +370,8 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
intel_runtime_pm_get(dev_priv);
mutex_lock(&dev_priv->rps.hw_lock);
val = intel_freq_opcode(dev_priv, val);
......@@ -378,6 +380,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
val > dev_priv->rps.max_freq ||
val < dev_priv->rps.min_freq_softlimit) {
mutex_unlock(&dev_priv->rps.hw_lock);
intel_runtime_pm_put(dev_priv);
return -EINVAL;
}
......@@ -398,6 +401,8 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
mutex_unlock(&dev_priv->rps.hw_lock);
intel_runtime_pm_put(dev_priv);
return count;
}
......@@ -433,6 +438,8 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
intel_runtime_pm_get(dev_priv);
mutex_lock(&dev_priv->rps.hw_lock);
val = intel_freq_opcode(dev_priv, val);
......@@ -441,6 +448,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
val > dev_priv->rps.max_freq ||
val > dev_priv->rps.max_freq_softlimit) {
mutex_unlock(&dev_priv->rps.hw_lock);
intel_runtime_pm_put(dev_priv);
return -EINVAL;
}
......@@ -457,6 +465,8 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
mutex_unlock(&dev_priv->rps.hw_lock);
intel_runtime_pm_put(dev_priv);
return count;
}
......
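The sysfs hunks above bracket the `rps.hw_lock` critical section with `intel_runtime_pm_get()`/`intel_runtime_pm_put()`, including on the early `-EINVAL` return. A simplified standalone model of that bracketing pattern follows; the stub lock and PM helpers are placeholders, not the driver's functions.

```c
/* Simplified model of the get/lock ... unlock/put bracketing added to
 * the gt_{min,max}_freq_mhz_store() paths above. The pm and lock
 * helpers are stubs for illustration only.
 */
#include <stdio.h>

static int pm_refcount, lock_held;

static void runtime_pm_get(void) { pm_refcount++; }
static void runtime_pm_put(void) { pm_refcount--; }
static void hw_lock(void)        { lock_held = 1; }
static void hw_unlock(void)      { lock_held = 0; }

/* Every exit path must undo both the lock and the PM reference,
 * exactly as the early -EINVAL return in the diff now does. */
static int store_freq(int val, int min, int max)
{
	runtime_pm_get();
	hw_lock();

	if (val < min || val > max) {
		hw_unlock();
		runtime_pm_put();
		return -22; /* -EINVAL */
	}

	/* ... program the new frequency limit here ... */

	hw_unlock();
	runtime_pm_put();
	return 0;
}

int main(void)
{
	printf("store_freq(500, 300, 1200) = %d\n", store_freq(500, 300, 1200));
	printf("store_freq(50, 300, 1200)  = %d\n", store_freq(50, 300, 1200));
	printf("pm_refcount=%d lock_held=%d\n", pm_refcount, lock_held);
	return 0;
}
```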
......@@ -464,7 +464,7 @@ TRACE_EVENT(i915_gem_ring_sync_to,
TP_fast_assign(
__entry->dev = from->dev->primary->index;
__entry->sync_from = from->id;
__entry->sync_to = to_req->ring->id;
__entry->sync_to = to_req->engine->id;
__entry->seqno = i915_gem_request_get_seqno(req);
),
......@@ -486,13 +486,13 @@ TRACE_EVENT(i915_gem_ring_dispatch,
),
TP_fast_assign(
struct intel_engine_cs *ring =
i915_gem_request_get_ring(req);
__entry->dev = ring->dev->primary->index;
__entry->ring = ring->id;
struct intel_engine_cs *engine =
i915_gem_request_get_engine(req);
__entry->dev = engine->dev->primary->index;
__entry->ring = engine->id;
__entry->seqno = i915_gem_request_get_seqno(req);
__entry->flags = flags;
i915_trace_irq_get(ring, req);
i915_trace_irq_get(engine, req);
),
TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
......@@ -511,8 +511,8 @@ TRACE_EVENT(i915_gem_ring_flush,
),
TP_fast_assign(
__entry->dev = req->ring->dev->primary->index;
__entry->ring = req->ring->id;
__entry->dev = req->engine->dev->primary->index;
__entry->ring = req->engine->id;
__entry->invalidate = invalidate;
__entry->flush = flush;
),
......@@ -533,10 +533,10 @@ DECLARE_EVENT_CLASS(i915_gem_request,
),
TP_fast_assign(
struct intel_engine_cs *ring =
i915_gem_request_get_ring(req);
__entry->dev = ring->dev->primary->index;
__entry->ring = ring->id;
struct intel_engine_cs *engine =
i915_gem_request_get_engine(req);
__entry->dev = engine->dev->primary->index;
__entry->ring = engine->id;
__entry->seqno = i915_gem_request_get_seqno(req);
),
......@@ -550,8 +550,8 @@ DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
);
TRACE_EVENT(i915_gem_request_notify,
TP_PROTO(struct intel_engine_cs *ring),
TP_ARGS(ring),
TP_PROTO(struct intel_engine_cs *engine),
TP_ARGS(engine),
TP_STRUCT__entry(
__field(u32, dev)
......@@ -560,9 +560,9 @@ TRACE_EVENT(i915_gem_request_notify,
),
TP_fast_assign(
__entry->dev = ring->dev->primary->index;
__entry->ring = ring->id;
__entry->seqno = ring->get_seqno(ring, false);
__entry->dev = engine->dev->primary->index;
__entry->ring = engine->id;
__entry->seqno = engine->get_seqno(engine, false);
),
TP_printk("dev=%u, ring=%u, seqno=%u",
......@@ -597,13 +597,13 @@ TRACE_EVENT(i915_gem_request_wait_begin,
* less desirable.
*/
TP_fast_assign(
struct intel_engine_cs *ring =
i915_gem_request_get_ring(req);
__entry->dev = ring->dev->primary->index;
__entry->ring = ring->id;
struct intel_engine_cs *engine =
i915_gem_request_get_engine(req);
__entry->dev = engine->dev->primary->index;
__entry->ring = engine->id;
__entry->seqno = i915_gem_request_get_seqno(req);
__entry->blocking =
mutex_is_locked(&ring->dev->struct_mutex);
mutex_is_locked(&engine->dev->struct_mutex);
),
TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s",
......@@ -777,9 +777,9 @@ DEFINE_EVENT(i915_context, i915_context_free,
* called only if full ppgtt is enabled.
*/
TRACE_EVENT(switch_mm,
TP_PROTO(struct intel_engine_cs *ring, struct intel_context *to),
TP_PROTO(struct intel_engine_cs *engine, struct intel_context *to),
TP_ARGS(ring, to),
TP_ARGS(engine, to),
TP_STRUCT__entry(
__field(u32, ring)
......@@ -789,10 +789,10 @@ TRACE_EVENT(switch_mm,
),
TP_fast_assign(
__entry->ring = ring->id;
__entry->ring = engine->id;
__entry->to = to;
__entry->vm = to->ppgtt? &to->ppgtt->base : NULL;
__entry->dev = ring->dev->primary->index;
__entry->dev = engine->dev->primary->index;
),
TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p",
......
......@@ -181,7 +181,7 @@ static int vgt_balloon_space(struct drm_mm *mm,
int intel_vgt_balloon(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
struct i915_address_space *ggtt_vm = &dev_priv->ggtt.base;
unsigned long ggtt_vm_end = ggtt_vm->start + ggtt_vm->total;
unsigned long mappable_base, mappable_size, mappable_end;
......@@ -203,18 +203,18 @@ int intel_vgt_balloon(struct drm_device *dev)
unmappable_base, unmappable_size / 1024);
if (mappable_base < ggtt_vm->start ||
mappable_end > dev_priv->gtt.mappable_end ||
unmappable_base < dev_priv->gtt.mappable_end ||
mappable_end > dev_priv->ggtt.mappable_end ||
unmappable_base < dev_priv->ggtt.mappable_end ||
unmappable_end > ggtt_vm_end) {
DRM_ERROR("Invalid ballooning configuration!\n");
return -EINVAL;
}
/* Unmappable graphic memory ballooning */
if (unmappable_base > dev_priv->gtt.mappable_end) {
if (unmappable_base > dev_priv->ggtt.mappable_end) {
ret = vgt_balloon_space(&ggtt_vm->mm,
&bl_info.space[2],
dev_priv->gtt.mappable_end,
dev_priv->ggtt.mappable_end,
unmappable_base);
if (ret)
......@@ -244,11 +244,11 @@ int intel_vgt_balloon(struct drm_device *dev)
goto err;
}
if (mappable_end < dev_priv->gtt.mappable_end) {
if (mappable_end < dev_priv->ggtt.mappable_end) {
ret = vgt_balloon_space(&ggtt_vm->mm,
&bl_info.space[1],
mappable_end,
dev_priv->gtt.mappable_end);
dev_priv->ggtt.mappable_end);
if (ret)
goto err;
......
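`intel_vgt_balloon()` above checks that the guest's mappable window ends before `ggtt.mappable_end` and that the unmappable window lies entirely between `mappable_end` and the end of the GGTT before reserving the ballooned ranges. The toy sketch below reproduces just that range validation; the structure and field names are simplified stand-ins, not the vGPU code.

```c
/* Toy version of the range validation done by intel_vgt_balloon()
 * above. Field names are simplified stand-ins.
 */
#include <stdio.h>
#include <stdint.h>

struct ggtt_layout {
	uint64_t start, total, mappable_end;
};

static int balloon_ranges_valid(const struct ggtt_layout *ggtt,
				uint64_t mappable_base, uint64_t mappable_size,
				uint64_t unmappable_base, uint64_t unmappable_size)
{
	uint64_t ggtt_end = ggtt->start + ggtt->total;
	uint64_t mappable_end = mappable_base + mappable_size;
	uint64_t unmappable_end = unmappable_base + unmappable_size;

	/* mappable window must sit below mappable_end, unmappable above it */
	if (mappable_base < ggtt->start ||
	    mappable_end > ggtt->mappable_end ||
	    unmappable_base < ggtt->mappable_end ||
	    unmappable_end > ggtt_end)
		return 0;
	return 1;
}

int main(void)
{
	struct ggtt_layout ggtt = { .start = 0, .total = 2ull << 30,
				    .mappable_end = 256ull << 20 };

	printf("valid: %d\n",
	       balloon_ranges_valid(&ggtt, 0, 128ull << 20,
				    512ull << 20, 512ull << 20));
	return 0;
}
```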
......@@ -96,8 +96,11 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
crtc_state->update_pipe = false;
crtc_state->disable_lp_wm = false;
crtc_state->disable_cxsr = false;
crtc_state->wm_changed = false;
crtc_state->update_wm_pre = false;
crtc_state->update_wm_post = false;
crtc_state->fb_changed = false;
crtc_state->wm.need_postvbl_update = false;
crtc_state->fb_bits = 0;
return &crtc_state->base;
}
......
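The `intel_crtc_duplicate_state()` hunk above resets the per-commit flags on the copied CRTC state, splitting the old `wm_changed` into `update_wm_pre`/`update_wm_post` and adding `fb_changed`. The sketch below shows the general duplicate-then-clear-transient-flags pattern with toy types; it is not the atomic helper's actual code.

```c
/* Toy model of duplicating a state object and clearing the flags that
 * are only meaningful for a single commit, as intel_crtc_duplicate_state()
 * does above. The struct below is a simplified stand-in.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct toy_crtc_state {
	int active;              /* persistent: survives duplication */
	int update_wm_pre;       /* transient: reset for every new commit */
	int update_wm_post;
	int fb_changed;
};

static struct toy_crtc_state *duplicate_state(const struct toy_crtc_state *old)
{
	struct toy_crtc_state *s = malloc(sizeof(*s));

	if (!s)
		return NULL;
	memcpy(s, old, sizeof(*s));        /* start from the current state */
	s->update_wm_pre = 0;              /* then drop per-commit tracking */
	s->update_wm_post = 0;
	s->fb_changed = 0;
	return s;
}

int main(void)
{
	struct toy_crtc_state old = { .active = 1, .update_wm_pre = 1, .fb_changed = 1 };
	struct toy_crtc_state *new_state = duplicate_state(&old);

	if (!new_state)
		return 1;
	printf("active=%d update_wm_pre=%d fb_changed=%d\n",
	       new_state->active, new_state->update_wm_pre, new_state->fb_changed);
	free(new_state);
	return 0;
}
```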
This diff is collapsed. (The remaining file diffs in this commit are collapsed as well.)