Commit b1c4f7fe authored by Dave Airlie

Merge tag 'drm-intel-next-2019-04-17' of git://anongit.freedesktop.org/drm/drm-intel into drm-next

UAPI Changes:

- uAPI "Fixes:" patch for the upcoming kernel 5.1, included here too

  We have an Ack from the media folks (only current user) for this
  late tweak

Cross-subsystem Changes:

- ALSA: hda: Fix racy display power access (Takashi, Chris)

Driver Changes:

- DDI and MIPI-DSI clocks fixes for Icelake (Vandita)
- Fix Icelake frequency change/locking (RPS) (Mika)
- Temporarily disable ppGTT read-only bit on Icelake (Mika)
- Add missing Icelake W/As (Mika)
- Enable 12 deep CSB status FIFO on Icelake (Mika)
- Inherit more Icelake code for Elkhartlake (Bob, Jani)

- Handle catastrophic error on engine reset (Mika)
- Shortcut readiness to reset check (Mika)
- Regression fix for GEM_BUSY causing us to report a mixed uabi-class request as not busy (Chris)
- Revert back to max link rate and lane count on eDP (Jani)
- Fix pipe BPP readout for BXT/GLK DSI (Ville)
- Set DP min_bpp to 8*3 for non-RGB output formats (Ville)
- Enable coarse preemption boundaries for Gen8 (Chris)
- Do not enable FEC without DSC (Ville)
- Restore correct BXT DDI latency optim setting calculation (Ville)
- Always reset context's RING registers to avoid running workload twice during reset (Chris)
- Set GPU wedged on driver unload (Janusz)
- Consolidate two similar barriers from timeline into one (Chris)
- Only reset the pinned kernel contexts on resume (Chris)
- Wakeref tracking improvements (Chris, Imre)
- Lockdep fixes for shrinker interactions (Chris)
- Bump ready tasks ahead of busywaits in prep of semaphore use (Chris)

- Huge step in splitting display code into fine-grained files (Jani)
- Refactor the IRQ init/reset macros for code saving (Paulo)
- Convert IRQ initialization code to uncore MMIO access (Paulo)
- Convert workarounds code to use uncore MMIO access (Chris)
- Nuke drm_crtc_state and use intel_atomic_state instead (Manasi)
- Update SKL clock-gating WA (Radhakrishna, Ville)
- Isolate GuC reset code flow (Chris)
- Expose force_dsc_enable through debugfs (Manasi)
- Header standalone compile testing framework (Jani)
- Code cleanups to reduce driver footprint (Chris)
- PSR code fixes and cleanups (Jose)
- Sparse and kerneldoc updates (Chris)
- Suppress spurious combo PHY B warning (Ville)

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190418080426.GA6409@jlahtine-desk.ger.corp.intel.com
......@@ -525,7 +525,8 @@ static const struct pci_device_id intel_early_ids[] __initconst = {
INTEL_I945G_IDS(&gen3_early_ops),
INTEL_I945GM_IDS(&gen3_early_ops),
INTEL_VLV_IDS(&gen6_early_ops),
INTEL_PINEVIEW_IDS(&gen3_early_ops),
INTEL_PINEVIEW_G_IDS(&gen3_early_ops),
INTEL_PINEVIEW_M_IDS(&gen3_early_ops),
INTEL_I965G_IDS(&gen3_early_ops),
INTEL_G33_IDS(&gen3_early_ops),
INTEL_I965GM_IDS(&gen3_early_ops),
......@@ -547,6 +548,7 @@ static const struct pci_device_id intel_early_ids[] __initconst = {
INTEL_GLK_IDS(&gen9_early_ops),
INTEL_CNL_IDS(&gen9_early_ops),
INTEL_ICL_11_IDS(&gen11_early_ops),
INTEL_EHL_IDS(&gen11_early_ops),
};
struct resource intel_graphics_stolen_res __ro_after_init = DEFINE_RES_MEM(0, 0);
......
......@@ -32,10 +32,13 @@ CFLAGS_intel_fbdev.o = $(call cc-disable-warning, override-init)
subdir-ccflags-y += \
$(call as-instr,movntdqa (%eax)$(comma)%xmm0,-DCONFIG_AS_MOVNTDQA)
# Extra header tests
include $(src)/Makefile.header-test
# Please keep these build lists sorted!
# core driver code
i915-y := i915_drv.o \
i915-y += i915_drv.o \
i915_irq.o \
i915_memcpy.o \
i915_mm.o \
......@@ -57,15 +60,6 @@ i915-$(CONFIG_COMPAT) += i915_ioc32.o
i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o intel_pipe_crc.o
i915-$(CONFIG_PERF_EVENTS) += i915_pmu.o
# Test the headers are compilable as standalone units
i915-$(CONFIG_DRM_I915_WERROR) += \
test_i915_active_types_standalone.o \
test_i915_gem_context_types_standalone.o \
test_i915_timeline_types_standalone.o \
test_intel_context_types_standalone.o \
test_intel_engine_types_standalone.o \
test_intel_workarounds_types_standalone.o
# GEM code
i915-y += \
i915_active.o \
......
# SPDX-License-Identifier: MIT
# Copyright © 2019 Intel Corporation
# Test the headers are compilable as standalone units
header_test := \
i915_active_types.h \
i915_gem_context_types.h \
i915_priolist_types.h \
i915_scheduler_types.h \
i915_timeline_types.h \
intel_atomic_plane.h \
intel_audio.h \
intel_cdclk.h \
intel_color.h \
intel_connector.h \
intel_context_types.h \
intel_crt.h \
intel_csr.h \
intel_ddi.h \
intel_dp.h \
intel_dvo.h \
intel_engine_types.h \
intel_fbc.h \
intel_fbdev.h \
intel_frontbuffer.h \
intel_hdcp.h \
intel_hdmi.h \
intel_lspcon.h \
intel_lvds.h \
intel_panel.h \
intel_pipe_crc.h \
intel_pm.h \
intel_psr.h \
intel_sdvo.h \
intel_sprite.h \
intel_tv.h \
intel_workarounds_types.h
quiet_cmd_header_test = HDRTEST $@
cmd_header_test = echo "\#include \"$(<F)\"" > $@
header_test_%.c: %.h
$(call cmd,header_test)
i915-$(CONFIG_DRM_I915_WERROR) += $(foreach h,$(header_test),$(patsubst %.h,header_test_%.o,$(h)))
clean-files += $(foreach h,$(header_test),$(patsubst %.h,header_test_%.c,$(h)))
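For reference, the rule above turns every header in $(header_test) into a one-line generated translation unit and, under CONFIG_DRM_I915_WERROR, compiles it standalone, proving the header pulls in all of its own dependencies. A hypothetical generated file, taking intel_dp.h as the example, would contain just:

/* header_test_intel_dp.c, emitted by the HDRTEST rule above */
#include "intel_dp.h"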
......@@ -1077,6 +1077,7 @@ static int cmd_handler_pipe_control(struct parser_exec_state *s)
bool index_mode = false;
unsigned int post_sync;
int ret = 0;
u32 hws_pga, val;
post_sync = (cmd_val(s, 1) & PIPE_CONTROL_POST_SYNC_OP_MASK) >> 14;
......@@ -1100,6 +1101,15 @@ static int cmd_handler_pipe_control(struct parser_exec_state *s)
index_mode = true;
ret |= cmd_address_audit(s, gma, sizeof(u64),
index_mode);
if (ret)
return ret;
if (index_mode) {
hws_pga = s->vgpu->hws_pga[s->ring_id];
gma = hws_pga + gma;
patch_value(s, cmd_ptr(s, 2), gma);
val = cmd_val(s, 1) & (~(1 << 21));
patch_value(s, cmd_ptr(s, 1), val);
}
}
}
}
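In words: when PIPE_CONTROL requests a post-sync write in index mode (bit 21 of dword 1 set), the target is an offset within the hardware status page, and a vGPU's status page lives at a host-chosen location rather than where the guest assumes. The patching above rewrites the command to a direct graphics address and drops the index bit; a sketch of the dword transformation (layout read off the code above):

/*
 * before: dw1 bit 21 set,     dw2 = offset within the HWSP
 * after:  dw1 bit 21 cleared, dw2 = vgpu->hws_pga[ring_id] + offset
 * The post-sync write itself is unchanged; it is simply re-aimed at
 * the vGPU's own status page.  The MI_FLUSH_DW handler below applies
 * the same fix-up one dword earlier.
 */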
......@@ -1317,8 +1327,14 @@ static int gen8_update_plane_mmio_from_mi_display_flip(
info->tile_val << 10);
}
vgpu_vreg_t(vgpu, PIPE_FRMCOUNT_G4X(info->pipe))++;
intel_vgpu_trigger_virtual_event(vgpu, info->event);
if (info->plane == PLANE_PRIMARY)
vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(info->pipe))++;
if (info->async_flip)
intel_vgpu_trigger_virtual_event(vgpu, info->event);
else
set_bit(info->event, vgpu->irq.flip_done_event[info->pipe]);
return 0;
}
......@@ -1563,6 +1579,7 @@ static int cmd_handler_mi_flush_dw(struct parser_exec_state *s)
unsigned long gma;
bool index_mode = false;
int ret = 0;
u32 hws_pga, val;
/* Check post-sync and ppgtt bit */
if (((cmd_val(s, 0) >> 14) & 0x3) && (cmd_val(s, 1) & (1 << 2))) {
......@@ -1573,6 +1590,15 @@ static int cmd_handler_mi_flush_dw(struct parser_exec_state *s)
if (cmd_val(s, 0) & (1 << 21))
index_mode = true;
ret = cmd_address_audit(s, gma, sizeof(u64), index_mode);
if (ret)
return ret;
if (index_mode) {
hws_pga = s->vgpu->hws_pga[s->ring_id];
gma = hws_pga + gma;
patch_value(s, cmd_ptr(s, 1), gma);
val = cmd_val(s, 0) & (~(1 << 21));
patch_value(s, cmd_ptr(s, 0), val);
}
}
/* Check notify bit */
if ((cmd_val(s, 0) & (1 << 8)))
......
......@@ -407,7 +407,6 @@ static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
if (!pipe_is_enabled(vgpu, pipe))
continue;
vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++;
intel_vgpu_trigger_virtual_event(vgpu, event);
}
......
......@@ -526,12 +526,13 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
}
static void clean_execlist(struct intel_vgpu *vgpu, unsigned long engine_mask)
static void clean_execlist(struct intel_vgpu *vgpu,
intel_engine_mask_t engine_mask)
{
unsigned int tmp;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct intel_engine_cs *engine;
struct intel_vgpu_submission *s = &vgpu->submission;
intel_engine_mask_t tmp;
for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
kfree(s->ring_scan_buffer[engine->id]);
......@@ -541,18 +542,18 @@ static void clean_execlist(struct intel_vgpu *vgpu, unsigned long engine_mask)
}
static void reset_execlist(struct intel_vgpu *vgpu,
unsigned long engine_mask)
intel_engine_mask_t engine_mask)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct intel_engine_cs *engine;
unsigned int tmp;
intel_engine_mask_t tmp;
for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
init_vgpu_execlist(vgpu, engine->id);
}
static int init_execlist(struct intel_vgpu *vgpu,
unsigned long engine_mask)
intel_engine_mask_t engine_mask)
{
reset_execlist(vgpu, engine_mask);
return 0;
......
......@@ -180,6 +180,6 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu);
int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id);
void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
unsigned long engine_mask);
intel_engine_mask_t engine_mask);
#endif /*_GVT_EXECLIST_H_*/
......@@ -2504,6 +2504,7 @@ static void clean_spt_oos(struct intel_gvt *gvt)
list_for_each_safe(pos, n, &gtt->oos_page_free_list_head) {
oos_page = container_of(pos, struct intel_vgpu_oos_page, list);
list_del(&oos_page->list);
free_page((unsigned long)oos_page->mem);
kfree(oos_page);
}
}
......@@ -2524,6 +2525,12 @@ static int setup_spt_oos(struct intel_gvt *gvt)
ret = -ENOMEM;
goto fail;
}
oos_page->mem = (void *)__get_free_pages(GFP_KERNEL, 0);
if (!oos_page->mem) {
ret = -ENOMEM;
kfree(oos_page);
goto fail;
}
INIT_LIST_HEAD(&oos_page->list);
INIT_LIST_HEAD(&oos_page->vm_list);
......
......@@ -222,7 +222,7 @@ struct intel_vgpu_oos_page {
struct list_head list;
struct list_head vm_list;
int id;
unsigned char mem[I915_GTT_PAGE_SIZE];
void *mem;
};
#define GTT_ENTRY_NUM_IN_ONE_PAGE 512
......
......@@ -94,7 +94,6 @@ struct intel_vgpu_fence {
struct intel_vgpu_mmio {
void *vreg;
void *sreg;
};
#define INTEL_GVT_MAX_BAR_NUM 4
......@@ -111,11 +110,9 @@ struct intel_vgpu_cfg_space {
#define vgpu_cfg_space(vgpu) ((vgpu)->cfg_space.virtual_cfg_space)
#define INTEL_GVT_MAX_PIPE 4
struct intel_vgpu_irq {
bool irq_warn_once[INTEL_GVT_EVENT_MAX];
DECLARE_BITMAP(flip_done_event[INTEL_GVT_MAX_PIPE],
DECLARE_BITMAP(flip_done_event[I915_MAX_PIPES],
INTEL_GVT_EVENT_MAX);
};
......@@ -144,9 +141,9 @@ enum {
struct intel_vgpu_submission_ops {
const char *name;
int (*init)(struct intel_vgpu *vgpu, unsigned long engine_mask);
void (*clean)(struct intel_vgpu *vgpu, unsigned long engine_mask);
void (*reset)(struct intel_vgpu *vgpu, unsigned long engine_mask);
int (*init)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask);
void (*clean)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask);
void (*reset)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask);
};
struct intel_vgpu_submission {
......@@ -449,10 +446,6 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
(*(u64 *)(vgpu->mmio.vreg + i915_mmio_reg_offset(reg)))
#define vgpu_vreg64(vgpu, offset) \
(*(u64 *)(vgpu->mmio.vreg + (offset)))
#define vgpu_sreg_t(vgpu, reg) \
(*(u32 *)(vgpu->mmio.sreg + i915_mmio_reg_offset(reg)))
#define vgpu_sreg(vgpu, offset) \
(*(u32 *)(vgpu->mmio.sreg + (offset)))
#define for_each_active_vgpu(gvt, vgpu, id) \
idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) \
......@@ -488,7 +481,7 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_release_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
unsigned int engine_mask);
intel_engine_mask_t engine_mask);
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu);
......
......@@ -311,7 +311,7 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
unsigned int engine_mask = 0;
intel_engine_mask_t engine_mask = 0;
u32 data;
write_vreg(vgpu, offset, p_data, bytes);
......@@ -750,18 +750,19 @@ static int pri_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
unsigned int index = DSPSURF_TO_PIPE(offset);
i915_reg_t surflive_reg = DSPSURFLIVE(index);
int flip_event[] = {
[PIPE_A] = PRIMARY_A_FLIP_DONE,
[PIPE_B] = PRIMARY_B_FLIP_DONE,
[PIPE_C] = PRIMARY_C_FLIP_DONE,
};
u32 pipe = DSPSURF_TO_PIPE(offset);
int event = SKL_FLIP_EVENT(pipe, PLANE_PRIMARY);
write_vreg(vgpu, offset, p_data, bytes);
vgpu_vreg_t(vgpu, surflive_reg) = vgpu_vreg(vgpu, offset);
vgpu_vreg_t(vgpu, DSPSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);
vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++;
if (vgpu_vreg_t(vgpu, DSPCNTR(pipe)) & PLANE_CTL_ASYNC_FLIP)
intel_vgpu_trigger_virtual_event(vgpu, event);
else
set_bit(event, vgpu->irq.flip_done_event[pipe]);
set_bit(flip_event[index], vgpu->irq.flip_done_event[index]);
return 0;
}
......@@ -771,18 +772,42 @@ static int pri_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
static int spr_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
unsigned int index = SPRSURF_TO_PIPE(offset);
i915_reg_t surflive_reg = SPRSURFLIVE(index);
int flip_event[] = {
[PIPE_A] = SPRITE_A_FLIP_DONE,
[PIPE_B] = SPRITE_B_FLIP_DONE,
[PIPE_C] = SPRITE_C_FLIP_DONE,
};
u32 pipe = SPRSURF_TO_PIPE(offset);
int event = SKL_FLIP_EVENT(pipe, PLANE_SPRITE0);
write_vreg(vgpu, offset, p_data, bytes);
vgpu_vreg_t(vgpu, SPRSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);
if (vgpu_vreg_t(vgpu, SPRCTL(pipe)) & PLANE_CTL_ASYNC_FLIP)
intel_vgpu_trigger_virtual_event(vgpu, event);
else
set_bit(event, vgpu->irq.flip_done_event[pipe]);
return 0;
}
static int reg50080_mmio_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data,
unsigned int bytes)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
enum pipe pipe = REG_50080_TO_PIPE(offset);
enum plane_id plane = REG_50080_TO_PLANE(offset);
int event = SKL_FLIP_EVENT(pipe, plane);
write_vreg(vgpu, offset, p_data, bytes);
vgpu_vreg_t(vgpu, surflive_reg) = vgpu_vreg(vgpu, offset);
if (plane == PLANE_PRIMARY) {
vgpu_vreg_t(vgpu, DSPSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);
vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++;
} else {
vgpu_vreg_t(vgpu, SPRSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);
}
if ((vgpu_vreg(vgpu, offset) & REG50080_FLIP_TYPE_MASK) == REG50080_FLIP_TYPE_ASYNC)
intel_vgpu_trigger_virtual_event(vgpu, event);
else
set_bit(event, vgpu->irq.flip_done_event[pipe]);
set_bit(flip_event[index], vgpu->irq.flip_done_event[index]);
return 0;
}
......@@ -1969,6 +1994,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DH(DSPSURF(PIPE_A), D_ALL, NULL, pri_surf_mmio_write);
MMIO_D(DSPOFFSET(PIPE_A), D_ALL);
MMIO_D(DSPSURFLIVE(PIPE_A), D_ALL);
MMIO_DH(REG_50080(PIPE_A, PLANE_PRIMARY), D_ALL, NULL,
reg50080_mmio_write);
MMIO_D(DSPCNTR(PIPE_B), D_ALL);
MMIO_D(DSPADDR(PIPE_B), D_ALL);
......@@ -1978,6 +2005,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DH(DSPSURF(PIPE_B), D_ALL, NULL, pri_surf_mmio_write);
MMIO_D(DSPOFFSET(PIPE_B), D_ALL);
MMIO_D(DSPSURFLIVE(PIPE_B), D_ALL);
MMIO_DH(REG_50080(PIPE_B, PLANE_PRIMARY), D_ALL, NULL,
reg50080_mmio_write);
MMIO_D(DSPCNTR(PIPE_C), D_ALL);
MMIO_D(DSPADDR(PIPE_C), D_ALL);
......@@ -1987,6 +2016,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DH(DSPSURF(PIPE_C), D_ALL, NULL, pri_surf_mmio_write);
MMIO_D(DSPOFFSET(PIPE_C), D_ALL);
MMIO_D(DSPSURFLIVE(PIPE_C), D_ALL);
MMIO_DH(REG_50080(PIPE_C, PLANE_PRIMARY), D_ALL, NULL,
reg50080_mmio_write);
MMIO_D(SPRCTL(PIPE_A), D_ALL);
MMIO_D(SPRLINOFF(PIPE_A), D_ALL);
......@@ -2000,6 +2031,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_D(SPROFFSET(PIPE_A), D_ALL);
MMIO_D(SPRSCALE(PIPE_A), D_ALL);
MMIO_D(SPRSURFLIVE(PIPE_A), D_ALL);
MMIO_DH(REG_50080(PIPE_A, PLANE_SPRITE0), D_ALL, NULL,
reg50080_mmio_write);
MMIO_D(SPRCTL(PIPE_B), D_ALL);
MMIO_D(SPRLINOFF(PIPE_B), D_ALL);
......@@ -2013,6 +2046,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_D(SPROFFSET(PIPE_B), D_ALL);
MMIO_D(SPRSCALE(PIPE_B), D_ALL);
MMIO_D(SPRSURFLIVE(PIPE_B), D_ALL);
MMIO_DH(REG_50080(PIPE_B, PLANE_SPRITE0), D_ALL, NULL,
reg50080_mmio_write);
MMIO_D(SPRCTL(PIPE_C), D_ALL);
MMIO_D(SPRLINOFF(PIPE_C), D_ALL);
......@@ -2026,6 +2061,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_D(SPROFFSET(PIPE_C), D_ALL);
MMIO_D(SPRSCALE(PIPE_C), D_ALL);
MMIO_D(SPRSURFLIVE(PIPE_C), D_ALL);
MMIO_DH(REG_50080(PIPE_C, PLANE_SPRITE0), D_ALL, NULL,
reg50080_mmio_write);
MMIO_D(HTOTAL(TRANSCODER_A), D_ALL);
MMIO_D(HBLANK(TRANSCODER_A), D_ALL);
......@@ -2827,26 +2864,26 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_DH(DBUF_CTL, D_SKL_PLUS, NULL, gen9_dbuf_ctl_mmio_write);
MMIO_D(_MMIO(0xa210), D_SKL_PLUS);
MMIO_D(GEN9_PG_ENABLE, D_SKL_PLUS);
MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DH(_MMIO(0x4ddc), D_SKL_PLUS, NULL, NULL);
MMIO_DH(_MMIO(0x42080), D_SKL_PLUS, NULL, NULL);
MMIO_D(_MMIO(0x45504), D_SKL_PLUS);
MMIO_D(_MMIO(0x45520), D_SKL_PLUS);
MMIO_D(_MMIO(0x46000), D_SKL_PLUS);
MMIO_DH(_MMIO(0x46010), D_SKL_PLUS, NULL, skl_lcpll_write);
MMIO_DH(_MMIO(0x46014), D_SKL_PLUS, NULL, skl_lcpll_write);
MMIO_D(_MMIO(0x6C040), D_SKL_PLUS);
MMIO_D(_MMIO(0x6C048), D_SKL_PLUS);
MMIO_D(_MMIO(0x6C050), D_SKL_PLUS);
MMIO_D(_MMIO(0x6C044), D_SKL_PLUS);
MMIO_D(_MMIO(0x6C04C), D_SKL_PLUS);
MMIO_D(_MMIO(0x6C054), D_SKL_PLUS);
MMIO_D(_MMIO(0x6c058), D_SKL_PLUS);
MMIO_D(_MMIO(0x6c05c), D_SKL_PLUS);
MMIO_DH(_MMIO(0x6c060), D_SKL_PLUS, dpll_status_read, NULL);
MMIO_DH(MMCD_MISC_CTRL, D_SKL_PLUS, NULL, NULL);
MMIO_DH(CHICKEN_PAR1_1, D_SKL_PLUS, NULL, NULL);
MMIO_D(DC_STATE_EN, D_SKL_PLUS);
MMIO_D(DC_STATE_DEBUG, D_SKL_PLUS);
MMIO_D(CDCLK_CTL, D_SKL_PLUS);
MMIO_DH(LCPLL1_CTL, D_SKL_PLUS, NULL, skl_lcpll_write);
MMIO_DH(LCPLL2_CTL, D_SKL_PLUS, NULL, skl_lcpll_write);
MMIO_D(_MMIO(_DPLL1_CFGCR1), D_SKL_PLUS);
MMIO_D(_MMIO(_DPLL2_CFGCR1), D_SKL_PLUS);
MMIO_D(_MMIO(_DPLL3_CFGCR1), D_SKL_PLUS);
MMIO_D(_MMIO(_DPLL1_CFGCR2), D_SKL_PLUS);
MMIO_D(_MMIO(_DPLL2_CFGCR2), D_SKL_PLUS);
MMIO_D(_MMIO(_DPLL3_CFGCR2), D_SKL_PLUS);
MMIO_D(DPLL_CTRL1, D_SKL_PLUS);
MMIO_D(DPLL_CTRL2, D_SKL_PLUS);
MMIO_DH(DPLL_STATUS, D_SKL_PLUS, dpll_status_read, NULL);
MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write);
......@@ -2965,40 +3002,41 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 3)), D_SKL_PLUS, NULL, NULL);
MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 4)), D_SKL_PLUS, NULL, NULL);
MMIO_D(_MMIO(0x70380), D_SKL_PLUS);
MMIO_D(_MMIO(0x71380), D_SKL_PLUS);
MMIO_D(_MMIO(_PLANE_CTL_3_A), D_SKL_PLUS);
MMIO_D(_MMIO(_PLANE_CTL_3_B), D_SKL_PLUS);
MMIO_D(_MMIO(0x72380), D_SKL_PLUS);
MMIO_D(_MMIO(0x7239c), D_SKL_PLUS);
MMIO_D(_MMIO(0x7039c), D_SKL_PLUS);
MMIO_D(_MMIO(_PLANE_SURF_3_A), D_SKL_PLUS);
MMIO_D(_MMIO(0x8f074), D_SKL_PLUS);
MMIO_D(_MMIO(0x8f004), D_SKL_PLUS);
MMIO_D(_MMIO(0x8f034), D_SKL_PLUS);
MMIO_D(CSR_SSP_BASE, D_SKL_PLUS);
MMIO_D(CSR_HTP_SKL, D_SKL_PLUS);
MMIO_D(CSR_LAST_WRITE, D_SKL_PLUS);
MMIO_D(_MMIO(0xb11c), D_SKL_PLUS);
MMIO_D(BDW_SCRATCH1, D_SKL_PLUS);
MMIO_D(_MMIO(0x51000), D_SKL_PLUS);
MMIO_D(_MMIO(0x6c00c), D_SKL_PLUS);
MMIO_D(SKL_DFSM, D_SKL_PLUS);
MMIO_D(DISPIO_CR_TX_BMU_CR0, D_SKL_PLUS);
MMIO_F(_MMIO(0xc800), 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
MMIO_F(GEN9_GFX_MOCS(0), 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
NULL, NULL);
MMIO_F(_MMIO(0xb020), 0x80, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
MMIO_F(GEN7_L3CNTLREG2, 0x80, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
NULL, NULL);
MMIO_D(RPM_CONFIG0, D_SKL_PLUS);
MMIO_D(_MMIO(0xd08), D_SKL_PLUS);
MMIO_D(RC6_LOCATION, D_SKL_PLUS);
MMIO_DFH(_MMIO(0x20e0), D_SKL_PLUS, F_MODE_MASK, NULL, NULL);
MMIO_DFH(_MMIO(0x20ec), D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
MMIO_DFH(GEN7_FF_SLICE_CS_CHICKEN1, D_SKL_PLUS, F_MODE_MASK,
NULL, NULL);
MMIO_DFH(GEN9_CS_DEBUG_MODE1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
/* TRTT */
MMIO_DFH(_MMIO(0x4de0), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(_MMIO(0x4de4), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(_MMIO(0x4de8), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(_MMIO(0x4dec), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(_MMIO(0x4df0), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(_MMIO(0x4df4), D_SKL_PLUS, F_CMD_ACCESS,
MMIO_DFH(TRVATTL3PTRDW(0), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(TRVATTL3PTRDW(1), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(TRVATTL3PTRDW(2), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(TRVATTL3PTRDW(3), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(TRVADR, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(TRTTE, D_SKL_PLUS, F_CMD_ACCESS,
NULL, gen9_trtte_write);
MMIO_DH(_MMIO(0x4dfc), D_SKL_PLUS, NULL, gen9_trtt_chicken_write);
......@@ -3011,7 +3049,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_DH(DMA_CTRL, D_SKL_PLUS, NULL, dma_ctrl_write);
MMIO_D(_MMIO(0x65900), D_SKL_PLUS);
MMIO_D(_MMIO(0x1082c0), D_SKL_PLUS);
MMIO_D(GEN6_STOLEN_RESERVED, D_SKL_PLUS);
MMIO_D(_MMIO(0x4068), D_SKL_PLUS);
MMIO_D(_MMIO(0x67054), D_SKL_PLUS);
MMIO_D(_MMIO(0x6e560), D_SKL_PLUS);
......@@ -3042,8 +3080,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
MMIO_D(_MMIO(0x4ab8), D_KBL | D_CFL);
MMIO_D(_MMIO(0x2248), D_SKL_PLUS);
MMIO_D(GAMT_CHKN_BIT_REG, D_KBL);
MMIO_D(GEN9_CTX_PREEMPT_REG, D_KBL | D_SKL);
return 0;
}
......@@ -3265,7 +3303,7 @@ void intel_gvt_clean_mmio_info(struct intel_gvt *gvt)
/* Special MMIO blocks. */
static struct gvt_mmio_block mmio_blocks[] = {
{D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL},
{D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL},
{D_ALL, MCHBAR_MIRROR_REG_BASE, 0x4000, NULL, NULL},
{D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE,
pvinfo_mmio_read, pvinfo_mmio_write},
{D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL},
......@@ -3489,12 +3527,11 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
return mmio_info->read(vgpu, offset, pdata, bytes);
else {
u64 ro_mask = mmio_info->ro_mask;
u32 old_vreg = 0, old_sreg = 0;
u32 old_vreg = 0;
u64 data = 0;
if (intel_gvt_mmio_has_mode_mask(gvt, mmio_info->offset)) {
old_vreg = vgpu_vreg(vgpu, offset);
old_sreg = vgpu_sreg(vgpu, offset);
}
if (likely(!ro_mask))
......@@ -3516,8 +3553,6 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
vgpu_vreg(vgpu, offset) = (old_vreg & ~mask)
| (vgpu_vreg(vgpu, offset) & mask);
vgpu_sreg(vgpu, offset) = (old_sreg & ~mask)
| (vgpu_sreg(vgpu, offset) & mask);
}
}
......
......@@ -239,7 +239,6 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr)
if (dmlr) {
memcpy(vgpu->mmio.vreg, mmio, info->mmio_size);
memcpy(vgpu->mmio.sreg, mmio, info->mmio_size);
vgpu_vreg_t(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;
......@@ -280,7 +279,6 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr)
* touched
*/
memcpy(vgpu->mmio.vreg, mmio, GVT_GEN8_MMIO_RESET_OFFSET);
memcpy(vgpu->mmio.sreg, mmio, GVT_GEN8_MMIO_RESET_OFFSET);
}
}
......@@ -296,12 +294,10 @@ int intel_vgpu_init_mmio(struct intel_vgpu *vgpu)
{
const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
vgpu->mmio.vreg = vzalloc(array_size(info->mmio_size, 2));
vgpu->mmio.vreg = vzalloc(info->mmio_size);
if (!vgpu->mmio.vreg)
return -ENOMEM;
vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;
intel_vgpu_reset_mmio(vgpu, true);
return 0;
......@@ -315,5 +311,5 @@ int intel_vgpu_init_mmio(struct intel_vgpu *vgpu)
void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu)
{
vfree(vgpu->mmio.vreg);
vgpu->mmio.vreg = vgpu->mmio.sreg = NULL;
vgpu->mmio.vreg = NULL;
}
......@@ -68,7 +68,7 @@ static struct engine_mmio gen8_engine_mmio_list[] __cacheline_aligned = {
{BCS0, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
{BCS0, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
{BCS0, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
{BCS0, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */
{BCS0, RING_EXCC(BLT_RING_BASE), 0xffff, false}, /* 0x22028 */
{RCS0, INVALID_MMIO_REG, 0, false } /* Terminated */
};
......@@ -119,7 +119,7 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
{BCS0, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
{BCS0, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
{BCS0, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
{BCS0, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */
{BCS0, RING_EXCC(BLT_RING_BASE), 0xffff, false}, /* 0x22028 */
{VCS1, RING_EXCC(GEN8_BSD2_RING_BASE), 0xffff, false}, /* 0x1c028 */
......
......@@ -60,6 +60,37 @@
#define _REG_701C0(pipe, plane) (0x701c0 + pipe * 0x1000 + (plane - 1) * 0x100)
#define _REG_701C4(pipe, plane) (0x701c4 + pipe * 0x1000 + (plane - 1) * 0x100)
#define SKL_FLIP_EVENT(pipe, plane) (PRIMARY_A_FLIP_DONE + (plane) * 3 + (pipe))
#define PLANE_CTL_ASYNC_FLIP (1 << 9)
#define REG50080_FLIP_TYPE_MASK 0x3
#define REG50080_FLIP_TYPE_ASYNC 0x1
#define REG_50080(_pipe, _plane) ({ \
typeof(_pipe) (p) = (_pipe); \
typeof(_plane) (q) = (_plane); \
(((p) == PIPE_A) ? (((q) == PLANE_PRIMARY) ? (_MMIO(0x50080)) : \
(_MMIO(0x50090))) : \
(((p) == PIPE_B) ? (((q) == PLANE_PRIMARY) ? (_MMIO(0x50088)) : \
(_MMIO(0x50098))) : \
(((p) == PIPE_C) ? (((q) == PLANE_PRIMARY) ? (_MMIO(0x5008C)) : \
(_MMIO(0x5009C))) : \
(_MMIO(0x50080))))); })
#define REG_50080_TO_PIPE(_reg) ({ \
typeof(_reg) (reg) = (_reg); \
(((reg) == 0x50080 || (reg) == 0x50090) ? (PIPE_A) : \
(((reg) == 0x50088 || (reg) == 0x50098) ? (PIPE_B) : \
(((reg) == 0x5008C || (reg) == 0x5009C) ? (PIPE_C) : \
(INVALID_PIPE)))); })
#define REG_50080_TO_PLANE(_reg) ({ \
typeof(_reg) (reg) = (_reg); \
(((reg) == 0x50080 || (reg) == 0x50088 || (reg) == 0x5008C) ? \
(PLANE_PRIMARY) : \
(((reg) == 0x50090 || (reg) == 0x50098 || (reg) == 0x5009C) ? \
(PLANE_SPRITE0) : (I915_MAX_PLANES))); })
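A few worked values for the macros above (a sketch assuming PLANE_PRIMARY == 0, PLANE_SPRITE0 == 1, and that the gvt event enum lays out PRIMARY_A/B/C_FLIP_DONE followed by SPRITE_A/B/C_FLIP_DONE, as the stride of 3 implies):

/*
 * SKL_FLIP_EVENT(PIPE_B, PLANE_PRIMARY) == PRIMARY_A_FLIP_DONE + 0*3 + 1
 *                                       == PRIMARY_B_FLIP_DONE
 * SKL_FLIP_EVENT(PIPE_C, PLANE_SPRITE0) == PRIMARY_A_FLIP_DONE + 1*3 + 2
 *                                       == SPRITE_C_FLIP_DONE
 * REG_50080(PIPE_B, PLANE_SPRITE0)      == _MMIO(0x50098)
 * REG_50080_TO_PIPE(0x50098)            == PIPE_B
 * REG_50080_TO_PLANE(0x50098)           == PLANE_SPRITE0
 */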
#define GFX_MODE_BIT_SET_IN_MASK(val, bit) \
((((bit) & 0xffff0000) == 0) && !!((val) & (((bit) << 16))))
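GFX_MODE and similar registers use the masked-write convention: the upper 16 bits of a write select which of the lower 16 bits take effect. The macro above only asks whether the bit is selected in the mask half:

/*
 * GFX_MODE_BIT_SET_IN_MASK((BIT(3) << 16) | BIT(3), BIT(3)) -> true
 * GFX_MODE_BIT_SET_IN_MASK(BIT(3), BIT(3))                  -> false
 *   (value bit alone, mask half empty: the write would not latch)
 */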
......@@ -95,4 +126,7 @@
#define RING_GFX_MODE(base) _MMIO((base) + 0x29c)
#define VF_GUARDBAND _MMIO(0x83a4)
/* define the effective range of MCHBAR register on Sandybridge+ */
#define MCHBAR_MIRROR_REG_BASE _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x4000)
#endif
......@@ -850,13 +850,13 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
}
void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
unsigned long engine_mask)
intel_engine_mask_t engine_mask)
{
struct intel_vgpu_submission *s = &vgpu->submission;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct intel_engine_cs *engine;
struct intel_vgpu_workload *pos, *n;
unsigned int tmp;
intel_engine_mask_t tmp;
/* free the unsubmitted workloads in the queues. */
for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
......@@ -1149,7 +1149,7 @@ void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
*
*/
void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
unsigned long engine_mask)
intel_engine_mask_t engine_mask)
{
struct intel_vgpu_submission *s = &vgpu->submission;
......@@ -1239,7 +1239,7 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
*
*/
int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
unsigned long engine_mask,
intel_engine_mask_t engine_mask,
unsigned int interface)
{
struct intel_vgpu_submission *s = &vgpu->submission;
......
......@@ -142,12 +142,12 @@ void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu);
int intel_vgpu_setup_submission(struct intel_vgpu *vgpu);
void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
unsigned long engine_mask);
intel_engine_mask_t engine_mask);
void intel_vgpu_clean_submission(struct intel_vgpu *vgpu);
int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
unsigned long engine_mask,
intel_engine_mask_t engine_mask,
unsigned int interface);
extern const struct intel_vgpu_submission_ops
......@@ -160,6 +160,6 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload);
void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
unsigned long engine_mask);
intel_engine_mask_t engine_mask);
#endif
......@@ -526,11 +526,11 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
* GPU engines. For FLR, engine_mask is ignored.
*/
void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
unsigned int engine_mask)
intel_engine_mask_t engine_mask)
{
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
unsigned int resetting_eng = dmlr ? ALL_ENGINES : engine_mask;
intel_engine_mask_t resetting_eng = dmlr ? ALL_ENGINES : engine_mask;
gvt_dbg_core("------------------------------------------\n");
gvt_dbg_core("resseting vgpu%d, dmlr %d, engine_mask %08x\n",
......
......@@ -26,14 +26,21 @@
*
*/
#include <linux/sort.h>
#include <linux/sched/mm.h>
#include <linux/sort.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_fourcc.h>
#include "intel_drv.h"
#include "intel_guc_submission.h"
#include "i915_reset.h"
#include "intel_dp.h"
#include "intel_drv.h"
#include "intel_fbc.h"
#include "intel_guc_submission.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
#include "intel_pm.h"
#include "intel_psr.h"
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
......@@ -826,11 +833,11 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
} else if (!HAS_PCH_SPLIT(dev_priv)) {
seq_printf(m, "Interrupt enable: %08x\n",
I915_READ(IER));
I915_READ(GEN2_IER));
seq_printf(m, "Interrupt identity: %08x\n",
I915_READ(IIR));
I915_READ(GEN2_IIR));
seq_printf(m, "Interrupt mask: %08x\n",
I915_READ(IMR));
I915_READ(GEN2_IMR));
for_each_pipe(dev_priv, pipe)
seq_printf(m, "Pipe %c stat: %08x\n",
pipe_name(pipe),
......@@ -2087,8 +2094,8 @@ static int i915_llc(struct seq_file *m, void *data)
const bool edram = INTEL_GEN(dev_priv) > 8;
seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
intel_uncore_edram_size(dev_priv)/1024/1024);
seq_printf(m, "%s: %uMB\n", edram ? "eDRAM" : "eLLC",
dev_priv->edram_size_mb);
return 0;
}
......@@ -2245,7 +2252,7 @@ static int i915_guc_stage_pool(struct seq_file *m, void *data)
const struct intel_guc *guc = &dev_priv->guc;
struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
struct intel_guc_client *client = guc->execbuf_client;
unsigned int tmp;
intel_engine_mask_t tmp;
int index;
if (!USES_GUC_SUBMISSION(dev_priv))
......@@ -4814,6 +4821,8 @@ static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
yesno(crtc_state->dsc_params.compression_enable));
seq_printf(m, "DSC_Sink_Support: %s\n",
yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
seq_printf(m, "Force_DSC_Enable: %s\n",
yesno(intel_dp->force_dsc_en));
if (!intel_dp_is_edp(intel_dp))
seq_printf(m, "FEC_Sink_Support: %s\n",
yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
......
......@@ -48,12 +48,19 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "i915_pmu.h"
#include "i915_reset.h"
#include "i915_query.h"
#include "i915_reset.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_audio.h"
#include "intel_cdclk.h"
#include "intel_csr.h"
#include "intel_dp.h"
#include "intel_drv.h"
#include "intel_fbdev.h"
#include "intel_pm.h"
#include "intel_sprite.h"
#include "intel_uc.h"
#include "intel_workarounds.h"
......@@ -868,10 +875,13 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv)
if (i915_inject_load_failure())
return -ENODEV;
intel_device_info_subplatform_init(dev_priv);
intel_uncore_init_early(&dev_priv->uncore);
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->gpu_error.lock);
mutex_init(&dev_priv->backlight_lock);
spin_lock_init(&dev_priv->uncore.lock);
mutex_init(&dev_priv->sb_lock);
mutex_init(&dev_priv->av_mutex);
......@@ -954,7 +964,7 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
if (i915_get_bridge_dev(dev_priv))
return -EIO;
ret = intel_uncore_init(&dev_priv->uncore);
ret = intel_uncore_init_mmio(&dev_priv->uncore);
if (ret < 0)
goto err_bridge;
......@@ -963,7 +973,7 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
intel_device_info_init_mmio(dev_priv);
intel_uncore_prune(&dev_priv->uncore);
intel_uncore_prune_mmio_domains(&dev_priv->uncore);
intel_uc_init_mmio(dev_priv);
......@@ -977,7 +987,7 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
err_uncore:
intel_teardown_mchbar(dev_priv);
intel_uncore_fini(&dev_priv->uncore);
intel_uncore_fini_mmio(&dev_priv->uncore);
err_bridge:
pci_dev_put(dev_priv->bridge_dev);
......@@ -991,7 +1001,7 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
{
intel_teardown_mchbar(dev_priv);
intel_uncore_fini(&dev_priv->uncore);
intel_uncore_fini_mmio(&dev_priv->uncore);
pci_dev_put(dev_priv->bridge_dev);
}
......@@ -1441,6 +1451,45 @@ intel_get_dram_info(struct drm_i915_private *dev_priv)
dram_info->ranks, yesno(dram_info->is_16gb_dimm));
}
static u32 gen9_edram_size_mb(struct drm_i915_private *dev_priv, u32 cap)
{
const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
const unsigned int sets[4] = { 1, 1, 2, 2 };
return EDRAM_NUM_BANKS(cap) *
ways[EDRAM_WAYS_IDX(cap)] *
sets[EDRAM_SETS_IDX(cap)];
}
static void edram_detect(struct drm_i915_private *dev_priv)
{
u32 edram_cap = 0;
if (!(IS_HASWELL(dev_priv) ||
IS_BROADWELL(dev_priv) ||
INTEL_GEN(dev_priv) >= 9))
return;
edram_cap = __raw_uncore_read32(&dev_priv->uncore, HSW_EDRAM_CAP);
/* NB: We can't write IDICR yet because we don't have gt funcs set up */
if (!(edram_cap & EDRAM_ENABLED))
return;
/*
* The needed capability bits for size calculation are not there with
* pre gen9 so return 128MB always.
*/
if (INTEL_GEN(dev_priv) < 9)
dev_priv->edram_size_mb = 128;
else
dev_priv->edram_size_mb =
gen9_edram_size_mb(dev_priv, edram_cap);
DRM_INFO("Found %uMB of eDRAM\n", dev_priv->edram_size_mb);
}
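A worked decode of the above, with cap field values chosen purely for illustration:

/*
 * EDRAM_NUM_BANKS(cap) == 4, EDRAM_WAYS_IDX(cap) == 2,
 * EDRAM_SETS_IDX(cap) == 3
 *   => size = 4 * ways[2] * sets[3] = 4 * 12 * 2 = 96 MB
 * Any pre-gen9 part with EDRAM_ENABLED set reports a flat 128 MB.
 */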
/**
* i915_driver_init_hw - setup state requiring device access
* @dev_priv: device private
......@@ -1483,6 +1532,9 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
intel_sanitize_options(dev_priv);
/* needs to be done before ggtt probe */
edram_detect(dev_priv);
i915_perf_init(dev_priv);
ret = i915_ggtt_probe_hw(dev_priv);
......@@ -1718,10 +1770,12 @@ static void i915_welcome_messages(struct drm_i915_private *dev_priv)
if (drm_debug & DRM_UT_DRIVER) {
struct drm_printer p = drm_debug_printer("i915 device info:");
drm_printf(&p, "pciid=0x%04x rev=0x%02x platform=%s gen=%i\n",
drm_printf(&p, "pciid=0x%04x rev=0x%02x platform=%s (subplatform=0x%x) gen=%i\n",
INTEL_DEVID(dev_priv),
INTEL_REVID(dev_priv),
intel_platform_name(INTEL_INFO(dev_priv)->platform),
intel_subplatform(RUNTIME_INFO(dev_priv),
INTEL_INFO(dev_priv)->platform),
INTEL_GEN(dev_priv));
intel_device_info_dump_flags(INTEL_INFO(dev_priv), &p);
......@@ -1764,8 +1818,6 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
memcpy(device_info, match_info, sizeof(*device_info));
RUNTIME_INFO(i915)->device_id = pdev->device;
BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
BITS_PER_TYPE(device_info->platform_mask));
BUG_ON(device_info->gen > BITS_PER_TYPE(device_info->gen_mask));
return i915;
......@@ -1862,6 +1914,13 @@ void i915_driver_unload(struct drm_device *dev)
i915_driver_unregister(dev_priv);
/*
* After unregistering the device to prevent any new users, cancel
* all in-flight requests so that we can quickly unbind the active
* resources.
*/
i915_gem_set_wedged(dev_priv);
/* Flush any external code that still may be under the RCU lock */
synchronize_rcu();
......
......@@ -66,13 +66,14 @@
#include "intel_device_info.h"
#include "intel_display.h"
#include "intel_dpll_mgr.h"
#include "intel_frontbuffer.h"
#include "intel_lrc.h"
#include "intel_opregion.h"
#include "intel_ringbuffer.h"
#include "intel_uc.h"
#include "intel_uncore.h"
#include "intel_wopcm.h"
#include "intel_workarounds.h"
#include "intel_uc.h"
#include "i915_gem.h"
#include "i915_gem_context.h"
......@@ -92,8 +93,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
#define DRIVER_DATE "20190328"
#define DRIVER_TIMESTAMP 1553776914
#define DRIVER_DATE "20190417"
#define DRIVER_TIMESTAMP 1555492067
/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
* WARN_ON()) for hw state sanity checks to check for unexpected conditions
......@@ -282,7 +283,8 @@ struct drm_i915_display_funcs {
void (*get_cdclk)(struct drm_i915_private *dev_priv,
struct intel_cdclk_state *cdclk_state);
void (*set_cdclk)(struct drm_i915_private *dev_priv,
const struct intel_cdclk_state *cdclk_state);
const struct intel_cdclk_state *cdclk_state,
enum pipe pipe);
int (*get_fifo_size)(struct drm_i915_private *dev_priv,
enum i9xx_plane_id i9xx_plane);
int (*compute_pipe_wm)(struct intel_crtc_state *cstate);
......@@ -325,6 +327,7 @@ struct drm_i915_display_funcs {
/* display clock increase/decrease */
/* pll clock increase/decrease */
int (*color_check)(struct intel_crtc_state *crtc_state);
/*
* Program double buffered color management registers during
* vblank evasion. The registers should then latch during the
......@@ -373,14 +376,6 @@ enum i915_cache_level {
#define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */
enum fb_op_origin {
ORIGIN_GTT,
ORIGIN_CPU,
ORIGIN_CS,
ORIGIN_FLIP,
ORIGIN_DIRTYFB,
};
struct intel_fbc {
/* This is always the inner lock when overlapping with struct_mutex and
* it's the outer lock when overlapping with stolen_lock. */
......@@ -1628,6 +1623,8 @@ struct drm_i915_private {
struct intel_cdclk_state actual;
/* The current hardware cdclk state */
struct intel_cdclk_state hw;
int force_min_cdclk;
} cdclk;
/**
......@@ -1706,8 +1703,11 @@ struct drm_i915_private {
struct intel_l3_parity l3_parity;
/* Cannot be determined by PCIID. You must always read a register. */
u32 edram_cap;
/*
* edram size in MB.
* Cannot be determined by PCIID. You must always read a register.
*/
u32 edram_size_mb;
/*
* Protects RPS/RC6 register access and PCU communication.
......@@ -1747,6 +1747,7 @@ struct drm_i915_private {
*
*/
struct mutex av_mutex;
int audio_power_refcount;
struct {
struct mutex mutex;
......@@ -1994,7 +1995,6 @@ struct drm_i915_private {
/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
struct {
void (*resume)(struct drm_i915_private *);
void (*cleanup_engine)(struct intel_engine_cs *engine);
struct i915_gt_timelines {
......@@ -2298,7 +2298,69 @@ static inline unsigned int i915_sg_segment_size(void)
#define IS_REVID(p, since, until) \
(INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))
#define IS_PLATFORM(dev_priv, p) (INTEL_INFO(dev_priv)->platform_mask & BIT(p))
static __always_inline unsigned int
__platform_mask_index(const struct intel_runtime_info *info,
enum intel_platform p)
{
const unsigned int pbits =
BITS_PER_TYPE(info->platform_mask[0]) - INTEL_SUBPLATFORM_BITS;
/* Expand the platform_mask array if this fails. */
BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
pbits * ARRAY_SIZE(info->platform_mask));
return p / pbits;
}
static __always_inline unsigned int
__platform_mask_bit(const struct intel_runtime_info *info,
enum intel_platform p)
{
const unsigned int pbits =
BITS_PER_TYPE(info->platform_mask[0]) - INTEL_SUBPLATFORM_BITS;
return p % pbits + INTEL_SUBPLATFORM_BITS;
}
static inline u32
intel_subplatform(const struct intel_runtime_info *info, enum intel_platform p)
{
const unsigned int pi = __platform_mask_index(info, p);
return info->platform_mask[pi] & INTEL_SUBPLATFORM_BITS;
}
static __always_inline bool
IS_PLATFORM(const struct drm_i915_private *i915, enum intel_platform p)
{
const struct intel_runtime_info *info = RUNTIME_INFO(i915);
const unsigned int pi = __platform_mask_index(info, p);
const unsigned int pb = __platform_mask_bit(info, p);
BUILD_BUG_ON(!__builtin_constant_p(p));
return info->platform_mask[pi] & BIT(pb);
}
static __always_inline bool
IS_SUBPLATFORM(const struct drm_i915_private *i915,
enum intel_platform p, unsigned int s)
{
const struct intel_runtime_info *info = RUNTIME_INFO(i915);
const unsigned int pi = __platform_mask_index(info, p);
const unsigned int pb = __platform_mask_bit(info, p);
const unsigned int msb = BITS_PER_TYPE(info->platform_mask[0]) - 1;
const u32 mask = info->platform_mask[pi];
BUILD_BUG_ON(!__builtin_constant_p(p));
BUILD_BUG_ON(!__builtin_constant_p(s));
BUILD_BUG_ON((s) >= INTEL_SUBPLATFORM_BITS);
/* Shift and test on the MSB position so sign flag can be used. */
return ((mask << (msb - pb)) & (mask << (msb - s))) & BIT(msb);
}
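To make the packing concrete (a sketch assuming INTEL_SUBPLATFORM_BITS == 3 and u32 platform_mask entries, so each entry holds pbits == 29 platform bits above the 3 subplatform bits):

/*
 * For enum intel_platform p == 30:
 *   __platform_mask_index() -> 30 / 29 == 1
 *   __platform_mask_bit()   -> 30 % 29 + 3 == 4
 * so IS_PLATFORM() tests platform_mask[1] & BIT(4), and the low three
 * bits of that entry are free to carry the subplatform id.
 * IS_SUBPLATFORM() shifts both the platform bit and the subplatform
 * bit s up to the MSB and ANDs them: the result is nonzero only when
 * both are set, which the sign-position test picks up cheaply.
 */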
#define IS_MOBILE(dev_priv) (INTEL_INFO(dev_priv)->is_mobile)
#define IS_I830(dev_priv) IS_PLATFORM(dev_priv, INTEL_I830)
#define IS_I845G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I845G)
......@@ -2313,11 +2375,11 @@ static inline unsigned int i915_sg_segment_size(void)
#define IS_G45(dev_priv) IS_PLATFORM(dev_priv, INTEL_G45)
#define IS_GM45(dev_priv) IS_PLATFORM(dev_priv, INTEL_GM45)
#define IS_G4X(dev_priv) (IS_G45(dev_priv) || IS_GM45(dev_priv))
#define IS_PINEVIEW_G(dev_priv) (INTEL_DEVID(dev_priv) == 0xa001)
#define IS_PINEVIEW_M(dev_priv) (INTEL_DEVID(dev_priv) == 0xa011)
#define IS_PINEVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_PINEVIEW)
#define IS_G33(dev_priv) IS_PLATFORM(dev_priv, INTEL_G33)
#define IS_IRONLAKE_M(dev_priv) (INTEL_DEVID(dev_priv) == 0x0046)
#define IS_IRONLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_IRONLAKE)
#define IS_IRONLAKE_M(dev_priv) \
(IS_PLATFORM(dev_priv, INTEL_IRONLAKE) && IS_MOBILE(dev_priv))
#define IS_IVYBRIDGE(dev_priv) IS_PLATFORM(dev_priv, INTEL_IVYBRIDGE)
#define IS_IVB_GT1(dev_priv) (IS_IVYBRIDGE(dev_priv) && \
INTEL_INFO(dev_priv)->gt == 1)
......@@ -2333,46 +2395,34 @@ static inline unsigned int i915_sg_segment_size(void)
#define IS_CANNONLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_CANNONLAKE)
#define IS_ICELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ICELAKE)
#define IS_ELKHARTLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE)
#define IS_MOBILE(dev_priv) (INTEL_INFO(dev_priv)->is_mobile)
#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
#define IS_BDW_ULT(dev_priv) (IS_BROADWELL(dev_priv) && \
((INTEL_DEVID(dev_priv) & 0xf) == 0x6 || \
(INTEL_DEVID(dev_priv) & 0xf) == 0xb || \
(INTEL_DEVID(dev_priv) & 0xf) == 0xe))
/* ULX machines are also considered ULT. */
#define IS_BDW_ULX(dev_priv) (IS_BROADWELL(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0xf) == 0xe)
#define IS_BDW_ULT(dev_priv) \
IS_SUBPLATFORM(dev_priv, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULT)
#define IS_BDW_ULX(dev_priv) \
IS_SUBPLATFORM(dev_priv, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULX)
#define IS_BDW_GT3(dev_priv) (IS_BROADWELL(dev_priv) && \
INTEL_INFO(dev_priv)->gt == 3)
#define IS_HSW_ULT(dev_priv) (IS_HASWELL(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0xFF00) == 0x0A00)
#define IS_HSW_ULT(dev_priv) \
IS_SUBPLATFORM(dev_priv, INTEL_HASWELL, INTEL_SUBPLATFORM_ULT)
#define IS_HSW_GT3(dev_priv) (IS_HASWELL(dev_priv) && \
INTEL_INFO(dev_priv)->gt == 3)
#define IS_HSW_GT1(dev_priv) (IS_HASWELL(dev_priv) && \
INTEL_INFO(dev_priv)->gt == 1)
/* ULX machines are also considered ULT. */
#define IS_HSW_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x0A0E || \
INTEL_DEVID(dev_priv) == 0x0A1E)
#define IS_SKL_ULT(dev_priv) (INTEL_DEVID(dev_priv) == 0x1906 || \
INTEL_DEVID(dev_priv) == 0x1913 || \
INTEL_DEVID(dev_priv) == 0x1916 || \
INTEL_DEVID(dev_priv) == 0x1921 || \
INTEL_DEVID(dev_priv) == 0x1926)
#define IS_SKL_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x190E || \
INTEL_DEVID(dev_priv) == 0x1915 || \
INTEL_DEVID(dev_priv) == 0x191E)
#define IS_KBL_ULT(dev_priv) (INTEL_DEVID(dev_priv) == 0x5906 || \
INTEL_DEVID(dev_priv) == 0x5913 || \
INTEL_DEVID(dev_priv) == 0x5916 || \
INTEL_DEVID(dev_priv) == 0x5921 || \
INTEL_DEVID(dev_priv) == 0x5926)
#define IS_KBL_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x590E || \
INTEL_DEVID(dev_priv) == 0x5915 || \
INTEL_DEVID(dev_priv) == 0x591E)
#define IS_AML_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x591C || \
INTEL_DEVID(dev_priv) == 0x87C0 || \
INTEL_DEVID(dev_priv) == 0x87CA)
#define IS_HSW_ULX(dev_priv) \
IS_SUBPLATFORM(dev_priv, INTEL_HASWELL, INTEL_SUBPLATFORM_ULX)
#define IS_SKL_ULT(dev_priv) \
IS_SUBPLATFORM(dev_priv, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULT)
#define IS_SKL_ULX(dev_priv) \
IS_SUBPLATFORM(dev_priv, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULX)
#define IS_KBL_ULT(dev_priv) \
IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULT)
#define IS_KBL_ULX(dev_priv) \
IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULX)
#define IS_AML_ULX(dev_priv) \
(IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_AML) || \
IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_AML))
#define IS_SKL_GT2(dev_priv) (IS_SKYLAKE(dev_priv) && \
INTEL_INFO(dev_priv)->gt == 2)
#define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \
......@@ -2383,16 +2433,16 @@ static inline unsigned int i915_sg_segment_size(void)
INTEL_INFO(dev_priv)->gt == 2)
#define IS_KBL_GT3(dev_priv) (IS_KABYLAKE(dev_priv) && \
INTEL_INFO(dev_priv)->gt == 3)
#define IS_CFL_ULT(dev_priv) (IS_COFFEELAKE(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0x00F0) == 0x00A0)
#define IS_CFL_ULT(dev_priv) \
IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULT)
#define IS_CFL_GT2(dev_priv) (IS_COFFEELAKE(dev_priv) && \
INTEL_INFO(dev_priv)->gt == 2)
#define IS_CFL_GT3(dev_priv) (IS_COFFEELAKE(dev_priv) && \
INTEL_INFO(dev_priv)->gt == 3)
#define IS_CNL_WITH_PORT_F(dev_priv) (IS_CANNONLAKE(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0x0004) == 0x0004)
#define IS_ICL_WITH_PORT_F(dev_priv) (IS_ICELAKE(dev_priv) && \
INTEL_DEVID(dev_priv) != 0x8A51)
#define IS_CNL_WITH_PORT_F(dev_priv) \
IS_SUBPLATFORM(dev_priv, INTEL_CANNONLAKE, INTEL_SUBPLATFORM_PORTF)
#define IS_ICL_WITH_PORT_F(dev_priv) \
IS_SUBPLATFORM(dev_priv, INTEL_ICELAKE, INTEL_SUBPLATFORM_PORTF)
#define IS_ALPHA_SUPPORT(intel_info) ((intel_info)->is_alpha_support)
......@@ -2451,7 +2501,6 @@ static inline unsigned int i915_sg_segment_size(void)
#define IS_GEN9_LP(dev_priv) (IS_GEN(dev_priv, 9) && IS_LP(dev_priv))
#define IS_GEN9_BC(dev_priv) (IS_GEN(dev_priv, 9) && !IS_LP(dev_priv))
#define ALL_ENGINES (~0u)
#define HAS_ENGINE(dev_priv, id) (INTEL_INFO(dev_priv)->engine_mask & BIT(id))
#define ENGINE_INSTANCES_MASK(dev_priv, first, count) ({ \
......@@ -2467,7 +2516,7 @@ static inline unsigned int i915_sg_segment_size(void)
#define HAS_LLC(dev_priv) (INTEL_INFO(dev_priv)->has_llc)
#define HAS_SNOOP(dev_priv) (INTEL_INFO(dev_priv)->has_snoop)
#define HAS_EDRAM(dev_priv) (!!((dev_priv)->edram_cap & EDRAM_ENABLED))
#define HAS_EDRAM(dev_priv) ((dev_priv)->edram_size_mb)
#define HAS_WT(dev_priv) ((IS_HASWELL(dev_priv) || \
IS_BROADWELL(dev_priv)) && HAS_EDRAM(dev_priv))
......@@ -2860,6 +2909,7 @@ static inline void i915_gem_drain_workqueue(struct drm_i915_private *i915)
int pass = 2;
do {
rcu_barrier();
i915_gem_drain_freed_objects(i915);
drain_workqueue(i915->wq);
} while (--pass);
}
......
......@@ -50,6 +50,7 @@
#include "intel_drv.h"
#include "intel_frontbuffer.h"
#include "intel_mocs.h"
#include "intel_pm.h"
#include "intel_workarounds.h"
static void i915_gem_flush_free_objects(struct drm_i915_private *i915);
......@@ -308,7 +309,7 @@ static void __start_cpu_write(struct drm_i915_gem_object *obj)
obj->cache_dirty = true;
}
static void
void
__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
struct sg_table *pages,
bool needs_clflush)
......@@ -2202,7 +2203,6 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
struct page *page;
__i915_gem_object_release_shmem(obj, pages, true);
i915_gem_gtt_finish_pages(obj, pages);
if (i915_gem_object_needs_bit17_swizzle(obj))
......@@ -2789,7 +2789,11 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
u64 remain, offset;
unsigned int pg;
/* Before we instantiate/pin the backing store for our use, we
/* Caller already validated user args */
GEM_BUG_ON(!access_ok(user_data, arg->size));
/*
* Before we instantiate/pin the backing store for our use, we
* can prepopulate the shmemfs filp efficiently using a write into
* the pagecache. We avoid the penalty of instantiating all the
* pages, important if the user is just writing to a few and never
......@@ -2803,7 +2807,8 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
if (obj->mm.madv != I915_MADV_WILLNEED)
return -EFAULT;
/* Before the pages are instantiated the object is treated as being
/*
* Before the pages are instantiated the object is treated as being
* in the CPU domain. The pages will be clflushed as required before
* use, and we can freely write into the pages directly. If userspace
* races pwrite with any other operation; corruption will ensue -
......@@ -2819,20 +2824,32 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
struct page *page;
void *data, *vaddr;
int err;
char c;
len = PAGE_SIZE - pg;
if (len > remain)
len = remain;
/* Prefault the user page to reduce potential recursion */
err = __get_user(c, user_data);
if (err)
return err;
err = __get_user(c, user_data + len - 1);
if (err)
return err;
err = pagecache_write_begin(obj->base.filp, mapping,
offset, len, 0,
&page, &data);
if (err < 0)
return err;
vaddr = kmap(page);
unwritten = copy_from_user(vaddr + pg, user_data, len);
kunmap(page);
vaddr = kmap_atomic(page);
unwritten = __copy_from_user_inatomic(vaddr + pg,
user_data,
len);
kunmap_atomic(vaddr);
err = pagecache_write_end(obj->base.filp, mapping,
offset, len, len - unwritten,
......@@ -2840,8 +2857,9 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
if (err < 0)
return err;
/* We don't handle -EFAULT, leave it to the caller to check */
if (unwritten)
return -EFAULT;
return -ENODEV;
remain -= len;
user_data += len;
......@@ -3824,16 +3842,16 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
return vma;
}
static __always_inline unsigned int __busy_read_flag(unsigned int id)
static __always_inline u32 __busy_read_flag(u8 id)
{
if (id == I915_ENGINE_CLASS_INVALID)
return 0xffff0000;
if (id == (u8)I915_ENGINE_CLASS_INVALID)
return 0xffff0000u;
GEM_BUG_ON(id >= 16);
return 0x10000 << id;
return 0x10000u << id;
}
static __always_inline unsigned int __busy_write_id(unsigned int id)
static __always_inline u32 __busy_write_id(u8 id)
{
/*
* The uABI guarantees an active writer is also amongst the read
......@@ -3844,15 +3862,14 @@ static __always_inline unsigned int __busy_write_id(unsigned int id)
* last_read - hence we always set both read and write busy for
* last_write.
*/
if (id == I915_ENGINE_CLASS_INVALID)
return 0xffffffff;
if (id == (u8)I915_ENGINE_CLASS_INVALID)
return 0xffffffffu;
return (id + 1) | __busy_read_flag(id);
}
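Concretely, the two helpers above encode as follows (values follow directly from the shifts):

/*
 * uabi class 0:  read flag 0x00010000, write id 0x00010001
 * uabi class 2:  read flag 0x00040000, write id 0x00040003
 * (u8)I915_ENGINE_CLASS_INVALID: 0xffff0000 / 0xffffffff, i.e. an
 * unknown engine saturates every read flag and the whole write id.
 */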
static __always_inline unsigned int
__busy_set_if_active(const struct dma_fence *fence,
unsigned int (*flag)(unsigned int id))
__busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u8 id))
{
const struct i915_request *rq;
......@@ -3872,6 +3889,8 @@ __busy_set_if_active(const struct dma_fence *fence,
if (i915_request_completed(rq))
return 0;
/* Beware type-expansion follies! */
BUILD_BUG_ON(!typecheck(u8, rq->engine->uabi_class));
return flag(rq->engine->uabi_class);
}
......@@ -4494,7 +4513,7 @@ void i915_gem_resume(struct drm_i915_private *i915)
* guarantee that the context image is complete. So let's just reset
* it and start again.
*/
i915->gt.resume(i915);
intel_gt_resume(i915);
if (i915_gem_init_hw(i915))
goto err_wedged;
......@@ -4834,13 +4853,10 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);
if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
dev_priv->gt.resume = intel_lr_context_resume;
if (HAS_LOGICAL_RING_CONTEXTS(dev_priv))
dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
} else {
dev_priv->gt.resume = intel_legacy_submission_resume;
else
dev_priv->gt.cleanup_engine = intel_engine_cleanup;
}
i915_timelines_init(dev_priv);
......
......@@ -73,8 +73,6 @@ struct drm_i915_private;
#define GEM_TRACE_DUMP_ON(expr) BUILD_BUG_ON_INVALID(expr)
#endif
#define I915_NUM_ENGINES 8
#define I915_GEM_IDLE_TIMEOUT (HZ / 5)
void i915_gem_park(struct drm_i915_private *i915);
......
......@@ -562,7 +562,7 @@ static void init_contexts(struct drm_i915_private *i915)
static bool needs_preempt_context(struct drm_i915_private *i915)
{
return HAS_LOGICAL_RING_PREEMPTION(i915);
return HAS_EXECLISTS(i915);
}
int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
......@@ -858,9 +858,9 @@ static void cb_retire(struct i915_active *base)
kfree(cb);
}
I915_SELFTEST_DECLARE(static unsigned long context_barrier_inject_fault);
I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault);
static int context_barrier_task(struct i915_gem_context *ctx,
unsigned long engines,
intel_engine_mask_t engines,
int (*emit)(struct i915_request *rq, void *data),
void (*task)(void *data),
void *data)
......@@ -922,7 +922,7 @@ static int context_barrier_task(struct i915_gem_context *ctx,
}
int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915,
unsigned long mask)
intel_engine_mask_t mask)
{
struct intel_engine_cs *engine;
......@@ -969,10 +969,10 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915,
return 0;
}
static int get_ppgtt(struct i915_gem_context *ctx,
static int get_ppgtt(struct drm_i915_file_private *file_priv,
struct i915_gem_context *ctx,
struct drm_i915_gem_context_param *args)
{
struct drm_i915_file_private *file_priv = ctx->file_priv;
struct i915_hw_ppgtt *ppgtt;
int ret;
......@@ -1028,6 +1028,7 @@ static int emit_ppgtt_update(struct i915_request *rq, void *data)
{
struct i915_hw_ppgtt *ppgtt = rq->gem_context->ppgtt;
struct intel_engine_cs *engine = rq->engine;
u32 base = engine->mmio_base;
u32 *cs;
int i;
......@@ -1040,9 +1041,9 @@ static int emit_ppgtt_update(struct i915_request *rq, void *data)
*cs++ = MI_LOAD_REGISTER_IMM(2);
*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, 0));
*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 0));
*cs++ = upper_32_bits(pd_daddr);
*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, 0));
*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 0));
*cs++ = lower_32_bits(pd_daddr);
*cs++ = MI_NOOP;
......@@ -1056,9 +1057,9 @@ static int emit_ppgtt_update(struct i915_request *rq, void *data)
for (i = GEN8_3LVL_PDPES; i--; ) {
const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, i));
*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i));
*cs++ = upper_32_bits(pd_daddr);
*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, i));
*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i));
*cs++ = lower_32_bits(pd_daddr);
}
*cs++ = MI_NOOP;
......@@ -1071,10 +1072,10 @@ static int emit_ppgtt_update(struct i915_request *rq, void *data)
return 0;
}
static int set_ppgtt(struct i915_gem_context *ctx,
static int set_ppgtt(struct drm_i915_file_private *file_priv,
struct i915_gem_context *ctx,
struct drm_i915_gem_context_param *args)
{
struct drm_i915_file_private *file_priv = ctx->file_priv;
struct i915_hw_ppgtt *ppgtt, *old;
int err;
......@@ -1166,7 +1167,7 @@ static int
gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
{
struct drm_i915_private *i915 = ce->engine->i915;
struct i915_request *rq, *prev;
struct i915_request *rq;
intel_wakeref_t wakeref;
int ret;
......@@ -1191,16 +1192,7 @@ gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
}
/* Queue this switch after all other activity by this context. */
prev = i915_active_request_raw(&ce->ring->timeline->last_request,
&i915->drm.struct_mutex);
if (prev && !i915_request_completed(prev)) {
ret = i915_request_await_dma_fence(rq, &prev->fence);
if (ret < 0)
goto out_add;
}
/* Order all following requests to be after. */
ret = i915_timeline_set_barrier(ce->ring->timeline, rq);
ret = i915_active_request_set(&ce->ring->timeline->last_request, rq);
if (ret)
goto out_add;
......@@ -1394,8 +1386,8 @@ static int set_sseu(struct i915_gem_context *ctx,
return -EINVAL;
engine = intel_engine_lookup_user(i915,
user_sseu.engine_class,
user_sseu.engine_instance);
user_sseu.engine.engine_class,
user_sseu.engine.engine_instance);
if (!engine)
return -EINVAL;
......@@ -1416,7 +1408,8 @@ static int set_sseu(struct i915_gem_context *ctx,
return 0;
}
static int ctx_setparam(struct i915_gem_context *ctx,
static int ctx_setparam(struct drm_i915_file_private *fpriv,
struct i915_gem_context *ctx,
struct drm_i915_gem_context_param *args)
{
int ret = 0;
......@@ -1485,7 +1478,7 @@ static int ctx_setparam(struct i915_gem_context *ctx,
break;
case I915_CONTEXT_PARAM_VM:
ret = set_ppgtt(ctx, args);
ret = set_ppgtt(fpriv, ctx, args);
break;
case I915_CONTEXT_PARAM_BAN_PERIOD:
......@@ -1513,7 +1506,7 @@ static int create_setparam(struct i915_user_extension __user *ext, void *data)
if (local.param.ctx_id)
return -EINVAL;
return ctx_setparam(arg->ctx, &local.param);
return ctx_setparam(arg->fpriv, arg->ctx, &local.param);
}
static const i915_user_extension_fn create_extensions[] = {
......@@ -1633,8 +1626,8 @@ static int get_sseu(struct i915_gem_context *ctx,
return -EINVAL;
engine = intel_engine_lookup_user(ctx->i915,
user_sseu.engine_class,
user_sseu.engine_instance);
user_sseu.engine.engine_class,
user_sseu.engine.engine_instance);
if (!engine)
return -EINVAL;
......@@ -1712,7 +1705,7 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
break;
case I915_CONTEXT_PARAM_VM:
ret = get_ppgtt(ctx, args);
ret = get_ppgtt(file_priv, ctx, args);
break;
case I915_CONTEXT_PARAM_BAN_PERIOD:
......@@ -1737,7 +1730,7 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
if (!ctx)
return -ENOENT;
ret = ctx_setparam(ctx, args);
ret = ctx_setparam(file_priv, ctx, args);
i915_gem_context_put(ctx);
return ret;
......
......@@ -142,7 +142,7 @@ void i915_gem_context_close(struct drm_file *file);
int i915_switch_context(struct i915_request *rq);
int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915,
unsigned long engine_mask);
intel_engine_mask_t engine_mask);
void i915_gem_context_release(struct kref *ctx_ref);
struct i915_gem_context *
......
......@@ -1228,7 +1228,7 @@ static int gen8_init_scratch(struct i915_address_space *vm)
vm->scratch_pte =
gen8_pte_encode(vm->scratch_page.daddr,
I915_CACHE_LLC,
PTE_READ_ONLY);
vm->has_read_only);
vm->scratch_pt = alloc_pt(vm);
if (IS_ERR(vm->scratch_pt)) {
......@@ -1548,8 +1548,13 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
ppgtt_init(i915, ppgtt);
/* From bdw, there is support for read-only pages in the PPGTT. */
ppgtt->vm.has_read_only = true;
/*
* From bdw, there is hw support for read-only pages in the PPGTT.
*
* Gen11 has HSDES#:1807136187 unresolved. Disable ro support
* for now.
*/
ppgtt->vm.has_read_only = INTEL_GEN(i915) != 11;
/* There are only a few exceptions for gen >= 6: chv and bxt.
* And we are not sure about the latter so play safe for now.
......
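The workaround above is easiest to read as a capability gate: read-only PTEs are requested only when the address space advertises support, and gen11 now advertises none. A minimal sketch of that gating, with a hypothetical helper name and assuming PTE_READ_ONLY is the bdw+ read-only PTE flag:

/* Illustrative only: request a read-only PTE only when the VM
 * supports it; on gen11 has_read_only is now always false
 * (HSDES#1807136187), so the flag is never set.
 */
static u32 ro_pte_flags(const struct i915_address_space *vm, bool want_ro)
{
	return (want_ro && vm->has_read_only) ? PTE_READ_ONLY : 0;
}

This is also why the scratch-PTE hunk earlier can pass vm->has_read_only straight through as the flags argument.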
......@@ -390,7 +390,7 @@ struct i915_hw_ppgtt {
struct i915_address_space vm;
struct kref ref;
unsigned long pd_dirty_engines;
intel_engine_mask_t pd_dirty_engines;
union {
struct i915_pml4 pml4; /* GEN8+ & 48b PPGTT */
struct i915_page_directory_pointer pdp; /* GEN8+ */
......
......@@ -502,4 +502,8 @@ void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
unsigned int cache_level);
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);
void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
struct sg_table *pages,
bool needs_clflush);
#endif
......@@ -673,9 +673,7 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
if (!pages)
return;
if (obj->mm.madv != I915_MADV_WILLNEED)
obj->mm.dirty = false;
__i915_gem_object_release_shmem(obj, pages, true);
i915_gem_gtt_finish_pages(obj, pages);
for_each_sgt_page(page, sgt_iter, pages) {
......
......@@ -17,6 +17,33 @@
static LIST_HEAD(globals);
static atomic_t active;
static atomic_t epoch;
static struct park_work {
struct rcu_work work;
int epoch;
} park;
static void i915_globals_shrink(void)
{
struct i915_global *global;
/*
* kmem_cache_shrink() discards empty slabs and reorders partially
* filled slabs to prioritise allocating from the mostly full slabs,
* with the aim of reducing fragmentation.
*/
list_for_each_entry(global, &globals, link)
global->shrink();
}
static void __i915_globals_park(struct work_struct *work)
{
/* Confirm nothing woke up in the last grace period */
if (park.epoch == atomic_read(&epoch))
i915_globals_shrink();
}
void __init i915_global_register(struct i915_global *global)
{
GEM_BUG_ON(!global->shrink);
......@@ -57,44 +84,12 @@ int __init i915_globals_init(void)
}
}
INIT_RCU_WORK(&park.work, __i915_globals_park);
return 0;
}
static void i915_globals_shrink(void)
{
struct i915_global *global;
/*
* kmem_cache_shrink() discards empty slabs and reorders partially
* filled slabs to prioritise allocating from the mostly full slabs,
* with the aim of reducing fragmentation.
*/
list_for_each_entry(global, &globals, link)
global->shrink();
}
static atomic_t active;
static atomic_t epoch;
struct park_work {
struct rcu_work work;
int epoch;
};
static void __i915_globals_park(struct work_struct *work)
{
struct park_work *wrk = container_of(work, typeof(*wrk), work.work);
/* Confirm nothing woke up in the last grace period */
if (wrk->epoch == atomic_read(&epoch))
i915_globals_shrink();
kfree(wrk);
}
void i915_globals_park(void)
{
struct park_work *wrk;
/*
* Defer shrinking the global slab caches (and other work) until
* after a RCU grace period has completed with no activity. This
......@@ -107,13 +102,8 @@ void i915_globals_park(void)
if (!atomic_dec_and_test(&active))
return;
wrk = kmalloc(sizeof(*wrk), GFP_KERNEL);
if (!wrk)
return;
wrk->epoch = atomic_inc_return(&epoch);
INIT_RCU_WORK(&wrk->work, __i915_globals_park);
queue_rcu_work(system_wq, &wrk->work);
park.epoch = atomic_inc_return(&epoch);
queue_rcu_work(system_wq, &park.work);
}
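Two properties make the new scheme above safe: queue_rcu_work() defers the work until an RCU grace period has elapsed, and because the work struct is now static there is at most one park in flight and no allocation that can fail. A standalone sketch of the pattern, using hypothetical demo_* names:

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

static atomic_t demo_epoch;

static struct {
	struct rcu_work work;
	int epoch;
} demo;

static void demo_park_fn(struct work_struct *work)
{
	/* Runs one grace period after queueing; shrink only if the
	 * epoch is unchanged, i.e. nothing woke up in the meantime.
	 */
	if (demo.epoch == atomic_read(&demo_epoch))
		pr_info("still idle: shrink slab caches here\n");
}

static void demo_setup(void)
{
	INIT_RCU_WORK(&demo.work, demo_park_fn);
}

static void demo_on_idle(void)
{
	demo.epoch = atomic_inc_return(&demo_epoch);
	queue_rcu_work(system_wq, &demo.work);
}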
void i915_globals_unpark(void)
......@@ -125,8 +115,8 @@ void i915_globals_unpark(void)
void __exit i915_globals_exit(void)
{
/* Flush any residual park_work */
rcu_barrier();
flush_scheduled_work();
atomic_inc(&epoch);
flush_rcu_work(&park.work);
__i915_globals_cleanup();
......
......@@ -677,6 +677,9 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
err_printf(m, "Reset count: %u\n", error->reset_count);
err_printf(m, "Suspend count: %u\n", error->suspend_count);
err_printf(m, "Platform: %s\n", intel_platform_name(error->device_info.platform));
err_printf(m, "Subplatform: 0x%x\n",
intel_subplatform(&error->runtime_info,
error->device_info.platform));
err_print_pciid(m, m->i915);
err_printf(m, "IOMMU enabled?: %d\n", error->iommu);
......@@ -1093,7 +1096,7 @@ static u32 capture_error_bo(struct drm_i915_error_buffer *err,
* It's only a small step better than a random number in its current form.
*/
static u32 i915_error_generate_code(struct i915_gpu_state *error,
unsigned long engine_mask)
intel_engine_mask_t engine_mask)
{
/*
* IPEHR would be an ideal way to detect errors, as it's the gross
......@@ -1212,20 +1215,23 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
ee->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine));
if (IS_GEN(dev_priv, 6))
if (IS_GEN(dev_priv, 6)) {
ee->vm_info.pp_dir_base =
ENGINE_READ(engine, RING_PP_DIR_BASE_READ);
else if (IS_GEN(dev_priv, 7))
} else if (IS_GEN(dev_priv, 7)) {
ee->vm_info.pp_dir_base =
ENGINE_READ(engine, RING_PP_DIR_BASE);
else if (INTEL_GEN(dev_priv) >= 8)
ENGINE_READ(engine, RING_PP_DIR_BASE);
} else if (INTEL_GEN(dev_priv) >= 8) {
u32 base = engine->mmio_base;
for (i = 0; i < 4; i++) {
ee->vm_info.pdp[i] =
I915_READ(GEN8_RING_PDP_UDW(engine, i));
I915_READ(GEN8_RING_PDP_UDW(base, i));
ee->vm_info.pdp[i] <<= 32;
ee->vm_info.pdp[i] |=
I915_READ(GEN8_RING_PDP_LDW(engine, i));
I915_READ(GEN8_RING_PDP_LDW(base, i));
}
}
}
}
......@@ -1629,16 +1635,17 @@ static void capture_reg_state(struct i915_gpu_state *error)
error->gtier[0] = I915_READ(GTIER);
error->ngtier = 1;
} else if (IS_GEN(dev_priv, 2)) {
error->ier = I915_READ16(IER);
error->ier = I915_READ16(GEN2_IER);
} else if (!IS_VALLEYVIEW(dev_priv)) {
error->ier = I915_READ(IER);
error->ier = I915_READ(GEN2_IER);
}
error->eir = I915_READ(EIR);
error->pgtbl_er = I915_READ(PGTBL_ER);
}
static const char *
error_msg(struct i915_gpu_state *error, unsigned long engines, const char *msg)
error_msg(struct i915_gpu_state *error,
intel_engine_mask_t engines, const char *msg)
{
int len;
int i;
......@@ -1648,7 +1655,7 @@ error_msg(struct i915_gpu_state *error, unsigned long engines, const char *msg)
engines &= ~BIT(i);
len = scnprintf(error->error_msg, sizeof(error->error_msg),
"GPU HANG: ecode %d:%lx:0x%08x",
"GPU HANG: ecode %d:%x:0x%08x",
INTEL_GEN(error->i915), engines,
i915_error_generate_code(error, engines));
if (engines) {
......@@ -1787,7 +1794,7 @@ i915_capture_gpu_state(struct drm_i915_private *i915)
* to pick up.
*/
void i915_capture_error_state(struct drm_i915_private *i915,
unsigned long engine_mask,
intel_engine_mask_t engine_mask,
const char *msg)
{
static bool warned;
......
......@@ -263,7 +263,7 @@ void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
struct i915_gpu_state *i915_capture_gpu_state(struct drm_i915_private *i915);
void i915_capture_error_state(struct drm_i915_private *dev_priv,
unsigned long engine_mask,
intel_engine_mask_t engine_mask,
const char *error_msg);
static inline struct i915_gpu_state *
......
(This diff has been collapsed.)
......@@ -31,8 +31,9 @@
#include "i915_drv.h"
#include "i915_globals.h"
#include "i915_selftest.h"
#include "intel_fbdev.h"
#define PLATFORM(x) .platform = (x), .platform_mask = BIT(x)
#define PLATFORM(x) .platform = (x)
#define GEN(x) .gen = (x), .gen_mask = BIT((x) - 1)
#define I845_PIPE_OFFSETS \
......@@ -116,8 +117,16 @@
[PIPE_C] = IVB_CURSOR_C_OFFSET, \
}
#define BDW_COLORS \
.color = { .degamma_lut_size = 512, .gamma_lut_size = 512 }
#define I9XX_COLORS \
.color = { .gamma_lut_size = 256 }
#define I965_COLORS \
.color = { .gamma_lut_size = 129, \
.gamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \
}
#define ILK_COLORS \
.color = { .gamma_lut_size = 1024 }
#define IVB_COLORS \
.color = { .degamma_lut_size = 1024, .gamma_lut_size = 1024 }
#define CHV_COLORS \
.color = { .degamma_lut_size = 65, .gamma_lut_size = 257, \
.degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \
......@@ -150,6 +159,7 @@
.has_coherent_ggtt = false, \
I9XX_PIPE_OFFSETS, \
I9XX_CURSOR_OFFSETS, \
I9XX_COLORS, \
GEN_DEFAULT_PAGE_SIZES
#define I845_FEATURES \
......@@ -166,6 +176,7 @@
.has_coherent_ggtt = false, \
I845_PIPE_OFFSETS, \
I845_CURSOR_OFFSETS, \
I9XX_COLORS, \
GEN_DEFAULT_PAGE_SIZES
static const struct intel_device_info intel_i830_info = {
......@@ -199,6 +210,7 @@ static const struct intel_device_info intel_i865g_info = {
.has_coherent_ggtt = true, \
I9XX_PIPE_OFFSETS, \
I9XX_CURSOR_OFFSETS, \
I9XX_COLORS, \
GEN_DEFAULT_PAGE_SIZES
static const struct intel_device_info intel_i915g_info = {
......@@ -257,7 +269,14 @@ static const struct intel_device_info intel_g33_info = {
.display.has_overlay = 1,
};
static const struct intel_device_info intel_pineview_info = {
static const struct intel_device_info intel_pineview_g_info = {
GEN3_FEATURES,
PLATFORM(INTEL_PINEVIEW),
.display.has_hotplug = 1,
.display.has_overlay = 1,
};
static const struct intel_device_info intel_pineview_m_info = {
GEN3_FEATURES,
PLATFORM(INTEL_PINEVIEW),
.is_mobile = 1,
......@@ -276,6 +295,7 @@ static const struct intel_device_info intel_pineview_info = {
.has_coherent_ggtt = true, \
I9XX_PIPE_OFFSETS, \
I9XX_CURSOR_OFFSETS, \
I965_COLORS, \
GEN_DEFAULT_PAGE_SIZES
static const struct intel_device_info intel_i965g_info = {
......@@ -325,6 +345,7 @@ static const struct intel_device_info intel_gm45_info = {
.has_rc6 = 0, \
I9XX_PIPE_OFFSETS, \
I9XX_CURSOR_OFFSETS, \
ILK_COLORS, \
GEN_DEFAULT_PAGE_SIZES
static const struct intel_device_info intel_ironlake_d_info = {
......@@ -353,6 +374,7 @@ static const struct intel_device_info intel_ironlake_m_info = {
.ppgtt_size = 31, \
I9XX_PIPE_OFFSETS, \
I9XX_CURSOR_OFFSETS, \
ILK_COLORS, \
GEN_DEFAULT_PAGE_SIZES
#define SNB_D_PLATFORM \
......@@ -399,6 +421,7 @@ static const struct intel_device_info intel_sandybridge_m_gt2_info = {
.ppgtt_size = 31, \
IVB_PIPE_OFFSETS, \
IVB_CURSOR_OFFSETS, \
IVB_COLORS, \
GEN_DEFAULT_PAGE_SIZES
#define IVB_D_PLATFORM \
......@@ -457,6 +480,7 @@ static const struct intel_device_info intel_valleyview_info = {
.display_mmio_offset = VLV_DISPLAY_BASE,
I9XX_PIPE_OFFSETS,
I9XX_CURSOR_OFFSETS,
I965_COLORS,
GEN_DEFAULT_PAGE_SIZES,
};
......@@ -494,7 +518,6 @@ static const struct intel_device_info intel_haswell_gt3_info = {
#define GEN8_FEATURES \
G75_FEATURES, \
GEN(8), \
BDW_COLORS, \
.page_sizes = I915_GTT_PAGE_SIZE_4K | \
I915_GTT_PAGE_SIZE_2M, \
.has_logical_ring_contexts = 1, \
......@@ -629,7 +652,7 @@ static const struct intel_device_info intel_skylake_gt4_info = {
.display.has_ipc = 1, \
HSW_PIPE_OFFSETS, \
IVB_CURSOR_OFFSETS, \
BDW_COLORS, \
IVB_COLORS, \
GEN9_DEFAULT_PAGE_SIZES
static const struct intel_device_info intel_broxton_info = {
......@@ -761,7 +784,8 @@ static const struct pci_device_id pciidlist[] = {
INTEL_I965GM_IDS(&intel_i965gm_info),
INTEL_GM45_IDS(&intel_gm45_info),
INTEL_G45_IDS(&intel_g45_info),
INTEL_PINEVIEW_IDS(&intel_pineview_info),
INTEL_PINEVIEW_G_IDS(&intel_pineview_g_info),
INTEL_PINEVIEW_M_IDS(&intel_pineview_m_info),
INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),
INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),
INTEL_SNB_D_GT1_IDS(&intel_sandybridge_d_gt1_info),
......
/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2018 Intel Corporation
*/
#ifndef _I915_PRIOLIST_TYPES_H_
#define _I915_PRIOLIST_TYPES_H_
#include <linux/list.h>
#include <linux/rbtree.h>
#include <uapi/drm/i915_drm.h>
enum {
I915_PRIORITY_MIN = I915_CONTEXT_MIN_USER_PRIORITY - 1,
I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY,
I915_PRIORITY_MAX = I915_CONTEXT_MAX_USER_PRIORITY + 1,
I915_PRIORITY_INVALID = INT_MIN
};
#define I915_USER_PRIORITY_SHIFT 3
#define I915_USER_PRIORITY(x) ((x) << I915_USER_PRIORITY_SHIFT)
#define I915_PRIORITY_COUNT BIT(I915_USER_PRIORITY_SHIFT)
#define I915_PRIORITY_MASK (I915_PRIORITY_COUNT - 1)
#define I915_PRIORITY_WAIT ((u8)BIT(0))
#define I915_PRIORITY_NEWCLIENT ((u8)BIT(1))
#define I915_PRIORITY_NOSEMAPHORE ((u8)BIT(2))
#define __NO_PREEMPTION (I915_PRIORITY_WAIT)
struct i915_priolist {
struct list_head requests[I915_PRIORITY_COUNT];
struct rb_node node;
unsigned long used;
int priority;
};
#endif /* _I915_PRIOLIST_TYPES_H_ */
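The split here keeps user-visible priority in the high bits and the internal boost bits below I915_USER_PRIORITY_SHIFT, so the two compose without collisions. A worked example using only the macros above:

/* User priority 2, later given the WAIT boost: */
int prio = I915_USER_PRIORITY(2) | I915_PRIORITY_WAIT;	/* (2 << 3) | 1 == 17 */

/* Each half is recovered independently: */
int user = prio >> I915_USER_PRIORITY_SHIFT;	/* == 2 */
int internal = prio & I915_PRIORITY_MASK;	/* == I915_PRIORITY_WAIT == 1 */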
......@@ -439,8 +439,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define RING_PP_DIR_DCLV(base) _MMIO((base) + 0x220)
#define PP_DIR_DCLV_2G 0xffffffff
#define GEN8_RING_PDP_UDW(engine, n) _MMIO((engine)->mmio_base + 0x270 + (n) * 8 + 4)
#define GEN8_RING_PDP_LDW(engine, n) _MMIO((engine)->mmio_base + 0x270 + (n) * 8)
#define GEN8_RING_PDP_UDW(base, n) _MMIO((base) + 0x270 + (n) * 8 + 4)
#define GEN8_RING_PDP_LDW(base, n) _MMIO((base) + 0x270 + (n) * 8)
#define GEN8_R_PWR_CLK_STATE _MMIO(0x20C8)
#define GEN8_RPCS_ENABLE (1 << 31)
......@@ -2446,8 +2446,10 @@ enum i915_power_well_id {
#define RING_HWS_PGA(base) _MMIO((base) + 0x80)
#define RING_HWS_PGA_GEN6(base) _MMIO((base) + 0x2080)
#define RING_RESET_CTL(base) _MMIO((base) + 0xd0)
#define RESET_CTL_REQUEST_RESET (1 << 0)
#define RESET_CTL_READY_TO_RESET (1 << 1)
#define RESET_CTL_CAT_ERROR REG_BIT(2)
#define RESET_CTL_READY_TO_RESET REG_BIT(1)
#define RESET_CTL_REQUEST_RESET REG_BIT(0)
#define RING_SEMA_WAIT_POLL(base) _MMIO((base) + 0x24c)
#define HSW_GTT_CACHE_EN _MMIO(0x4024)
......@@ -2713,10 +2715,10 @@ enum i915_power_well_id {
#define VLV_GU_CTL0 _MMIO(VLV_DISPLAY_BASE + 0x2030)
#define VLV_GU_CTL1 _MMIO(VLV_DISPLAY_BASE + 0x2034)
#define SCPD0 _MMIO(0x209c) /* 915+ only */
#define IER _MMIO(0x20a0)
#define IIR _MMIO(0x20a4)
#define IMR _MMIO(0x20a8)
#define ISR _MMIO(0x20ac)
#define GEN2_IER _MMIO(0x20a0)
#define GEN2_IIR _MMIO(0x20a4)
#define GEN2_IMR _MMIO(0x20a8)
#define GEN2_ISR _MMIO(0x20ac)
#define VLV_GUNIT_CLOCK_GATE _MMIO(VLV_DISPLAY_BASE + 0x2060)
#define GINT_DIS (1 << 22)
#define GCFG_DIS (1 << 8)
......@@ -4209,42 +4211,6 @@ enum {
#define PIPESRC(trans) _MMIO_TRANS2(trans, _PIPEASRC)
#define PIPE_MULT(trans) _MMIO_TRANS2(trans, _PIPE_MULT_A)
/* VLV eDP PSR registers */
#define _PSRCTLA (VLV_DISPLAY_BASE + 0x60090)
#define _PSRCTLB (VLV_DISPLAY_BASE + 0x61090)
#define VLV_EDP_PSR_ENABLE (1 << 0)
#define VLV_EDP_PSR_RESET (1 << 1)
#define VLV_EDP_PSR_MODE_MASK (7 << 2)
#define VLV_EDP_PSR_MODE_HW_TIMER (1 << 3)
#define VLV_EDP_PSR_MODE_SW_TIMER (1 << 2)
#define VLV_EDP_PSR_SINGLE_FRAME_UPDATE (1 << 7)
#define VLV_EDP_PSR_ACTIVE_ENTRY (1 << 8)
#define VLV_EDP_PSR_SRC_TRANSMITTER_STATE (1 << 9)
#define VLV_EDP_PSR_DBL_FRAME (1 << 10)
#define VLV_EDP_PSR_FRAME_COUNT_MASK (0xff << 16)
#define VLV_EDP_PSR_IDLE_FRAME_SHIFT 16
#define VLV_PSRCTL(pipe) _MMIO_PIPE(pipe, _PSRCTLA, _PSRCTLB)
#define _VSCSDPA (VLV_DISPLAY_BASE + 0x600a0)
#define _VSCSDPB (VLV_DISPLAY_BASE + 0x610a0)
#define VLV_EDP_PSR_SDP_FREQ_MASK (3 << 30)
#define VLV_EDP_PSR_SDP_FREQ_ONCE (1 << 31)
#define VLV_EDP_PSR_SDP_FREQ_EVFRAME (1 << 30)
#define VLV_VSCSDP(pipe) _MMIO_PIPE(pipe, _VSCSDPA, _VSCSDPB)
#define _PSRSTATA (VLV_DISPLAY_BASE + 0x60094)
#define _PSRSTATB (VLV_DISPLAY_BASE + 0x61094)
#define VLV_EDP_PSR_LAST_STATE_MASK (7 << 3)
#define VLV_EDP_PSR_CURR_STATE_MASK 7
#define VLV_EDP_PSR_DISABLED (0 << 0)
#define VLV_EDP_PSR_INACTIVE (1 << 0)
#define VLV_EDP_PSR_IN_TRANS_TO_ACTIVE (2 << 0)
#define VLV_EDP_PSR_ACTIVE_NORFB_UP (3 << 0)
#define VLV_EDP_PSR_ACTIVE_SF_UPDATE (4 << 0)
#define VLV_EDP_PSR_EXIT (5 << 0)
#define VLV_EDP_PSR_IN_TRANS (1 << 7)
#define VLV_PSRSTAT(pipe) _MMIO_PIPE(pipe, _PSRSTATA, _PSRSTATB)
/* HSW+ eDP PSR registers */
#define HSW_EDP_PSR_BASE 0x64800
#define BDW_EDP_PSR_BASE 0x6f800
......@@ -5795,6 +5761,10 @@ enum {
#define PIPEFRAMEPIXEL(pipe) _MMIO_PIPE2(pipe, _PIPEAFRAMEPIXEL)
#define PIPESTAT(pipe) _MMIO_PIPE2(pipe, _PIPEASTAT)
#define _PIPEAGCMAX 0x70010
#define _PIPEBGCMAX 0x71010
#define PIPEGCMAX(pipe, i) _MMIO_PIPE2(pipe, _PIPEAGCMAX + (i) * 4)
#define _PIPE_MISC_A 0x70030
#define _PIPE_MISC_B 0x71030
#define PIPEMISC_YUV420_ENABLE (1 << 27)
......@@ -7209,11 +7179,21 @@ enum {
#define _LGC_PALETTE_B 0x4a800
#define LGC_PALETTE(pipe, i) _MMIO(_PIPE(pipe, _LGC_PALETTE_A, _LGC_PALETTE_B) + (i) * 4)
/* ilk/snb precision palette */
#define _PREC_PALETTE_A 0x4b000
#define _PREC_PALETTE_B 0x4c000
#define PREC_PALETTE(pipe, i) _MMIO(_PIPE(pipe, _PREC_PALETTE_A, _PREC_PALETTE_B) + (i) * 4)
#define _PREC_PIPEAGCMAX 0x4d000
#define _PREC_PIPEBGCMAX 0x4d010
#define PREC_PIPEGCMAX(pipe, i) _MMIO(_PIPE(pipe, _PIPEAGCMAX, _PIPEBGCMAX) + (i) * 4)
#define _GAMMA_MODE_A 0x4a480
#define _GAMMA_MODE_B 0x4ac80
#define GAMMA_MODE(pipe) _MMIO_PIPE(pipe, _GAMMA_MODE_A, _GAMMA_MODE_B)
#define PRE_CSC_GAMMA_ENABLE (1 << 31)
#define POST_CSC_GAMMA_ENABLE (1 << 30)
#define GAMMA_MODE_MODE_MASK (3 << 0)
#define GAMMA_MODE_MODE_8BIT (0 << 0)
#define GAMMA_MODE_MODE_10BIT (1 << 0)
#define GAMMA_MODE_MODE_12BIT (2 << 0)
......@@ -8709,8 +8689,9 @@ enum {
#define GEN9_MEDIA_PG_IDLE_HYSTERESIS _MMIO(0xA0C4)
#define GEN9_RENDER_PG_IDLE_HYSTERESIS _MMIO(0xA0C8)
#define GEN9_PG_ENABLE _MMIO(0xA210)
#define GEN9_RENDER_PG_ENABLE (1 << 0)
#define GEN9_MEDIA_PG_ENABLE (1 << 1)
#define GEN9_RENDER_PG_ENABLE REG_BIT(0)
#define GEN9_MEDIA_PG_ENABLE REG_BIT(1)
#define GEN11_MEDIA_SAMPLER_PG_ENABLE REG_BIT(2)
#define GEN8_PUSHBUS_CONTROL _MMIO(0xA248)
#define GEN8_PUSHBUS_ENABLE _MMIO(0xA250)
#define GEN8_PUSHBUS_SHIFT _MMIO(0xA25C)
......@@ -8725,6 +8706,11 @@ enum {
#define GEN6_PMIER _MMIO(0x4402C)
#define GEN6_PM_MBOX_EVENT (1 << 25)
#define GEN6_PM_THERMAL_EVENT (1 << 24)
/*
* For Gen11 these are in the upper word of the GPM_WGBOXPERF
* registers. Shifting is handled on accessing the imr and ier.
*/
#define GEN6_PM_RP_DOWN_TIMEOUT (1 << 6)
#define GEN6_PM_RP_UP_THRESHOLD (1 << 5)
#define GEN6_PM_RP_DOWN_THRESHOLD (1 << 4)
......@@ -10127,6 +10113,7 @@ enum skl_power_gate {
#define PAL_PREC_SPLIT_MODE (1 << 31)
#define PAL_PREC_AUTO_INCREMENT (1 << 15)
#define PAL_PREC_INDEX_VALUE_MASK (0x3ff << 0)
#define PAL_PREC_INDEX_VALUE(x) ((x) << 0)
#define _PAL_PREC_DATA_A 0x4A404
#define _PAL_PREC_DATA_B 0x4AC04
#define _PAL_PREC_DATA_C 0x4B404
......@@ -10144,6 +10131,7 @@ enum skl_power_gate {
#define PREC_PAL_DATA(pipe) _MMIO_PIPE(pipe, _PAL_PREC_DATA_A, _PAL_PREC_DATA_B)
#define PREC_PAL_GC_MAX(pipe, i) _MMIO(_PIPE(pipe, _PAL_PREC_GC_MAX_A, _PAL_PREC_GC_MAX_B) + (i) * 4)
#define PREC_PAL_EXT_GC_MAX(pipe, i) _MMIO(_PIPE(pipe, _PAL_PREC_EXT_GC_MAX_A, _PAL_PREC_EXT_GC_MAX_B) + (i) * 4)
#define PREC_PAL_EXT2_GC_MAX(pipe, i) _MMIO(_PIPE(pipe, _PAL_PREC_EXT2_GC_MAX_A, _PAL_PREC_EXT2_GC_MAX_B) + (i) * 4)
#define _PRE_CSC_GAMC_INDEX_A 0x4A484
#define _PRE_CSC_GAMC_INDEX_B 0x4AC84
......
......@@ -29,10 +29,11 @@
#include <linux/sched/clock.h>
#include <linux/sched/signal.h>
#include "i915_drv.h"
#include "i915_active.h"
#include "i915_drv.h"
#include "i915_globals.h"
#include "i915_reset.h"
#include "intel_pm.h"
struct execute_cb {
struct list_head link;
......@@ -100,6 +101,7 @@ static void i915_fence_release(struct dma_fence *fence)
* caught trying to reuse dead objects.
*/
i915_sw_fence_fini(&rq->submit);
i915_sw_fence_fini(&rq->semaphore);
kmem_cache_free(global.slab_requests, rq);
}
......@@ -551,6 +553,36 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
return NOTIFY_DONE;
}
static int __i915_sw_fence_call
semaphore_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
struct i915_request *request =
container_of(fence, typeof(*request), semaphore);
switch (state) {
case FENCE_COMPLETE:
/*
* We only check a small portion of our dependencies
* and so cannot guarantee that there remains no
* semaphore chain across all. Instead of opting
* for the full NOSEMAPHORE boost, we go for the
* smaller (but still preempting) boost of
* NEWCLIENT. This will be enough to boost over
* a busywaiting request (as that cannot be
* NEWCLIENT) without accidentally boosting
* a busywait over real work elsewhere.
*/
i915_schedule_bump_priority(request, I915_PRIORITY_NEWCLIENT);
break;
case FENCE_FREE:
i915_request_put(request);
break;
}
return NOTIFY_DONE;
}
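The NEWCLIENT choice slots into the packing from i915_priolist_types.h: within one user priority level the internal boosts order as WAIT (1) < NEWCLIENT (2) < NOSEMAPHORE (4), so this bump overtakes a busywaiting request, which per the comment above can never carry NEWCLIENT, while staying below the boost reserved for semaphore-free work:

/* Internal boost ordering within a single user priority level:
 *   I915_PRIORITY_WAIT        == BIT(0) == 1
 *   I915_PRIORITY_NEWCLIENT   == BIT(1) == 2	(the bump applied here)
 *   I915_PRIORITY_NOSEMAPHORE == BIT(2) == 4	(deliberately not used)
 */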
static void ring_retire_requests(struct intel_ring *ring)
{
struct i915_request *rq, *rn;
......@@ -583,11 +615,6 @@ i915_request_alloc_slow(struct intel_context *ce)
return kmem_cache_alloc(global.slab_requests, GFP_KERNEL);
}
static int add_timeline_barrier(struct i915_request *rq)
{
return i915_request_await_active_request(rq, &rq->timeline->barrier);
}
/**
* i915_request_alloc - allocate a request structure
*
......@@ -706,6 +733,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
/* We bump the ref for the fence chain */
i915_sw_fence_init(&i915_request_get(rq)->submit, submit_notify);
i915_sw_fence_init(&i915_request_get(rq)->semaphore, semaphore_notify);
i915_sched_node_init(&rq->sched);
......@@ -737,10 +765,6 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
*/
rq->head = rq->ring->emit;
ret = add_timeline_barrier(rq);
if (ret)
goto err_unwind;
ret = engine->request_alloc(rq);
if (ret)
goto err_unwind;
......@@ -751,7 +775,10 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
rq->infix = rq->ring->emit; /* end of header; start of user payload */
/* Check that we didn't interrupt ourselves with a new request */
lockdep_assert_held(&rq->timeline->mutex);
GEM_BUG_ON(rq->timeline->seqno != rq->fence.seqno);
rq->cookie = lockdep_pin_lock(&rq->timeline->mutex);
return rq;
err_unwind:
......@@ -783,6 +810,18 @@ emit_semaphore_wait(struct i915_request *to,
GEM_BUG_ON(!from->timeline->has_initial_breadcrumb);
GEM_BUG_ON(INTEL_GEN(to->i915) < 8);
/* Just emit the first semaphore we see as request space is limited. */
if (to->sched.semaphores & from->engine->mask)
return i915_sw_fence_await_dma_fence(&to->submit,
&from->fence, 0,
I915_FENCE_GFP);
err = i915_sw_fence_await_dma_fence(&to->semaphore,
&from->fence, 0,
I915_FENCE_GFP);
if (err < 0)
return err;
/* We need to pin the signaler's HWSP until we are finished reading. */
err = i915_timeline_read_hwsp(from, to, &hwsp_offset);
if (err)
......@@ -814,7 +853,8 @@ emit_semaphore_wait(struct i915_request *to,
*cs++ = 0;
intel_ring_advance(to, cs);
to->sched.flags |= I915_SCHED_HAS_SEMAPHORE;
to->sched.semaphores |= from->engine->mask;
to->sched.flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
return 0;
}
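Two pieces of bookkeeping above deserve unpacking: sched.semaphores caps hardware busywaits at one per signalling engine, with later waits on the same engine falling back to an ordinary software fence, and the CHAIN flag records that a semaphore exists somewhere upstream for the priority decision in i915_request_add(). A condensed sketch of just that decision, assuming the fields shown above:

/* Sketch: emit at most one MI_SEMAPHORE_WAIT per signalling engine. */
static bool want_hw_semaphore(struct i915_request *to,
			      const struct i915_request *from)
{
	if (to->sched.semaphores & from->engine->mask)
		return false;	/* already busywaiting on that engine */

	to->sched.semaphores |= from->engine->mask;
	to->sched.flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
	return true;
}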
......@@ -1063,6 +1103,8 @@ void i915_request_add(struct i915_request *request)
engine->name, request->fence.context, request->fence.seqno);
lockdep_assert_held(&request->timeline->mutex);
lockdep_unpin_lock(&request->timeline->mutex, request->cookie);
trace_i915_request_add(request);
/*
......@@ -1110,6 +1152,7 @@ void i915_request_add(struct i915_request *request)
* run at the earliest possible convenience.
*/
local_bh_disable();
i915_sw_fence_commit(&request->semaphore);
rcu_read_lock(); /* RCU serialisation for set-wedged protection */
if (engine->schedule) {
struct i915_sched_attr attr = request->gem_context->sched;
......@@ -1126,7 +1169,7 @@ void i915_request_add(struct i915_request *request)
* far in the distance past over useful work, we keep a history
* of any semaphore use along our dependency chain.
*/
if (!(request->sched.flags & I915_SCHED_HAS_SEMAPHORE))
if (!(request->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN))
attr.priority |= I915_PRIORITY_NOSEMAPHORE;
/*
......@@ -1316,7 +1359,9 @@ long i915_request_wait(struct i915_request *rq,
if (flags & I915_WAIT_PRIORITY) {
if (!i915_request_started(rq) && INTEL_GEN(rq->i915) >= 6)
gen6_rps_boost(rq);
local_bh_disable(); /* suspend tasklets for reprioritisation */
i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT);
local_bh_enable(); /* kick tasklets en masse */
}
wait.tsk = current;
......
......@@ -26,6 +26,7 @@
#define I915_REQUEST_H
#include <linux/dma-fence.h>
#include <linux/lockdep.h>
#include "i915_gem.h"
#include "i915_scheduler.h"
......@@ -120,6 +121,15 @@ struct i915_request {
*/
unsigned long rcustate;
/*
* We pin the timeline->mutex while constructing the request to
* ensure that no caller accidentally drops it during construction.
* The timeline->mutex must be held to ensure that only this caller
* can use the ring and manipulate the associated timeline during
* construction.
*/
struct pin_cookie cookie;
/*
* Fences for the various phases in the request's lifetime.
*
......@@ -133,6 +143,7 @@ struct i915_request {
struct i915_sw_dma_fence_cb dmaq;
};
struct list_head execute_cb;
struct i915_sw_fence semaphore;
/*
* A list of everyone we wait upon, and everyone who waits upon us.
......
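The cookie above leans on lockdep's lock pinning: while a lock is pinned, lockdep complains if anyone releases it before the matching unpin. A minimal standalone sketch of the idiom (hypothetical function, real lockdep API; the checks compile away when lockdep is disabled):

#include <linux/lockdep.h>
#include <linux/mutex.h>

static void construct_under(struct mutex *m)
{
	struct pin_cookie cookie;

	mutex_lock(m);
	cookie = lockdep_pin_lock(m);

	/* ... construction phase; m must stay held throughout ... */

	lockdep_unpin_lock(m, cookie);
	mutex_unlock(m);
}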
......@@ -18,6 +18,26 @@
/* XXX How to handle concurrent GGTT updates using tiling registers? */
#define RESET_UNDER_STOP_MACHINE 0
static void rmw_set(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
{
intel_uncore_rmw(uncore, reg, 0, set);
}
static void rmw_clear(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
{
intel_uncore_rmw(uncore, reg, clr, 0);
}
static void rmw_set_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
{
intel_uncore_rmw_fw(uncore, reg, 0, set);
}
static void rmw_clear_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
{
intel_uncore_rmw_fw(uncore, reg, clr, 0);
}
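These four are thin wrappers assuming intel_uncore_rmw(uncore, reg, clear, set) performs the usual read, clear, or-in, write-back sequence; the _fw variants use the raw accessors for callers that already hold forcewake. Typical use appears later in this file, e.g. in g4x_do_reset():

/* Flip a clock-gating bit around the media reset (see g4x_do_reset below). */
rmw_set_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE);
/* ... issue and wait for the reset ... */
rmw_clear_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE);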
static void engine_skip_context(struct i915_request *rq)
{
struct intel_engine_cs *engine = rq->engine;
......@@ -119,7 +139,7 @@ void i915_reset_request(struct i915_request *rq, bool guilty)
static void gen3_stop_engine(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
struct intel_uncore *uncore = engine->uncore;
const u32 base = engine->mmio_base;
GEM_TRACE("%s\n", engine->name);
......@@ -127,32 +147,35 @@ static void gen3_stop_engine(struct intel_engine_cs *engine)
if (intel_engine_stop_cs(engine))
GEM_TRACE("%s: timed out on STOP_RING\n", engine->name);
I915_WRITE_FW(RING_HEAD(base), I915_READ_FW(RING_TAIL(base)));
POSTING_READ_FW(RING_HEAD(base)); /* paranoia */
intel_uncore_write_fw(uncore,
RING_HEAD(base),
intel_uncore_read_fw(uncore, RING_TAIL(base)));
intel_uncore_posting_read_fw(uncore, RING_HEAD(base)); /* paranoia */
I915_WRITE_FW(RING_HEAD(base), 0);
I915_WRITE_FW(RING_TAIL(base), 0);
POSTING_READ_FW(RING_TAIL(base));
intel_uncore_write_fw(uncore, RING_HEAD(base), 0);
intel_uncore_write_fw(uncore, RING_TAIL(base), 0);
intel_uncore_posting_read_fw(uncore, RING_TAIL(base));
/* The ring must be empty before it is disabled */
I915_WRITE_FW(RING_CTL(base), 0);
intel_uncore_write_fw(uncore, RING_CTL(base), 0);
/* Check acts as a post */
if (I915_READ_FW(RING_HEAD(base)))
if (intel_uncore_read_fw(uncore, RING_HEAD(base)))
GEM_TRACE("%s: ring head [%x] not parked\n",
engine->name, I915_READ_FW(RING_HEAD(base)));
engine->name,
intel_uncore_read_fw(uncore, RING_HEAD(base)));
}
static void i915_stop_engines(struct drm_i915_private *i915,
unsigned int engine_mask)
intel_engine_mask_t engine_mask)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
intel_engine_mask_t tmp;
if (INTEL_GEN(i915) < 3)
return;
for_each_engine_masked(engine, i915, engine_mask, id)
for_each_engine_masked(engine, i915, engine_mask, tmp)
gen3_stop_engine(engine);
}
......@@ -165,7 +188,7 @@ static bool i915_in_reset(struct pci_dev *pdev)
}
static int i915_do_reset(struct drm_i915_private *i915,
unsigned int engine_mask,
intel_engine_mask_t engine_mask,
unsigned int retry)
{
struct pci_dev *pdev = i915->drm.pdev;
......@@ -194,7 +217,7 @@ static bool g4x_reset_complete(struct pci_dev *pdev)
}
static int g33_do_reset(struct drm_i915_private *i915,
unsigned int engine_mask,
intel_engine_mask_t engine_mask,
unsigned int retry)
{
struct pci_dev *pdev = i915->drm.pdev;
......@@ -203,17 +226,17 @@ static int g33_do_reset(struct drm_i915_private *i915,
return wait_for_atomic(g4x_reset_complete(pdev), 50);
}
static int g4x_do_reset(struct drm_i915_private *dev_priv,
unsigned int engine_mask,
static int g4x_do_reset(struct drm_i915_private *i915,
intel_engine_mask_t engine_mask,
unsigned int retry)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
struct pci_dev *pdev = i915->drm.pdev;
struct intel_uncore *uncore = &i915->uncore;
int ret;
/* WaVcpClkGateDisableForMediaReset:ctg,elk */
I915_WRITE_FW(VDECCLK_GATE_D,
I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
POSTING_READ_FW(VDECCLK_GATE_D);
rmw_set_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE);
intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);
pci_write_config_byte(pdev, I915_GDRST,
GRDOM_MEDIA | GRDOM_RESET_ENABLE);
......@@ -234,18 +257,17 @@ static int g4x_do_reset(struct drm_i915_private *dev_priv,
out:
pci_write_config_byte(pdev, I915_GDRST, 0);
I915_WRITE_FW(VDECCLK_GATE_D,
I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
POSTING_READ_FW(VDECCLK_GATE_D);
rmw_clear_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE);
intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);
return ret;
}
static int ironlake_do_reset(struct drm_i915_private *dev_priv,
unsigned int engine_mask,
static int ironlake_do_reset(struct drm_i915_private *i915,
intel_engine_mask_t engine_mask,
unsigned int retry)
{
struct intel_uncore *uncore = &dev_priv->uncore;
struct intel_uncore *uncore = &i915->uncore;
int ret;
intel_uncore_write_fw(uncore, ILK_GDSR,
......@@ -277,10 +299,10 @@ static int ironlake_do_reset(struct drm_i915_private *dev_priv,
}
/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
static int gen6_hw_domain_reset(struct drm_i915_private *i915,
u32 hw_domain_mask)
{
struct intel_uncore *uncore = &dev_priv->uncore;
struct intel_uncore *uncore = &i915->uncore;
int err;
/*
......@@ -303,7 +325,7 @@ static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
}
static int gen6_reset_engines(struct drm_i915_private *i915,
unsigned int engine_mask,
intel_engine_mask_t engine_mask,
unsigned int retry)
{
struct intel_engine_cs *engine;
......@@ -319,7 +341,7 @@ static int gen6_reset_engines(struct drm_i915_private *i915,
if (engine_mask == ALL_ENGINES) {
hw_mask = GEN6_GRDOM_FULL;
} else {
unsigned int tmp;
intel_engine_mask_t tmp;
hw_mask = 0;
for_each_engine_masked(engine, i915, engine_mask, tmp) {
......@@ -331,11 +353,10 @@ static int gen6_reset_engines(struct drm_i915_private *i915,
return gen6_hw_domain_reset(i915, hw_mask);
}
static u32 gen11_lock_sfc(struct drm_i915_private *dev_priv,
struct intel_engine_cs *engine)
static u32 gen11_lock_sfc(struct intel_engine_cs *engine)
{
struct intel_uncore *uncore = &dev_priv->uncore;
u8 vdbox_sfc_access = RUNTIME_INFO(dev_priv)->vdbox_sfc_access;
struct intel_uncore *uncore = engine->uncore;
u8 vdbox_sfc_access = RUNTIME_INFO(engine->i915)->vdbox_sfc_access;
i915_reg_t sfc_forced_lock, sfc_forced_lock_ack;
u32 sfc_forced_lock_bit, sfc_forced_lock_ack_bit;
i915_reg_t sfc_usage;
......@@ -382,7 +403,7 @@ static u32 gen11_lock_sfc(struct drm_i915_private *dev_priv,
* ends up being locked to the engine we want to reset, we have to reset
* it as well (we will unlock it once the reset sequence is completed).
*/
intel_uncore_rmw_or_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);
rmw_set_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);
if (__intel_wait_for_register_fw(uncore,
sfc_forced_lock_ack,
......@@ -399,10 +420,10 @@ static u32 gen11_lock_sfc(struct drm_i915_private *dev_priv,
return 0;
}
static void gen11_unlock_sfc(struct drm_i915_private *dev_priv,
struct intel_engine_cs *engine)
static void gen11_unlock_sfc(struct intel_engine_cs *engine)
{
u8 vdbox_sfc_access = RUNTIME_INFO(dev_priv)->vdbox_sfc_access;
struct intel_uncore *uncore = engine->uncore;
u8 vdbox_sfc_access = RUNTIME_INFO(engine->i915)->vdbox_sfc_access;
i915_reg_t sfc_forced_lock;
u32 sfc_forced_lock_bit;
......@@ -424,12 +445,11 @@ static void gen11_unlock_sfc(struct drm_i915_private *dev_priv,
return;
}
I915_WRITE_FW(sfc_forced_lock,
I915_READ_FW(sfc_forced_lock) & ~sfc_forced_lock_bit);
rmw_clear_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);
}
static int gen11_reset_engines(struct drm_i915_private *i915,
unsigned int engine_mask,
intel_engine_mask_t engine_mask,
unsigned int retry)
{
const u32 hw_engine_mask[] = {
......@@ -443,7 +463,7 @@ static int gen11_reset_engines(struct drm_i915_private *i915,
[VECS1] = GEN11_GRDOM_VECS2,
};
struct intel_engine_cs *engine;
unsigned int tmp;
intel_engine_mask_t tmp;
u32 hw_mask;
int ret;
......@@ -454,7 +474,7 @@ static int gen11_reset_engines(struct drm_i915_private *i915,
for_each_engine_masked(engine, i915, engine_mask, tmp) {
GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
hw_mask |= hw_engine_mask[engine->id];
hw_mask |= gen11_lock_sfc(i915, engine);
hw_mask |= gen11_lock_sfc(engine);
}
}
......@@ -462,46 +482,62 @@ static int gen11_reset_engines(struct drm_i915_private *i915,
if (engine_mask != ALL_ENGINES)
for_each_engine_masked(engine, i915, engine_mask, tmp)
gen11_unlock_sfc(i915, engine);
gen11_unlock_sfc(engine);
return ret;
}
static int gen8_engine_reset_prepare(struct intel_engine_cs *engine)
{
struct intel_uncore *uncore = &engine->i915->uncore;
struct intel_uncore *uncore = engine->uncore;
const i915_reg_t reg = RING_RESET_CTL(engine->mmio_base);
u32 request, mask, ack;
int ret;
intel_uncore_write_fw(uncore, RING_RESET_CTL(engine->mmio_base),
_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
ack = intel_uncore_read_fw(uncore, reg);
if (ack & RESET_CTL_CAT_ERROR) {
/*
* For catastrophic errors, ready-for-reset sequence
* needs to be bypassed: HAS#396813
*/
request = RESET_CTL_CAT_ERROR;
mask = RESET_CTL_CAT_ERROR;
/* Catastrophic errors need to be cleared by HW */
ack = 0;
} else if (!(ack & RESET_CTL_READY_TO_RESET)) {
request = RESET_CTL_REQUEST_RESET;
mask = RESET_CTL_READY_TO_RESET;
ack = RESET_CTL_READY_TO_RESET;
} else {
return 0;
}
ret = __intel_wait_for_register_fw(uncore,
RING_RESET_CTL(engine->mmio_base),
RESET_CTL_READY_TO_RESET,
RESET_CTL_READY_TO_RESET,
700, 0,
NULL);
intel_uncore_write_fw(uncore, reg, _MASKED_BIT_ENABLE(request));
ret = __intel_wait_for_register_fw(uncore, reg, mask, ack,
700, 0, NULL);
if (ret)
DRM_ERROR("%s: reset request timeout\n", engine->name);
DRM_ERROR("%s reset request timed out: {request: %08x, RESET_CTL: %08x}\n",
engine->name, request,
intel_uncore_read_fw(uncore, reg));
return ret;
}
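The three branches above collapse to one rule, assuming __intel_wait_for_register_fw() polls until (read(reg) & mask) == ack: write the request bit, then wait for the acknowledgement appropriate to the case:

/* case           request        mask            ack
 * catastrophic   CAT_ERROR      CAT_ERROR       0 (HW must clear it itself)
 * normal         REQUEST_RESET  READY_TO_RESET  READY_TO_RESET
 * already ready  (returns 0 before writing anything)
 */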
static void gen8_engine_reset_cancel(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
_MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
intel_uncore_write_fw(engine->uncore,
RING_RESET_CTL(engine->mmio_base),
_MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
}
static int gen8_reset_engines(struct drm_i915_private *i915,
unsigned int engine_mask,
intel_engine_mask_t engine_mask,
unsigned int retry)
{
struct intel_engine_cs *engine;
const bool reset_non_ready = retry >= 1;
unsigned int tmp;
intel_engine_mask_t tmp;
int ret;
for_each_engine_masked(engine, i915, engine_mask, tmp) {
......@@ -537,7 +573,7 @@ static int gen8_reset_engines(struct drm_i915_private *i915,
}
typedef int (*reset_func)(struct drm_i915_private *,
unsigned int engine_mask,
intel_engine_mask_t engine_mask,
unsigned int retry);
static reset_func intel_get_gpu_reset(struct drm_i915_private *i915)
......@@ -558,7 +594,8 @@ static reset_func intel_get_gpu_reset(struct drm_i915_private *i915)
return NULL;
}
int intel_gpu_reset(struct drm_i915_private *i915, unsigned int engine_mask)
int intel_gpu_reset(struct drm_i915_private *i915,
intel_engine_mask_t engine_mask)
{
const int retries = engine_mask == ALL_ENGINES ? RESET_MAX_RETRIES : 1;
reset_func reset;
......@@ -646,7 +683,7 @@ static void reset_prepare_engine(struct intel_engine_cs *engine)
* written to the powercontext is undefined and so we may lose
* GPU state upon resume, i.e. fail to restart after a reset.
*/
intel_uncore_forcewake_get(&engine->i915->uncore, FORCEWAKE_ALL);
intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
engine->reset.prepare(engine);
}
......@@ -692,7 +729,8 @@ static void gt_revoke(struct drm_i915_private *i915)
revoke_mmaps(i915);
}
static int gt_reset(struct drm_i915_private *i915, unsigned int stalled_mask)
static int gt_reset(struct drm_i915_private *i915,
intel_engine_mask_t stalled_mask)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
......@@ -717,7 +755,7 @@ static int gt_reset(struct drm_i915_private *i915, unsigned int stalled_mask)
static void reset_finish_engine(struct intel_engine_cs *engine)
{
engine->reset.finish(engine);
intel_uncore_forcewake_put(&engine->i915->uncore, FORCEWAKE_ALL);
intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
}
struct i915_gpu_restart {
......@@ -951,7 +989,8 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
return result;
}
static int do_reset(struct drm_i915_private *i915, unsigned int stalled_mask)
static int do_reset(struct drm_i915_private *i915,
intel_engine_mask_t stalled_mask)
{
int err, i;
......@@ -986,7 +1025,7 @@ static int do_reset(struct drm_i915_private *i915, unsigned int stalled_mask)
* - re-init display
*/
void i915_reset(struct drm_i915_private *i915,
unsigned int stalled_mask,
intel_engine_mask_t stalled_mask,
const char *reason)
{
struct i915_gpu_error *error = &i915->gpu_error;
......@@ -1173,49 +1212,50 @@ static void i915_reset_device(struct drm_i915_private *i915,
kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
}
static void clear_register(struct drm_i915_private *dev_priv, i915_reg_t reg)
static void clear_register(struct intel_uncore *uncore, i915_reg_t reg)
{
I915_WRITE(reg, I915_READ(reg));
intel_uncore_rmw(uncore, reg, 0, 0);
}
void i915_clear_error_registers(struct drm_i915_private *dev_priv)
void i915_clear_error_registers(struct drm_i915_private *i915)
{
struct intel_uncore *uncore = &i915->uncore;
u32 eir;
if (!IS_GEN(dev_priv, 2))
clear_register(dev_priv, PGTBL_ER);
if (!IS_GEN(i915, 2))
clear_register(uncore, PGTBL_ER);
if (INTEL_GEN(dev_priv) < 4)
clear_register(dev_priv, IPEIR(RENDER_RING_BASE));
if (INTEL_GEN(i915) < 4)
clear_register(uncore, IPEIR(RENDER_RING_BASE));
else
clear_register(dev_priv, IPEIR_I965);
clear_register(uncore, IPEIR_I965);
clear_register(dev_priv, EIR);
eir = I915_READ(EIR);
clear_register(uncore, EIR);
eir = intel_uncore_read(uncore, EIR);
if (eir) {
/*
* some errors might have become stuck,
* mask them.
*/
DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
I915_WRITE(EMR, I915_READ(EMR) | eir);
I915_WRITE(IIR, I915_MASTER_ERROR_INTERRUPT);
rmw_set(uncore, EMR, eir);
intel_uncore_write(uncore, GEN2_IIR,
I915_MASTER_ERROR_INTERRUPT);
}
if (INTEL_GEN(dev_priv) >= 8) {
I915_WRITE(GEN8_RING_FAULT_REG,
I915_READ(GEN8_RING_FAULT_REG) & ~RING_FAULT_VALID);
POSTING_READ(GEN8_RING_FAULT_REG);
} else if (INTEL_GEN(dev_priv) >= 6) {
if (INTEL_GEN(i915) >= 8) {
rmw_clear(uncore, GEN8_RING_FAULT_REG, RING_FAULT_VALID);
intel_uncore_posting_read(uncore, GEN8_RING_FAULT_REG);
} else if (INTEL_GEN(i915) >= 6) {
struct intel_engine_cs *engine;
enum intel_engine_id id;
for_each_engine(engine, dev_priv, id) {
I915_WRITE(RING_FAULT_REG(engine),
I915_READ(RING_FAULT_REG(engine)) &
~RING_FAULT_VALID);
for_each_engine(engine, i915, id) {
rmw_clear(uncore,
RING_FAULT_REG(engine), RING_FAULT_VALID);
intel_uncore_posting_read(uncore,
RING_FAULT_REG(engine));
}
POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS0]));
}
}
......@@ -1233,14 +1273,14 @@ void i915_clear_error_registers(struct drm_i915_private *dev_priv)
* of a ring dump etc.).
*/
void i915_handle_error(struct drm_i915_private *i915,
u32 engine_mask,
intel_engine_mask_t engine_mask,
unsigned long flags,
const char *fmt, ...)
{
struct i915_gpu_error *error = &i915->gpu_error;
struct intel_engine_cs *engine;
intel_wakeref_t wakeref;
unsigned int tmp;
intel_engine_mask_t tmp;
char error_msg[80];
char *msg = NULL;
......
......@@ -11,13 +11,16 @@
#include <linux/types.h>
#include <linux/srcu.h>
#include "intel_engine_types.h"
struct drm_i915_private;
struct i915_request;
struct intel_engine_cs;
struct intel_guc;
__printf(4, 5)
void i915_handle_error(struct drm_i915_private *i915,
u32 engine_mask,
intel_engine_mask_t engine_mask,
unsigned long flags,
const char *fmt, ...);
#define I915_ERROR_CAPTURE BIT(0)
......@@ -25,7 +28,7 @@ void i915_handle_error(struct drm_i915_private *i915,
void i915_clear_error_registers(struct drm_i915_private *i915);
void i915_reset(struct drm_i915_private *i915,
unsigned int stalled_mask,
intel_engine_mask_t stalled_mask,
const char *reason);
int i915_reset_engine(struct intel_engine_cs *engine,
const char *reason);
......@@ -41,7 +44,8 @@ int i915_terminally_wedged(struct drm_i915_private *i915);
bool intel_has_gpu_reset(struct drm_i915_private *i915);
bool intel_has_reset_engine(struct drm_i915_private *i915);
int intel_gpu_reset(struct drm_i915_private *i915, u32 engine_mask);
int intel_gpu_reset(struct drm_i915_private *i915,
intel_engine_mask_t engine_mask);
int intel_reset_guc(struct drm_i915_private *i915);
......
......@@ -41,6 +41,7 @@ void i915_sched_node_init(struct i915_sched_node *node)
INIT_LIST_HEAD(&node->waiters_list);
INIT_LIST_HEAD(&node->link);
node->attr.priority = I915_PRIORITY_INVALID;
node->semaphores = 0;
node->flags = 0;
}
......@@ -63,7 +64,7 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
{
bool ret = false;
spin_lock(&schedule_lock);
spin_lock_irq(&schedule_lock);
if (!node_signaled(signal)) {
INIT_LIST_HEAD(&dep->dfs_link);
......@@ -73,14 +74,14 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
dep->flags = flags;
/* Keep track of whether anyone on this chain has a semaphore */
if (signal->flags & I915_SCHED_HAS_SEMAPHORE &&
if (signal->flags & I915_SCHED_HAS_SEMAPHORE_CHAIN &&
!node_started(signal))
node->flags |= I915_SCHED_HAS_SEMAPHORE;
node->flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
ret = true;
}
spin_unlock(&schedule_lock);
spin_unlock_irq(&schedule_lock);
return ret;
}
......@@ -107,7 +108,7 @@ void i915_sched_node_fini(struct i915_sched_node *node)
GEM_BUG_ON(!list_empty(&node->link));
spin_lock(&schedule_lock);
spin_lock_irq(&schedule_lock);
/*
* Everyone we depended upon (the fences we wait to be signaled)
......@@ -134,7 +135,7 @@ void i915_sched_node_fini(struct i915_sched_node *node)
i915_dependency_free(dep);
}
spin_unlock(&schedule_lock);
spin_unlock_irq(&schedule_lock);
}
static inline struct i915_priolist *to_priolist(struct rb_node *rb)
......@@ -355,7 +356,7 @@ static void __i915_schedule(struct i915_request *rq,
memset(&cache, 0, sizeof(cache));
engine = rq->engine;
spin_lock_irq(&engine->timeline.lock);
spin_lock(&engine->timeline.lock);
/* Fifo and depth-first replacement ensure our deps execute before us */
list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
......@@ -406,32 +407,33 @@ static void __i915_schedule(struct i915_request *rq,
tasklet_hi_schedule(&engine->execlists.tasklet);
}
spin_unlock_irq(&engine->timeline.lock);
spin_unlock(&engine->timeline.lock);
}
void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr)
{
spin_lock(&schedule_lock);
spin_lock_irq(&schedule_lock);
__i915_schedule(rq, attr);
spin_unlock(&schedule_lock);
spin_unlock_irq(&schedule_lock);
}
void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump)
{
struct i915_sched_attr attr;
unsigned long flags;
GEM_BUG_ON(bump & ~I915_PRIORITY_MASK);
if (READ_ONCE(rq->sched.attr.priority) == I915_PRIORITY_INVALID)
return;
spin_lock_bh(&schedule_lock);
spin_lock_irqsave(&schedule_lock, flags);
attr = rq->sched.attr;
attr.priority |= bump;
__i915_schedule(rq, &attr);
spin_unlock_bh(&schedule_lock);
spin_unlock_irqrestore(&schedule_lock, flags);
}
void __i915_priolist_free(struct i915_priolist *p)
......
......@@ -8,92 +8,10 @@
#define _I915_SCHEDULER_H_
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <uapi/drm/i915_drm.h>
struct drm_i915_private;
struct i915_request;
struct intel_engine_cs;
enum {
I915_PRIORITY_MIN = I915_CONTEXT_MIN_USER_PRIORITY - 1,
I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY,
I915_PRIORITY_MAX = I915_CONTEXT_MAX_USER_PRIORITY + 1,
I915_PRIORITY_INVALID = INT_MIN
};
#define I915_USER_PRIORITY_SHIFT 3
#define I915_USER_PRIORITY(x) ((x) << I915_USER_PRIORITY_SHIFT)
#define I915_PRIORITY_COUNT BIT(I915_USER_PRIORITY_SHIFT)
#define I915_PRIORITY_MASK (I915_PRIORITY_COUNT - 1)
#define I915_PRIORITY_WAIT ((u8)BIT(0))
#define I915_PRIORITY_NEWCLIENT ((u8)BIT(1))
#define I915_PRIORITY_NOSEMAPHORE ((u8)BIT(2))
#define __NO_PREEMPTION (I915_PRIORITY_WAIT)
struct i915_sched_attr {
/**
* @priority: execution and service priority
*
* All clients are equal, but some are more equal than others!
*
* Requests from a context with a greater (more positive) value of
* @priority will be executed before those with a lower @priority
* value, forming a simple QoS.
*
* The &drm_i915_private.kernel_context is assigned the lowest priority.
*/
int priority;
};
/*
* "People assume that time is a strict progression of cause to effect, but
* actually, from a nonlinear, non-subjective viewpoint, it's more like a big
* ball of wibbly-wobbly, timey-wimey ... stuff." -The Doctor, 2015
*
* Requests exist in a complex web of interdependencies. Each request
* has to wait for some other request to complete before it is ready to be run
* (e.g. we have to wait until the pixels have been rendered into a texture
* before we can copy from it). We track the readiness of a request in terms
* of fences, but we also need to keep the dependency tree for the lifetime
* of the request (beyond the life of an individual fence). We use the tree
* at various points to reorder the requests whilst keeping the requests
* in order with respect to their various dependencies.
*
* There is no active component to the "scheduler". As we know the dependency
* DAG of each request, we are able to insert it into a sorted queue when it
* is ready, and are able to reorder its portion of the graph to accommodate
* dynamic priority changes.
*/
struct i915_sched_node {
struct list_head signalers_list; /* those before us, we depend upon */
struct list_head waiters_list; /* those after us, they depend upon us */
struct list_head link;
struct i915_sched_attr attr;
unsigned int flags;
#define I915_SCHED_HAS_SEMAPHORE BIT(0)
};
struct i915_dependency {
struct i915_sched_node *signaler;
struct list_head signal_link;
struct list_head wait_link;
struct list_head dfs_link;
unsigned long flags;
#define I915_DEPENDENCY_ALLOC BIT(0)
};
struct i915_priolist {
struct list_head requests[I915_PRIORITY_COUNT];
struct rb_node node;
unsigned long used;
int priority;
};
#include "i915_scheduler_types.h"
#define priolist_for_each_request(it, plist, idx) \
for (idx = 0; idx < ARRAY_SIZE((plist)->requests); idx++) \
......
/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2018 Intel Corporation
*/
#ifndef _I915_SCHEDULER_TYPES_H_
#define _I915_SCHEDULER_TYPES_H_
#include <linux/list.h>
#include "i915_priolist_types.h"
#include "intel_engine_types.h"
struct drm_i915_private;
struct i915_request;
struct intel_engine_cs;
struct i915_sched_attr {
/**
* @priority: execution and service priority
*
* All clients are equal, but some are more equal than others!
*
* Requests from a context with a greater (more positive) value of
* @priority will be executed before those with a lower @priority
* value, forming a simple QoS.
*
* The &drm_i915_private.kernel_context is assigned the lowest priority.
*/
int priority;
};
/*
* "People assume that time is a strict progression of cause to effect, but
* actually, from a nonlinear, non-subjective viewpoint, it's more like a big
* ball of wibbly-wobbly, timey-wimey ... stuff." -The Doctor, 2015
*
* Requests exist in a complex web of interdependencies. Each request
* has to wait for some other request to complete before it is ready to be run
* (e.g. we have to wait until the pixels have been rendered into a texture
* before we can copy from it). We track the readiness of a request in terms
* of fences, but we also need to keep the dependency tree for the lifetime
* of the request (beyond the life of an individual fence). We use the tree
* at various points to reorder the requests whilst keeping the requests
* in order with respect to their various dependencies.
*
* There is no active component to the "scheduler". As we know the dependency
* DAG of each request, we are able to insert it into a sorted queue when it
* is ready, and are able to reorder its portion of the graph to accommodate
* dynamic priority changes.
*/
struct i915_sched_node {
struct list_head signalers_list; /* those before us, we depend upon */
struct list_head waiters_list; /* those after us, they depend upon us */
struct list_head link;
struct i915_sched_attr attr;
unsigned int flags;
#define I915_SCHED_HAS_SEMAPHORE_CHAIN BIT(0)
intel_engine_mask_t semaphores;
};
struct i915_dependency {
struct i915_sched_node *signaler;
struct list_head signal_link;
struct list_head wait_link;
struct list_head dfs_link;
unsigned long flags;
#define I915_DEPENDENCY_ALLOC BIT(0)
};
#endif /* _I915_SCHEDULER_TYPES_H_ */
......@@ -25,8 +25,10 @@
*/
#include <drm/i915_drm.h>
#include "intel_drv.h"
#include "i915_reg.h"
#include "intel_drv.h"
#include "intel_fbc.h"
static void i915_save_display(struct drm_i915_private *dev_priv)
{
......
......@@ -253,7 +253,6 @@ int i915_timeline_init(struct drm_i915_private *i915,
spin_lock_init(&timeline->lock);
mutex_init(&timeline->mutex);
INIT_ACTIVE_REQUEST(&timeline->barrier);
INIT_ACTIVE_REQUEST(&timeline->last_request);
INIT_LIST_HEAD(&timeline->requests);
......@@ -326,7 +325,6 @@ void i915_timeline_fini(struct i915_timeline *timeline)
{
GEM_BUG_ON(timeline->pin_count);
GEM_BUG_ON(!list_empty(&timeline->requests));
GEM_BUG_ON(i915_active_request_isset(&timeline->barrier));
i915_syncmap_free(&timeline->sync);
......
......@@ -27,6 +27,7 @@
#include <linux/lockdep.h>
#include "i915_active.h"
#include "i915_syncmap.h"
#include "i915_timeline_types.h"
......@@ -109,19 +110,4 @@ void i915_timelines_init(struct drm_i915_private *i915);
void i915_timelines_park(struct drm_i915_private *i915);
void i915_timelines_fini(struct drm_i915_private *i915);
/**
* i915_timeline_set_barrier - orders submission between different timelines
* @timeline: timeline to set the barrier on
* @rq: request after which new submissions can proceed
*
* Sets the passed in request as the serialization point for all subsequent
* submissions on @timeline. Subsequent requests will not be submitted to GPU
* until the barrier has been completed.
*/
static inline int
i915_timeline_set_barrier(struct i915_timeline *tl, struct i915_request *rq)
{
return i915_active_request_set(&tl->barrier, rq);
}
#endif
......@@ -9,9 +9,10 @@
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include "i915_active.h"
#include "i915_active_types.h"
struct drm_i915_private;
struct i915_vma;
......@@ -60,16 +61,6 @@ struct i915_timeline {
*/
struct i915_syncmap *sync;
/**
* Barrier provides the ability to serialize ordering between different
* timelines.
*
* Users can call i915_timeline_set_barrier which will make all
* subsequent submissions to this timeline be executed only after the
* barrier has been completed.
*/
struct i915_active_request barrier;
struct list_head link;
struct drm_i915_private *i915;
......
......@@ -25,9 +25,13 @@
* Jani Nikula <jani.nikula@intel.com>
*/
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_mipi_dsi.h>
#include "intel_connector.h"
#include "intel_ddi.h"
#include "intel_dsi.h"
#include "intel_panel.h"
static inline int header_credits_available(struct drm_i915_private *dev_priv,
enum transcoder dsi_trans)
......@@ -1148,13 +1152,11 @@ static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
intel_wakeref_t wakeref;
wakeref = fetch_and_zero(&intel_dsi->io_wakeref[port]);
if (wakeref) {
intel_display_power_put(dev_priv,
port == PORT_A ?
POWER_DOMAIN_PORT_DDI_A_IO :
POWER_DOMAIN_PORT_DDI_B_IO,
wakeref);
}
intel_display_power_put(dev_priv,
port == PORT_A ?
POWER_DOMAIN_PORT_DDI_A_IO :
POWER_DOMAIN_PORT_DDI_B_IO,
wakeref);
}
/* set mode to DDI */
......
......@@ -35,6 +35,8 @@
#include <drm/drm_plane_helper.h>
#include "intel_drv.h"
#include "intel_hdcp.h"
#include "intel_sprite.h"
/**
* intel_digital_connector_atomic_get_property - hook for connector->atomic_get_property.
......
......@@ -35,7 +35,10 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
#include "intel_atomic_plane.h"
#include "intel_drv.h"
#include "intel_pm.h"
#include "intel_sprite.h"
struct intel_plane *intel_plane_alloc(void)
{
......
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2019 Intel Corporation
*/
#ifndef __INTEL_ATOMIC_PLANE_H__
#define __INTEL_ATOMIC_PLANE_H__
struct drm_plane;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
struct intel_plane;
struct intel_plane_state;
extern const struct drm_plane_helper_funcs intel_plane_helper_funcs;
void intel_update_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
void intel_update_slave(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
void intel_disable_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state);
struct intel_plane *intel_plane_alloc(void);
void intel_plane_free(struct intel_plane *plane);
struct drm_plane_state *intel_plane_duplicate_state(struct drm_plane *plane);
void intel_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state);
void skl_update_planes_on_crtc(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void i9xx_update_planes_on_crtc(struct intel_atomic_state *state,
struct intel_crtc *crtc);
int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *crtc_state,
const struct intel_plane_state *old_plane_state,
struct intel_plane_state *intel_state);
#endif /* __INTEL_ATOMIC_PLANE_H__ */
......@@ -21,14 +21,16 @@
* DEALINGS IN THE SOFTWARE.
*/
#include <linux/kernel.h>
#include <linux/component.h>
#include <linux/kernel.h>
#include <drm/drm_edid.h>
#include <drm/i915_component.h>
#include <drm/intel_lpe_audio.h>
#include "intel_drv.h"
#include <drm/drm_edid.h>
#include "i915_drv.h"
#include "intel_audio.h"
#include "intel_drv.h"
/**
* DOC: High Definition Audio over HDMI and Display Port
......@@ -741,18 +743,78 @@ void intel_init_audio_hooks(struct drm_i915_private *dev_priv)
}
}
static void glk_force_audio_cdclk(struct drm_i915_private *dev_priv,
bool enable)
{
struct drm_modeset_acquire_ctx ctx;
struct drm_atomic_state *state;
int ret;
drm_modeset_acquire_init(&ctx, 0);
state = drm_atomic_state_alloc(&dev_priv->drm);
if (WARN_ON(!state))
return;
state->acquire_ctx = &ctx;
retry:
to_intel_atomic_state(state)->cdclk.force_min_cdclk_changed = true;
to_intel_atomic_state(state)->cdclk.force_min_cdclk =
enable ? 2 * 96000 : 0;
/*
* Protects dev_priv->cdclk.force_min_cdclk
* Need to lock this here in case we have no active pipes
* and thus wouldn't lock it during the commit otherwise.
*/
ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
&ctx);
if (!ret)
ret = drm_atomic_commit(state);
if (ret == -EDEADLK) {
drm_atomic_state_clear(state);
drm_modeset_backoff(&ctx);
goto retry;
}
WARN_ON(ret);
drm_atomic_state_put(state);
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
}
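glk_force_audio_cdclk() above is a textbook instance of the DRM modeset-lock backoff dance: any acquisition under a drm_modeset_acquire_ctx may fail with -EDEADLK, and the only correct response is to clear the atomic state, back off (dropping and re-acquiring the contended locks), and replay the whole sequence. Stripped of the CDCLK specifics, the reusable skeleton looks like the sketch below, using only the public drm_modeset_lock/drm_atomic APIs:

#include <drm/drm_atomic.h>
#include <drm/drm_modeset_lock.h>

static void commit_with_backoff(struct drm_atomic_state *state)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);
	state->acquire_ctx = &ctx;
retry:
	ret = drm_atomic_commit(state); /* may take more locks internally */
	if (ret == -EDEADLK) {
		/* Contended with another ctx: drop everything and replay. */
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}
	drm_atomic_state_put(state);
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}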
static unsigned long i915_audio_component_get_power(struct device *kdev)
{
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
intel_wakeref_t ret;
/* Catch potential impedance mismatches before they occur! */
BUILD_BUG_ON(sizeof(intel_wakeref_t) > sizeof(unsigned long));
return intel_display_power_get(kdev_to_i915(kdev), POWER_DOMAIN_AUDIO);
ret = intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
/* Force CDCLK to 2*BCLK as long as we need audio to be powered. */
if (dev_priv->audio_power_refcount++ == 0)
if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
glk_force_audio_cdclk(dev_priv, true);
return ret;
}
static void i915_audio_component_put_power(struct device *kdev,
unsigned long cookie)
{
intel_display_power_put(kdev_to_i915(kdev), POWER_DOMAIN_AUDIO, cookie);
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
/* Stop forcing CDCLK to 2*BCLK if no need for audio to be powered. */
if (--dev_priv->audio_power_refcount == 0)
if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
glk_force_audio_cdclk(dev_priv, false);
intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO, cookie);
}
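The get/put pair above gates an expensive side effect (forcing CDCLK to 2*BCLK on GLK/CNL) on the 0->1 and 1->0 transitions of audio_power_refcount, so nested audio power requests stay cheap. A minimal sketch of the idiom, assuming the caller serializes get/put as the audio component framework does here; enable_hw()/disable_hw() are hypothetical stand-ins:

static int power_refcount; /* serialized by the caller */

static void enable_hw(void) { /* e.g. force the CDCLK floor up */ }
static void disable_hw(void) { /* e.g. release the CDCLK floor */ }

static void power_get(void)
{
	if (power_refcount++ == 0) /* 0 -> 1: first user pays */
		enable_hw();
}

static void power_put(void)
{
	if (--power_refcount == 0) /* 1 -> 0: last user cleans up */
		disable_hw();
}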
static void i915_audio_component_codec_wake_override(struct device *kdev,
......@@ -985,7 +1047,7 @@ static const struct component_ops i915_audio_component_bind_ops = {
* We ignore any error during registration and continue with reduced
* functionality (i.e. without HDMI audio).
*/
void i915_audio_component_init(struct drm_i915_private *dev_priv)
static void i915_audio_component_init(struct drm_i915_private *dev_priv)
{
int ret;
......@@ -1008,7 +1070,7 @@ void i915_audio_component_init(struct drm_i915_private *dev_priv)
* Deregisters the audio component, breaking any existing binding to the
* corresponding snd_hda_intel driver's master component.
*/
void i915_audio_component_cleanup(struct drm_i915_private *dev_priv)
static void i915_audio_component_cleanup(struct drm_i915_private *dev_priv)
{
if (!dev_priv->audio_component_registered)
return;
......
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2019 Intel Corporation
*/
#ifndef __INTEL_AUDIO_H__
#define __INTEL_AUDIO_H__
struct drm_connector_state;
struct drm_i915_private;
struct intel_crtc_state;
struct intel_encoder;
void intel_init_audio_hooks(struct drm_i915_private *dev_priv);
void intel_audio_codec_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
void intel_audio_codec_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state);
void intel_audio_init(struct drm_i915_private *dev_priv);
void intel_audio_deinit(struct drm_i915_private *dev_priv);
#endif /* __INTEL_AUDIO_H__ */
......@@ -27,8 +27,6 @@
#include "i915_drv.h"
#define task_asleep(tsk) ((tsk)->state & TASK_NORMAL && !(tsk)->on_rq)
static void irq_enable(struct intel_engine_cs *engine)
{
if (!engine->irq_enable)
......@@ -82,7 +80,7 @@ static inline bool __request_completed(const struct i915_request *rq)
return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno);
}
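__request_completed() bottoms out in i915_seqno_passed(), the usual wraparound-safe sequence-number comparison: subtract as u32 and reinterpret as signed, so a seqno just past the 32-bit wrap still compares as later. A self-contained illustration (plain userspace C, not driver code):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Same shape as i915_seqno_passed(): has seq1 caught up with seq2? */
static bool seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
	assert(seqno_passed(10, 5));          /* ordinary ordering */
	assert(!seqno_passed(5, 10));
	assert(seqno_passed(2, 0xfffffffeu)); /* holds across the wrap */
	return 0;
}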
bool intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
{
struct intel_breadcrumbs *b = &engine->breadcrumbs;
struct intel_context *ce, *cn;
......@@ -146,19 +144,13 @@ bool intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
dma_fence_signal(&rq->fence);
i915_request_put(rq);
}
return !list_empty(&signal);
}
bool intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine)
void intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine)
{
bool result;
local_irq_disable();
result = intel_engine_breadcrumbs_irq(engine);
intel_engine_breadcrumbs_irq(engine);
local_irq_enable();
return result;
}
static void signal_irq_work(struct irq_work *work)
......
......@@ -21,6 +21,7 @@
* DEALINGS IN THE SOFTWARE.
*/
#include "intel_cdclk.h"
#include "intel_drv.h"
/**
......@@ -517,7 +518,8 @@ static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
}
static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
const struct intel_cdclk_state *cdclk_state)
const struct intel_cdclk_state *cdclk_state,
enum pipe pipe)
{
int cdclk = cdclk_state->cdclk;
u32 val, cmd = cdclk_state->voltage_level;
......@@ -599,7 +601,8 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
}
static void chv_set_cdclk(struct drm_i915_private *dev_priv,
const struct intel_cdclk_state *cdclk_state)
const struct intel_cdclk_state *cdclk_state,
enum pipe pipe)
{
int cdclk = cdclk_state->cdclk;
u32 val, cmd = cdclk_state->voltage_level;
......@@ -698,7 +701,8 @@ static void bdw_get_cdclk(struct drm_i915_private *dev_priv,
}
static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
const struct intel_cdclk_state *cdclk_state)
const struct intel_cdclk_state *cdclk_state,
enum pipe pipe)
{
int cdclk = cdclk_state->cdclk;
u32 val;
......@@ -988,7 +992,8 @@ static void skl_dpll0_disable(struct drm_i915_private *dev_priv)
}
static void skl_set_cdclk(struct drm_i915_private *dev_priv,
const struct intel_cdclk_state *cdclk_state)
const struct intel_cdclk_state *cdclk_state,
enum pipe pipe)
{
int cdclk = cdclk_state->cdclk;
int vco = cdclk_state->vco;
......@@ -1124,16 +1129,7 @@ static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
dev_priv->cdclk.hw.vco = -1;
}
/**
* skl_init_cdclk - Initialize CDCLK on SKL
* @dev_priv: i915 device
*
* Initialize CDCLK for SKL and derivatives. This is generally
* done only during the display core initialization sequence,
* after which the DMC will take care of turning CDCLK off/on
* as needed.
*/
void skl_init_cdclk(struct drm_i915_private *dev_priv)
static void skl_init_cdclk(struct drm_i915_private *dev_priv)
{
struct intel_cdclk_state cdclk_state;
......@@ -1159,17 +1155,10 @@ void skl_init_cdclk(struct drm_i915_private *dev_priv)
cdclk_state.cdclk = skl_calc_cdclk(0, cdclk_state.vco);
cdclk_state.voltage_level = skl_calc_voltage_level(cdclk_state.cdclk);
skl_set_cdclk(dev_priv, &cdclk_state);
skl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
}
/**
* skl_uninit_cdclk - Uninitialize CDCLK on SKL
* @dev_priv: i915 device
*
* Uninitialize CDCLK for SKL and derivatives. This is done only
* during the display core uninitialization sequence.
*/
void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
static void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
{
struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw;
......@@ -1177,7 +1166,7 @@ void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
cdclk_state.vco = 0;
cdclk_state.voltage_level = skl_calc_voltage_level(cdclk_state.cdclk);
skl_set_cdclk(dev_priv, &cdclk_state);
skl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
}
static int bxt_calc_cdclk(int min_cdclk)
......@@ -1356,7 +1345,8 @@ static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco)
}
static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
const struct intel_cdclk_state *cdclk_state)
const struct intel_cdclk_state *cdclk_state,
enum pipe pipe)
{
int cdclk = cdclk_state->cdclk;
int vco = cdclk_state->vco;
......@@ -1409,11 +1399,10 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
bxt_de_pll_enable(dev_priv, vco);
val = divider | skl_cdclk_decimal(cdclk);
/*
* FIXME if only the cd2x divider needs changing, it could be done
* without shutting off the pipe (if only one pipe is active).
*/
val |= BXT_CDCLK_CD2X_PIPE_NONE;
if (pipe == INVALID_PIPE)
val |= BXT_CDCLK_CD2X_PIPE_NONE;
else
val |= BXT_CDCLK_CD2X_PIPE(pipe);
/*
* Disable SSA Precharge when CD clock frequency < 500 MHz,
* enable otherwise.
......@@ -1422,6 +1411,9 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
I915_WRITE(CDCLK_CTL, val);
if (pipe != INVALID_PIPE)
intel_wait_for_vblank(dev_priv, pipe);
mutex_lock(&dev_priv->pcu_lock);
/*
* The timeout isn't specified, the 2ms used here is based on
......@@ -1491,16 +1483,7 @@ static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
dev_priv->cdclk.hw.vco = -1;
}
/**
* bxt_init_cdclk - Initialize CDCLK on BXT
* @dev_priv: i915 device
*
* Initialize CDCLK for BXT and derivatives. This is generally
* done only during the display core initialization sequence,
* after which the DMC will take care of turning CDCLK off/on
* as needed.
*/
void bxt_init_cdclk(struct drm_i915_private *dev_priv)
static void bxt_init_cdclk(struct drm_i915_private *dev_priv)
{
struct intel_cdclk_state cdclk_state;
......@@ -1526,17 +1509,10 @@ void bxt_init_cdclk(struct drm_i915_private *dev_priv)
}
cdclk_state.voltage_level = bxt_calc_voltage_level(cdclk_state.cdclk);
bxt_set_cdclk(dev_priv, &cdclk_state);
bxt_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
}
/**
* bxt_uninit_cdclk - Uninitialize CDCLK on BXT
* @dev_priv: i915 device
*
* Uninitialize CDCLK for BXT and derivatives. This is done only
* during the display core uninitialization sequence.
*/
void bxt_uninit_cdclk(struct drm_i915_private *dev_priv)
static void bxt_uninit_cdclk(struct drm_i915_private *dev_priv)
{
struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw;
......@@ -1544,7 +1520,7 @@ void bxt_uninit_cdclk(struct drm_i915_private *dev_priv)
cdclk_state.vco = 0;
cdclk_state.voltage_level = bxt_calc_voltage_level(cdclk_state.cdclk);
bxt_set_cdclk(dev_priv, &cdclk_state);
bxt_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
}
static int cnl_calc_cdclk(int min_cdclk)
......@@ -1664,7 +1640,8 @@ static void cnl_cdclk_pll_enable(struct drm_i915_private *dev_priv, int vco)
}
static void cnl_set_cdclk(struct drm_i915_private *dev_priv,
const struct intel_cdclk_state *cdclk_state)
const struct intel_cdclk_state *cdclk_state,
enum pipe pipe)
{
int cdclk = cdclk_state->cdclk;
int vco = cdclk_state->vco;
......@@ -1705,13 +1682,15 @@ static void cnl_set_cdclk(struct drm_i915_private *dev_priv,
cnl_cdclk_pll_enable(dev_priv, vco);
val = divider | skl_cdclk_decimal(cdclk);
/*
* FIXME if only the cd2x divider needs changing, it could be done
* without shutting off the pipe (if only one pipe is active).
*/
val |= BXT_CDCLK_CD2X_PIPE_NONE;
if (pipe == INVALID_PIPE)
val |= BXT_CDCLK_CD2X_PIPE_NONE;
else
val |= BXT_CDCLK_CD2X_PIPE(pipe);
I915_WRITE(CDCLK_CTL, val);
if (pipe != INVALID_PIPE)
intel_wait_for_vblank(dev_priv, pipe);
/* inform PCU of the change */
mutex_lock(&dev_priv->pcu_lock);
sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
......@@ -1848,7 +1827,8 @@ static int icl_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
}
static void icl_set_cdclk(struct drm_i915_private *dev_priv,
const struct intel_cdclk_state *cdclk_state)
const struct intel_cdclk_state *cdclk_state,
enum pipe pipe)
{
unsigned int cdclk = cdclk_state->cdclk;
unsigned int vco = cdclk_state->vco;
......@@ -1873,6 +1853,11 @@ static void icl_set_cdclk(struct drm_i915_private *dev_priv,
if (dev_priv->cdclk.hw.vco != vco)
cnl_cdclk_pll_enable(dev_priv, vco);
/*
* On ICL CD2X_DIV can only be 1, so we'll never end up changing the
* divider here synchronized to a pipe while CDCLK is on, nor will we
* need the corresponding vblank wait.
*/
I915_WRITE(CDCLK_CTL, ICL_CDCLK_CD2X_PIPE_NONE |
skl_cdclk_decimal(cdclk));
......@@ -1960,16 +1945,7 @@ static void icl_get_cdclk(struct drm_i915_private *dev_priv,
icl_calc_voltage_level(cdclk_state->cdclk);
}
/**
* icl_init_cdclk - Initialize CDCLK on ICL
* @dev_priv: i915 device
*
* Initialize CDCLK for ICL. This consists mainly of initializing
* dev_priv->cdclk.hw and sanitizing the state of the hardware if needed. This
* is generally done only during the display core initialization sequence, after
* which the DMC will take care of turning CDCLK off/on as needed.
*/
void icl_init_cdclk(struct drm_i915_private *dev_priv)
static void icl_init_cdclk(struct drm_i915_private *dev_priv)
{
struct intel_cdclk_state sanitized_state;
u32 val;
......@@ -2003,17 +1979,10 @@ void icl_init_cdclk(struct drm_i915_private *dev_priv)
sanitized_state.voltage_level =
icl_calc_voltage_level(sanitized_state.cdclk);
icl_set_cdclk(dev_priv, &sanitized_state);
icl_set_cdclk(dev_priv, &sanitized_state, INVALID_PIPE);
}
/**
* icl_uninit_cdclk - Uninitialize CDCLK on ICL
* @dev_priv: i915 device
*
* Uninitialize CDCLK for ICL. This is done only during the display core
* uninitialization sequence.
*/
void icl_uninit_cdclk(struct drm_i915_private *dev_priv)
static void icl_uninit_cdclk(struct drm_i915_private *dev_priv)
{
struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw;
......@@ -2021,19 +1990,10 @@ void icl_uninit_cdclk(struct drm_i915_private *dev_priv)
cdclk_state.vco = 0;
cdclk_state.voltage_level = icl_calc_voltage_level(cdclk_state.cdclk);
icl_set_cdclk(dev_priv, &cdclk_state);
icl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
}
/**
* cnl_init_cdclk - Initialize CDCLK on CNL
* @dev_priv: i915 device
*
* Initialize CDCLK for CNL. This is generally
* done only during the display core initialization sequence,
* after which the DMC will take care of turning CDCLK off/on
* as needed.
*/
void cnl_init_cdclk(struct drm_i915_private *dev_priv)
static void cnl_init_cdclk(struct drm_i915_private *dev_priv)
{
struct intel_cdclk_state cdclk_state;
......@@ -2049,17 +2009,10 @@ void cnl_init_cdclk(struct drm_i915_private *dev_priv)
cdclk_state.vco = cnl_cdclk_pll_vco(dev_priv, cdclk_state.cdclk);
cdclk_state.voltage_level = cnl_calc_voltage_level(cdclk_state.cdclk);
cnl_set_cdclk(dev_priv, &cdclk_state);
cnl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
}
/**
* cnl_uninit_cdclk - Uninitialize CDCLK on CNL
* @dev_priv: i915 device
*
* Uninitialize CDCLK for CNL. This is done only
* during the display core uninitialization sequence.
*/
void cnl_uninit_cdclk(struct drm_i915_private *dev_priv)
static void cnl_uninit_cdclk(struct drm_i915_private *dev_priv)
{
struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw;
......@@ -2067,7 +2020,47 @@ void cnl_uninit_cdclk(struct drm_i915_private *dev_priv)
cdclk_state.vco = 0;
cdclk_state.voltage_level = cnl_calc_voltage_level(cdclk_state.cdclk);
cnl_set_cdclk(dev_priv, &cdclk_state);
cnl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
}
/**
* intel_cdclk_init - Initialize CDCLK
* @i915: i915 device
*
* Initialize CDCLK. This consists mainly of initializing dev_priv->cdclk.hw and
* sanitizing the state of the hardware if needed. This is generally done only
* during the display core initialization sequence, after which the DMC will
* take care of turning CDCLK off/on as needed.
*/
void intel_cdclk_init(struct drm_i915_private *i915)
{
if (INTEL_GEN(i915) >= 11)
icl_init_cdclk(i915);
else if (IS_CANNONLAKE(i915))
cnl_init_cdclk(i915);
else if (IS_GEN9_BC(i915))
skl_init_cdclk(i915);
else if (IS_GEN9_LP(i915))
bxt_init_cdclk(i915);
}
/**
* intel_cdclk_uninit - Uninitialize CDCLK
* @i915: i915 device
*
* Uninitialize CDCLK. This is done only during the display core
* uninitialization sequence.
*/
void intel_cdclk_uninit(struct drm_i915_private *i915)
{
if (INTEL_GEN(i915) >= 11)
icl_uninit_cdclk(i915);
else if (IS_CANNONLAKE(i915))
cnl_uninit_cdclk(i915);
else if (IS_GEN9_BC(i915))
skl_uninit_cdclk(i915);
else if (IS_GEN9_LP(i915))
bxt_uninit_cdclk(i915);
}
/**
......@@ -2086,6 +2079,28 @@ bool intel_cdclk_needs_modeset(const struct intel_cdclk_state *a,
a->ref != b->ref;
}
/**
* intel_cdclk_needs_cd2x_update - Determine if two CDCLK states require a cd2x divider update
* @dev_priv: Not a CDCLK state, it's the drm_i915_private!
* @a: first CDCLK state
* @b: second CDCLK state
*
* Returns:
* True if the CDCLK states require just a cd2x divider update, false if not.
*/
bool intel_cdclk_needs_cd2x_update(struct drm_i915_private *dev_priv,
const struct intel_cdclk_state *a,
const struct intel_cdclk_state *b)
{
/* Older hw doesn't have the capability */
if (INTEL_GEN(dev_priv) < 10 && !IS_GEN9_LP(dev_priv))
return false;
return a->cdclk != b->cdclk &&
a->vco == b->vco &&
a->ref == b->ref;
}
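The predicate complements intel_cdclk_needs_modeset(): identical PLL settings (vco, ref) with a different cdclk means only the CD2X post-divider moved, which the new code can reprogram against a live pipe instead of forcing a full modeset. A hypothetical caller, for illustration only; the real decision is made in the atomic-check code, not in this hunk:

static bool cdclk_change_needs_modeset(struct drm_i915_private *dev_priv,
				       const struct intel_cdclk_state *old_state,
				       const struct intel_cdclk_state *new_state)
{
	/* Same vco/ref, new cdclk: retune the CD2X divider live. */
	if (intel_cdclk_needs_cd2x_update(dev_priv, old_state, new_state))
		return false;

	/* Anything touching the PLL still needs the pipes shut down. */
	return intel_cdclk_needs_modeset(old_state, new_state);
}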
/**
* intel_cdclk_changed - Determine if two CDCLK states are different
* @a: first CDCLK state
......@@ -2101,6 +2116,26 @@ bool intel_cdclk_changed(const struct intel_cdclk_state *a,
a->voltage_level != b->voltage_level;
}
/**
* intel_cdclk_swap_state - make atomic CDCLK configuration effective
* @state: atomic state
*
* This is the CDCLK version of drm_atomic_helper_swap_state() since the
* helper does not handle driver-specific global state.
*
* Similarly to the atomic helpers this function does a complete swap,
* i.e. it also puts the old state into @state. This is used by the commit
* code to determine how CDCLK has changed (for instance did it increase or
* decrease).
*/
void intel_cdclk_swap_state(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
swap(state->cdclk.logical, dev_priv->cdclk.logical);
swap(state->cdclk.actual, dev_priv->cdclk.actual);
}
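Because the swap is symmetric, dev_priv ends up holding the state being committed while @state keeps what was committed before, which is exactly what lets the commit code ask whether CDCLK went up or down. A toy illustration of the complete-swap semantics, with made-up frequencies:

#include <stdio.h>

/* Local stand-in for the kernel's swap() helper. */
#define swap(a, b) do { typeof(a) __t = (a); (a) = (b); (b) = __t; } while (0)

int main(void)
{
	int committed = 307200; /* kHz, made-up old CDCLK */
	int pending = 652800;   /* kHz, made-up new CDCLK */

	swap(pending, committed);

	/* committed now carries the new value; pending keeps the old one,
	 * so "did it increase?" is still answerable after the swap. */
	printf("new=%d old=%d increased=%d\n",
	       committed, pending, committed > pending);
	return 0;
}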
void intel_dump_cdclk_state(const struct intel_cdclk_state *cdclk_state,
const char *context)
{
......@@ -2114,12 +2149,14 @@ void intel_dump_cdclk_state(const struct intel_cdclk_state *cdclk_state,
* intel_set_cdclk - Push the CDCLK state to the hardware
* @dev_priv: i915 device
* @cdclk_state: new CDCLK state
* @pipe: pipe with which to synchronize the update
*
* Program the hardware based on the passed in CDCLK state,
* if necessary.
*/
void intel_set_cdclk(struct drm_i915_private *dev_priv,
const struct intel_cdclk_state *cdclk_state)
static void intel_set_cdclk(struct drm_i915_private *dev_priv,
const struct intel_cdclk_state *cdclk_state,
enum pipe pipe)
{
if (!intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_state))
return;
......@@ -2129,7 +2166,7 @@ void intel_set_cdclk(struct drm_i915_private *dev_priv,
intel_dump_cdclk_state(cdclk_state, "Changing CDCLK to");
dev_priv->display.set_cdclk(dev_priv, cdclk_state);
dev_priv->display.set_cdclk(dev_priv, cdclk_state, pipe);
if (WARN(intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_state),
"cdclk state doesn't match!\n")) {
......@@ -2138,6 +2175,46 @@ void intel_set_cdclk(struct drm_i915_private *dev_priv,
}
}
/**
* intel_set_cdclk_pre_plane_update - Push the CDCLK state to the hardware
* @dev_priv: i915 device
* @old_state: old CDCLK state
* @new_state: new CDCLK state
* @pipe: pipe with which to synchronize the update
*
* Program the hardware before updating the HW plane state based on the passed
* in CDCLK state, if necessary.
*/
void
intel_set_cdclk_pre_plane_update(struct drm_i915_private *dev_priv,
const struct intel_cdclk_state *old_state,
const struct intel_cdclk_state *new_state,
enum pipe pipe)
{
if (pipe == INVALID_PIPE || old_state->cdclk <= new_state->cdclk)
intel_set_cdclk(dev_priv, new_state, pipe);
}
/**
* intel_set_cdclk_post_plane_update - Push the CDCLK state to the hardware
* @dev_priv: i915 device
* @old_state: old CDCLK state
* @new_state: new CDCLK state
* @pipe: pipe with which to synchronize the update
*
* Program the hardware after updating the HW plane state based on the passed
* in CDCLK state, if necessary.
*/
void
intel_set_cdclk_post_plane_update(struct drm_i915_private *dev_priv,
const struct intel_cdclk_state *old_state,
const struct intel_cdclk_state *new_state,
enum pipe pipe)
{
if (pipe != INVALID_PIPE && old_state->cdclk > new_state->cdclk)
intel_set_cdclk(dev_priv, new_state, pipe);
}
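Together the pair encodes a single invariant: a pipe must never run with less CDCLK than it currently needs, so frequency raises land before the plane update and drops after it (with pipe == INVALID_PIPE the pre-hook simply programs the change immediately, unsynchronized). A hedged sketch of how a commit path would bracket the plane work; update_planes() is a placeholder, not a function from this patch:

/* Placeholder for the driver's real plane-commit step. */
static void update_planes(struct drm_i915_private *dev_priv, enum pipe pipe);

static void commit_crtc(struct drm_i915_private *dev_priv,
			const struct intel_cdclk_state *old_state,
			const struct intel_cdclk_state *new_state,
			enum pipe pipe)
{
	/* A raise (old <= new) must be visible before the planes change. */
	intel_set_cdclk_pre_plane_update(dev_priv, old_state, new_state, pipe);

	update_planes(dev_priv, pipe);

	/* A drop (old > new) is only safe once nothing depends on it. */
	intel_set_cdclk_post_plane_update(dev_priv, old_state, new_state, pipe);
}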
static int intel_pixel_rate_to_cdclk(struct drm_i915_private *dev_priv,
int pixel_rate)
{
......@@ -2188,19 +2265,8 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
/*
* According to BSpec, "The CD clock frequency must be at least twice
* the frequency of the Azalia BCLK." and BCLK is 96 MHz by default.
*
* FIXME: Check the actual, not default, BCLK being used.
*
* FIXME: This does not depend on ->has_audio because the higher CDCLK
* is required for audio probe, also when there are no audio capable
* displays connected at probe time. This leads to unnecessarily high
* CDCLK when audio is not required.
*
* FIXME: This limit is only applied when there are displays connected
* at probe time. If we probe without displays, we'll still end up using
* the platform minimum CDCLK, failing audio probe.
*/
if (INTEL_GEN(dev_priv) >= 9)
if (crtc_state->has_audio && INTEL_GEN(dev_priv) >= 9)
min_cdclk = max(2 * 96000, min_cdclk);
/*
......@@ -2240,7 +2306,7 @@ static int intel_compute_min_cdclk(struct drm_atomic_state *state)
intel_state->min_cdclk[i] = min_cdclk;
}
min_cdclk = 0;
min_cdclk = intel_state->cdclk.force_min_cdclk;
for_each_pipe(dev_priv, pipe)
min_cdclk = max(intel_state->min_cdclk[pipe], min_cdclk);
......@@ -2301,7 +2367,8 @@ static int vlv_modeset_calc_cdclk(struct drm_atomic_state *state)
vlv_calc_voltage_level(dev_priv, cdclk);
if (!intel_state->active_crtcs) {
cdclk = vlv_calc_cdclk(dev_priv, 0);
cdclk = vlv_calc_cdclk(dev_priv,
intel_state->cdclk.force_min_cdclk);
intel_state->cdclk.actual.cdclk = cdclk;
intel_state->cdclk.actual.voltage_level =
......@@ -2334,7 +2401,7 @@ static int bdw_modeset_calc_cdclk(struct drm_atomic_state *state)
bdw_calc_voltage_level(cdclk);
if (!intel_state->active_crtcs) {
cdclk = bdw_calc_cdclk(0);
cdclk = bdw_calc_cdclk(intel_state->cdclk.force_min_cdclk);
intel_state->cdclk.actual.cdclk = cdclk;
intel_state->cdclk.actual.voltage_level =
......@@ -2406,7 +2473,7 @@ static int skl_modeset_calc_cdclk(struct drm_atomic_state *state)
skl_calc_voltage_level(cdclk);
if (!intel_state->active_crtcs) {
cdclk = skl_calc_cdclk(0, vco);
cdclk = skl_calc_cdclk(intel_state->cdclk.force_min_cdclk, vco);
intel_state->cdclk.actual.vco = vco;
intel_state->cdclk.actual.cdclk = cdclk;
......@@ -2445,10 +2512,10 @@ static int bxt_modeset_calc_cdclk(struct drm_atomic_state *state)
if (!intel_state->active_crtcs) {
if (IS_GEMINILAKE(dev_priv)) {
cdclk = glk_calc_cdclk(0);
cdclk = glk_calc_cdclk(intel_state->cdclk.force_min_cdclk);
vco = glk_de_pll_vco(dev_priv, cdclk);
} else {
cdclk = bxt_calc_cdclk(0);
cdclk = bxt_calc_cdclk(intel_state->cdclk.force_min_cdclk);
vco = bxt_de_pll_vco(dev_priv, cdclk);
}
......@@ -2484,7 +2551,7 @@ static int cnl_modeset_calc_cdclk(struct drm_atomic_state *state)
cnl_compute_min_voltage_level(intel_state));
if (!intel_state->active_crtcs) {
cdclk = cnl_calc_cdclk(0);
cdclk = cnl_calc_cdclk(intel_state->cdclk.force_min_cdclk);
vco = cnl_cdclk_pll_vco(dev_priv, cdclk);
intel_state->cdclk.actual.vco = vco;
......@@ -2520,7 +2587,7 @@ static int icl_modeset_calc_cdclk(struct drm_atomic_state *state)
cnl_compute_min_voltage_level(intel_state));
if (!intel_state->active_crtcs) {
cdclk = icl_calc_cdclk(0, ref);
cdclk = icl_calc_cdclk(intel_state->cdclk.force_min_cdclk, ref);
vco = icl_calc_cdclk_pll_vco(dev_priv, cdclk);
intel_state->cdclk.actual.vco = vco;
......
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2019 Intel Corporation
*/
#ifndef __INTEL_CDCLK_H__
#define __INTEL_CDCLK_H__
#include <linux/types.h>
#include "intel_display.h"
struct drm_i915_private;
struct intel_atomic_state;
struct intel_cdclk_state;
struct intel_crtc_state;
int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state);
void intel_cdclk_init(struct drm_i915_private *i915);
void intel_cdclk_uninit(struct drm_i915_private *i915);
void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv);
void intel_update_max_cdclk(struct drm_i915_private *dev_priv);
void intel_update_cdclk(struct drm_i915_private *dev_priv);
void intel_update_rawclk(struct drm_i915_private *dev_priv);
bool intel_cdclk_needs_cd2x_update(struct drm_i915_private *dev_priv,
const struct intel_cdclk_state *a,
const struct intel_cdclk_state *b);
bool intel_cdclk_needs_modeset(const struct intel_cdclk_state *a,
const struct intel_cdclk_state *b);
bool intel_cdclk_changed(const struct intel_cdclk_state *a,
const struct intel_cdclk_state *b);
void intel_cdclk_swap_state(struct intel_atomic_state *state);
void
intel_set_cdclk_pre_plane_update(struct drm_i915_private *dev_priv,
const struct intel_cdclk_state *old_state,
const struct intel_cdclk_state *new_state,
enum pipe pipe);
void
intel_set_cdclk_post_plane_update(struct drm_i915_private *dev_priv,
const struct intel_cdclk_state *old_state,
const struct intel_cdclk_state *new_state,
enum pipe pipe);
void intel_dump_cdclk_state(const struct intel_cdclk_state *cdclk_state,
const char *context);
#endif /* __INTEL_CDCLK_H__ */
This diff is collapsed.
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2019 Intel Corporation
*/
#ifndef __INTEL_COLOR_H__
#define __INTEL_COLOR_H__
struct intel_crtc_state;
struct intel_crtc;
void intel_color_init(struct intel_crtc *crtc);
int intel_color_check(struct intel_crtc_state *crtc_state);
void intel_color_commit(const struct intel_crtc_state *crtc_state);
void intel_color_load_luts(const struct intel_crtc_state *crtc_state);
#endif /* __INTEL_COLOR_H__ */
......@@ -239,7 +239,8 @@ void icl_combo_phys_uninit(struct drm_i915_private *dev_priv)
for_each_combo_port_reverse(dev_priv, port) {
u32 val;
if (!icl_combo_phy_verify_state(dev_priv, port))
if (port == PORT_A &&
!icl_combo_phy_verify_state(dev_priv, port))
DRM_WARN("Port %c combo PHY HW state changed unexpectedly\n",
port_name(port));
......
......@@ -23,12 +23,17 @@
* DEALINGS IN THE SOFTWARE.
*/
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
#include "i915_drv.h"
#include "intel_connector.h"
#include "intel_drv.h"
#include "intel_hdcp.h"
#include "intel_panel.h"
int intel_connector_init(struct intel_connector *connector)
{
......
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2019 Intel Corporation
*/
#ifndef __INTEL_CONNECTOR_H__
#define __INTEL_CONNECTOR_H__
#include "intel_display.h"
struct drm_connector;
struct edid;
struct i2c_adapter;
struct intel_connector;
struct intel_encoder;
int intel_connector_init(struct intel_connector *connector);
struct intel_connector *intel_connector_alloc(void);
void intel_connector_free(struct intel_connector *connector);
void intel_connector_destroy(struct drm_connector *connector);
int intel_connector_register(struct drm_connector *connector);
void intel_connector_unregister(struct drm_connector *connector);
void intel_connector_attach_encoder(struct intel_connector *connector,
struct intel_encoder *encoder);
bool intel_connector_get_hw_state(struct intel_connector *connector);
enum pipe intel_connector_get_pipe(struct intel_connector *connector);
int intel_connector_update_modes(struct drm_connector *connector,
struct edid *edid);
int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
void intel_attach_force_audio_property(struct drm_connector *connector);
void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
void intel_attach_aspect_ratio_property(struct drm_connector *connector);
void intel_attach_colorspace_property(struct drm_connector *connector);
#endif /* __INTEL_CONNECTOR_H__ */
......@@ -24,6 +24,7 @@ struct intel_context_ops {
int (*pin)(struct intel_context *ce);
void (*unpin)(struct intel_context *ce);
void (*reset)(struct intel_context *ce);
void (*destroy)(struct kref *kref);
};
......
......@@ -27,13 +27,18 @@
#include <linux/dmi.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_connector.h"
#include "intel_crt.h"
#include "intel_ddi.h"
#include "intel_drv.h"
/* Here's the desired hotplug mode */
#define ADPA_HOTPLUG_BITS (ADPA_CRT_HOTPLUG_PERIOD_128 | \
......
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2019 Intel Corporation
*/
#ifndef __INTEL_CRT_H__
#define __INTEL_CRT_H__
#include "i915_reg.h"
enum pipe;
struct drm_encoder;
struct drm_i915_private;
bool intel_crt_port_enabled(struct drm_i915_private *dev_priv,
i915_reg_t adpa_reg, enum pipe *pipe);
void intel_crt_init(struct drm_i915_private *dev_priv);
void intel_crt_reset(struct drm_encoder *encoder);
#endif /* __INTEL_CRT_H__ */
......@@ -21,9 +21,12 @@
* IN THE SOFTWARE.
*
*/
#include <linux/firmware.h>
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_csr.h"
/**
* DOC: csr support for dmc
......
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2019 Intel Corporation
*/
#ifndef __INTEL_CSR_H__
#define __INTEL_CSR_H__
struct drm_i915_private;
void intel_csr_ucode_init(struct drm_i915_private *i915);
void intel_csr_load_program(struct drm_i915_private *i915);
void intel_csr_ucode_fini(struct drm_i915_private *i915);
void intel_csr_ucode_suspend(struct drm_i915_private *i915);
void intel_csr_ucode_resume(struct drm_i915_private *i915);
#endif /* __INTEL_CSR_H__ */
......@@ -26,9 +26,19 @@
*/
#include <drm/drm_scdc_helper.h>
#include "i915_drv.h"
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
#include "intel_dp.h"
#include "intel_drv.h"
#include "intel_dsi.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
#include "intel_lspcon.h"
#include "intel_panel.h"
#include "intel_psr.h"
struct ddi_buf_trans {
u32 trans1; /* balance leg enable, de-emph level */
......@@ -3847,14 +3857,16 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
ret = intel_hdmi_compute_config(encoder, pipe_config, conn_state);
else
ret = intel_dp_compute_config(encoder, pipe_config, conn_state);
if (ret)
return ret;
if (IS_GEN9_LP(dev_priv) && ret)
if (IS_GEN9_LP(dev_priv))
pipe_config->lane_lat_optim_mask =
bxt_ddi_phy_calc_lane_lat_optim_mask(pipe_config->lane_count);
intel_ddi_compute_min_voltage_level(dev_priv, pipe_config);
return ret;
return 0;
}
......
This diff is collapsed. (3 files)
......@@ -21,6 +21,7 @@
* IN THE SOFTWARE.
*/
#include "intel_dp.h"
#include "intel_drv.h"
static void
......
This diff is collapsed.
......@@ -21,6 +21,7 @@
* DEALINGS IN THE SOFTWARE.
*/
#include "intel_dp.h"
#include "intel_drv.h"
/**
......
This diff is collapsed. (36 files)