Commit 90551a12 authored by Zhenyu Wang

drm/i915/gvt: cleanup usage for typed mmio reg vs. offset

We previously had a hack that accepted either an i915_reg_t or a raw offset
value to access vGPU virtual/shadow regs, which defeated the purpose of
keeping those accessors type safe. This change explicitly separates the
usage of typed mmio regs from raw offsets.

The old vgpu_vreg(offset) helper now takes only a raw offset, while the new
vgpu_vreg_t(reg) takes an i915_reg_t only. Remaining users are converted to
the appropriate helper.

This also fixes the remaining KASAN warnings caused by the previous hack.

v2: rebase, fixup against recent mmio switch change
Reviewed-by: Zhi Wang <zhi.a.wang@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
Parent 4e889d62
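In short, each helper now has exactly one argument convention. A minimal illustration, using call sites that appear in the diff below (the surrounding variables are assumed from their respective functions):

	/* typed i915_reg_t register -> use the _t variant */
	vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE;

	/* raw u32 offset (e.g. the 'offset' argument of an MMIO handler) -> plain variant */
	vgpu_vreg(vgpu, offset) |= DDI_BUF_IS_IDLE;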
@@ -1239,13 +1239,13 @@ static int gen8_check_mi_display_flip(struct parser_exec_state *s,
 		return 0;
 	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
-		stride = vgpu_vreg(s->vgpu, info->stride_reg) & GENMASK(9, 0);
-		tile = (vgpu_vreg(s->vgpu, info->ctrl_reg) &
+		stride = vgpu_vreg_t(s->vgpu, info->stride_reg) & GENMASK(9, 0);
+		tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) &
 				GENMASK(12, 10)) >> 10;
 	} else {
-		stride = (vgpu_vreg(s->vgpu, info->stride_reg) &
+		stride = (vgpu_vreg_t(s->vgpu, info->stride_reg) &
 				GENMASK(15, 6)) >> 6;
-		tile = (vgpu_vreg(s->vgpu, info->ctrl_reg) & (1 << 10)) >> 10;
+		tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) & (1 << 10)) >> 10;
 	}
 	if (stride != info->stride_val)
@@ -1264,21 +1264,21 @@ static int gen8_update_plane_mmio_from_mi_display_flip(
 	struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
 	struct intel_vgpu *vgpu = s->vgpu;
-	set_mask_bits(&vgpu_vreg(vgpu, info->surf_reg), GENMASK(31, 12),
+	set_mask_bits(&vgpu_vreg_t(vgpu, info->surf_reg), GENMASK(31, 12),
 			info->surf_val << 12);
 	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
-		set_mask_bits(&vgpu_vreg(vgpu, info->stride_reg), GENMASK(9, 0),
+		set_mask_bits(&vgpu_vreg_t(vgpu, info->stride_reg), GENMASK(9, 0),
 				info->stride_val);
-		set_mask_bits(&vgpu_vreg(vgpu, info->ctrl_reg), GENMASK(12, 10),
+		set_mask_bits(&vgpu_vreg_t(vgpu, info->ctrl_reg), GENMASK(12, 10),
 				info->tile_val << 10);
 	} else {
-		set_mask_bits(&vgpu_vreg(vgpu, info->stride_reg), GENMASK(15, 6),
+		set_mask_bits(&vgpu_vreg_t(vgpu, info->stride_reg), GENMASK(15, 6),
 				info->stride_val << 6);
-		set_mask_bits(&vgpu_vreg(vgpu, info->ctrl_reg), GENMASK(10, 10),
+		set_mask_bits(&vgpu_vreg_t(vgpu, info->ctrl_reg), GENMASK(10, 10),
 				info->tile_val << 10);
 	}
-	vgpu_vreg(vgpu, PIPE_FRMCOUNT_G4X(info->pipe))++;
+	vgpu_vreg_t(vgpu, PIPE_FRMCOUNT_G4X(info->pipe))++;
 	intel_vgpu_trigger_virtual_event(vgpu, info->event);
 	return 0;
 }
......
@@ -59,7 +59,7 @@ static int edp_pipe_is_enabled(struct intel_vgpu *vgpu)
 {
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-	if (!(vgpu_vreg(vgpu, PIPECONF(_PIPE_EDP)) & PIPECONF_ENABLE))
+	if (!(vgpu_vreg_t(vgpu, PIPECONF(_PIPE_EDP)) & PIPECONF_ENABLE))
 		return 0;
 	if (!(vgpu_vreg(vgpu, _TRANS_DDI_FUNC_CTL_EDP) & TRANS_DDI_FUNC_ENABLE))
@@ -74,7 +74,7 @@ int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe)
 	if (WARN_ON(pipe < PIPE_A || pipe >= I915_MAX_PIPES))
 		return -EINVAL;
-	if (vgpu_vreg(vgpu, PIPECONF(pipe)) & PIPECONF_ENABLE)
+	if (vgpu_vreg_t(vgpu, PIPECONF(pipe)) & PIPECONF_ENABLE)
 		return 1;
 	if (edp_pipe_is_enabled(vgpu) &&
@@ -169,105 +169,105 @@ static u8 dpcd_fix_data[DPCD_HEADER_SIZE] = {
 static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
 {
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-	vgpu_vreg(vgpu, SDEISR) &= ~(SDE_PORTB_HOTPLUG_CPT |
+	vgpu_vreg_t(vgpu, SDEISR) &= ~(SDE_PORTB_HOTPLUG_CPT |
 			SDE_PORTC_HOTPLUG_CPT |
 			SDE_PORTD_HOTPLUG_CPT);
 	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
-		vgpu_vreg(vgpu, SDEISR) &= ~(SDE_PORTA_HOTPLUG_SPT |
+		vgpu_vreg_t(vgpu, SDEISR) &= ~(SDE_PORTA_HOTPLUG_SPT |
 				SDE_PORTE_HOTPLUG_SPT);
-		vgpu_vreg(vgpu, SKL_FUSE_STATUS) |=
+		vgpu_vreg_t(vgpu, SKL_FUSE_STATUS) |=
 				SKL_FUSE_DOWNLOAD_STATUS |
 				SKL_FUSE_PG_DIST_STATUS(SKL_PG0) |
 				SKL_FUSE_PG_DIST_STATUS(SKL_PG1) |
 				SKL_FUSE_PG_DIST_STATUS(SKL_PG2);
-		vgpu_vreg(vgpu, LCPLL1_CTL) |=
+		vgpu_vreg_t(vgpu, LCPLL1_CTL) |=
 				LCPLL_PLL_ENABLE |
 				LCPLL_PLL_LOCK;
-		vgpu_vreg(vgpu, LCPLL2_CTL) |= LCPLL_PLL_ENABLE;
+		vgpu_vreg_t(vgpu, LCPLL2_CTL) |= LCPLL_PLL_ENABLE;
 	}
 	if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
-		vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED;
-		vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
+		vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED;
+		vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
 			~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
 			TRANS_DDI_PORT_MASK);
-		vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
+		vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
 			(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
 			(PORT_B << TRANS_DDI_PORT_SHIFT) |
 			TRANS_DDI_FUNC_ENABLE);
 		if (IS_BROADWELL(dev_priv)) {
-			vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_B)) &=
+			vgpu_vreg_t(vgpu, PORT_CLK_SEL(PORT_B)) &=
 				~PORT_CLK_SEL_MASK;
-			vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_B)) |=
+			vgpu_vreg_t(vgpu, PORT_CLK_SEL(PORT_B)) |=
 				PORT_CLK_SEL_LCPLL_810;
 		}
-		vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_B)) |= DDI_BUF_CTL_ENABLE;
-		vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_B)) &= ~DDI_BUF_IS_IDLE;
-		vgpu_vreg(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT;
+		vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_B)) |= DDI_BUF_CTL_ENABLE;
+		vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_B)) &= ~DDI_BUF_IS_IDLE;
+		vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT;
 	}
 	if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
-		vgpu_vreg(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT;
-		vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
+		vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT;
+		vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
 			~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
 			TRANS_DDI_PORT_MASK);
-		vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
+		vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
 			(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
 			(PORT_C << TRANS_DDI_PORT_SHIFT) |
 			TRANS_DDI_FUNC_ENABLE);
 		if (IS_BROADWELL(dev_priv)) {
-			vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_C)) &=
+			vgpu_vreg_t(vgpu, PORT_CLK_SEL(PORT_C)) &=
 				~PORT_CLK_SEL_MASK;
-			vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_C)) |=
+			vgpu_vreg_t(vgpu, PORT_CLK_SEL(PORT_C)) |=
 				PORT_CLK_SEL_LCPLL_810;
 		}
-		vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_C)) |= DDI_BUF_CTL_ENABLE;
-		vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_C)) &= ~DDI_BUF_IS_IDLE;
-		vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIC_DETECTED;
+		vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_C)) |= DDI_BUF_CTL_ENABLE;
+		vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_C)) &= ~DDI_BUF_IS_IDLE;
+		vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIC_DETECTED;
 	}
 	if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D)) {
-		vgpu_vreg(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT;
-		vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
+		vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT;
+		vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
 			~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
 			TRANS_DDI_PORT_MASK);
-		vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
+		vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
 			(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
 			(PORT_D << TRANS_DDI_PORT_SHIFT) |
 			TRANS_DDI_FUNC_ENABLE);
 		if (IS_BROADWELL(dev_priv)) {
-			vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_D)) &=
+			vgpu_vreg_t(vgpu, PORT_CLK_SEL(PORT_D)) &=
 				~PORT_CLK_SEL_MASK;
-			vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_D)) |=
+			vgpu_vreg_t(vgpu, PORT_CLK_SEL(PORT_D)) |=
 				PORT_CLK_SEL_LCPLL_810;
 		}
-		vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_D)) |= DDI_BUF_CTL_ENABLE;
-		vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_D)) &= ~DDI_BUF_IS_IDLE;
-		vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED;
+		vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_D)) |= DDI_BUF_CTL_ENABLE;
+		vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_D)) &= ~DDI_BUF_IS_IDLE;
+		vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED;
 	}
 	if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
 			intel_vgpu_has_monitor_on_port(vgpu, PORT_E)) {
-		vgpu_vreg(vgpu, SDEISR) |= SDE_PORTE_HOTPLUG_SPT;
+		vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTE_HOTPLUG_SPT;
 	}
 	if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
 		if (IS_BROADWELL(dev_priv))
-			vgpu_vreg(vgpu, GEN8_DE_PORT_ISR) |=
+			vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
 				GEN8_PORT_DP_A_HOTPLUG;
 		else
-			vgpu_vreg(vgpu, SDEISR) |= SDE_PORTA_HOTPLUG_SPT;
-		vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_A)) |= DDI_INIT_DISPLAY_DETECTED;
+			vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTA_HOTPLUG_SPT;
+		vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_A)) |= DDI_INIT_DISPLAY_DETECTED;
 	}
 	/* Clear host CRT status, so guest couldn't detect this host CRT. */
 	if (IS_BROADWELL(dev_priv))
-		vgpu_vreg(vgpu, PCH_ADPA) &= ~ADPA_CRT_HOTPLUG_MONITOR_MASK;
-	vgpu_vreg(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE;
+		vgpu_vreg_t(vgpu, PCH_ADPA) &= ~ADPA_CRT_HOTPLUG_MONITOR_MASK;
+	vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE;
 }
 static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
@@ -369,12 +369,12 @@ static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
 		if (!pipe_is_enabled(vgpu, pipe))
 			continue;
-		vgpu_vreg(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++;
+		vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++;
 		intel_vgpu_trigger_virtual_event(vgpu, event);
 	}
 	if (pipe_is_enabled(vgpu, pipe)) {
-		vgpu_vreg(vgpu, PIPE_FRMCOUNT_G4X(pipe))++;
+		vgpu_vreg_t(vgpu, PIPE_FRMCOUNT_G4X(pipe))++;
 		intel_vgpu_trigger_virtual_event(vgpu, vblank_event[pipe]);
 	}
 }
......
@@ -95,9 +95,9 @@ static inline int get_port_from_gmbus0(u32 gmbus0)
 static void reset_gmbus_controller(struct intel_vgpu *vgpu)
 {
-	vgpu_vreg(vgpu, PCH_GMBUS2) = GMBUS_HW_RDY;
+	vgpu_vreg_t(vgpu, PCH_GMBUS2) = GMBUS_HW_RDY;
 	if (!vgpu->display.i2c_edid.edid_available)
-		vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_SATOER;
+		vgpu_vreg_t(vgpu, PCH_GMBUS2) |= GMBUS_SATOER;
 	vgpu->display.i2c_edid.gmbus.phase = GMBUS_IDLE_PHASE;
 }
@@ -123,16 +123,16 @@ static int gmbus0_mmio_write(struct intel_vgpu *vgpu,
 	vgpu->display.i2c_edid.state = I2C_GMBUS;
 	vgpu->display.i2c_edid.gmbus.phase = GMBUS_IDLE_PHASE;
-	vgpu_vreg(vgpu, PCH_GMBUS2) &= ~GMBUS_ACTIVE;
-	vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_HW_RDY | GMBUS_HW_WAIT_PHASE;
+	vgpu_vreg_t(vgpu, PCH_GMBUS2) &= ~GMBUS_ACTIVE;
+	vgpu_vreg_t(vgpu, PCH_GMBUS2) |= GMBUS_HW_RDY | GMBUS_HW_WAIT_PHASE;
 	if (intel_vgpu_has_monitor_on_port(vgpu, port) &&
 			!intel_vgpu_port_is_dp(vgpu, port)) {
 		vgpu->display.i2c_edid.port = port;
 		vgpu->display.i2c_edid.edid_available = true;
-		vgpu_vreg(vgpu, PCH_GMBUS2) &= ~GMBUS_SATOER;
+		vgpu_vreg_t(vgpu, PCH_GMBUS2) &= ~GMBUS_SATOER;
 	} else
-		vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_SATOER;
+		vgpu_vreg_t(vgpu, PCH_GMBUS2) |= GMBUS_SATOER;
 	return 0;
 }
@@ -159,8 +159,8 @@ static int gmbus1_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 	 * 2) HW_RDY bit asserted
 	 */
 	if (wvalue & GMBUS_SW_CLR_INT) {
-		vgpu_vreg(vgpu, PCH_GMBUS2) &= ~GMBUS_INT;
-		vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_HW_RDY;
+		vgpu_vreg_t(vgpu, PCH_GMBUS2) &= ~GMBUS_INT;
+		vgpu_vreg_t(vgpu, PCH_GMBUS2) |= GMBUS_HW_RDY;
 	}
 	/* For virtualization, we suppose that HW is always ready,
@@ -208,7 +208,7 @@ static int gmbus1_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 			 * visible in gmbus interface)
 			 */
 			i2c_edid->gmbus.phase = GMBUS_IDLE_PHASE;
-			vgpu_vreg(vgpu, PCH_GMBUS2) &= ~GMBUS_ACTIVE;
+			vgpu_vreg_t(vgpu, PCH_GMBUS2) &= ~GMBUS_ACTIVE;
 		}
 		break;
 	case NIDX_NS_W:
@@ -220,7 +220,7 @@ static int gmbus1_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 		 * START (-->INDEX) -->DATA
 		 */
 		i2c_edid->gmbus.phase = GMBUS_DATA_PHASE;
-		vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_ACTIVE;
+		vgpu_vreg_t(vgpu, PCH_GMBUS2) |= GMBUS_ACTIVE;
 		break;
 	default:
 		gvt_vgpu_err("Unknown/reserved GMBUS cycle detected!\n");
@@ -256,7 +256,7 @@ static int gmbus3_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
 	u32 reg_data = 0;
 	/* Data can only be recevied if previous settings correct */
-	if (vgpu_vreg(vgpu, PCH_GMBUS1) & GMBUS_SLAVE_READ) {
+	if (vgpu_vreg_t(vgpu, PCH_GMBUS1) & GMBUS_SLAVE_READ) {
 		if (byte_left <= 0) {
 			memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
 			return 0;
......
@@ -147,7 +147,7 @@ static u32 intel_vgpu_get_stride(struct intel_vgpu *vgpu, int pipe,
 {
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-	u32 stride_reg = vgpu_vreg(vgpu, DSPSTRIDE(pipe)) & stride_mask;
+	u32 stride_reg = vgpu_vreg_t(vgpu, DSPSTRIDE(pipe)) & stride_mask;
 	u32 stride = stride_reg;
 	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
@@ -209,7 +209,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
 	if (pipe >= I915_MAX_PIPES)
 		return -ENODEV;
-	val = vgpu_vreg(vgpu, DSPCNTR(pipe));
+	val = vgpu_vreg_t(vgpu, DSPCNTR(pipe));
 	plane->enabled = !!(val & DISPLAY_PLANE_ENABLE);
 	if (!plane->enabled)
 		return -ENODEV;
@@ -244,7 +244,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
 	plane->hw_format = fmt;
-	plane->base = vgpu_vreg(vgpu, DSPSURF(pipe)) & I915_GTT_PAGE_MASK;
+	plane->base = vgpu_vreg_t(vgpu, DSPSURF(pipe)) & I915_GTT_PAGE_MASK;
 	if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) {
 		gvt_vgpu_err("invalid gma address: %lx\n",
 				(unsigned long)plane->base);
@@ -263,14 +263,14 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
 			(_PRI_PLANE_STRIDE_MASK >> 6) :
 			_PRI_PLANE_STRIDE_MASK, plane->bpp);
-	plane->width = (vgpu_vreg(vgpu, PIPESRC(pipe)) & _PIPE_H_SRCSZ_MASK) >>
+	plane->width = (vgpu_vreg_t(vgpu, PIPESRC(pipe)) & _PIPE_H_SRCSZ_MASK) >>
 		_PIPE_H_SRCSZ_SHIFT;
 	plane->width += 1;
-	plane->height = (vgpu_vreg(vgpu, PIPESRC(pipe)) &
+	plane->height = (vgpu_vreg_t(vgpu, PIPESRC(pipe)) &
 			_PIPE_V_SRCSZ_MASK) >> _PIPE_V_SRCSZ_SHIFT;
 	plane->height += 1;	/* raw height is one minus the real value */
-	val = vgpu_vreg(vgpu, DSPTILEOFF(pipe));
+	val = vgpu_vreg_t(vgpu, DSPTILEOFF(pipe));
 	plane->x_offset = (val & _PRI_PLANE_X_OFF_MASK) >>
 		_PRI_PLANE_X_OFF_SHIFT;
 	plane->y_offset = (val & _PRI_PLANE_Y_OFF_MASK) >>
@@ -344,7 +344,7 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
 	if (pipe >= I915_MAX_PIPES)
 		return -ENODEV;
-	val = vgpu_vreg(vgpu, CURCNTR(pipe));
+	val = vgpu_vreg_t(vgpu, CURCNTR(pipe));
 	mode = val & CURSOR_MODE;
 	plane->enabled = (mode != CURSOR_MODE_DISABLE);
 	if (!plane->enabled)
@@ -370,7 +370,7 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
 	gvt_dbg_core("alpha_plane=0x%x, alpha_force=0x%x\n",
 			alpha_plane, alpha_force);
-	plane->base = vgpu_vreg(vgpu, CURBASE(pipe)) & I915_GTT_PAGE_MASK;
+	plane->base = vgpu_vreg_t(vgpu, CURBASE(pipe)) & I915_GTT_PAGE_MASK;
 	if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) {
 		gvt_vgpu_err("invalid gma address: %lx\n",
 				(unsigned long)plane->base);
@@ -384,7 +384,7 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
 		return -EINVAL;
 	}
-	val = vgpu_vreg(vgpu, CURPOS(pipe));
+	val = vgpu_vreg_t(vgpu, CURPOS(pipe));
 	plane->x_pos = (val & _CURSOR_POS_X_MASK) >> _CURSOR_POS_X_SHIFT;
 	plane->x_sign = (val & _CURSOR_SIGN_X_MASK) >> _CURSOR_SIGN_X_SHIFT;
 	plane->y_pos = (val & _CURSOR_POS_Y_MASK) >> _CURSOR_POS_Y_SHIFT;
@@ -424,7 +424,7 @@ int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu,
 	if (pipe >= I915_MAX_PIPES)
 		return -ENODEV;
-	val = vgpu_vreg(vgpu, SPRCTL(pipe));
+	val = vgpu_vreg_t(vgpu, SPRCTL(pipe));
 	plane->enabled = !!(val & SPRITE_ENABLE);
 	if (!plane->enabled)
 		return -ENODEV;
@@ -475,7 +475,7 @@ int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu,
 	plane->drm_format = drm_format;
-	plane->base = vgpu_vreg(vgpu, SPRSURF(pipe)) & I915_GTT_PAGE_MASK;
+	plane->base = vgpu_vreg_t(vgpu, SPRSURF(pipe)) & I915_GTT_PAGE_MASK;
 	if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) {
 		gvt_vgpu_err("invalid gma address: %lx\n",
 				(unsigned long)plane->base);
@@ -489,10 +489,10 @@ int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu,
 		return -EINVAL;
 	}
-	plane->stride = vgpu_vreg(vgpu, SPRSTRIDE(pipe)) &
+	plane->stride = vgpu_vreg_t(vgpu, SPRSTRIDE(pipe)) &
 				_SPRITE_STRIDE_MASK;
-	val = vgpu_vreg(vgpu, SPRSIZE(pipe));
+	val = vgpu_vreg_t(vgpu, SPRSIZE(pipe));
 	plane->height = (val & _SPRITE_SIZE_HEIGHT_MASK) >>
 		_SPRITE_SIZE_HEIGHT_SHIFT;
 	plane->width = (val & _SPRITE_SIZE_WIDTH_MASK) >>
@@ -500,11 +500,11 @@ int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu,
 	plane->height += 1;	/* raw height is one minus the real value */
 	plane->width += 1;	/* raw width is one minus the real value */
-	val = vgpu_vreg(vgpu, SPRPOS(pipe));
+	val = vgpu_vreg_t(vgpu, SPRPOS(pipe));
 	plane->x_pos = (val & _SPRITE_POS_X_MASK) >> _SPRITE_POS_X_SHIFT;
 	plane->y_pos = (val & _SPRITE_POS_Y_MASK) >> _SPRITE_POS_Y_SHIFT;
-	val = vgpu_vreg(vgpu, SPROFFSET(pipe));
+	val = vgpu_vreg_t(vgpu, SPROFFSET(pipe));
 	plane->x_offset = (val & _SPRITE_OFFSET_START_X_MASK) >>
 		_SPRITE_OFFSET_START_X_SHIFT;
 	plane->y_offset = (val & _SPRITE_OFFSET_START_Y_MASK) >>
......
@@ -2244,7 +2244,7 @@ struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
 int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
 		int page_table_level)
 {
-	u64 *pdp = (u64 *)&vgpu_vreg64(vgpu, vgtif_reg(pdp[0]));
+	u64 *pdp = (u64 *)&vgpu_vreg64_t(vgpu, vgtif_reg(pdp[0]));
 	struct intel_vgpu_mm *mm;
 	if (WARN_ON((page_table_level != 4) && (page_table_level != 3)))
@@ -2279,7 +2279,7 @@ int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
 int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
 		int page_table_level)
 {
-	u64 *pdp = (u64 *)&vgpu_vreg64(vgpu, vgtif_reg(pdp[0]));
+	u64 *pdp = (u64 *)&vgpu_vreg64_t(vgpu, vgtif_reg(pdp[0]));
 	struct intel_vgpu_mm *mm;
 	if (WARN_ON((page_table_level != 4) && (page_table_level != 3)))
......
@@ -412,23 +412,20 @@ void intel_vgpu_free_resource(struct intel_vgpu *vgpu);
 void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
 		u32 fence, u64 value);
-/* Macros for easily accessing vGPU virtual/shadow register */
-#define vgpu_vreg(vgpu, reg) \
-	(*(u32 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
-#define vgpu_vreg8(vgpu, reg) \
-	(*(u8 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
-#define vgpu_vreg16(vgpu, reg) \
-	(*(u16 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
-#define vgpu_vreg64(vgpu, reg) \
-	(*(u64 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
-#define vgpu_sreg(vgpu, reg) \
-	(*(u32 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
-#define vgpu_sreg8(vgpu, reg) \
-	(*(u8 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
-#define vgpu_sreg16(vgpu, reg) \
-	(*(u16 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
-#define vgpu_sreg64(vgpu, reg) \
-	(*(u64 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
+/* Macros for easily accessing vGPU virtual/shadow register.
+   Explicitly seperate use for typed MMIO reg or real offset.*/
+#define vgpu_vreg_t(vgpu, reg) \
+	(*(u32 *)(vgpu->mmio.vreg + i915_mmio_reg_offset(reg)))
+#define vgpu_vreg(vgpu, offset) \
+	(*(u32 *)(vgpu->mmio.vreg + (offset)))
+#define vgpu_vreg64_t(vgpu, reg) \
+	(*(u64 *)(vgpu->mmio.vreg + i915_mmio_reg_offset(reg)))
+#define vgpu_vreg64(vgpu, offset) \
+	(*(u64 *)(vgpu->mmio.vreg + (offset)))
+#define vgpu_sreg_t(vgpu, reg) \
+	(*(u32 *)(vgpu->mmio.sreg + i915_mmio_reg_offset(reg)))
+#define vgpu_sreg(vgpu, offset) \
+	(*(u32 *)(vgpu->mmio.sreg + (offset)))
 #define for_each_active_vgpu(gvt, vgpu, id) \
 	idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) \
......
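The type safety comes from i915_mmio_reg_offset(), which only accepts an i915_reg_t, whereas the removed INTEL_GVT_MMIO_OFFSET() reinterpreted whatever it was given through a pointer cast. A hedged sketch of the resulting usage (the two helper functions below are hypothetical, written only to show which macro matches which argument type):

	/* hypothetical helpers, not part of the patch */
	static u32 read_frmcount(struct intel_vgpu *vgpu, enum pipe pipe)
	{
		return vgpu_vreg_t(vgpu, PIPE_FRMCOUNT_G4X(pipe));	/* i915_reg_t */
	}

	static u32 read_raw(struct intel_vgpu *vgpu, u32 offset)
	{
		return vgpu_vreg(vgpu, offset);				/* raw offset */
	}

Passing a raw u32 to vgpu_vreg_t(), or an i915_reg_t to vgpu_vreg(), should now fail to compile instead of silently going through the old pointer-cast hack.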
@@ -343,13 +343,13 @@ static int pch_pp_control_mmio_write(struct intel_vgpu *vgpu,
 	write_vreg(vgpu, offset, p_data, bytes);
 	if (vgpu_vreg(vgpu, offset) & PANEL_POWER_ON) {
-		vgpu_vreg(vgpu, PCH_PP_STATUS) |= PP_ON;
-		vgpu_vreg(vgpu, PCH_PP_STATUS) |= PP_SEQUENCE_STATE_ON_IDLE;
-		vgpu_vreg(vgpu, PCH_PP_STATUS) &= ~PP_SEQUENCE_POWER_DOWN;
-		vgpu_vreg(vgpu, PCH_PP_STATUS) &= ~PP_CYCLE_DELAY_ACTIVE;
+		vgpu_vreg_t(vgpu, PCH_PP_STATUS) |= PP_ON;
+		vgpu_vreg_t(vgpu, PCH_PP_STATUS) |= PP_SEQUENCE_STATE_ON_IDLE;
+		vgpu_vreg_t(vgpu, PCH_PP_STATUS) &= ~PP_SEQUENCE_POWER_DOWN;
+		vgpu_vreg_t(vgpu, PCH_PP_STATUS) &= ~PP_CYCLE_DELAY_ACTIVE;
 	} else
-		vgpu_vreg(vgpu, PCH_PP_STATUS) &=
+		vgpu_vreg_t(vgpu, PCH_PP_STATUS) &=
 			~(PP_ON | PP_SEQUENCE_POWER_DOWN
 					| PP_CYCLE_DELAY_ACTIVE);
 	return 0;
@@ -503,7 +503,7 @@ static int ddi_buf_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 	} else {
 		vgpu_vreg(vgpu, offset) |= DDI_BUF_IS_IDLE;
 		if (offset == i915_mmio_reg_offset(DDI_BUF_CTL(PORT_E)))
-			vgpu_vreg(vgpu, DP_TP_STATUS(PORT_E))
+			vgpu_vreg_t(vgpu, DP_TP_STATUS(PORT_E))
 				&= ~DP_TP_STATUS_AUTOTRAIN_DONE;
 	}
 	return 0;
@@ -521,9 +521,9 @@ static int fdi_rx_iir_mmio_write(struct intel_vgpu *vgpu,
 static int fdi_auto_training_started(struct intel_vgpu *vgpu)
 {
-	u32 ddi_buf_ctl = vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_E));
+	u32 ddi_buf_ctl = vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_E));
 	u32 rx_ctl = vgpu_vreg(vgpu, _FDI_RXA_CTL);
-	u32 tx_ctl = vgpu_vreg(vgpu, DP_TP_CTL(PORT_E));
+	u32 tx_ctl = vgpu_vreg_t(vgpu, DP_TP_CTL(PORT_E));
 	if ((ddi_buf_ctl & DDI_BUF_CTL_ENABLE) &&
 			(rx_ctl & FDI_RX_ENABLE) &&
@@ -564,12 +564,12 @@ static int check_fdi_rx_train_status(struct intel_vgpu *vgpu,
 	fdi_tx_check_bits = FDI_TX_ENABLE | fdi_tx_train_bits;
 	/* If imr bit has been masked */
-	if (vgpu_vreg(vgpu, fdi_rx_imr) & fdi_iir_check_bits)
+	if (vgpu_vreg_t(vgpu, fdi_rx_imr) & fdi_iir_check_bits)
 		return 0;
-	if (((vgpu_vreg(vgpu, fdi_tx_ctl) & fdi_tx_check_bits)
+	if (((vgpu_vreg_t(vgpu, fdi_tx_ctl) & fdi_tx_check_bits)
 			== fdi_tx_check_bits)
-		&& ((vgpu_vreg(vgpu, fdi_rx_ctl) & fdi_rx_check_bits)
+		&& ((vgpu_vreg_t(vgpu, fdi_rx_ctl) & fdi_rx_check_bits)
 			== fdi_rx_check_bits))
 		return 1;
 	else
@@ -626,17 +626,17 @@ static int update_fdi_rx_iir_status(struct intel_vgpu *vgpu,
 	if (ret < 0)
 		return ret;
 	if (ret)
-		vgpu_vreg(vgpu, fdi_rx_iir) |= FDI_RX_BIT_LOCK;
+		vgpu_vreg_t(vgpu, fdi_rx_iir) |= FDI_RX_BIT_LOCK;
 	ret = check_fdi_rx_train_status(vgpu, index, FDI_LINK_TRAIN_PATTERN2);
 	if (ret < 0)
 		return ret;
 	if (ret)
-		vgpu_vreg(vgpu, fdi_rx_iir) |= FDI_RX_SYMBOL_LOCK;
+		vgpu_vreg_t(vgpu, fdi_rx_iir) |= FDI_RX_SYMBOL_LOCK;
 	if (offset == _FDI_RXA_CTL)
 		if (fdi_auto_training_started(vgpu))
-			vgpu_vreg(vgpu, DP_TP_STATUS(PORT_E)) |=
+			vgpu_vreg_t(vgpu, DP_TP_STATUS(PORT_E)) |=
 				DP_TP_STATUS_AUTOTRAIN_DONE;
 	return 0;
 }
@@ -657,7 +657,7 @@ static int dp_tp_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 	data = (vgpu_vreg(vgpu, offset) & GENMASK(10, 8)) >> 8;
 	if (data == 0x2) {
 		status_reg = DP_TP_STATUS(index);
-		vgpu_vreg(vgpu, status_reg) |= (1 << 25);
+		vgpu_vreg_t(vgpu, status_reg) |= (1 << 25);
 	}
 	return 0;
 }
@@ -721,7 +721,7 @@ static int pri_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 	};
 	write_vreg(vgpu, offset, p_data, bytes);
-	vgpu_vreg(vgpu, surflive_reg) = vgpu_vreg(vgpu, offset);
+	vgpu_vreg_t(vgpu, surflive_reg) = vgpu_vreg(vgpu, offset);
 	set_bit(flip_event[index], vgpu->irq.flip_done_event[index]);
 	return 0;
@@ -742,7 +742,7 @@ static int spr_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 	};
 	write_vreg(vgpu, offset, p_data, bytes);
-	vgpu_vreg(vgpu, surflive_reg) = vgpu_vreg(vgpu, offset);
+	vgpu_vreg_t(vgpu, surflive_reg) = vgpu_vreg(vgpu, offset);
 	set_bit(flip_event[index], vgpu->irq.flip_done_event[index]);
 	return 0;
@@ -1064,9 +1064,9 @@ static void write_virtual_sbi_register(struct intel_vgpu *vgpu,
 static int sbi_data_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
 		void *p_data, unsigned int bytes)
 {
-	if (((vgpu_vreg(vgpu, SBI_CTL_STAT) & SBI_OPCODE_MASK) >>
+	if (((vgpu_vreg_t(vgpu, SBI_CTL_STAT) & SBI_OPCODE_MASK) >>
 			SBI_OPCODE_SHIFT) == SBI_CMD_CRRD) {
-		unsigned int sbi_offset = (vgpu_vreg(vgpu, SBI_ADDR) &
+		unsigned int sbi_offset = (vgpu_vreg_t(vgpu, SBI_ADDR) &
 				SBI_ADDR_OFFSET_MASK) >> SBI_ADDR_OFFSET_SHIFT;
 		vgpu_vreg(vgpu, offset) = read_virtual_sbi_register(vgpu,
 				sbi_offset);
@@ -1091,13 +1091,13 @@ static int sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 	vgpu_vreg(vgpu, offset) = data;
-	if (((vgpu_vreg(vgpu, SBI_CTL_STAT) & SBI_OPCODE_MASK) >>
+	if (((vgpu_vreg_t(vgpu, SBI_CTL_STAT) & SBI_OPCODE_MASK) >>
 			SBI_OPCODE_SHIFT) == SBI_CMD_CRWR) {
-		unsigned int sbi_offset = (vgpu_vreg(vgpu, SBI_ADDR) &
+		unsigned int sbi_offset = (vgpu_vreg_t(vgpu, SBI_ADDR) &
 				SBI_ADDR_OFFSET_MASK) >> SBI_ADDR_OFFSET_SHIFT;
 		write_virtual_sbi_register(vgpu, sbi_offset,
-				vgpu_vreg(vgpu, SBI_DATA));
+				vgpu_vreg_t(vgpu, SBI_DATA));
 	}
 	return 0;
 }
@@ -1343,7 +1343,7 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
 {
 	u32 value = *(u32 *)p_data;
 	u32 cmd = value & 0xff;
-	u32 *data0 = &vgpu_vreg(vgpu, GEN6_PCODE_DATA);
+	u32 *data0 = &vgpu_vreg_t(vgpu, GEN6_PCODE_DATA);
 	switch (cmd) {
 	case GEN9_PCODE_READ_MEM_LATENCY:
......
@@ -336,10 +336,10 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr)
 		memcpy(vgpu->mmio.vreg, mmio, info->mmio_size);
 		memcpy(vgpu->mmio.sreg, mmio, info->mmio_size);
-		vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;
+		vgpu_vreg_t(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;
 		/* set the bit 0:2(Core C-State ) to C0 */
-		vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;
+		vgpu_vreg_t(vgpu, GEN6_GT_CORE_STATUS) = 0;
 		vgpu->mmio.disable_warn_untrack = false;
 	} else {
......
@@ -76,13 +76,6 @@ int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt,
 	int (*handler)(struct intel_gvt *gvt, u32 offset, void *data),
 	void *data);
-#define INTEL_GVT_MMIO_OFFSET(reg) ({ \
-	typeof(reg) __reg = reg; \
-	u32 *offset = (u32 *)&__reg; \
-	*offset; \
-})
 int intel_vgpu_init_mmio(struct intel_vgpu *vgpu);
 void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr);
 void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu);
......
@@ -224,7 +224,7 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
 	if (wait_for_atomic((I915_READ_FW(reg) == 0), 50))
 		gvt_vgpu_err("timeout in invalidate ring (%d) tlb\n", ring_id);
 	else
-		vgpu_vreg(vgpu, regs[ring_id]) = 0;
+		vgpu_vreg_t(vgpu, reg) = 0;
 	intel_uncore_forcewake_put(dev_priv, fw);
@@ -257,11 +257,11 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
 	offset.reg = regs[ring_id];
 	for (i = 0; i < 64; i++) {
 		if (pre)
-			old_v = vgpu_vreg(pre, offset);
+			old_v = vgpu_vreg_t(pre, offset);
 		else
 			old_v = gen9_render_mocs.control_table[ring_id][i];
 		if (next)
-			new_v = vgpu_vreg(next, offset);
+			new_v = vgpu_vreg_t(next, offset);
 		else
 			new_v = gen9_render_mocs.control_table[ring_id][i];
@@ -275,11 +275,11 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
 		l3_offset.reg = 0xb020;
 		for (i = 0; i < 32; i++) {
 			if (pre)
-				old_v = vgpu_vreg(pre, l3_offset);
+				old_v = vgpu_vreg_t(pre, l3_offset);
 			else
 				old_v = gen9_render_mocs.l3cc_table[i];
 			if (next)
-				new_v = vgpu_vreg(next, l3_offset);
+				new_v = vgpu_vreg_t(next, l3_offset);
 			else
 				new_v = gen9_render_mocs.l3cc_table[i];
@@ -316,11 +316,11 @@ static void switch_mmio(struct intel_vgpu *pre,
 			continue;
 		// save
 		if (pre) {
-			vgpu_vreg(pre, mmio->reg) = I915_READ_FW(mmio->reg);
+			vgpu_vreg_t(pre, mmio->reg) = I915_READ_FW(mmio->reg);
 			if (mmio->mask)
-				vgpu_vreg(pre, mmio->reg) &=
+				vgpu_vreg_t(pre, mmio->reg) &=
 					~(mmio->mask << 16);
-			old_v = vgpu_vreg(pre, mmio->reg);
+			old_v = vgpu_vreg_t(pre, mmio->reg);
 		} else
 			old_v = mmio->value = I915_READ_FW(mmio->reg);
@@ -340,10 +340,10 @@ static void switch_mmio(struct intel_vgpu *pre,
 				continue;
 			if (mmio->mask)
-				new_v = vgpu_vreg(next, mmio->reg) |
+				new_v = vgpu_vreg_t(next, mmio->reg) |
 					(mmio->mask << 16);
 			else
-				new_v = vgpu_vreg(next, mmio->reg);
+				new_v = vgpu_vreg_t(next, mmio->reg);
 		} else {
 			if (mmio->in_context)
 				continue;
......
@@ -38,25 +38,25 @@
 void populate_pvinfo_page(struct intel_vgpu *vgpu)
 {
 	/* setup the ballooning information */
-	vgpu_vreg64(vgpu, vgtif_reg(magic)) = VGT_MAGIC;
-	vgpu_vreg(vgpu, vgtif_reg(version_major)) = 1;
-	vgpu_vreg(vgpu, vgtif_reg(version_minor)) = 0;
-	vgpu_vreg(vgpu, vgtif_reg(display_ready)) = 0;
-	vgpu_vreg(vgpu, vgtif_reg(vgt_id)) = vgpu->id;
-	vgpu_vreg(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_48BIT_PPGTT;
-	vgpu_vreg(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HWSP_EMULATION;
-	vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) =
+	vgpu_vreg64_t(vgpu, vgtif_reg(magic)) = VGT_MAGIC;
+	vgpu_vreg_t(vgpu, vgtif_reg(version_major)) = 1;
+	vgpu_vreg_t(vgpu, vgtif_reg(version_minor)) = 0;
+	vgpu_vreg_t(vgpu, vgtif_reg(display_ready)) = 0;
+	vgpu_vreg_t(vgpu, vgtif_reg(vgt_id)) = vgpu->id;
+	vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_48BIT_PPGTT;
+	vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HWSP_EMULATION;
+	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) =
 		vgpu_aperture_gmadr_base(vgpu);
-	vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.size)) =
+	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.mappable_gmadr.size)) =
 		vgpu_aperture_sz(vgpu);
-	vgpu_vreg(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.base)) =
+	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.base)) =
 		vgpu_hidden_gmadr_base(vgpu);
-	vgpu_vreg(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.size)) =
+	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.size)) =
 		vgpu_hidden_sz(vgpu);
-	vgpu_vreg(vgpu, vgtif_reg(avail_rs.fence_num)) = vgpu_fence_sz(vgpu);
+	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.fence_num)) = vgpu_fence_sz(vgpu);
 	gvt_dbg_core("Populate PVINFO PAGE for vGPU %d\n", vgpu->id);
 	gvt_dbg_core("aperture base [GMADR] 0x%llx size 0x%llx\n",
......