Commit 7c7fba98 authored by Linus Torvalds

Merge tag 'drm-fixes-for-4.11-rc2' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
 "Intel, amd and mxsfb fixes.

  These are the drm fixes I've collected for rc2. Mostly i915 GVT only
  fixes, along with a single EDID fix, some mxsfb fixes and a few minor
  amd fixes"

* tag 'drm-fixes-for-4.11-rc2' of git://people.freedesktop.org/~airlied/linux: (38 commits)
  drm: mxsfb: Implement drm_panel handling
  drm: mxsfb_crtc: Fix the framebuffer misplacement
  drm: mxsfb: Fix crash when provided invalid DT bindings
  drm: mxsfb: fix pixel clock polarity
  drm: mxsfb: use bus_format to determine LCD bus width
  drm/amdgpu: bump driver version for some new features
  drm/amdgpu: validate parameters in the gem ioctl
  drm/amd/amdgpu: fix console deadlock if late init failed
  drm/i915/gvt: change some gvt_err to gvt_dbg_cmd
  drm/i915/gvt: protect RO and Rsvd bits of virtual vgpu configuration space
  drm/i915/gvt: handle workload lifecycle properly
  drm/edid: Add EDID_QUIRK_FORCE_8BPC quirk for Rotel RSX-1058
  drm/i915/gvt: fix an error for F_RO flag
  drm/i915/gvt: use pfn_valid for better checking
  drm/i915/gvt: set SFUSE_STRAP properly for virtual monitor detection
  drm/i915/gvt: fix an error for one register
  drm/i915/gvt: add more registers into handlers list
  drm/i915/gvt: have more registers with F_CMD_ACCESS flags set
  drm/i915/gvt: add some new MMIOs to cmd_access white list
  drm/i915/gvt: fix pcode mailbox write emulation of BDW
  ...
@@ -2094,8 +2094,11 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
 	}
 	r = amdgpu_late_init(adev);
-	if (r)
+	if (r) {
+		if (fbcon)
+			console_unlock();
 		return r;
+	}
 
 	/* pin cursors */
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
...
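Note on the fix above: earlier in amdgpu_device_resume() the console is locked (console_lock() is taken when fbcon is true), so the old early return on a failed amdgpu_late_init() left the console locked forever and any later fbcon access deadlocked. A minimal sketch of the required lock pairing (illustrative, not the driver's exact code):

	if (fbcon)
		console_lock();
	r = amdgpu_late_init(adev);
	if (r) {
		if (fbcon)
			console_unlock();	/* every exit path must drop the lock */
		return r;
	}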
@@ -59,9 +59,10 @@
  * - 3.7.0 - Add support for VCE clock list packet
  * - 3.8.0 - Add support raster config init in the kernel
  * - 3.9.0 - Add support for memory query info about VRAM and GTT.
+ * - 3.10.0 - Add support for new fences ioctl, new gem ioctl flags
  */
 #define KMS_DRIVER_MAJOR	3
-#define KMS_DRIVER_MINOR	9
+#define KMS_DRIVER_MINOR	10
 #define KMS_DRIVER_PATCHLEVEL	0
 int amdgpu_vram_limit = 0;
...
@@ -202,6 +202,27 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
 	bool kernel = false;
 	int r;
 
+	/* reject invalid gem flags */
+	if (args->in.domain_flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+				      AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
+				      AMDGPU_GEM_CREATE_CPU_GTT_USWC |
+				      AMDGPU_GEM_CREATE_VRAM_CLEARED |
+				      AMDGPU_GEM_CREATE_SHADOW |
+				      AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
+		r = -EINVAL;
+		goto error_unlock;
+	}
+	/* reject invalid gem domains */
+	if (args->in.domains & ~(AMDGPU_GEM_DOMAIN_CPU |
+				 AMDGPU_GEM_DOMAIN_GTT |
+				 AMDGPU_GEM_DOMAIN_VRAM |
+				 AMDGPU_GEM_DOMAIN_GDS |
+				 AMDGPU_GEM_DOMAIN_GWS |
+				 AMDGPU_GEM_DOMAIN_OA)) {
+		r = -EINVAL;
+		goto error_unlock;
+	}
+
 	/* create a gem object to contain this object in */
 	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
 	    AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
...
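The two rejection blocks above follow the standard "reject unknown bits" idiom for ioctls: any flag or domain bit outside the supported mask fails with -EINVAL instead of being silently ignored, so the remaining bits stay usable for future extensions. A standalone sketch of the idiom (hypothetical mask, not amdgpu code):

#include <errno.h>
#include <stdint.h>

#define KNOWN_FLAGS 0x3fu	/* union of every currently defined flag bit */

static int validate_flags(uint64_t flags)
{
	/* any set bit outside KNOWN_FLAGS invalidates the request */
	return (flags & ~(uint64_t)KNOWN_FLAGS) ? -EINVAL : 0;
}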
@@ -148,6 +148,9 @@ static const struct edid_quirk {
 	/* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */
 	{ "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC },
 
+	/* Rotel RSX-1058 forwards sink's EDID but only does HDMI 1.1 */
+	{ "ETR", 13896, EDID_QUIRK_FORCE_8BPC },
 };
 
 /*
...
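For context (a hedged summary of how the quirk table is consumed in drm_edid.c, not part of this diff): each entry pairs a three-letter PNP vendor ID with a 16-bit product code, and 13896 is simply 0x3648 written in decimal. When a matching sink advertises more than the device can carry, the EDID_QUIRK_FORCE_8BPC entry caps output at 8 bpc, which keeps an HDMI 1.1 device like this Rotel receiver within its bandwidth. A minimal sketch of a lookup over a table of this shape (field names assumed):

#include <stddef.h>
#include <string.h>

struct quirk_entry {
	const char *vendor;	/* 3-letter PNP ID, e.g. "ETR" */
	int product_id;		/* 16-bit product code */
	unsigned int quirks;	/* e.g. a FORCE_8BPC bit */
};

static unsigned int find_quirks(const struct quirk_entry *tbl, size_t n,
				const char *vendor, int product)
{
	for (size_t i = 0; i < n; i++)
		if (!strcmp(tbl[i].vendor, vendor) && tbl[i].product_id == product)
			return tbl[i].quirks;
	return 0;	/* no quirk for this sink */
}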
@@ -41,6 +41,54 @@ enum {
 	INTEL_GVT_PCI_BAR_MAX,
 };
 
+/* bitmap for writable bits (RW or RW1C bits, but they cannot co-exist in one
+ * byte), byte by byte in standard pci configuration space. (not the full
+ * 256 bytes.)
+ */
+static const u8 pci_cfg_space_rw_bmp[PCI_INTERRUPT_LINE + 4] = {
+	[PCI_COMMAND]		= 0xff, 0x07,
+	[PCI_STATUS]		= 0x00, 0xf9, /* the only RW1C byte */
+	[PCI_CACHE_LINE_SIZE]	= 0xff,
+	[PCI_BASE_ADDRESS_0 ... PCI_CARDBUS_CIS - 1] = 0xff,
+	[PCI_ROM_ADDRESS]	= 0x01, 0xf8, 0xff, 0xff,
+	[PCI_INTERRUPT_LINE]	= 0xff,
+};
+
+/**
+ * vgpu_pci_cfg_mem_write - write virtual cfg space memory
+ *
+ * Use this function to write virtual cfg space memory.
+ * For standard cfg space, only RW bits can be changed,
+ * and we emulate the RW1C behavior of the PCI_STATUS register.
+ */
+static void vgpu_pci_cfg_mem_write(struct intel_vgpu *vgpu, unsigned int off,
+	u8 *src, unsigned int bytes)
+{
+	u8 *cfg_base = vgpu_cfg_space(vgpu);
+	u8 mask, new, old;
+	int i = 0;
+
+	for (; i < bytes && (off + i < sizeof(pci_cfg_space_rw_bmp)); i++) {
+		mask = pci_cfg_space_rw_bmp[off + i];
+		old = cfg_base[off + i];
+		new = src[i] & mask;
+
+		/**
+		 * The PCI_STATUS high byte has RW1C bits; here we
+		 * emulate the clear-by-writing-1 behavior for those bits.
+		 * Writing a 0 to RW1C bits has no effect.
+		 */
+		if (off + i == PCI_STATUS + 1)
+			new = (~new & old) & mask;
+
+		cfg_base[off + i] = (old & ~mask) | new;
+	}
+
+	/* For other configuration space, directly copy as it is. */
+	if (i < bytes)
+		memcpy(cfg_base + off + i, src + i, bytes - i);
+}
+
 /**
  * intel_vgpu_emulate_cfg_read - emulate vGPU configuration space read
  *
@@ -123,7 +171,7 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu,
 	u8 changed = old ^ new;
 	int ret;
 
-	memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
+	vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
 	if (!(changed & PCI_COMMAND_MEMORY))
 		return 0;
 
@@ -237,6 +285,9 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
 {
 	int ret;
 
+	if (vgpu->failsafe)
+		return 0;
+
 	if (WARN_ON(bytes > 4))
 		return -EINVAL;
 
@@ -274,10 +325,10 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
 		if (ret)
 			return ret;
 
-		memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
+		vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
 		break;
 	default:
-		memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
+		vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
 		break;
 	}
 
 	return 0;
...
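A worked example of the RW1C arithmetic in vgpu_pci_cfg_mem_write() above, with illustrative values:

/* PCI_STATUS high byte: mask = 0xf9 (RW1C bits), old = 0x90,
 * guest writes src = 0x10 (i.e. "clear bit 4"):
 *
 *   new    = src & mask           = 0x10
 *   new    = (~new & old) & mask  = (0xef & 0x90) & 0xf9 = 0x80
 *   result = (old & ~mask) | new  = (0x90 & 0x06) | 0x80 = 0x80
 *
 * Bit 4 (written as 1) is cleared; bit 7 (written as 0) is preserved,
 * which is exactly the RW1C semantics the helper emulates.
 */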
@@ -668,7 +668,7 @@ static inline void print_opcode(u32 cmd, int ring_id)
 	if (d_info == NULL)
 		return;
 
-	gvt_err("opcode=0x%x %s sub_ops:",
+	gvt_dbg_cmd("opcode=0x%x %s sub_ops:",
 			cmd >> (32 - d_info->op_len), d_info->name);
 
 	for (i = 0; i < d_info->nr_sub_op; i++)
@@ -693,23 +693,23 @@ static void parser_exec_state_dump(struct parser_exec_state *s)
 	int cnt = 0;
 	int i;
 
-	gvt_err("  vgpu%d RING%d: ring_start(%08lx) ring_end(%08lx)"
+	gvt_dbg_cmd("  vgpu%d RING%d: ring_start(%08lx) ring_end(%08lx)"
 			" ring_head(%08lx) ring_tail(%08lx)\n", s->vgpu->id,
 			s->ring_id, s->ring_start, s->ring_start + s->ring_size,
 			s->ring_head, s->ring_tail);
 
-	gvt_err("  %s %s ip_gma(%08lx) ",
+	gvt_dbg_cmd("  %s %s ip_gma(%08lx) ",
 			s->buf_type == RING_BUFFER_INSTRUCTION ?
 			"RING_BUFFER" : "BATCH_BUFFER",
 			s->buf_addr_type == GTT_BUFFER ?
 			"GTT" : "PPGTT", s->ip_gma);
 
 	if (s->ip_va == NULL) {
-		gvt_err(" ip_va(NULL)");
+		gvt_dbg_cmd(" ip_va(NULL)");
 		return;
 	}
 
-	gvt_err("  ip_va=%p: %08x %08x %08x %08x\n",
+	gvt_dbg_cmd("  ip_va=%p: %08x %08x %08x %08x\n",
 			s->ip_va, cmd_val(s, 0), cmd_val(s, 1),
 			cmd_val(s, 2), cmd_val(s, 3));
...
@@ -83,44 +83,80 @@ static int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe)
 	return 0;
 }
 
-/* EDID with 1920x1200 as its resolution */
-static unsigned char virtual_dp_monitor_edid[] = {
-	/*Header*/
-	0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
-	/* Vendor & Product Identification */
-	0x22, 0xf0, 0x54, 0x29, 0x00, 0x00, 0x00, 0x00, 0x04, 0x17,
-	/* Version & Revision */
-	0x01, 0x04,
-	/* Basic Display Parameters & Features */
-	0xa5, 0x34, 0x20, 0x78, 0x23,
-	/* Color Characteristics */
-	0xfc, 0x81, 0xa4, 0x55, 0x4d, 0x9d, 0x25, 0x12, 0x50, 0x54,
-	/* Established Timings: maximum resolution is 1024x768 */
-	0x21, 0x08, 0x00,
-	/*
-	 * Standard Timings.
-	 * below new resolutions can be supported:
-	 * 1920x1080, 1280x720, 1280x960, 1280x1024,
-	 * 1440x900, 1600x1200, 1680x1050
-	 */
-	0xd1, 0xc0, 0x81, 0xc0, 0x81, 0x40, 0x81, 0x80, 0x95, 0x00,
-	0xa9, 0x40, 0xb3, 0x00, 0x01, 0x01,
-	/* 18 Byte Data Blocks 1: max resolution is 1920x1200 */
-	0x28, 0x3c, 0x80, 0xa0, 0x70, 0xb0,
-	0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x06, 0x44, 0x21, 0x00, 0x00, 0x1a,
-	/* 18 Byte Data Blocks 2: invalid */
-	0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x3c, 0x18, 0x50, 0x11, 0x00, 0x0a,
-	0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
-	/* 18 Byte Data Blocks 3: invalid */
-	0x00, 0x00, 0x00, 0xfc, 0x00, 0x48,
-	0x50, 0x20, 0x5a, 0x52, 0x32, 0x34, 0x34, 0x30, 0x77, 0x0a, 0x20, 0x20,
-	/* 18 Byte Data Blocks 4: invalid */
-	0x00, 0x00, 0x00, 0xff, 0x00, 0x43, 0x4e, 0x34, 0x33, 0x30, 0x34, 0x30,
-	0x44, 0x58, 0x51, 0x0a, 0x20, 0x20,
-	/* Extension Block Count */
-	0x00,
-	/* Checksum */
-	0x45,
-};
+static unsigned char virtual_dp_monitor_edid[GVT_EDID_NUM][EDID_SIZE] = {
+	{
+	/* EDID with 1024x768 as its resolution */
+	/*Header*/
+	0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+	/* Vendor & Product Identification */
+	0x22, 0xf0, 0x54, 0x29, 0x00, 0x00, 0x00, 0x00, 0x04, 0x17,
+	/* Version & Revision */
+	0x01, 0x04,
+	/* Basic Display Parameters & Features */
+	0xa5, 0x34, 0x20, 0x78, 0x23,
+	/* Color Characteristics */
+	0xfc, 0x81, 0xa4, 0x55, 0x4d, 0x9d, 0x25, 0x12, 0x50, 0x54,
+	/* Established Timings: maximum resolution is 1024x768 */
+	0x21, 0x08, 0x00,
+	/* Standard Timings. All invalid */
+	0x00, 0xc0, 0x00, 0xc0, 0x00, 0x40, 0x00, 0x80, 0x00, 0x00,
+	0x00, 0x40, 0x00, 0x00, 0x00, 0x01,
+	/* 18 Byte Data Blocks 1: invalid */
+	0x00, 0x00, 0x80, 0xa0, 0x70, 0xb0,
+	0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x06, 0x44, 0x21, 0x00, 0x00, 0x1a,
+	/* 18 Byte Data Blocks 2: invalid */
+	0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x3c, 0x18, 0x50, 0x11, 0x00, 0x0a,
+	0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+	/* 18 Byte Data Blocks 3: invalid */
+	0x00, 0x00, 0x00, 0xfc, 0x00, 0x48,
+	0x50, 0x20, 0x5a, 0x52, 0x32, 0x34, 0x34, 0x30, 0x77, 0x0a, 0x20, 0x20,
+	/* 18 Byte Data Blocks 4: invalid */
+	0x00, 0x00, 0x00, 0xff, 0x00, 0x43, 0x4e, 0x34, 0x33, 0x30, 0x34, 0x30,
+	0x44, 0x58, 0x51, 0x0a, 0x20, 0x20,
+	/* Extension Block Count */
+	0x00,
+	/* Checksum */
+	0xef,
+	},
+	{
+	/* EDID with 1920x1200 as its resolution */
+	/*Header*/
+	0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+	/* Vendor & Product Identification */
+	0x22, 0xf0, 0x54, 0x29, 0x00, 0x00, 0x00, 0x00, 0x04, 0x17,
+	/* Version & Revision */
+	0x01, 0x04,
+	/* Basic Display Parameters & Features */
+	0xa5, 0x34, 0x20, 0x78, 0x23,
+	/* Color Characteristics */
+	0xfc, 0x81, 0xa4, 0x55, 0x4d, 0x9d, 0x25, 0x12, 0x50, 0x54,
+	/* Established Timings: maximum resolution is 1024x768 */
+	0x21, 0x08, 0x00,
+	/*
+	 * Standard Timings.
+	 * below new resolutions can be supported:
+	 * 1920x1080, 1280x720, 1280x960, 1280x1024,
+	 * 1440x900, 1600x1200, 1680x1050
+	 */
+	0xd1, 0xc0, 0x81, 0xc0, 0x81, 0x40, 0x81, 0x80, 0x95, 0x00,
+	0xa9, 0x40, 0xb3, 0x00, 0x01, 0x01,
+	/* 18 Byte Data Blocks 1: max resolution is 1920x1200 */
+	0x28, 0x3c, 0x80, 0xa0, 0x70, 0xb0,
+	0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x06, 0x44, 0x21, 0x00, 0x00, 0x1a,
+	/* 18 Byte Data Blocks 2: invalid */
+	0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x3c, 0x18, 0x50, 0x11, 0x00, 0x0a,
+	0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+	/* 18 Byte Data Blocks 3: invalid */
+	0x00, 0x00, 0x00, 0xfc, 0x00, 0x48,
+	0x50, 0x20, 0x5a, 0x52, 0x32, 0x34, 0x34, 0x30, 0x77, 0x0a, 0x20, 0x20,
+	/* 18 Byte Data Blocks 4: invalid */
+	0x00, 0x00, 0x00, 0xff, 0x00, 0x43, 0x4e, 0x34, 0x33, 0x30, 0x34, 0x30,
+	0x44, 0x58, 0x51, 0x0a, 0x20, 0x20,
+	/* Extension Block Count */
+	0x00,
+	/* Checksum */
+	0x45,
+	},
+};
 
 #define DPCD_HEADER_SIZE	0xb
@@ -140,14 +176,20 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
 	vgpu_vreg(vgpu, SDEISR) &= ~(SDE_PORTA_HOTPLUG_SPT |
 			SDE_PORTE_HOTPLUG_SPT);
 
-	if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B))
+	if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
 		vgpu_vreg(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT;
+		vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED;
+	}
 
-	if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C))
+	if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
 		vgpu_vreg(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT;
+		vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIC_DETECTED;
+	}
 
-	if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D))
+	if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D)) {
 		vgpu_vreg(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT;
+		vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED;
+	}
 
 	if (IS_SKYLAKE(dev_priv) &&
 			intel_vgpu_has_monitor_on_port(vgpu, PORT_E)) {
@@ -160,6 +202,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
 				GEN8_PORT_DP_A_HOTPLUG;
 		else
 			vgpu_vreg(vgpu, SDEISR) |= SDE_PORTA_HOTPLUG_SPT;
+
+		vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_A)) |= DDI_INIT_DISPLAY_DETECTED;
 	}
 }
 
@@ -175,10 +219,13 @@ static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
 }
 
 static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
-		int type)
+		int type, unsigned int resolution)
 {
 	struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
 
+	if (WARN_ON(resolution >= GVT_EDID_NUM))
+		return -EINVAL;
+
 	port->edid = kzalloc(sizeof(*(port->edid)), GFP_KERNEL);
 	if (!port->edid)
 		return -ENOMEM;
@@ -189,7 +236,7 @@ static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
 		return -ENOMEM;
 	}
 
-	memcpy(port->edid->edid_block, virtual_dp_monitor_edid,
+	memcpy(port->edid->edid_block, virtual_dp_monitor_edid[resolution],
 			EDID_SIZE);
 	port->edid->data_valid = true;
 
@@ -322,16 +369,18 @@ void intel_vgpu_clean_display(struct intel_vgpu *vgpu)
  * Zero on success, negative error code if failed.
  *
  */
-int intel_vgpu_init_display(struct intel_vgpu *vgpu)
+int intel_vgpu_init_display(struct intel_vgpu *vgpu, u64 resolution)
 {
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 
 	intel_vgpu_init_i2c_edid(vgpu);
 
 	if (IS_SKYLAKE(dev_priv))
-		return setup_virtual_dp_monitor(vgpu, PORT_D, GVT_DP_D);
+		return setup_virtual_dp_monitor(vgpu, PORT_D, GVT_DP_D,
+						resolution);
 	else
-		return setup_virtual_dp_monitor(vgpu, PORT_B, GVT_DP_B);
+		return setup_virtual_dp_monitor(vgpu, PORT_B, GVT_DP_B,
+						resolution);
 }
 
 /**
...
@@ -154,10 +154,28 @@ struct intel_vgpu_port {
 	int type;
 };
 
+enum intel_vgpu_edid {
+	GVT_EDID_1024_768,
+	GVT_EDID_1920_1200,
+	GVT_EDID_NUM,
+};
+
+static inline char *vgpu_edid_str(enum intel_vgpu_edid id)
+{
+	switch (id) {
+	case GVT_EDID_1024_768:
+		return "1024x768";
+	case GVT_EDID_1920_1200:
+		return "1920x1200";
+	default:
+		return "";
+	}
+}
+
 void intel_gvt_emulate_vblank(struct intel_gvt *gvt);
 void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt);
 
-int intel_vgpu_init_display(struct intel_vgpu *vgpu);
+int intel_vgpu_init_display(struct intel_vgpu *vgpu, u64 resolution);
 void intel_vgpu_reset_display(struct intel_vgpu *vgpu);
 void intel_vgpu_clean_display(struct intel_vgpu *vgpu);
...
@@ -80,7 +80,7 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
 	int ret;
 
 	size = sizeof(*h) + info->mmio_size + info->cfg_space_size - 1;
-	firmware = vmalloc(size);
+	firmware = vzalloc(size);
 	if (!firmware)
 		return -ENOMEM;
...
@@ -1825,11 +1825,8 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 	gma = g_gtt_index << GTT_PAGE_SHIFT;
 
 	/* the VM may configure the whole GM space when ballooning is used */
-	if (WARN_ONCE(!vgpu_gmadr_is_valid(vgpu, gma),
-				"vgpu%d: found oob ggtt write, offset %x\n",
-				vgpu->id, off)) {
+	if (!vgpu_gmadr_is_valid(vgpu, gma))
 		return 0;
-	}
 
 	ggtt_get_guest_entry(ggtt_mm, &e, g_gtt_index);
 
@@ -2015,6 +2012,22 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
 	return create_scratch_page_tree(vgpu);
 }
 
+static void intel_vgpu_free_mm(struct intel_vgpu *vgpu, int type)
+{
+	struct list_head *pos, *n;
+	struct intel_vgpu_mm *mm;
+
+	list_for_each_safe(pos, n, &vgpu->gtt.mm_list_head) {
+		mm = container_of(pos, struct intel_vgpu_mm, list);
+		if (mm->type == type) {
+			vgpu->gvt->gtt.mm_free_page_table(mm);
+			list_del(&mm->list);
+			list_del(&mm->lru_list);
+			kfree(mm);
+		}
+	}
+}
+
 /**
  * intel_vgpu_clean_gtt - clean up per-vGPU graphics memory virtualization
  * @vgpu: a vGPU
@@ -2027,19 +2040,11 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
  */
 void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu)
 {
-	struct list_head *pos, *n;
-	struct intel_vgpu_mm *mm;
-
 	ppgtt_free_all_shadow_page(vgpu);
 	release_scratch_page_tree(vgpu);
 
-	list_for_each_safe(pos, n, &vgpu->gtt.mm_list_head) {
-		mm = container_of(pos, struct intel_vgpu_mm, list);
-		vgpu->gvt->gtt.mm_free_page_table(mm);
-		list_del(&mm->list);
-		list_del(&mm->lru_list);
-		kfree(mm);
-	}
+	intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_PPGTT);
+	intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_GGTT);
 }
 
 static void clean_spt_oos(struct intel_gvt *gvt)
@@ -2322,6 +2327,13 @@ void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr)
 	int i;
 
 	ppgtt_free_all_shadow_page(vgpu);
+
+	/* Shadow pages are only created when there is no page
+	 * table tracking data, so remove page tracking data after
+	 * removing the shadow pages.
+	 */
+	intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_PPGTT);
+
 	if (!dmlr)
 		return;
...
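The intel_vgpu_free_mm() helper introduced above deletes nodes while walking the list, which is why it uses list_for_each_safe(): the _safe variant caches the next pointer before the loop body runs, so list_del()/kfree() on the current node cannot break the traversal. Sketch of the pattern (kernel-style, illustrative):

	struct list_head *pos, *n;

	list_for_each_safe(pos, n, &head) {
		struct item *it = container_of(pos, struct item, list);

		list_del(pos);	/* safe: the iterator advances via n */
		kfree(it);
	}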
@@ -143,6 +143,8 @@ struct intel_vgpu {
 	int id;
 	unsigned long handle; /* vGPU handle used by hypervisor MPT modules */
 	bool active;
+	bool pv_notified;
+	bool failsafe;
 	bool resetting;
 	void *sched_data;
 
@@ -203,18 +205,18 @@ struct intel_gvt_firmware {
 };
 
 struct intel_gvt_opregion {
-	void __iomem *opregion_va;
+	void *opregion_va;
 	u32 opregion_pa;
 };
 
 #define NR_MAX_INTEL_VGPU_TYPES 20
 struct intel_vgpu_type {
 	char name[16];
-	unsigned int max_instance;
 	unsigned int avail_instance;
 	unsigned int low_gm_size;
 	unsigned int high_gm_size;
 	unsigned int fence;
+	enum intel_vgpu_edid resolution;
 };
 
 struct intel_gvt {
@@ -317,6 +319,7 @@ struct intel_vgpu_creation_params {
 	__u64 low_gm_sz;  /* in MB */
 	__u64 high_gm_sz; /* in MB */
 	__u64 fence_sz;
+	__u64 resolution;
 	__s32 primary;
 	__u64 vgpu_id;
 };
@@ -449,6 +452,11 @@ struct intel_gvt_ops {
 };
 
+enum {
+	GVT_FAILSAFE_UNSUPPORTED_GUEST,
+	GVT_FAILSAFE_INSUFFICIENT_RESOURCE,
+};
+
 #include "mpt.h"
 
 #endif
...
@@ -121,6 +121,7 @@ static int new_mmio_info(struct intel_gvt *gvt,
 		info->size = size;
 		info->length = (i + 4) < end ? 4 : (end - i);
 		info->addr_mask = addr_mask;
+		info->ro_mask = ro_mask;
 		info->device = device;
 		info->read = read ? read : intel_vgpu_default_mmio_read;
 		info->write = write ? write : intel_vgpu_default_mmio_write;
@@ -150,15 +151,44 @@ static int render_mmio_to_ring_id(struct intel_gvt *gvt, unsigned int reg)
 #define fence_num_to_offset(num) \
 	(num * 8 + i915_mmio_reg_offset(FENCE_REG_GEN6_LO(0)))
 
+static void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason)
+{
+	switch (reason) {
+	case GVT_FAILSAFE_UNSUPPORTED_GUEST:
+		pr_err("Detected your guest driver doesn't support GVT-g.\n");
+		break;
+	case GVT_FAILSAFE_INSUFFICIENT_RESOURCE:
+		pr_err("Graphics resource is not enough for the guest\n");
+	default:
+		break;
+	}
+	pr_err("Now vgpu %d will enter failsafe mode.\n", vgpu->id);
+	vgpu->failsafe = true;
+}
+
 static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu,
 		unsigned int fence_num, void *p_data, unsigned int bytes)
 {
 	if (fence_num >= vgpu_fence_sz(vgpu)) {
-		gvt_err("vgpu%d: found oob fence register access\n",
-				vgpu->id);
-		gvt_err("vgpu%d: total fence num %d access fence num %d\n",
-				vgpu->id, vgpu_fence_sz(vgpu), fence_num);
+
+		/* When a guest accesses oob fence regs without accessing
+		 * pv_info first, we treat the guest as not supporting GVT,
+		 * and let the vgpu enter failsafe mode.
+		 */
+		if (!vgpu->pv_notified)
+			enter_failsafe_mode(vgpu,
+					GVT_FAILSAFE_UNSUPPORTED_GUEST);
+
+		if (!vgpu->mmio.disable_warn_untrack) {
+			gvt_err("vgpu%d: found oob fence register access\n",
+					vgpu->id);
+			gvt_err("vgpu%d: total fence %d, access fence %d\n",
+					vgpu->id, vgpu_fence_sz(vgpu),
+					fence_num);
+		}
 		memset(p_data, 0, bytes);
+		return -EINVAL;
 	}
 	return 0;
 }
@@ -369,6 +399,74 @@ static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 	return 0;
 }
 
+/* sorted in ascending order */
+static i915_reg_t force_nonpriv_white_list[] = {
+	GEN9_CS_DEBUG_MODE1,		//_MMIO(0x20ec)
+	GEN9_CTX_PREEMPT_REG,		//_MMIO(0x2248)
+	GEN8_CS_CHICKEN1,		//_MMIO(0x2580)
+	_MMIO(0x2690),
+	_MMIO(0x2694),
+	_MMIO(0x2698),
+	_MMIO(0x4de0),
+	_MMIO(0x4de4),
+	_MMIO(0x4dfc),
+	GEN7_COMMON_SLICE_CHICKEN1,	//_MMIO(0x7010)
+	_MMIO(0x7014),
+	HDC_CHICKEN0,			//_MMIO(0x7300)
+	GEN8_HDC_CHICKEN1,		//_MMIO(0x7304)
+	_MMIO(0x7700),
+	_MMIO(0x7704),
+	_MMIO(0x7708),
+	_MMIO(0x770c),
+	_MMIO(0xb110),
+	GEN8_L3SQCREG4,			//_MMIO(0xb118)
+	_MMIO(0xe100),
+	_MMIO(0xe18c),
+	_MMIO(0xe48c),
+	_MMIO(0xe5f4),
+};
+
+/* a simple bsearch */
+static inline bool in_whitelist(unsigned int reg)
+{
+	int left = 0, right = ARRAY_SIZE(force_nonpriv_white_list);
+	i915_reg_t *array = force_nonpriv_white_list;
+
+	while (left < right) {
+		int mid = (left + right) / 2;
+
+		if (reg > array[mid].reg)
+			left = mid + 1;
+		else if (reg < array[mid].reg)
+			right = mid;
+		else
+			return true;
+	}
+	return false;
+}
+
+static int force_nonpriv_write(struct intel_vgpu *vgpu,
+	unsigned int offset, void *p_data, unsigned int bytes)
+{
+	u32 reg_nonpriv = *(u32 *)p_data;
+	int ret = -EINVAL;
+
+	if ((bytes != 4) || ((offset & (bytes - 1)) != 0)) {
+		gvt_err("vgpu(%d) Invalid FORCE_NONPRIV offset %x(%dB)\n",
+			vgpu->id, offset, bytes);
+		return ret;
+	}
+
+	if (in_whitelist(reg_nonpriv)) {
+		ret = intel_vgpu_default_mmio_write(vgpu, offset, p_data,
+			bytes);
+	} else {
+		gvt_err("vgpu(%d) Invalid FORCE_NONPRIV write %x\n",
+			vgpu->id, reg_nonpriv);
+	}
+	return ret;
+}
+
 static int ddi_buf_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 		void *p_data, unsigned int bytes)
 {
@@ -1001,6 +1099,7 @@ static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
 	if (invalid_read)
 		gvt_err("invalid pvinfo read: [%x:%x] = %x\n",
 				offset, bytes, *(u32 *)p_data);
+	vgpu->pv_notified = true;
 	return 0;
 }
@@ -1039,7 +1138,7 @@ static int send_display_ready_uevent(struct intel_vgpu *vgpu, int ready)
 	char vmid_str[20];
 	char display_ready_str[20];
 
-	snprintf(display_ready_str, 20, "GVT_DISPLAY_READY=%d\n", ready);
+	snprintf(display_ready_str, 20, "GVT_DISPLAY_READY=%d", ready);
 	env[0] = display_ready_str;
 
 	snprintf(vmid_str, 20, "VMID=%d", vgpu->id);
@@ -1078,6 +1177,9 @@ static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 	case _vgtif_reg(execlist_context_descriptor_lo):
 	case _vgtif_reg(execlist_context_descriptor_hi):
 		break;
+	case _vgtif_reg(rsv5[0])..._vgtif_reg(rsv5[3]):
+		enter_failsafe_mode(vgpu, GVT_FAILSAFE_INSUFFICIENT_RESOURCE);
+		break;
 	default:
 		gvt_err("invalid pvinfo write offset %x bytes %x data %x\n",
 				offset, bytes, data);
@@ -1203,26 +1305,37 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
 	u32 *data0 = &vgpu_vreg(vgpu, GEN6_PCODE_DATA);
 
 	switch (cmd) {
-	case 0x6:
-		/**
-		 * "Read memory latency" command on gen9.
-		 * Below memory latency values are read
-		 * from skylake platform.
-		 */
-		if (!*data0)
-			*data0 = 0x1e1a1100;
-		else
-			*data0 = 0x61514b3d;
+	case GEN9_PCODE_READ_MEM_LATENCY:
+		if (IS_SKYLAKE(vgpu->gvt->dev_priv)) {
+			/**
+			 * "Read memory latency" command on gen9.
+			 * Below memory latency values are read
+			 * from the skylake platform.
+			 */
+			if (!*data0)
+				*data0 = 0x1e1a1100;
+			else
+				*data0 = 0x61514b3d;
+		}
+		break;
+	case SKL_PCODE_CDCLK_CONTROL:
+		if (IS_SKYLAKE(vgpu->gvt->dev_priv))
+			*data0 = SKL_CDCLK_READY_FOR_CHANGE;
 		break;
-	case 0x5:
+	case GEN6_PCODE_READ_RC6VIDS:
 		*data0 |= 0x1;
 		break;
 	}
 
 	gvt_dbg_core("VM(%d) write %x to mailbox, return data0 %x\n",
 		     vgpu->id, value, *data0);
-
-	value &= ~(1 << 31);
+	/**
+	 * PCODE_READY clear means ready for pcode read/write,
+	 * PCODE_ERROR_MASK clear means no error happened. In GVT-g we
+	 * always emulate pcode read/write as successful and ready for
+	 * access anytime, since we don't touch real physical registers here.
+	 */
+	value &= ~(GEN6_PCODE_READY | GEN6_PCODE_ERROR_MASK);
 	return intel_vgpu_default_mmio_write(vgpu, offset, &value, bytes);
 }
@@ -1318,6 +1431,17 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 	bool enable_execlist;
 
 	write_vreg(vgpu, offset, p_data, bytes);
+
+	/* When PPGTT mode is enabled, check whether the guest has called
+	 * pvinfo; if not, treat it as a non-GVT-g-aware guest and stop
+	 * emulating its cfg space, mmio, gtt, etc.
+	 */
+	if (((data & _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)) ||
+			(data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)))
+			&& !vgpu->pv_notified) {
+		enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
+		return 0;
+	}
 	if ((data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE))
 			|| (data & _MASKED_BIT_DISABLE(GFX_RUN_LIST_ENABLE))) {
 		enable_execlist = !!(data & GFX_RUN_LIST_ENABLE);
@@ -1400,6 +1524,9 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
 #define MMIO_GM(reg, d, r, w) \
 	MMIO_F(reg, 4, F_GMADR, 0xFFFFF000, 0, d, r, w)
 
+#define MMIO_GM_RDR(reg, d, r, w) \
+	MMIO_F(reg, 4, F_GMADR | F_CMD_ACCESS, 0xFFFFF000, 0, d, r, w)
+
 #define MMIO_RO(reg, d, f, rm, r, w) \
 	MMIO_F(reg, 4, F_RO | f, 0, rm, d, r, w)
 
@@ -1419,6 +1546,9 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
 #define MMIO_RING_GM(prefix, d, r, w) \
 	MMIO_RING_F(prefix, 4, F_GMADR, 0xFFFF0000, 0, d, r, w)
 
+#define MMIO_RING_GM_RDR(prefix, d, r, w) \
+	MMIO_RING_F(prefix, 4, F_GMADR | F_CMD_ACCESS, 0xFFFF0000, 0, d, r, w)
+
 #define MMIO_RING_RO(prefix, d, f, rm, r, w) \
 	MMIO_RING_F(prefix, 4, F_RO | f, 0, rm, d, r, w)
@@ -1427,73 +1557,81 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 	struct drm_i915_private *dev_priv = gvt->dev_priv;
 	int ret;
 
-	MMIO_RING_DFH(RING_IMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler);
+	MMIO_RING_DFH(RING_IMR, D_ALL, F_CMD_ACCESS, NULL,
+		intel_vgpu_reg_imr_handler);
 
 	MMIO_DFH(SDEIMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler);
 	MMIO_DFH(SDEIER, D_ALL, 0, NULL, intel_vgpu_reg_ier_handler);
 	MMIO_DFH(SDEIIR, D_ALL, 0, NULL, intel_vgpu_reg_iir_handler);
 	MMIO_D(SDEISR, D_ALL);
 
-	MMIO_RING_D(RING_HWSTAM, D_ALL);
+	MMIO_RING_DFH(RING_HWSTAM, D_ALL, F_CMD_ACCESS, NULL, NULL);
 
-	MMIO_GM(RENDER_HWS_PGA_GEN7, D_ALL, NULL, NULL);
-	MMIO_GM(BSD_HWS_PGA_GEN7, D_ALL, NULL, NULL);
-	MMIO_GM(BLT_HWS_PGA_GEN7, D_ALL, NULL, NULL);
-	MMIO_GM(VEBOX_HWS_PGA_GEN7, D_ALL, NULL, NULL);
+	MMIO_GM_RDR(RENDER_HWS_PGA_GEN7, D_ALL, NULL, NULL);
+	MMIO_GM_RDR(BSD_HWS_PGA_GEN7, D_ALL, NULL, NULL);
+	MMIO_GM_RDR(BLT_HWS_PGA_GEN7, D_ALL, NULL, NULL);
+	MMIO_GM_RDR(VEBOX_HWS_PGA_GEN7, D_ALL, NULL, NULL);
 
 #define RING_REG(base) (base + 0x28)
-	MMIO_RING_D(RING_REG, D_ALL);
+	MMIO_RING_DFH(RING_REG, D_ALL, F_CMD_ACCESS, NULL, NULL);
 #undef RING_REG
 
 #define RING_REG(base) (base + 0x134)
-	MMIO_RING_D(RING_REG, D_ALL);
+	MMIO_RING_DFH(RING_REG, D_ALL, F_CMD_ACCESS, NULL, NULL);
 #undef RING_REG
 
-	MMIO_GM(0x2148, D_ALL, NULL, NULL);
-	MMIO_GM(CCID, D_ALL, NULL, NULL);
-	MMIO_GM(0x12198, D_ALL, NULL, NULL);
+	MMIO_GM_RDR(0x2148, D_ALL, NULL, NULL);
+	MMIO_GM_RDR(CCID, D_ALL, NULL, NULL);
+	MMIO_GM_RDR(0x12198, D_ALL, NULL, NULL);
 	MMIO_D(GEN7_CXT_SIZE, D_ALL);
 
-	MMIO_RING_D(RING_TAIL, D_ALL);
-	MMIO_RING_D(RING_HEAD, D_ALL);
-	MMIO_RING_D(RING_CTL, D_ALL);
-	MMIO_RING_D(RING_ACTHD, D_ALL);
-	MMIO_RING_GM(RING_START, D_ALL, NULL, NULL);
+	MMIO_RING_DFH(RING_TAIL, D_ALL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_RING_DFH(RING_HEAD, D_ALL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_RING_DFH(RING_CTL, D_ALL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_RING_DFH(RING_ACTHD, D_ALL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_RING_GM_RDR(RING_START, D_ALL, NULL, NULL);
 
 	/* RING MODE */
 #define RING_REG(base) (base + 0x29c)
-	MMIO_RING_DFH(RING_REG, D_ALL, F_MODE_MASK, NULL, ring_mode_mmio_write);
+	MMIO_RING_DFH(RING_REG, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL,
+		ring_mode_mmio_write);
 #undef RING_REG
 
-	MMIO_RING_DFH(RING_MI_MODE, D_ALL, F_MODE_MASK, NULL, NULL);
-	MMIO_RING_DFH(RING_INSTPM, D_ALL, F_MODE_MASK, NULL, NULL);
+	MMIO_RING_DFH(RING_MI_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
+		NULL, NULL);
+	MMIO_RING_DFH(RING_INSTPM, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
+		NULL, NULL);
 	MMIO_RING_DFH(RING_TIMESTAMP, D_ALL, F_CMD_ACCESS,
 			ring_timestamp_mmio_read, NULL);
 	MMIO_RING_DFH(RING_TIMESTAMP_UDW, D_ALL, F_CMD_ACCESS,
 			ring_timestamp_mmio_read, NULL);
 
-	MMIO_DFH(GEN7_GT_MODE, D_ALL, F_MODE_MASK, NULL, NULL);
-	MMIO_DFH(CACHE_MODE_0_GEN7, D_ALL, F_MODE_MASK, NULL, NULL);
+	MMIO_DFH(GEN7_GT_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(CACHE_MODE_0_GEN7, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
+		NULL, NULL);
 	MMIO_DFH(CACHE_MODE_1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(CACHE_MODE_0, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x2124, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
 
-	MMIO_DFH(0x20dc, D_ALL, F_MODE_MASK, NULL, NULL);
-	MMIO_DFH(_3D_CHICKEN3, D_ALL, F_MODE_MASK, NULL, NULL);
-	MMIO_DFH(0x2088, D_ALL, F_MODE_MASK, NULL, NULL);
-	MMIO_DFH(0x20e4, D_ALL, F_MODE_MASK, NULL, NULL);
-	MMIO_DFH(0x2470, D_ALL, F_MODE_MASK, NULL, NULL);
-	MMIO_D(GAM_ECOCHK, D_ALL);
-	MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK, NULL, NULL);
+	MMIO_DFH(0x20dc, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(_3D_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x2088, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x20e4, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x2470, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
+		NULL, NULL);
 	MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
-	MMIO_D(0x9030, D_ALL);
-	MMIO_D(0x20a0, D_ALL);
-	MMIO_D(0x2420, D_ALL);
-	MMIO_D(0x2430, D_ALL);
-	MMIO_D(0x2434, D_ALL);
-	MMIO_D(0x2438, D_ALL);
-	MMIO_D(0x243c, D_ALL);
-	MMIO_DFH(0x7018, D_ALL, F_MODE_MASK, NULL, NULL);
+	MMIO_DFH(0x9030, D_ALL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x20a0, D_ALL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x2420, D_ALL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x2430, D_ALL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x2434, D_ALL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x2438, D_ALL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x243c, D_ALL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x7018, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(HALF_SLICE_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
-	MMIO_DFH(0xe100, D_ALL, F_MODE_MASK, NULL, NULL);
+	MMIO_DFH(GEN7_HALF_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
 
 	/* display */
 	MMIO_F(0x60220, 0x20, 0, 0, 0, D_ALL, NULL, NULL);
@@ -2022,8 +2160,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(FORCEWAKE_ACK, D_ALL);
 	MMIO_D(GEN6_GT_CORE_STATUS, D_ALL);
 	MMIO_D(GEN6_GT_THREAD_STATUS_REG, D_ALL);
-	MMIO_D(GTFIFODBG, D_ALL);
-	MMIO_D(GTFIFOCTL, D_ALL);
+	MMIO_DFH(GTFIFODBG, D_ALL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(GTFIFOCTL, D_ALL, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DH(FORCEWAKE_MT, D_PRE_SKL, NULL, mul_force_wake_write);
 	MMIO_DH(FORCEWAKE_ACK_HSW, D_HSW | D_BDW, NULL, NULL);
 	MMIO_D(ECOBUS, D_ALL);
 
@@ -2080,7 +2218,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 
 	MMIO_F(0x4f000, 0x90, 0, 0, 0, D_ALL, NULL, NULL);
 
-	MMIO_D(GEN6_PCODE_MAILBOX, D_PRE_SKL);
+	MMIO_D(GEN6_PCODE_MAILBOX, D_PRE_BDW);
 	MMIO_D(GEN6_PCODE_DATA, D_ALL);
 	MMIO_D(0x13812c, D_ALL);
 	MMIO_DH(GEN7_ERR_INT, D_ALL, NULL, NULL);
@@ -2159,36 +2297,35 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(0x1a054, D_ALL);
 
 	MMIO_D(0x44070, D_ALL);
-
-	MMIO_D(0x215c, D_HSW_PLUS);
+	MMIO_DFH(0x215c, D_HSW_PLUS, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(0x2178, D_ALL, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(0x217c, D_ALL, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(0x12178, D_ALL, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(0x1217c, D_ALL, F_CMD_ACCESS, NULL, NULL);
 
-	MMIO_F(0x2290, 8, 0, 0, 0, D_HSW_PLUS, NULL, NULL);
-	MMIO_D(GEN7_OACONTROL, D_HSW);
+	MMIO_F(0x2290, 8, F_CMD_ACCESS, 0, 0, D_HSW_PLUS, NULL, NULL);
+	MMIO_DFH(GEN7_OACONTROL, D_HSW, F_CMD_ACCESS, NULL, NULL);
 	MMIO_D(0x2b00, D_BDW_PLUS);
 	MMIO_D(0x2360, D_BDW_PLUS);
-	MMIO_F(0x5200, 32, 0, 0, 0, D_ALL, NULL, NULL);
-	MMIO_F(0x5240, 32, 0, 0, 0, D_ALL, NULL, NULL);
-	MMIO_F(0x5280, 16, 0, 0, 0, D_ALL, NULL, NULL);
+	MMIO_F(0x5200, 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+	MMIO_F(0x5240, 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+	MMIO_F(0x5280, 16, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
 
 	MMIO_DFH(0x1c17c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(0x1c178, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
-	MMIO_D(BCS_SWCTRL, D_ALL);
-
-	MMIO_F(HS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
-	MMIO_F(DS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
-	MMIO_F(IA_VERTICES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
-	MMIO_F(IA_PRIMITIVES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
-	MMIO_F(VS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
-	MMIO_F(GS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
-	MMIO_F(GS_PRIMITIVES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
-	MMIO_F(CL_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
-	MMIO_F(CL_PRIMITIVES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
-	MMIO_F(PS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
-	MMIO_F(PS_DEPTH_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
+	MMIO_DFH(BCS_SWCTRL, D_ALL, F_CMD_ACCESS, NULL, NULL);
+
+	MMIO_F(HS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+	MMIO_F(DS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+	MMIO_F(IA_VERTICES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+	MMIO_F(IA_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+	MMIO_F(VS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+	MMIO_F(GS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+	MMIO_F(GS_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+	MMIO_F(CL_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+	MMIO_F(CL_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+	MMIO_F(PS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+	MMIO_F(PS_DEPTH_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
 	MMIO_DH(0x4260, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
 	MMIO_DH(0x4264, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
 	MMIO_DH(0x4268, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
@@ -2196,6 +2333,17 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 	MMIO_DH(0x4270, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
 	MMIO_DFH(0x4094, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
 
+	MMIO_DFH(ARB_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+	MMIO_RING_GM_RDR(RING_BBADDR, D_ALL, NULL, NULL);
+	MMIO_DFH(0x2220, D_ALL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x12220, D_ALL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x22220, D_ALL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_RING_DFH(RING_SYNC_1, D_ALL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_RING_DFH(RING_SYNC_0, D_ALL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x22178, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x1a178, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x1a17c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x2217c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
 	return 0;
 }
@@ -2204,7 +2352,7 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
 	struct drm_i915_private *dev_priv = gvt->dev_priv;
 	int ret;
 
-	MMIO_DH(RING_IMR(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL,
+	MMIO_DFH(RING_IMR(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS, NULL,
 		intel_vgpu_reg_imr_handler);
 
 	MMIO_DH(GEN8_GT_IMR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
@@ -2269,24 +2417,31 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
 	MMIO_DH(GEN8_MASTER_IRQ, D_BDW_PLUS, NULL,
 		intel_vgpu_reg_master_irq_handler);
 
-	MMIO_D(RING_HWSTAM(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
-	MMIO_D(0x1c134, D_BDW_PLUS);
-
-	MMIO_D(RING_TAIL(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
-	MMIO_D(RING_HEAD(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
-	MMIO_GM(RING_START(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, NULL);
-	MMIO_D(RING_CTL(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
-	MMIO_D(RING_ACTHD(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
-	MMIO_D(RING_ACTHD_UDW(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
-	MMIO_DFH(0x1c29c, D_BDW_PLUS, F_MODE_MASK, NULL, ring_mode_mmio_write);
-	MMIO_DFH(RING_MI_MODE(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_MODE_MASK,
-			NULL, NULL);
-	MMIO_DFH(RING_INSTPM(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_MODE_MASK,
-			NULL, NULL);
+	MMIO_DFH(RING_HWSTAM(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
+		F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x1c134, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+
+	MMIO_DFH(RING_TAIL(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS,
+		NULL, NULL);
+	MMIO_DFH(RING_HEAD(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
+		F_CMD_ACCESS, NULL, NULL);
+	MMIO_GM_RDR(RING_START(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, NULL);
+	MMIO_DFH(RING_CTL(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS,
+		NULL, NULL);
+	MMIO_DFH(RING_ACTHD(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
+		F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(RING_ACTHD_UDW(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
+		F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x1c29c, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL,
+		ring_mode_mmio_write);
+	MMIO_DFH(RING_MI_MODE(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
+		F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(RING_INSTPM(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
+		F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(RING_TIMESTAMP(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS,
 			ring_timestamp_mmio_read, NULL);
 
-	MMIO_RING_D(RING_ACTHD_UDW, D_BDW_PLUS);
+	MMIO_RING_DFH(RING_ACTHD_UDW, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
 
 #define RING_REG(base) (base + 0xd0)
 	MMIO_RING_F(RING_REG, 4, F_RO, 0,
@@ -2303,13 +2458,16 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
 #undef RING_REG
 
 #define RING_REG(base) (base + 0x234)
-	MMIO_RING_F(RING_REG, 8, F_RO, 0, ~0, D_BDW_PLUS, NULL, NULL);
-	MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 4, F_RO, 0, ~0LL, D_BDW_PLUS, NULL, NULL);
+	MMIO_RING_F(RING_REG, 8, F_RO | F_CMD_ACCESS, 0, ~0, D_BDW_PLUS,
+		NULL, NULL);
+	MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 4, F_RO | F_CMD_ACCESS, 0,
+		~0LL, D_BDW_PLUS, NULL, NULL);
 #undef RING_REG
 
 #define RING_REG(base) (base + 0x244)
-	MMIO_RING_D(RING_REG, D_BDW_PLUS);
-	MMIO_D(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
+	MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS,
+		NULL, NULL);
 #undef RING_REG
 
 #define RING_REG(base) (base + 0x370)
@@ -2331,6 +2489,8 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(GEN7_MISCCPCTL, D_BDW_PLUS);
 	MMIO_D(0x1c054, D_BDW_PLUS);
 
+	MMIO_DH(GEN6_PCODE_MAILBOX, D_BDW_PLUS, NULL, mailbox_write);
+
 	MMIO_D(GEN8_PRIVATE_PAT_LO, D_BDW_PLUS);
 	MMIO_D(GEN8_PRIVATE_PAT_HI, D_BDW_PLUS);
@@ -2341,14 +2501,14 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
 	MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 32, 0, 0, 0, D_BDW_PLUS, NULL, NULL);
 #undef RING_REG
 
-	MMIO_RING_GM(RING_HWS_PGA, D_BDW_PLUS, NULL, NULL);
-	MMIO_GM(0x1c080, D_BDW_PLUS, NULL, NULL);
+	MMIO_RING_GM_RDR(RING_HWS_PGA, D_BDW_PLUS, NULL, NULL);
+	MMIO_GM_RDR(RING_HWS_PGA(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, NULL);
 
 	MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
 
-	MMIO_D(CHICKEN_PIPESL_1(PIPE_A), D_BDW);
-	MMIO_D(CHICKEN_PIPESL_1(PIPE_B), D_BDW);
-	MMIO_D(CHICKEN_PIPESL_1(PIPE_C), D_BDW);
+	MMIO_D(CHICKEN_PIPESL_1(PIPE_A), D_BDW_PLUS);
+	MMIO_D(CHICKEN_PIPESL_1(PIPE_B), D_BDW_PLUS);
+	MMIO_D(CHICKEN_PIPESL_1(PIPE_C), D_BDW_PLUS);
 
 	MMIO_D(WM_MISC, D_BDW);
 	MMIO_D(BDW_EDP_PSR_BASE, D_BDW);
@@ -2362,27 +2522,31 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(GEN8_EU_DISABLE1, D_BDW_PLUS);
 	MMIO_D(GEN8_EU_DISABLE2, D_BDW_PLUS);
 
-	MMIO_D(0xfdc, D_BDW);
-	MMIO_DFH(GEN8_ROW_CHICKEN, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
-	MMIO_D(GEN7_ROW_CHICKEN2, D_BDW_PLUS);
-	MMIO_D(GEN8_UCGCTL6, D_BDW_PLUS);
+	MMIO_D(0xfdc, D_BDW_PLUS);
+	MMIO_DFH(GEN8_ROW_CHICKEN, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS,
+		NULL, NULL);
+	MMIO_DFH(GEN7_ROW_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS,
+		NULL, NULL);
+	MMIO_DFH(GEN8_UCGCTL6, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
 
-	MMIO_D(0xb1f0, D_BDW);
-	MMIO_D(0xb1c0, D_BDW);
+	MMIO_DFH(0xb1f0, D_BDW, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0xb1c0, D_BDW, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(GEN8_L3SQCREG4, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
-	MMIO_D(0xb100, D_BDW);
-	MMIO_D(0xb10c, D_BDW);
+	MMIO_DFH(0xb100, D_BDW, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0xb10c, D_BDW, F_CMD_ACCESS, NULL, NULL);
 	MMIO_D(0xb110, D_BDW);
 
-	MMIO_DFH(0x24d0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
-	MMIO_DFH(0x24d4, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
-	MMIO_DFH(0x24d8, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
-	MMIO_DFH(0x24dc, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+	MMIO_F(0x24d0, 48, F_CMD_ACCESS, 0, 0, D_BDW_PLUS,
+		NULL, force_nonpriv_write);
+
+	MMIO_D(0x22040, D_BDW_PLUS);
+	MMIO_D(0x44484, D_BDW_PLUS);
+	MMIO_D(0x4448c, D_BDW_PLUS);
 
-	MMIO_D(0x83a4, D_BDW);
+	MMIO_DFH(0x83a4, D_BDW, F_CMD_ACCESS, NULL, NULL);
 	MMIO_D(GEN8_L3_LRA_1_GPGPU, D_BDW_PLUS);
 
-	MMIO_D(0x8430, D_BDW);
+	MMIO_DFH(0x8430, D_BDW, F_CMD_ACCESS, NULL, NULL);
 
 	MMIO_D(0x110000, D_BDW_PLUS);
@@ -2394,10 +2558,19 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
 	MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
-	MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
-	MMIO_D(0x2248, D_BDW);
+	MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x2248, D_BDW, F_CMD_ACCESS, NULL, NULL);
 
+	MMIO_DFH(0xe220, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0xe230, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0xe240, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0xe260, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0xe270, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0xe280, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0xe2a0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0xe2b0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0xe2c0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
 	return 0;
 }
@@ -2420,7 +2593,6 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(HSW_PWR_WELL_BIOS, D_SKL);
 	MMIO_DH(HSW_PWR_WELL_DRIVER, D_SKL, NULL, skl_power_well_ctl_write);
-	MMIO_DH(GEN6_PCODE_MAILBOX, D_SKL, NULL, mailbox_write);
 	MMIO_D(0xa210, D_SKL_PLUS);
 	MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
 	MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
@@ -2578,16 +2750,16 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
 	MMIO_F(0xb020, 0x80, F_CMD_ACCESS, 0, 0, D_SKL, NULL, NULL);
 
 	MMIO_D(0xd08, D_SKL);
-	MMIO_D(0x20e0, D_SKL);
-	MMIO_D(0x20ec, D_SKL);
+	MMIO_DFH(0x20e0, D_SKL, F_MODE_MASK, NULL, NULL);
+	MMIO_DFH(0x20ec, D_SKL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
 
 	/* TRTT */
-	MMIO_D(0x4de0, D_SKL);
-	MMIO_D(0x4de4, D_SKL);
-	MMIO_D(0x4de8, D_SKL);
-	MMIO_D(0x4dec, D_SKL);
-	MMIO_D(0x4df0, D_SKL);
-	MMIO_DH(0x4df4, D_SKL, NULL, gen9_trtte_write);
+	MMIO_DFH(0x4de0, D_SKL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x4de4, D_SKL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x4de8, D_SKL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x4dec, D_SKL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x4df0, D_SKL, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0x4df4, D_SKL, F_CMD_ACCESS, NULL, gen9_trtte_write);
 	MMIO_DH(0x4dfc, D_SKL, NULL, gen9_trtt_chicken_write);
 
 	MMIO_D(0x45008, D_SKL);
@@ -2611,7 +2783,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(0x65f08, D_SKL);
 	MMIO_D(0x320f0, D_SKL);
 
-	MMIO_D(_REG_VCS2_EXCC, D_SKL);
+	MMIO_DFH(_REG_VCS2_EXCC, D_SKL, F_CMD_ACCESS, NULL, NULL);
 	MMIO_D(0x70034, D_SKL);
 	MMIO_D(0x71034, D_SKL);
 	MMIO_D(0x72034, D_SKL);
@@ -2624,6 +2796,9 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(_PLANE_KEYMSK_1(PIPE_C), D_SKL);
 
 	MMIO_D(0x44500, D_SKL);
+	MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL, F_MODE_MASK | F_CMD_ACCESS,
+		NULL, NULL);
 	return 0;
 }
...
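The in_whitelist() helper added in this file is a hand-rolled binary search over the ascending force_nonpriv_white_list. A standalone, runnable sketch of the same logic in plain C (illustrative names, not the kernel's types):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Binary search over a sorted array of register offsets; mirrors the
 * left/right halving used by in_whitelist() above. */
static bool in_sorted(const uint32_t *arr, size_t n, uint32_t reg)
{
	size_t left = 0, right = n;

	while (left < right) {
		size_t mid = left + (right - left) / 2;

		if (reg > arr[mid])
			left = mid + 1;
		else if (reg < arr[mid])
			right = mid;
		else
			return true;	/* exact match: whitelisted */
	}
	return false;
}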
@@ -96,10 +96,10 @@ static int gvt_dma_map_iova(struct intel_vgpu *vgpu, kvm_pfn_t pfn,
 	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
 	dma_addr_t daddr;
-	page = pfn_to_page(pfn);
-	if (is_error_page(page))
+	if (unlikely(!pfn_valid(pfn)))
 		return -EFAULT;
+	page = pfn_to_page(pfn);
 	daddr = dma_map_page(dev, page, 0, PAGE_SIZE,
 			PCI_DMA_BIDIRECTIONAL);
 	if (dma_mapping_error(dev, daddr))
@@ -295,10 +295,10 @@ static ssize_t description_show(struct kobject *kobj, struct device *dev,
 		return 0;
 	return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n"
-		       "fence: %d\n",
+		       "fence: %d\nresolution: %s\n",
 		       BYTES_TO_MB(type->low_gm_size),
 		       BYTES_TO_MB(type->high_gm_size),
-		       type->fence);
+		       type->fence, vgpu_edid_str(type->resolution));
 }
 static MDEV_TYPE_ATTR_RO(available_instances);
......
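The kvmgt.c fix above reorders validation: calling pfn_to_page() on an invalid pfn is undefined, so pfn_valid() must run before the conversion rather than checking the converted result afterwards. A userspace analogue of that validate-before-convert pattern, with hypothetical names:

```c
#include <stdbool.h>
#include <stdio.h>

#define NPAGES 4
static int pages[NPAGES];	/* stand-in for the kernel's page array */

static bool pfn_valid_model(unsigned long pfn)
{
	return pfn < NPAGES;	/* validity check before any lookup */
}

/* Converting first and checking the result afterwards (the old order)
 * would already have performed an out-of-bounds access here. */
static int *pfn_to_page_model(unsigned long pfn)
{
	return &pages[pfn];
}

static int map_page(unsigned long pfn)
{
	if (!pfn_valid_model(pfn))	/* reject before converting */
		return -1;		/* -EFAULT in the kernel code */
	int *page = pfn_to_page_model(pfn);
	*page = 1;
	return 0;
}

int main(void)
{
	printf("pfn 2: %d\n", map_page(2));	/* 0  */
	printf("pfn 9: %d\n", map_page(9));	/* -1 */
	return 0;
}
```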
@@ -57,6 +57,58 @@ int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
 	(reg >= gvt->device_info.gtt_start_offset \
 	 && reg < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt))
+static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
+		void *p_data, unsigned int bytes, bool read)
+{
+	struct intel_gvt *gvt = NULL;
+	void *pt = NULL;
+	unsigned int offset = 0;
+
+	if (!vgpu || !p_data)
+		return;
+
+	gvt = vgpu->gvt;
+	mutex_lock(&gvt->lock);
+	offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
+	if (reg_is_mmio(gvt, offset)) {
+		if (read)
+			intel_vgpu_default_mmio_read(vgpu, offset, p_data,
+						     bytes);
+		else
+			intel_vgpu_default_mmio_write(vgpu, offset, p_data,
+						      bytes);
+	} else if (reg_is_gtt(gvt, offset) &&
+			vgpu->gtt.ggtt_mm->virtual_page_table) {
+		offset -= gvt->device_info.gtt_start_offset;
+		pt = vgpu->gtt.ggtt_mm->virtual_page_table + offset;
+		if (read)
+			memcpy(p_data, pt, bytes);
+		else
+			memcpy(pt, p_data, bytes);
+	} else if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
+		struct intel_vgpu_guest_page *gp;
+
+		/* Since we enter failsafe mode early during guest boot, the
+		 * guest may not have had a chance to set up its PPGTT table
+		 * yet, so there should not be any write-protected pages.
+		 * Keep this code here in case we need to handle it in the
+		 * future.
+		 */
+		gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
+		if (gp) {
+			/* remove write protection to prevent future traps */
+			intel_vgpu_clean_guest_page(vgpu, gp);
+			if (read)
+				intel_gvt_hypervisor_read_gpa(vgpu, pa,
+						p_data, bytes);
+			else
+				intel_gvt_hypervisor_write_gpa(vgpu, pa,
+						p_data, bytes);
+		}
+	}
+	mutex_unlock(&gvt->lock);
+}
 /**
  * intel_vgpu_emulate_mmio_read - emulate MMIO read
  * @vgpu: a vGPU
@@ -75,6 +127,11 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
 	unsigned int offset = 0;
 	int ret = -EINVAL;
+	if (vgpu->failsafe) {
+		failsafe_emulate_mmio_rw(vgpu, pa, p_data, bytes, true);
+		return 0;
+	}
 	mutex_lock(&gvt->lock);
 	if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
@@ -188,6 +245,11 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
 	u32 old_vreg = 0, old_sreg = 0;
 	int ret = -EINVAL;
+	if (vgpu->failsafe) {
+		failsafe_emulate_mmio_rw(vgpu, pa, p_data, bytes, false);
+		return 0;
+	}
 	mutex_lock(&gvt->lock);
 	if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
@@ -236,7 +298,7 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
 	mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
 	if (!mmio && !vgpu->mmio.disable_warn_untrack)
-		gvt_err("vgpu%d: write untracked MMIO %x len %d val %x\n",
+		gvt_dbg_mmio("vgpu%d: write untracked MMIO %x len %d val %x\n",
 			vgpu->id, offset, bytes, *(u32 *)p_data);
 	if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
@@ -322,6 +384,8 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu)
 	/* set the bit 0:2(Core C-State ) to C0 */
 	vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;
+	vgpu->mmio.disable_warn_untrack = false;
 }
 /**
......
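The mmio.c hunks above introduce a failsafe mode: once a vGPU trips it, every access is classified by address range (MMIO register file, GGTT, or a write-protected guest page) and serviced with default handlers so the guest can keep running instead of flooding the log with errors. Below is a compilable model of that three-way dispatch; the region bounds are made-up values standing in for gvt->device_info, and the write-protected-page branch is elided.

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative region bounds; real values come from gvt->device_info. */
#define MMIO_SIZE 0x200000u
#define GTT_START 0x800000u
#define GTT_SIZE  0x800000u

static uint8_t mmio_space[MMIO_SIZE];
static uint8_t gtt_space[GTT_SIZE];

static bool reg_is_mmio(uint32_t off) { return off < MMIO_SIZE; }
static bool reg_is_gtt(uint32_t off)
{
	return off >= GTT_START && off < GTT_START + GTT_SIZE;
}

/* Failsafe access: default read/write only, never an error to the guest. */
static void failsafe_rw(uint32_t off, void *data, size_t bytes, bool read)
{
	void *backing;

	if (reg_is_mmio(off))
		backing = &mmio_space[off];
	else if (reg_is_gtt(off))
		backing = &gtt_space[off - GTT_START];
	else
		return;	/* write-protected-page case elided in this model */

	if (read)
		memcpy(data, backing, bytes);
	else
		memcpy(backing, data, bytes);
}

int main(void)
{
	uint32_t v = 0xdeadbeef, r = 0;

	failsafe_rw(0x1000, &v, sizeof(v), false);	/* MMIO write */
	failsafe_rw(0x1000, &r, sizeof(r), true);	/* MMIO read  */
	printf("0x%x\n", r);				/* 0xdeadbeef */
	return 0;
}
```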
@@ -27,7 +27,6 @@
 static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa)
 {
-	void __iomem *host_va = vgpu->gvt->opregion.opregion_va;
 	u8 *buf;
 	int i;
@@ -43,8 +42,8 @@ static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa)
 	if (!vgpu_opregion(vgpu)->va)
 		return -ENOMEM;
-	memcpy_fromio(vgpu_opregion(vgpu)->va, host_va,
-			INTEL_GVT_OPREGION_SIZE);
+	memcpy(vgpu_opregion(vgpu)->va, vgpu->gvt->opregion.opregion_va,
+	       INTEL_GVT_OPREGION_SIZE);
 	for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++)
 		vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
......
@@ -53,6 +53,14 @@ static struct render_mmio gen8_render_mmio_list[] = {
 	{RCS, _MMIO(0x24d4), 0, false},
 	{RCS, _MMIO(0x24d8), 0, false},
 	{RCS, _MMIO(0x24dc), 0, false},
+	{RCS, _MMIO(0x24e0), 0, false},
+	{RCS, _MMIO(0x24e4), 0, false},
+	{RCS, _MMIO(0x24e8), 0, false},
+	{RCS, _MMIO(0x24ec), 0, false},
+	{RCS, _MMIO(0x24f0), 0, false},
+	{RCS, _MMIO(0x24f4), 0, false},
+	{RCS, _MMIO(0x24f8), 0, false},
+	{RCS, _MMIO(0x24fc), 0, false},
 	{RCS, _MMIO(0x7004), 0xffff, true},
 	{RCS, _MMIO(0x7008), 0xffff, true},
 	{RCS, _MMIO(0x7000), 0xffff, true},
@@ -76,6 +84,14 @@ static struct render_mmio gen9_render_mmio_list[] = {
 	{RCS, _MMIO(0x24d4), 0, false},
 	{RCS, _MMIO(0x24d8), 0, false},
 	{RCS, _MMIO(0x24dc), 0, false},
+	{RCS, _MMIO(0x24e0), 0, false},
+	{RCS, _MMIO(0x24e4), 0, false},
+	{RCS, _MMIO(0x24e8), 0, false},
+	{RCS, _MMIO(0x24ec), 0, false},
+	{RCS, _MMIO(0x24f0), 0, false},
+	{RCS, _MMIO(0x24f4), 0, false},
+	{RCS, _MMIO(0x24f8), 0, false},
+	{RCS, _MMIO(0x24fc), 0, false},
 	{RCS, _MMIO(0x7004), 0xffff, true},
 	{RCS, _MMIO(0x7008), 0xffff, true},
 	{RCS, _MMIO(0x7000), 0xffff, true},
......
@@ -139,6 +139,9 @@ static int shadow_context_status_change(struct notifier_block *nb,
 	struct intel_vgpu_workload *workload =
 		scheduler->current_workload[req->engine->id];
+	if (unlikely(!workload))
+		return NOTIFY_OK;
 	switch (action) {
 	case INTEL_CONTEXT_SCHEDULE_IN:
 		intel_gvt_load_render_mmio(workload->vgpu,
@@ -148,6 +151,15 @@ static int shadow_context_status_change(struct notifier_block *nb,
 	case INTEL_CONTEXT_SCHEDULE_OUT:
 		intel_gvt_restore_render_mmio(workload->vgpu,
 					      workload->ring_id);
+		/* A status of -EINPROGRESS means the workload hit no issue
+		 * during dispatch, so on SCHEDULE_OUT it can be set to zero
+		 * for good. Any other status means something went wrong
+		 * during dispatch, and it must not be overwritten.
+		 */
+		if (workload->status == -EINPROGRESS)
+			workload->status = 0;
 		atomic_set(&workload->shadow_ctx_active, 0);
 		break;
 	default:
@@ -359,15 +371,23 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 	workload = scheduler->current_workload[ring_id];
 	vgpu = workload->vgpu;
-	if (!workload->status && !vgpu->resetting) {
+	/* A workload with a request needs to wait for the context switch
+	 * to make sure the request is completed; a workload without a
+	 * request is completed directly.
+	 */
+	if (workload->req) {
 		wait_event(workload->shadow_ctx_status_wq,
 			   !atomic_read(&workload->shadow_ctx_active));
-		update_guest_context(workload);
+		i915_gem_request_put(fetch_and_zero(&workload->req));
+		if (!workload->status && !vgpu->resetting) {
+			update_guest_context(workload);
 		for_each_set_bit(event, workload->pending_events,
 				 INTEL_GVT_EVENT_MAX)
 			intel_vgpu_trigger_virtual_event(vgpu, event);
+		}
 	}
 	gvt_dbg_sched("ring id %d complete workload %p status %d\n",
@@ -397,7 +417,6 @@ static int workload_thread(void *priv)
 	int ring_id = p->ring_id;
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
 	struct intel_vgpu_workload *workload = NULL;
-	long lret;
 	int ret;
 	bool need_force_wake = IS_SKYLAKE(gvt->dev_priv);
 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
@@ -446,23 +465,24 @@ static int workload_thread(void *priv)
 		gvt_dbg_sched("ring id %d wait workload %p\n",
 				workload->ring_id, workload);
-		lret = i915_wait_request(workload->req,
+retry:
+		i915_wait_request(workload->req,
 				0, MAX_SCHEDULE_TIMEOUT);
-		if (lret < 0) {
-			workload->status = lret;
-			gvt_err("fail to wait workload, skip\n");
-		} else {
-			workload->status = 0;
-		}
+		/* i915 has a replay mechanism: a request is replayed after a
+		 * GPU reset, so its seqno will be updated anyway. If the
+		 * seqno is still not updated after waiting, the replay may
+		 * be in progress, so wait again.
+		 */
+		if (!i915_gem_request_completed(workload->req)) {
+			gvt_dbg_sched("workload %p not completed, wait again\n",
+				workload);
+			goto retry;
+		}
 complete:
 		gvt_dbg_sched("will complete workload %p, status: %d\n",
 				workload, workload->status);
-		if (workload->req)
-			i915_gem_request_put(fetch_and_zero(&workload->req));
 		complete_current_workload(gvt, ring_id);
 		if (need_force_wake)
......
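Read together, the scheduler hunks above define a small status protocol for a workload: it starts out as -EINPROGRESS, the context-switch notifier promotes it to 0 on a clean SCHEDULE_OUT, and any error recorded during dispatch is preserved rather than clobbered. A minimal model of that rule (the errno value and helper name here are illustrative):

```c
#include <stdio.h>

enum { EINPROGRESS_ERR = 115 };	/* matches Linux's EINPROGRESS errno */

struct workload {
	int status;	/* -EINPROGRESS_ERR while dispatch is in flight */
};

/* Mirrors shadow_context_status_change(): only a still-in-flight
 * workload gets marked successful on SCHEDULE_OUT. */
static void schedule_out(struct workload *w)
{
	if (w->status == -EINPROGRESS_ERR)
		w->status = 0;
}

int main(void)
{
	struct workload ok  = { .status = -EINPROGRESS_ERR };
	struct workload bad = { .status = -5 };	/* dispatch failed: -EIO */

	schedule_out(&ok);
	schedule_out(&bad);
	printf("ok=%d bad=%d\n", ok.status, bad.status);	/* ok=0 bad=-5 */
	return 0;
}
```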
@@ -64,6 +64,20 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu)
 	WARN_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
 }
+static struct {
+	unsigned int low_mm;
+	unsigned int high_mm;
+	unsigned int fence;
+	enum intel_vgpu_edid edid;
+	char *name;
+} vgpu_types[] = {
+/* Fixed vGPU type table */
+	{ MB_TO_BYTES(64), MB_TO_BYTES(512), 4, GVT_EDID_1024_768, "8" },
+	{ MB_TO_BYTES(128), MB_TO_BYTES(512), 4, GVT_EDID_1920_1200, "4" },
+	{ MB_TO_BYTES(256), MB_TO_BYTES(1024), 4, GVT_EDID_1920_1200, "2" },
+	{ MB_TO_BYTES(512), MB_TO_BYTES(2048), 4, GVT_EDID_1920_1200, "1" },
+};
 /**
  * intel_gvt_init_vgpu_types - initialize vGPU type list
  * @gvt : GVT device
@@ -78,9 +92,8 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
 	unsigned int min_low;
 	/* vGPU type name is defined as GVTg_Vx_y which contains
-	 * physical GPU generation type and 'y' means maximum vGPU
-	 * instances user can create on one physical GPU for this
-	 * type.
+	 * the physical GPU generation type (e.g. V4 for a BDW server,
+	 * V5 for a SKL server).
 	 *
 	 * Depend on physical SKU resource, might see vGPU types like
 	 * GVTg_V4_8, GVTg_V4_4, GVTg_V4_2, etc. We can create
@@ -92,7 +105,7 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
 	 */
 	low_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
 	high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
-	num_types = 4;
+	num_types = sizeof(vgpu_types) / sizeof(vgpu_types[0]);
 	gvt->types = kzalloc(num_types * sizeof(struct intel_vgpu_type),
 			     GFP_KERNEL);
@@ -101,28 +114,29 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
 	min_low = MB_TO_BYTES(32);
 	for (i = 0; i < num_types; ++i) {
-		if (low_avail / min_low == 0)
+		if (low_avail / vgpu_types[i].low_mm == 0)
 			break;
-		gvt->types[i].low_gm_size = min_low;
-		gvt->types[i].high_gm_size = max((min_low<<3), MB_TO_BYTES(384U));
-		gvt->types[i].fence = 4;
-		gvt->types[i].max_instance = min(low_avail / min_low,
-				high_avail / gvt->types[i].high_gm_size);
-		gvt->types[i].avail_instance = gvt->types[i].max_instance;
+		gvt->types[i].low_gm_size = vgpu_types[i].low_mm;
+		gvt->types[i].high_gm_size = vgpu_types[i].high_mm;
+		gvt->types[i].fence = vgpu_types[i].fence;
+		gvt->types[i].resolution = vgpu_types[i].edid;
+		gvt->types[i].avail_instance = min(low_avail / vgpu_types[i].low_mm,
+						   high_avail / vgpu_types[i].high_mm);
 		if (IS_GEN8(gvt->dev_priv))
-			sprintf(gvt->types[i].name, "GVTg_V4_%u",
-						gvt->types[i].max_instance);
+			sprintf(gvt->types[i].name, "GVTg_V4_%s",
+						vgpu_types[i].name);
 		else if (IS_GEN9(gvt->dev_priv))
-			sprintf(gvt->types[i].name, "GVTg_V5_%u",
-						gvt->types[i].max_instance);
+			sprintf(gvt->types[i].name, "GVTg_V5_%s",
+						vgpu_types[i].name);
-		min_low <<= 1;
-		gvt_dbg_core("type[%d]: %s max %u avail %u low %u high %u fence %u\n",
-			     i, gvt->types[i].name, gvt->types[i].max_instance,
+		gvt_dbg_core("type[%d]: %s avail %u low %u high %u fence %u res %s\n",
+			     i, gvt->types[i].name,
 			     gvt->types[i].avail_instance,
 			     gvt->types[i].low_gm_size,
-			     gvt->types[i].high_gm_size, gvt->types[i].fence);
+			     gvt->types[i].high_gm_size, gvt->types[i].fence,
+			     vgpu_edid_str(gvt->types[i].resolution));
 	}
 	gvt->num_types = i;
@@ -138,7 +152,7 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
 {
 	int i;
 	unsigned int low_gm_avail, high_gm_avail, fence_avail;
-	unsigned int low_gm_min, high_gm_min, fence_min, total_min;
+	unsigned int low_gm_min, high_gm_min, fence_min;
 	/* Need to depend on maxium hw resource size but keep on
 	 * static config for now.
@@ -154,12 +168,11 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
 		low_gm_min = low_gm_avail / gvt->types[i].low_gm_size;
 		high_gm_min = high_gm_avail / gvt->types[i].high_gm_size;
 		fence_min = fence_avail / gvt->types[i].fence;
-		total_min = min(min(low_gm_min, high_gm_min), fence_min);
-		gvt->types[i].avail_instance = min(gvt->types[i].max_instance,
-						   total_min);
+		gvt->types[i].avail_instance = min(min(low_gm_min, high_gm_min),
+						   fence_min);
-		gvt_dbg_core("update type[%d]: %s max %u avail %u low %u high %u fence %u\n",
-			     i, gvt->types[i].name, gvt->types[i].max_instance,
+		gvt_dbg_core("update type[%d]: %s avail %u low %u high %u fence %u\n",
+			     i, gvt->types[i].name,
 			     gvt->types[i].avail_instance, gvt->types[i].low_gm_size,
 			     gvt->types[i].high_gm_size, gvt->types[i].fence);
 	}
@@ -248,7 +261,7 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
 	if (ret)
 		goto out_detach_hypervisor_vgpu;
-	ret = intel_vgpu_init_display(vgpu);
+	ret = intel_vgpu_init_display(vgpu, param->resolution);
 	if (ret)
 		goto out_clean_gtt;
@@ -312,6 +325,7 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
 	param.low_gm_sz = type->low_gm_size;
 	param.high_gm_sz = type->high_gm_size;
 	param.fence_sz = type->fence;
+	param.resolution = type->resolution;
 	/* XXX current param based on MB */
 	param.low_gm_sz = BYTES_TO_MB(param.low_gm_sz);
@@ -387,8 +401,12 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
 		populate_pvinfo_page(vgpu);
 		intel_vgpu_reset_display(vgpu);
-		if (dmlr)
+		if (dmlr) {
 			intel_vgpu_reset_cfg_space(vgpu);
+			/* only reset the failsafe mode when dmlr reset */
+			vgpu->failsafe = false;
+			vgpu->pv_notified = false;
+		}
 	}
 	vgpu->resetting = false;
......
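With the fixed vgpu_types[] table above, a type's avail_instance is simply the smaller of how many instances fit in the low and the high graphics-memory budgets. The standalone computation below reproduces that min(); the 512 MB / 4 GB budgets are example figures, not probed hardware values.

```c
#include <stdio.h>

#define MB(x) ((x) * 1024ull * 1024ull)

struct vgpu_type { unsigned long long low_mm, high_mm; const char *name; };

static const struct vgpu_type types[] = {
	{ MB(64),  MB(512),  "8" },
	{ MB(128), MB(512),  "4" },
	{ MB(256), MB(1024), "2" },
	{ MB(512), MB(2048), "1" },
};

int main(void)
{
	/* Example budgets after subtracting the host's share. */
	unsigned long long low_avail  = MB(512 - 64);
	unsigned long long high_avail = MB(4096 - 384);

	for (int i = 0; i < 4; i++) {
		unsigned long long lo = low_avail / types[i].low_mm;
		unsigned long long hi = high_avail / types[i].high_mm;
		printf("GVTg_V5_%s: avail %llu\n", types[i].name,
		       lo < hi ? lo : hi);	/* min() of the two limits */
	}
	return 0;
}
```

intel_gvt_update_vgpu_types() then recomputes the same min() against what is still unallocated, additionally bounded by the remaining fence registers.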
@@ -65,13 +65,11 @@ static int mxsfb_set_pixel_fmt(struct mxsfb_drm_private *mxsfb)
 	switch (format) {
 	case DRM_FORMAT_RGB565:
 		dev_dbg(drm->dev, "Setting up RGB565 mode\n");
-		ctrl |= CTRL_SET_BUS_WIDTH(STMLCDIF_16BIT);
 		ctrl |= CTRL_SET_WORD_LENGTH(0);
 		ctrl1 |= CTRL1_SET_BYTE_PACKAGING(0xf);
 		break;
 	case DRM_FORMAT_XRGB8888:
 		dev_dbg(drm->dev, "Setting up XRGB8888 mode\n");
-		ctrl |= CTRL_SET_BUS_WIDTH(STMLCDIF_24BIT);
 		ctrl |= CTRL_SET_WORD_LENGTH(3);
 		/* Do not use packed pixels = one pixel per word instead. */
 		ctrl1 |= CTRL1_SET_BYTE_PACKAGING(0x7);
@@ -87,6 +85,36 @@ static int mxsfb_set_pixel_fmt(struct mxsfb_drm_private *mxsfb)
 	return 0;
 }
+static void mxsfb_set_bus_fmt(struct mxsfb_drm_private *mxsfb)
+{
+	struct drm_crtc *crtc = &mxsfb->pipe.crtc;
+	struct drm_device *drm = crtc->dev;
+	u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+	u32 reg;
+
+	reg = readl(mxsfb->base + LCDC_CTRL);
+
+	if (mxsfb->connector.display_info.num_bus_formats)
+		bus_format = mxsfb->connector.display_info.bus_formats[0];
+
+	reg &= ~CTRL_BUS_WIDTH_MASK;
+	switch (bus_format) {
+	case MEDIA_BUS_FMT_RGB565_1X16:
+		reg |= CTRL_SET_BUS_WIDTH(STMLCDIF_16BIT);
+		break;
+	case MEDIA_BUS_FMT_RGB666_1X18:
+		reg |= CTRL_SET_BUS_WIDTH(STMLCDIF_18BIT);
+		break;
+	case MEDIA_BUS_FMT_RGB888_1X24:
+		reg |= CTRL_SET_BUS_WIDTH(STMLCDIF_24BIT);
+		break;
+	default:
+		dev_err(drm->dev, "Unknown media bus format %d\n", bus_format);
+		break;
+	}
+	writel(reg, mxsfb->base + LCDC_CTRL);
+}
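mxsfb_set_bus_fmt() above is a plain read-modify-write of a two-bit register field: read LCDC_CTRL, clear the field with the new CTRL_BUS_WIDTH_MASK, OR in the width for the negotiated bus format, and write the word back. The sketch below reproduces that sequence on an ordinary variable; the STMLCDIF_* values are my assumption of the constants in mxsfb_regs.h, and the variable stands in for the memory-mapped register.

```c
#include <stdint.h>
#include <stdio.h>

#define CTRL_SET_BUS_WIDTH(x)	(((x) & 0x3) << 10)
#define CTRL_BUS_WIDTH_MASK	(0x3 << 10)

/* Assumed STMLCDIF_* encodings, matching the usual stmlcdif values. */
enum { STMLCDIF_16BIT = 0, STMLCDIF_18BIT = 2, STMLCDIF_24BIT = 3 };

/* Stand-in for readl()/writel() on LCDC_CTRL. */
static uint32_t lcdc_ctrl = 0xffffffff;

static void set_bus_width(unsigned int width)
{
	uint32_t reg = lcdc_ctrl;		/* readl()               */
	reg &= ~CTRL_BUS_WIDTH_MASK;		/* clear the 2-bit field */
	reg |= CTRL_SET_BUS_WIDTH(width);	/* set the new width     */
	lcdc_ctrl = reg;			/* writel()              */
}

int main(void)
{
	set_bus_width(STMLCDIF_18BIT);
	printf("bus width bits: %u\n", (lcdc_ctrl >> 10) & 0x3);	/* 2 */
	return 0;
}
```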
 static void mxsfb_enable_controller(struct mxsfb_drm_private *mxsfb)
 {
 	u32 reg;
@@ -168,13 +196,22 @@ static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb)
 		vdctrl0 |= VDCTRL0_HSYNC_ACT_HIGH;
 	if (m->flags & DRM_MODE_FLAG_PVSYNC)
 		vdctrl0 |= VDCTRL0_VSYNC_ACT_HIGH;
-	if (bus_flags & DRM_BUS_FLAG_DE_HIGH)
+	/* Make sure Data Enable is active-high by default */
+	if (!(bus_flags & DRM_BUS_FLAG_DE_LOW))
 		vdctrl0 |= VDCTRL0_ENABLE_ACT_HIGH;
-	if (bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE)
+	/*
+	 * The DRM_BUS_FLAG_PIXDATA_ defines are controller centric, while
+	 * the controller's VDCTRL0_DOTCLK flag is display centric:
+	 * drive on positive edge       -> display samples on falling edge,
+	 * DRM_BUS_FLAG_PIXDATA_POSEDGE -> VDCTRL0_DOTCLK_ACT_FALLING.
+	 */
+	if (bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE)
 		vdctrl0 |= VDCTRL0_DOTCLK_ACT_FALLING;
 	writel(vdctrl0, mxsfb->base + LCDC_VDCTRL0);
+	mxsfb_set_bus_fmt(mxsfb);
 	/* Frame length in lines. */
 	writel(m->crtc_vtotal, mxsfb->base + LCDC_VDCTRL1);
@@ -184,8 +221,8 @@ static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb)
 	       VDCTRL2_SET_HSYNC_PERIOD(m->crtc_htotal),
 	       mxsfb->base + LCDC_VDCTRL2);
-	writel(SET_HOR_WAIT_CNT(m->crtc_hblank_end - m->crtc_hsync_end) |
-	       SET_VERT_WAIT_CNT(m->crtc_vblank_end - m->crtc_vsync_end),
+	writel(SET_HOR_WAIT_CNT(m->crtc_htotal - m->crtc_hsync_start) |
+	       SET_VERT_WAIT_CNT(m->crtc_vtotal - m->crtc_vsync_start),
 	       mxsfb->base + LCDC_VDCTRL3);
 	writel(SET_DOTCLK_H_VALID_DATA_CNT(m->hdisplay),
......
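The VDCTRL3 change above redefines the wait counts: the old code programmed only the back porch (blank_end - sync_end), while the controller counts from the start of the sync pulse, so the correct value is sync length plus back porch (total - sync_start). Worked numbers for the standard VESA 1024x768@60 mode line (an example mode, not taken from the patch):

```c
#include <stdio.h>

struct mode {
	int hdisplay, hsync_start, hsync_end, htotal;
	int vdisplay, vsync_start, vsync_end, vtotal;
};

int main(void)
{
	/* VESA 1024x768@60; crtc_hblank_end equals htotal for this mode. */
	struct mode m = { 1024, 1048, 1184, 1344, 768, 771, 777, 806 };

	/* Old formula: back porch only. */
	printf("old HOR_WAIT_CNT  = %d\n", m.htotal - m.hsync_end);	/* 160 */
	/* New formula: sync length + back porch. */
	printf("new HOR_WAIT_CNT  = %d\n", m.htotal - m.hsync_start);	/* 296 */
	printf("old VERT_WAIT_CNT = %d\n", m.vtotal - m.vsync_end);	/* 29  */
	printf("new VERT_WAIT_CNT = %d\n", m.vtotal - m.vsync_start);	/* 35  */
	return 0;
}
```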
@@ -102,14 +102,18 @@ static void mxsfb_pipe_enable(struct drm_simple_display_pipe *pipe,
 {
 	struct mxsfb_drm_private *mxsfb = drm_pipe_to_mxsfb_drm_private(pipe);
+	drm_panel_prepare(mxsfb->panel);
 	mxsfb_crtc_enable(mxsfb);
+	drm_panel_enable(mxsfb->panel);
 }
 static void mxsfb_pipe_disable(struct drm_simple_display_pipe *pipe)
 {
 	struct mxsfb_drm_private *mxsfb = drm_pipe_to_mxsfb_drm_private(pipe);
+	drm_panel_disable(mxsfb->panel);
 	mxsfb_crtc_disable(mxsfb);
+	drm_panel_unprepare(mxsfb->panel);
 }
 static void mxsfb_pipe_update(struct drm_simple_display_pipe *pipe,
......
@@ -112,6 +112,7 @@ static int mxsfb_attach_endpoint(struct drm_device *drm,
 int mxsfb_create_output(struct drm_device *drm)
 {
+	struct mxsfb_drm_private *mxsfb = drm->dev_private;
 	struct device_node *ep_np = NULL;
 	struct of_endpoint ep;
 	int ret;
@@ -127,5 +128,8 @@ int mxsfb_create_output(struct drm_device *drm)
 		}
 	}
+	if (!mxsfb->panel)
+		return -EPROBE_DEFER;
 	return 0;
 }
@@ -44,6 +44,7 @@
 #define CTRL_DATA_SELECT		(1 << 16)
 #define CTRL_SET_BUS_WIDTH(x)		(((x) & 0x3) << 10)
 #define CTRL_GET_BUS_WIDTH(x)		(((x) >> 10) & 0x3)
+#define CTRL_BUS_WIDTH_MASK		(0x3 << 10)
 #define CTRL_SET_WORD_LENGTH(x)		(((x) & 0x3) << 8)
 #define CTRL_GET_WORD_LENGTH(x)		(((x) >> 8) & 0x3)
 #define CTRL_MASTER			(1 << 5)
......