Commit 14ee642c authored by Dave Airlie

Merge tag 'drm-intel-next-2019-05-24' of git://anongit.freedesktop.org/drm/drm-intel into drm-next

Features:
- Engine discovery query (Tvrtko)
- Support for DP YCbCr4:2:0 outputs (Gwan-gyeong)
- HDCP revocation support, refactoring (Ramalingam)
- Remove DRM_AUTH from IOCTLs which also have DRM_RENDER_ALLOW (Christian König)
- Asynchronous display power disabling (Imre)
- Perma-pin uC firmware and re-enable global reset (Fernando)
- GTT remapping for display, for bigger fb size and stride (Ville)
- Enable pipe HDR mode on ICL if only HDR planes are used (Ville)
- Kconfig to tweak the busyspin durations for i915_wait_request (Chris)
- Allow multiple user handles to the same VM (Chris)
- GT/GEM runtime pm improvements using wakerefs (Chris)
- Gen 4&5 render context support (Chris)
- Allow userspace to clone contexts on creation (Chris)
- SINGLE_TIMELINE flags for context creation (Chris)
- Allow specification of parallel execbuf (Chris)

Refactoring:
- Header refactoring (Jani)
- Move GraphicsTechnology files under gt/ (Chris)
- Sideband code refactoring (Chris)

Fixes:
- ICL DSI state readout and checker fixes (Vandita)
- GLK DSI picture corruption fix (Stanislav)
- HDMI deep color fixes (Clinton, Aditya)
- Fix driver unbinding from a device in use (Janusz)
- Fix clock gating with pipe scaling (Radhakrishna)
- Disable broken FBC on GLK (Daniel Drake)
- Miscellaneous GuC fixes (Michal)
- Fix MG PHY DP register programming (Imre)
- Add missing combo PHY lane power setup (Imre)
- Workarounds for early ICL VBT issues (Imre)
- Fix fastset vs. pfit on/off on HSW EDP transcoder (Ville)
- Add readout and state check for pch_pfit.force_thru (Ville)
- Miscellaneous display fixes and refactoring (Ville)
- Display workaround fixes (Ville)
- Enable audio even if ELD is bogus (Ville)
- Fix use-after-free in reporting create.size (Chris)
- Sideband fixes to avoid BYT hard lockups (Chris)
- Workaround fixes and improvements (Chris)

Maintainer shortcomings:
- Failure to adequately describe and give credit for all changes (Jani)
Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/87sgt3n45z.fsf@intel.com
......@@ -181,6 +181,12 @@ Panel Helper Reference
.. kernel-doc:: drivers/gpu/drm/drm_panel_orientation_quirks.c
:export:
HDCP Helper Functions Reference
===============================
.. kernel-doc:: drivers/gpu/drm/drm_hdcp.c
:export:
Display Port Helper Functions Reference
=======================================
......
......@@ -17,7 +17,7 @@ drm-y := drm_auth.o drm_cache.o \
drm_plane.o drm_color_mgmt.o drm_print.o \
drm_dumb_buffers.o drm_mode_config.o drm_vblank.o \
drm_syncobj.o drm_lease.o drm_writeback.o drm_client.o \
drm_atomic_uapi.o
drm_atomic_uapi.o drm_hdcp.o
drm-$(CONFIG_DRM_LEGACY) += drm_legacy_misc.o drm_bufs.o drm_context.o drm_dma.o drm_scatter.o drm_lock.o
drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o
......
......@@ -115,7 +115,7 @@ EXPORT_SYMBOL_GPL(analogix_dp_psr_enabled);
int analogix_dp_enable_psr(struct analogix_dp_device *dp)
{
struct edp_vsc_psr psr_vsc;
struct dp_sdp psr_vsc;
if (!dp->psr_enable)
return 0;
......@@ -127,8 +127,8 @@ int analogix_dp_enable_psr(struct analogix_dp_device *dp)
psr_vsc.sdp_header.HB2 = 0x2;
psr_vsc.sdp_header.HB3 = 0x8;
psr_vsc.DB0 = 0;
psr_vsc.DB1 = EDP_VSC_PSR_STATE_ACTIVE | EDP_VSC_PSR_CRC_VALUES_VALID;
psr_vsc.db[0] = 0;
psr_vsc.db[1] = EDP_VSC_PSR_STATE_ACTIVE | EDP_VSC_PSR_CRC_VALUES_VALID;
return analogix_dp_send_psr_spd(dp, &psr_vsc, true);
}
......@@ -136,7 +136,7 @@ EXPORT_SYMBOL_GPL(analogix_dp_enable_psr);
int analogix_dp_disable_psr(struct analogix_dp_device *dp)
{
struct edp_vsc_psr psr_vsc;
struct dp_sdp psr_vsc;
int ret;
if (!dp->psr_enable)
......@@ -149,8 +149,8 @@ int analogix_dp_disable_psr(struct analogix_dp_device *dp)
psr_vsc.sdp_header.HB2 = 0x2;
psr_vsc.sdp_header.HB3 = 0x8;
psr_vsc.DB0 = 0;
psr_vsc.DB1 = 0;
psr_vsc.db[0] = 0;
psr_vsc.db[1] = 0;
ret = drm_dp_dpcd_writeb(&dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
if (ret != 1) {
......
......@@ -254,7 +254,7 @@ void analogix_dp_enable_scrambling(struct analogix_dp_device *dp);
void analogix_dp_disable_scrambling(struct analogix_dp_device *dp);
void analogix_dp_enable_psr_crc(struct analogix_dp_device *dp);
int analogix_dp_send_psr_spd(struct analogix_dp_device *dp,
struct edp_vsc_psr *vsc, bool blocking);
struct dp_sdp *vsc, bool blocking);
ssize_t analogix_dp_transfer(struct analogix_dp_device *dp,
struct drm_dp_aux_msg *msg);
......
......@@ -1041,7 +1041,7 @@ static ssize_t analogix_dp_get_psr_status(struct analogix_dp_device *dp)
}
int analogix_dp_send_psr_spd(struct analogix_dp_device *dp,
struct edp_vsc_psr *vsc, bool blocking)
struct dp_sdp *vsc, bool blocking)
{
unsigned int val;
int ret;
......@@ -1069,8 +1069,8 @@ int analogix_dp_send_psr_spd(struct analogix_dp_device *dp,
writel(0x5D, dp->reg_base + ANALOGIX_DP_SPD_PB3);
/* configure DB0 / DB1 values */
writel(vsc->DB0, dp->reg_base + ANALOGIX_DP_VSC_SHADOW_DB0);
writel(vsc->DB1, dp->reg_base + ANALOGIX_DP_VSC_SHADOW_DB1);
writel(vsc->db[0], dp->reg_base + ANALOGIX_DP_VSC_SHADOW_DB0);
writel(vsc->db[1], dp->reg_base + ANALOGIX_DP_VSC_SHADOW_DB1);
/* set reuse spd inforframe */
val = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_3);
......@@ -1092,8 +1092,8 @@ int analogix_dp_send_psr_spd(struct analogix_dp_device *dp,
ret = readx_poll_timeout(analogix_dp_get_psr_status, dp, psr_status,
psr_status >= 0 &&
((vsc->DB1 && psr_status == DP_PSR_SINK_ACTIVE_RFB) ||
(!vsc->DB1 && psr_status == DP_PSR_SINK_INACTIVE)), 1500,
((vsc->db[1] && psr_status == DP_PSR_SINK_ACTIVE_RFB) ||
(!vsc->db[1] && psr_status == DP_PSR_SINK_INACTIVE)), 1500,
DP_TIMEOUT_PSR_LOOP_MS * 1000);
if (ret) {
dev_warn(dp->dev, "Failed to apply PSR %d\n", ret);
......
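
The hunks above switch the analogix PSR path from the eDP-specific struct edp_vsc_psr to the generic struct dp_sdp. As a minimal sketch of how the PSR VSC packet ends up being assembled with the new type, restricted to the fields visible in this diff: fill_psr_vsc() is a hypothetical helper, HB0/HB1 are programmed by the driver outside the shown hunks, and struct dp_sdp plus the EDP_VSC_PSR_* flags are assumed to come from <drm/drm_dp_helper.h>.

#include <linux/string.h>
#include <drm/drm_dp_helper.h>

/* Hypothetical helper mirroring the field usage in the hunks above. */
static void fill_psr_vsc(struct dp_sdp *vsc, bool enable)
{
	memset(vsc, 0, sizeof(*vsc));

	/* VSC SDP header bytes shown in the diff (HB0/HB1 omitted here) */
	vsc->sdp_header.HB2 = 0x2;
	vsc->sdp_header.HB3 = 0x8;

	vsc->db[0] = 0;
	vsc->db[1] = enable ?
		     (EDP_VSC_PSR_STATE_ACTIVE | EDP_VSC_PSR_CRC_VALUES_VALID) :
		     0;
}
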
......@@ -741,7 +741,7 @@ static int drm_atomic_connector_set_property(struct drm_connector *connector,
state->content_type = val;
} else if (property == connector->scaling_mode_property) {
state->scaling_mode = val;
} else if (property == connector->content_protection_property) {
} else if (property == config->content_protection_property) {
if (val == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
DRM_DEBUG_KMS("only drivers can set CP Enabled\n");
return -EINVAL;
......@@ -826,7 +826,7 @@ drm_atomic_connector_get_property(struct drm_connector *connector,
} else if (property == config->hdr_output_metadata_property) {
*val = state->hdr_output_metadata ?
state->hdr_output_metadata->base.id : 0;
} else if (property == connector->content_protection_property) {
} else if (property == config->content_protection_property) {
*val = state->content_protection;
} else if (property == config->writeback_fb_id_property) {
/* Writeback framebuffer is one-shot, write and forget */
......
......@@ -823,13 +823,6 @@ static const struct drm_prop_enum_list drm_tv_subconnector_enum_list[] = {
DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name,
drm_tv_subconnector_enum_list)
static struct drm_prop_enum_list drm_cp_enum_list[] = {
{ DRM_MODE_CONTENT_PROTECTION_UNDESIRED, "Undesired" },
{ DRM_MODE_CONTENT_PROTECTION_DESIRED, "Desired" },
{ DRM_MODE_CONTENT_PROTECTION_ENABLED, "Enabled" },
};
DRM_ENUM_NAME_FN(drm_get_content_protection_name, drm_cp_enum_list)
static const struct drm_prop_enum_list hdmi_colorspaces[] = {
/* For Default case, driver will set the colorspace */
{ DRM_MODE_COLORIMETRY_DEFAULT, "Default" },
......@@ -1515,42 +1508,6 @@ int drm_connector_attach_scaling_mode_property(struct drm_connector *connector,
}
EXPORT_SYMBOL(drm_connector_attach_scaling_mode_property);
/**
* drm_connector_attach_content_protection_property - attach content protection
* property
*
* @connector: connector to attach CP property on.
*
* This is used to add support for content protection on select connectors.
* Content Protection is intentionally vague to allow for different underlying
* technologies, however it is most commonly implemented by HDCP.
*
* The content protection will be set to &drm_connector_state.content_protection
*
* Returns:
* Zero on success, negative errno on failure.
*/
int drm_connector_attach_content_protection_property(
struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_property *prop;
prop = drm_property_create_enum(dev, 0, "Content Protection",
drm_cp_enum_list,
ARRAY_SIZE(drm_cp_enum_list));
if (!prop)
return -ENOMEM;
drm_object_attach_property(&connector->base, prop,
DRM_MODE_CONTENT_PROTECTION_UNDESIRED);
connector->content_protection_property = prop;
return 0;
}
EXPORT_SYMBOL(drm_connector_attach_content_protection_property);
/**
* drm_mode_create_aspect_ratio_property - create aspect ratio property
* @dev: DRM device
......
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2019 Intel Corporation.
*
* Authors:
* Ramalingam C <ramalingam.c@intel.com>
*/
#include <linux/device.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <drm/drm_hdcp.h>
#include <drm/drm_sysfs.h>
#include <drm/drm_print.h>
#include <drm/drm_device.h>
#include <drm/drm_property.h>
#include <drm/drm_mode_object.h>
#include <drm/drm_connector.h>
#include "drm_internal.h"
static struct hdcp_srm {
u32 revoked_ksv_cnt;
u8 *revoked_ksv_list;
/* Mutex to protect above struct member */
struct mutex mutex;
} *srm_data;
static inline void drm_hdcp_print_ksv(const u8 *ksv)
{
DRM_DEBUG("\t%#02x, %#02x, %#02x, %#02x, %#02x\n",
ksv[0], ksv[1], ksv[2], ksv[3], ksv[4]);
}
static u32 drm_hdcp_get_revoked_ksv_count(const u8 *buf, u32 vrls_length)
{
u32 parsed_bytes = 0, ksv_count = 0, vrl_ksv_cnt, vrl_sz;
while (parsed_bytes < vrls_length) {
vrl_ksv_cnt = *buf;
ksv_count += vrl_ksv_cnt;
vrl_sz = (vrl_ksv_cnt * DRM_HDCP_KSV_LEN) + 1;
buf += vrl_sz;
parsed_bytes += vrl_sz;
}
/*
* When vrls are not valid, ksvs are not considered.
* Hence SRM will be discarded.
*/
if (parsed_bytes != vrls_length)
ksv_count = 0;
return ksv_count;
}
static u32 drm_hdcp_get_revoked_ksvs(const u8 *buf, u8 *revoked_ksv_list,
u32 vrls_length)
{
u32 parsed_bytes = 0, ksv_count = 0;
u32 vrl_ksv_cnt, vrl_ksv_sz, vrl_idx = 0;
do {
vrl_ksv_cnt = *buf;
vrl_ksv_sz = vrl_ksv_cnt * DRM_HDCP_KSV_LEN;
buf++;
DRM_DEBUG("vrl: %d, Revoked KSVs: %d\n", vrl_idx++,
vrl_ksv_cnt);
memcpy(revoked_ksv_list, buf, vrl_ksv_sz);
ksv_count += vrl_ksv_cnt;
revoked_ksv_list += vrl_ksv_sz;
buf += vrl_ksv_sz;
parsed_bytes += (vrl_ksv_sz + 1);
} while (parsed_bytes < vrls_length);
return ksv_count;
}
static inline u32 get_vrl_length(const u8 *buf)
{
return drm_hdcp_be24_to_cpu(buf);
}
static int drm_hdcp_parse_hdcp1_srm(const u8 *buf, size_t count)
{
struct hdcp_srm_header *header;
u32 vrl_length, ksv_count;
if (count < (sizeof(struct hdcp_srm_header) +
DRM_HDCP_1_4_VRL_LENGTH_SIZE + DRM_HDCP_1_4_DCP_SIG_SIZE)) {
DRM_ERROR("Invalid blob length\n");
return -EINVAL;
}
header = (struct hdcp_srm_header *)buf;
DRM_DEBUG("SRM ID: 0x%x, SRM Ver: 0x%x, SRM Gen No: 0x%x\n",
header->srm_id,
be16_to_cpu(header->srm_version), header->srm_gen_no);
WARN_ON(header->reserved);
buf = buf + sizeof(*header);
vrl_length = get_vrl_length(buf);
if (count < (sizeof(struct hdcp_srm_header) + vrl_length) ||
vrl_length < (DRM_HDCP_1_4_VRL_LENGTH_SIZE +
DRM_HDCP_1_4_DCP_SIG_SIZE)) {
DRM_ERROR("Invalid blob length or vrl length\n");
return -EINVAL;
}
/* Length of all the vrls combined */
vrl_length -= (DRM_HDCP_1_4_VRL_LENGTH_SIZE +
DRM_HDCP_1_4_DCP_SIG_SIZE);
if (!vrl_length) {
DRM_ERROR("No vrl found\n");
return -EINVAL;
}
buf += DRM_HDCP_1_4_VRL_LENGTH_SIZE;
ksv_count = drm_hdcp_get_revoked_ksv_count(buf, vrl_length);
if (!ksv_count) {
DRM_DEBUG("Revoked KSV count is 0\n");
return count;
}
kfree(srm_data->revoked_ksv_list);
srm_data->revoked_ksv_list = kcalloc(ksv_count, DRM_HDCP_KSV_LEN,
GFP_KERNEL);
if (!srm_data->revoked_ksv_list) {
DRM_ERROR("Out of Memory\n");
return -ENOMEM;
}
if (drm_hdcp_get_revoked_ksvs(buf, srm_data->revoked_ksv_list,
vrl_length) != ksv_count) {
srm_data->revoked_ksv_cnt = 0;
kfree(srm_data->revoked_ksv_list);
return -EINVAL;
}
srm_data->revoked_ksv_cnt = ksv_count;
return count;
}
static int drm_hdcp_parse_hdcp2_srm(const u8 *buf, size_t count)
{
struct hdcp_srm_header *header;
u32 vrl_length, ksv_count, ksv_sz;
if (count < (sizeof(struct hdcp_srm_header) +
DRM_HDCP_2_VRL_LENGTH_SIZE + DRM_HDCP_2_DCP_SIG_SIZE)) {
DRM_ERROR("Invalid blob length\n");
return -EINVAL;
}
header = (struct hdcp_srm_header *)buf;
DRM_DEBUG("SRM ID: 0x%x, SRM Ver: 0x%x, SRM Gen No: 0x%x\n",
header->srm_id & DRM_HDCP_SRM_ID_MASK,
be16_to_cpu(header->srm_version), header->srm_gen_no);
if (header->reserved)
return -EINVAL;
buf = buf + sizeof(*header);
vrl_length = get_vrl_length(buf);
if (count < (sizeof(struct hdcp_srm_header) + vrl_length) ||
vrl_length < (DRM_HDCP_2_VRL_LENGTH_SIZE +
DRM_HDCP_2_DCP_SIG_SIZE)) {
DRM_ERROR("Invalid blob length or vrl length\n");
return -EINVAL;
}
/* Length of all the vrls combined */
vrl_length -= (DRM_HDCP_2_VRL_LENGTH_SIZE +
DRM_HDCP_2_DCP_SIG_SIZE);
if (!vrl_length) {
DRM_ERROR("No vrl found\n");
return -EINVAL;
}
buf += DRM_HDCP_2_VRL_LENGTH_SIZE;
ksv_count = (*buf << 2) | DRM_HDCP_2_KSV_COUNT_2_LSBITS(*(buf + 1));
if (!ksv_count) {
DRM_DEBUG("Revoked KSV count is 0\n");
return count;
}
kfree(srm_data->revoked_ksv_list);
srm_data->revoked_ksv_list = kcalloc(ksv_count, DRM_HDCP_KSV_LEN,
GFP_KERNEL);
if (!srm_data->revoked_ksv_list) {
DRM_ERROR("Out of Memory\n");
return -ENOMEM;
}
ksv_sz = ksv_count * DRM_HDCP_KSV_LEN;
buf += DRM_HDCP_2_NO_OF_DEV_PLUS_RESERVED_SZ;
DRM_DEBUG("Revoked KSVs: %d\n", ksv_count);
memcpy(srm_data->revoked_ksv_list, buf, ksv_sz);
srm_data->revoked_ksv_cnt = ksv_count;
return count;
}
static inline bool is_srm_version_hdcp1(const u8 *buf)
{
return *buf == (u8)(DRM_HDCP_1_4_SRM_ID << 4);
}
static inline bool is_srm_version_hdcp2(const u8 *buf)
{
return *buf == (u8)(DRM_HDCP_2_SRM_ID << 4 | DRM_HDCP_2_INDICATOR);
}
static void drm_hdcp_srm_update(const u8 *buf, size_t count)
{
if (count < sizeof(struct hdcp_srm_header))
return;
if (is_srm_version_hdcp1(buf))
drm_hdcp_parse_hdcp1_srm(buf, count);
else if (is_srm_version_hdcp2(buf))
drm_hdcp_parse_hdcp2_srm(buf, count);
}
static void drm_hdcp_request_srm(struct drm_device *drm_dev)
{
char fw_name[36] = "display_hdcp_srm.bin";
const struct firmware *fw;
int ret;
ret = request_firmware_direct(&fw, (const char *)fw_name,
drm_dev->dev);
if (ret < 0)
goto exit;
if (fw->size && fw->data)
drm_hdcp_srm_update(fw->data, fw->size);
exit:
release_firmware(fw);
}
/**
* drm_hdcp_check_ksvs_revoked - Check the revoked status of the IDs
*
* @drm_dev: drm_device for which HDCP revocation check is requested
* @ksvs: List of KSVs (HDCP receiver IDs)
* @ksv_count: KSV count passed in through @ksvs
*
* This function reads the HDCP System Renewability Message (SRM table)
* from userspace as firmware and parses it for the HDCP KSVs (receiver
* IDs) revoked by DCP LLC. Once the revoked KSVs are known, each KSV in
* the list passed in by the display driver is checked against them and
* the result is returned.
*
* The SRM must be provided as a firmware blob named "display_hdcp_srm.bin".
*
* Returns:
* TRUE if any of the KSVs is revoked, else FALSE.
*/
bool drm_hdcp_check_ksvs_revoked(struct drm_device *drm_dev, u8 *ksvs,
u32 ksv_count)
{
u32 rev_ksv_cnt, cnt, i, j;
u8 *rev_ksv_list;
if (!srm_data)
return false;
mutex_lock(&srm_data->mutex);
drm_hdcp_request_srm(drm_dev);
rev_ksv_cnt = srm_data->revoked_ksv_cnt;
rev_ksv_list = srm_data->revoked_ksv_list;
/* If the Revoked ksv list is empty */
if (!rev_ksv_cnt || !rev_ksv_list) {
mutex_unlock(&srm_data->mutex);
return false;
}
for (cnt = 0; cnt < ksv_count; cnt++) {
rev_ksv_list = srm_data->revoked_ksv_list;
for (i = 0; i < rev_ksv_cnt; i++) {
for (j = 0; j < DRM_HDCP_KSV_LEN; j++)
if (ksvs[j] != rev_ksv_list[j]) {
break;
} else if (j == (DRM_HDCP_KSV_LEN - 1)) {
DRM_DEBUG("Revoked KSV is ");
drm_hdcp_print_ksv(ksvs);
mutex_unlock(&srm_data->mutex);
return true;
}
/* Move the offset to next KSV in the revoked list */
rev_ksv_list += DRM_HDCP_KSV_LEN;
}
/* Iterate to next ksv_offset */
ksvs += DRM_HDCP_KSV_LEN;
}
mutex_unlock(&srm_data->mutex);
return false;
}
EXPORT_SYMBOL_GPL(drm_hdcp_check_ksvs_revoked);
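
Illustrative usage only, not part of this diff: a display driver that has read the downstream KSV list during HDCP authentication can gate success on the revocation check above. The helper name and error handling below are hypothetical; the sketch assumes <drm/drm_hdcp.h> and <drm/drm_print.h>.

/* Hypothetical driver-side check against the revoked-KSV list. */
static int example_validate_ksvs(struct drm_device *dev,
				 u8 *ksv_fifo, u32 num_downstream)
{
	if (drm_hdcp_check_ksvs_revoked(dev, ksv_fifo, num_downstream)) {
		DRM_ERROR("Revoked KSV(s) in the downstream topology\n");
		return -EPERM;
	}

	return 0;
}
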
int drm_setup_hdcp_srm(struct class *drm_class)
{
srm_data = kzalloc(sizeof(*srm_data), GFP_KERNEL);
if (!srm_data)
return -ENOMEM;
mutex_init(&srm_data->mutex);
return 0;
}
void drm_teardown_hdcp_srm(struct class *drm_class)
{
if (srm_data) {
kfree(srm_data->revoked_ksv_list);
kfree(srm_data);
}
}
static struct drm_prop_enum_list drm_cp_enum_list[] = {
{ DRM_MODE_CONTENT_PROTECTION_UNDESIRED, "Undesired" },
{ DRM_MODE_CONTENT_PROTECTION_DESIRED, "Desired" },
{ DRM_MODE_CONTENT_PROTECTION_ENABLED, "Enabled" },
};
DRM_ENUM_NAME_FN(drm_get_content_protection_name, drm_cp_enum_list)
/**
* drm_connector_attach_content_protection_property - attach content protection
* property
*
* @connector: connector to attach CP property on.
*
* This is used to add support for content protection on select connectors.
* Content Protection is intentionally vague to allow for different underlying
* technologies, however it is most commonly implemented by HDCP.
*
* The content protection will be set to &drm_connector_state.content_protection
*
* Returns:
* Zero on success, negative errno on failure.
*/
int drm_connector_attach_content_protection_property(
struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_property *prop =
dev->mode_config.content_protection_property;
if (!prop)
prop = drm_property_create_enum(dev, 0, "Content Protection",
drm_cp_enum_list,
ARRAY_SIZE(drm_cp_enum_list));
if (!prop)
return -ENOMEM;
drm_object_attach_property(&connector->base, prop,
DRM_MODE_CONTENT_PROTECTION_UNDESIRED);
dev->mode_config.content_protection_property = prop;
return 0;
}
EXPORT_SYMBOL(drm_connector_attach_content_protection_property);
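
For illustration, a connector initialization path would typically attach the property right after creating the connector; example_connector_init() is a hypothetical name, and the prototype is assumed to be exported via <drm/drm_hdcp.h> as part of this series.

/* Hypothetical connector setup for a driver that supports HDCP. */
static int example_connector_init(struct drm_connector *connector)
{
	int ret;

	ret = drm_connector_attach_content_protection_property(connector);
	if (ret)
		return ret;

	/* attach further properties, register the connector, ... */
	return 0;
}
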
......@@ -108,6 +108,7 @@ void drm_sysfs_connector_remove(struct drm_connector *connector);
void drm_sysfs_lease_event(struct drm_device *dev);
/* drm_gem.c */
struct drm_gem_object;
int drm_gem_init(struct drm_device *dev);
void drm_gem_destroy(struct drm_device *dev);
int drm_gem_handle_create_tail(struct drm_file *file_priv,
......@@ -203,3 +204,7 @@ int drm_syncobj_query_ioctl(struct drm_device *dev, void *data,
void drm_framebuffer_print_info(struct drm_printer *p, unsigned int indent,
const struct drm_framebuffer *fb);
int drm_framebuffer_debugfs_init(struct drm_minor *minor);
/* drm_hdcp.c */
int drm_setup_hdcp_srm(struct class *drm_class);
void drm_teardown_hdcp_srm(struct class *drm_class);
......@@ -78,6 +78,7 @@ int drm_sysfs_init(void)
}
drm_class->devnode = drm_devnode;
drm_setup_hdcp_srm(drm_class);
return 0;
}
......@@ -90,6 +91,7 @@ void drm_sysfs_destroy(void)
{
if (IS_ERR_OR_NULL(drm_class))
return;
drm_teardown_hdcp_srm(drm_class);
class_remove_file(drm_class, &class_attr_version.attr);
class_destroy(drm_class);
drm_class = NULL;
......
......@@ -133,3 +133,9 @@ depends on DRM_I915
depends on EXPERT
source "drivers/gpu/drm/i915/Kconfig.debug"
endmenu
menu "drm/i915 Profile Guided Optimisation"
visible if EXPERT
depends on DRM_I915
source "drivers/gpu/drm/i915/Kconfig.profile"
endmenu
config DRM_I915_SPIN_REQUEST
int
default 5 # microseconds
help
Before sleeping waiting for a request (GPU operation) to complete,
we may spend some time polling for its completion. As the IRQ may
take a non-negligible time to setup, we do a short spin first to
check if the request will complete in the time it would have taken
us to enable the interrupt.
May be 0 to disable the initial spin. In practice, we estimate
the cost of enabling the interrupt (if currently disabled) to be
a few microseconds.
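
The help text above describes the usual spin-before-sleep optimisation; in i915 the actual poll lives in the request wait path. A generic sketch of the pattern this option tunes, with hypothetical names (struct example_request, request_completed(), timeout_us):

/* Poll briefly for completion before paying the cost of arming an IRQ. */
static bool example_busy_spin(const struct example_request *rq,
			      unsigned int timeout_us)
{
	const ktime_t end = ktime_add_us(ktime_get_raw(), timeout_us);

	do {
		if (request_completed(rq))
			return true;	/* done, no interrupt needed */
		cpu_relax();
	} while (ktime_before(ktime_get_raw(), end));

	return false;	/* caller enables the interrupt and sleeps */
}
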
......@@ -35,32 +35,56 @@ subdir-ccflags-y += \
# Extra header tests
include $(src)/Makefile.header-test
subdir-ccflags-y += -I$(src)
# Please keep these build lists sorted!
# core driver code
i915-y += i915_drv.o \
i915_irq.o \
i915_memcpy.o \
i915_mm.o \
i915_params.o \
i915_pci.o \
i915_reset.o \
i915_suspend.o \
i915_sw_fence.o \
i915_syncmap.o \
i915_sysfs.o \
i915_user_extensions.o \
intel_csr.o \
intel_device_info.o \
intel_pm.o \
intel_runtime_pm.o \
intel_workarounds.o
intel_wakeref.o \
intel_uncore.o
# core library code
i915-y += \
i915_memcpy.o \
i915_mm.o \
i915_sw_fence.o \
i915_syncmap.o \
i915_user_extensions.o
i915-$(CONFIG_COMPAT) += i915_ioc32.o
i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o intel_pipe_crc.o
i915-$(CONFIG_PERF_EVENTS) += i915_pmu.o
# GEM code
# "Graphics Technology" (aka we talk to the gpu)
obj-y += gt/
gt-y += \
gt/intel_breadcrumbs.o \
gt/intel_context.o \
gt/intel_engine_cs.o \
gt/intel_engine_pm.o \
gt/intel_gt_pm.o \
gt/intel_hangcheck.o \
gt/intel_lrc.o \
gt/intel_reset.o \
gt/intel_ringbuffer.o \
gt/intel_mocs.o \
gt/intel_sseu.o \
gt/intel_workarounds.o
gt-$(CONFIG_DRM_I915_SELFTEST) += \
gt/mock_engine.o
i915-y += $(gt-y)
# GEM (Graphics Execution Management) code
i915-y += \
i915_active.o \
i915_cmd_parser.o \
......@@ -75,6 +99,7 @@ i915-y += \
i915_gem_internal.o \
i915_gem.o \
i915_gem_object.o \
i915_gem_pm.o \
i915_gem_render_state.o \
i915_gem_shrinker.o \
i915_gem_stolen.o \
......@@ -88,14 +113,6 @@ i915-y += \
i915_timeline.o \
i915_trace_points.o \
i915_vma.o \
intel_breadcrumbs.o \
intel_context.o \
intel_engine_cs.o \
intel_hangcheck.o \
intel_lrc.o \
intel_mocs.o \
intel_ringbuffer.o \
intel_uncore.o \
intel_wopcm.o
# general-purpose microcontroller (GuC) support
......@@ -159,8 +176,8 @@ i915-y += dvo_ch7017.o \
intel_dsi_dcs_backlight.o \
intel_dsi_vbt.o \
intel_dvo.o \
intel_gmbus.o \
intel_hdmi.o \
intel_i2c.o \
intel_lspcon.o \
intel_lvds.o \
intel_panel.o \
......@@ -176,6 +193,7 @@ i915-$(CONFIG_DRM_I915_SELFTEST) += \
selftests/i915_random.o \
selftests/i915_selftest.o \
selftests/igt_flush_test.o \
selftests/igt_gem_utils.o \
selftests/igt_live_test.o \
selftests/igt_reset.o \
selftests/igt_spinner.o
......
......@@ -4,37 +4,65 @@
# Test the headers are compilable as standalone units
header_test := \
i915_active_types.h \
i915_debugfs.h \
i915_drv.h \
i915_gem_context_types.h \
i915_gem_pm.h \
i915_irq.h \
i915_params.h \
i915_priolist_types.h \
i915_reg.h \
i915_scheduler_types.h \
i915_timeline_types.h \
i915_utils.h \
intel_acpi.h \
intel_atomic.h \
intel_atomic_plane.h \
intel_audio.h \
intel_bios.h \
intel_cdclk.h \
intel_color.h \
intel_combo_phy.h \
intel_connector.h \
intel_context_types.h \
intel_crt.h \
intel_csr.h \
intel_ddi.h \
intel_dp.h \
intel_dp_aux_backlight.h \
intel_dp_link_training.h \
intel_dp_mst.h \
intel_dpio_phy.h \
intel_dpll_mgr.h \
intel_drv.h \
intel_dsi.h \
intel_dsi_dcs_backlight.h \
intel_dvo.h \
intel_engine_types.h \
intel_dvo_dev.h \
intel_fbc.h \
intel_fbdev.h \
intel_fifo_underrun.h \
intel_frontbuffer.h \
intel_gmbus.h \
intel_hdcp.h \
intel_hdmi.h \
intel_hotplug.h \
intel_lpe_audio.h \
intel_lspcon.h \
intel_lvds.h \
intel_overlay.h \
intel_panel.h \
intel_pipe_crc.h \
intel_pm.h \
intel_psr.h \
intel_quirks.h \
intel_runtime_pm.h \
intel_sdvo.h \
intel_sideband.h \
intel_sprite.h \
intel_tv.h \
intel_workarounds_types.h
intel_uncore.h \
intel_vdsc.h \
intel_wakeref.h
quiet_cmd_header_test = HDRTEST $@
cmd_header_test = echo "\#include \"$(<F)\"" > $@
......
......@@ -25,7 +25,8 @@
*
*/
#include "dvo.h"
#include "intel_drv.h"
#include "intel_dvo_dev.h"
#define CH7017_TV_DISPLAY_MODE 0x00
#define CH7017_FLICKER_FILTER 0x01
......
......@@ -26,7 +26,8 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
**************************************************************************/
#include "dvo.h"
#include "intel_drv.h"
#include "intel_dvo_dev.h"
#define CH7xxx_REG_VID 0x4a
#define CH7xxx_REG_DID 0x4b
......
......@@ -29,7 +29,8 @@
*
*/
#include "dvo.h"
#include "intel_drv.h"
#include "intel_dvo_dev.h"
/*
* register definitions for the i82807aa.
......
......@@ -26,9 +26,10 @@
*
*/
#include "dvo.h"
#include "i915_reg.h"
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_drv.h"
#include "intel_dvo_dev.h"
#define NS2501_VID 0x1305
#define NS2501_DID 0x6726
......
......@@ -26,7 +26,8 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
**************************************************************************/
#include "dvo.h"
#include "intel_drv.h"
#include "intel_dvo_dev.h"
#define SIL164_VID 0x0001
#define SIL164_DID 0x0006
......
......@@ -25,7 +25,8 @@
*
*/
#include "dvo.h"
#include "intel_drv.h"
#include "intel_dvo_dev.h"
/* register definitions according to the TFP410 data sheet */
#define TFP410_VID 0x014C
......
# Extra header tests
include $(src)/Makefile.header-test
# SPDX-License-Identifier: MIT
# Copyright © 2019 Intel Corporation
# Test the headers are compilable as standalone units
header_test := $(notdir $(wildcard $(src)/*.h))
quiet_cmd_header_test = HDRTEST $@
cmd_header_test = echo "\#include \"$(<F)\"" > $@
header_test_%.c: %.h
$(call cmd,header_test)
extra-$(CONFIG_DRM_I915_WERROR) += \
$(foreach h,$(header_test),$(patsubst %.h,header_test_%.o,$(h)))
clean-files += $(foreach h,$(header_test),$(patsubst %.h,header_test_%.c,$(h)))
......@@ -81,6 +81,22 @@ static inline bool __request_completed(const struct i915_request *rq)
return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno);
}
__maybe_unused static bool
check_signal_order(struct intel_context *ce, struct i915_request *rq)
{
if (!list_is_last(&rq->signal_link, &ce->signals) &&
i915_seqno_passed(rq->fence.seqno,
list_next_entry(rq, signal_link)->fence.seqno))
return false;
if (!list_is_first(&rq->signal_link, &ce->signals) &&
i915_seqno_passed(list_prev_entry(rq, signal_link)->fence.seqno,
rq->fence.seqno))
return false;
return true;
}
static bool
__dma_fence_signal(struct dma_fence *fence)
{
......@@ -130,6 +146,8 @@ void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
struct i915_request *rq =
list_entry(pos, typeof(*rq), signal_link);
GEM_BUG_ON(!check_signal_order(ce, rq));
if (!__request_completed(rq))
break;
......@@ -312,6 +330,7 @@ bool i915_request_enable_breadcrumb(struct i915_request *rq)
list_add(&rq->signal_link, pos);
if (pos == &ce->signals) /* catch transitions from empty list */
list_move_tail(&ce->signal_link, &b->signalers);
GEM_BUG_ON(!check_signal_order(ce, rq));
set_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
spin_unlock(&b->irq_lock);
......
/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2019 Intel Corporation
*/
#include "i915_drv.h"
#include "i915_gem_context.h"
#include "i915_globals.h"
#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_pm.h"
static struct i915_global_context {
struct i915_global base;
struct kmem_cache *slab_ce;
} global;
static struct intel_context *intel_context_alloc(void)
{
return kmem_cache_zalloc(global.slab_ce, GFP_KERNEL);
}
void intel_context_free(struct intel_context *ce)
{
kmem_cache_free(global.slab_ce, ce);
}
struct intel_context *
intel_context_create(struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
{
struct intel_context *ce;
ce = intel_context_alloc();
if (!ce)
return ERR_PTR(-ENOMEM);
intel_context_init(ce, ctx, engine);
return ce;
}
int __intel_context_do_pin(struct intel_context *ce)
{
int err;
if (mutex_lock_interruptible(&ce->pin_mutex))
return -EINTR;
if (likely(!atomic_read(&ce->pin_count))) {
intel_wakeref_t wakeref;
err = 0;
with_intel_runtime_pm(ce->engine->i915, wakeref)
err = ce->ops->pin(ce);
if (err)
goto err;
i915_gem_context_get(ce->gem_context); /* for ctx->ppgtt */
intel_context_get(ce);
smp_mb__before_atomic(); /* flush pin before it is visible */
}
atomic_inc(&ce->pin_count);
GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */
mutex_unlock(&ce->pin_mutex);
return 0;
err:
mutex_unlock(&ce->pin_mutex);
return err;
}
void intel_context_unpin(struct intel_context *ce)
{
if (likely(atomic_add_unless(&ce->pin_count, -1, 1)))
return;
/* We may be called from inside intel_context_pin() to evict another */
intel_context_get(ce);
mutex_lock_nested(&ce->pin_mutex, SINGLE_DEPTH_NESTING);
if (likely(atomic_dec_and_test(&ce->pin_count))) {
ce->ops->unpin(ce);
i915_gem_context_put(ce->gem_context);
intel_context_put(ce);
}
mutex_unlock(&ce->pin_mutex);
intel_context_put(ce);
}
static void intel_context_retire(struct i915_active_request *active,
struct i915_request *rq)
{
struct intel_context *ce =
container_of(active, typeof(*ce), active_tracker);
intel_context_unpin(ce);
}
void
intel_context_init(struct intel_context *ce,
struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
{
GEM_BUG_ON(!engine->cops);
kref_init(&ce->ref);
ce->gem_context = ctx;
ce->engine = engine;
ce->ops = engine->cops;
ce->sseu = engine->sseu;
ce->saturated = 0;
INIT_LIST_HEAD(&ce->signal_link);
INIT_LIST_HEAD(&ce->signals);
mutex_init(&ce->pin_mutex);
i915_active_request_init(&ce->active_tracker,
NULL, intel_context_retire);
}
static void i915_global_context_shrink(void)
{
kmem_cache_shrink(global.slab_ce);
}
static void i915_global_context_exit(void)
{
kmem_cache_destroy(global.slab_ce);
}
static struct i915_global_context global = { {
.shrink = i915_global_context_shrink,
.exit = i915_global_context_exit,
} };
int __init i915_global_context_init(void)
{
global.slab_ce = KMEM_CACHE(intel_context, SLAB_HWCACHE_ALIGN);
if (!global.slab_ce)
return -ENOMEM;
i915_global_register(&global.base);
return 0;
}
void intel_context_enter_engine(struct intel_context *ce)
{
intel_engine_pm_get(ce->engine);
}
void intel_context_exit_engine(struct intel_context *ce)
{
ce->saturated = 0;
intel_engine_pm_put(ce->engine);
}
struct i915_request *intel_context_create_request(struct intel_context *ce)
{
struct i915_request *rq;
int err;
err = intel_context_pin(ce);
if (unlikely(err))
return ERR_PTR(err);
rq = i915_request_create(ce);
intel_context_unpin(ce);
return rq;
}
......@@ -12,58 +12,66 @@
#include "intel_context_types.h"
#include "intel_engine_types.h"
struct intel_context *intel_context_alloc(void);
void intel_context_free(struct intel_context *ce);
void intel_context_init(struct intel_context *ce,
struct i915_gem_context *ctx,
struct intel_engine_cs *engine);
/**
* intel_context_lookup - Find the matching HW context for this (ctx, engine)
* @ctx - the parent GEM context
* @engine - the target HW engine
*
* May return NULL if the HW context hasn't been instantiated (i.e. unused).
*/
struct intel_context *
intel_context_lookup(struct i915_gem_context *ctx,
intel_context_create(struct i915_gem_context *ctx,
struct intel_engine_cs *engine);
void intel_context_free(struct intel_context *ce);
/**
* intel_context_pin_lock - Stabilises the 'pinned' status of the HW context
* @ctx - the parent GEM context
* @engine - the target HW engine
* intel_context_lock_pinned - Stabilises the 'pinned' status of the HW context
* @ce - the context
*
* Acquire a lock on the pinned status of the HW context, such that the context
* can neither be bound to the GPU nor unbound whilst the lock is held, i.e.
* intel_context_is_pinned() remains stable.
*/
struct intel_context *
intel_context_pin_lock(struct i915_gem_context *ctx,
struct intel_engine_cs *engine);
static inline int intel_context_lock_pinned(struct intel_context *ce)
__acquires(ce->pin_mutex)
{
return mutex_lock_interruptible(&ce->pin_mutex);
}
/**
* intel_context_is_pinned - Reports the 'pinned' status
* @ce - the context
*
* While in use by the GPU, the context, along with its ring and page
* tables is pinned into memory and the GTT.
*
* Returns: true if the context is currently pinned for use by the GPU.
*/
static inline bool
intel_context_is_pinned(struct intel_context *ce)
{
return atomic_read(&ce->pin_count);
}
static inline void intel_context_pin_unlock(struct intel_context *ce)
__releases(ce->pin_mutex)
/**
* intel_context_unlock_pinned - Releases the earlier locking of 'pinned' status
* @ce - the context
*
* Releases the lock earlier acquired by intel_context_lock_pinned().
*/
static inline void intel_context_unlock_pinned(struct intel_context *ce)
__releases(ce->pin_mutex)
{
mutex_unlock(&ce->pin_mutex);
}
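
A minimal sketch of the calling pattern the kernel-doc above describes: take the pin lock, act only while the context is actually pinned, then release. example_query_pinned() and read_pinned_state() are hypothetical.

/* Hypothetical caller sampling state that is only stable while pinned. */
static int example_query_pinned(struct intel_context *ce)
{
	int err;

	err = intel_context_lock_pinned(ce);
	if (err)
		return err;

	if (intel_context_is_pinned(ce))
		read_pinned_state(ce);	/* e.g. inspect ce->lrc_reg_state */

	intel_context_unlock_pinned(ce);
	return 0;
}
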
struct intel_context *
__intel_context_insert(struct i915_gem_context *ctx,
struct intel_engine_cs *engine,
struct intel_context *ce);
void
__intel_context_remove(struct intel_context *ce);
int __intel_context_do_pin(struct intel_context *ce);
struct intel_context *
intel_context_pin(struct i915_gem_context *ctx, struct intel_engine_cs *engine);
static inline int intel_context_pin(struct intel_context *ce)
{
if (likely(atomic_inc_not_zero(&ce->pin_count)))
return 0;
return __intel_context_do_pin(ce);
}
static inline void __intel_context_pin(struct intel_context *ce)
{
......@@ -73,6 +81,27 @@ static inline void __intel_context_pin(struct intel_context *ce)
void intel_context_unpin(struct intel_context *ce);
void intel_context_enter_engine(struct intel_context *ce);
void intel_context_exit_engine(struct intel_context *ce);
static inline void intel_context_enter(struct intel_context *ce)
{
if (!ce->active_count++)
ce->ops->enter(ce);
}
static inline void intel_context_mark_active(struct intel_context *ce)
{
++ce->active_count;
}
static inline void intel_context_exit(struct intel_context *ce)
{
GEM_BUG_ON(!ce->active_count);
if (!--ce->active_count)
ce->ops->exit(ce);
}
static inline struct intel_context *intel_context_get(struct intel_context *ce)
{
kref_get(&ce->ref);
......@@ -84,4 +113,18 @@ static inline void intel_context_put(struct intel_context *ce)
kref_put(&ce->ref, ce->ops->destroy);
}
static inline void intel_context_timeline_lock(struct intel_context *ce)
__acquires(&ce->ring->timeline->mutex)
{
mutex_lock(&ce->ring->timeline->mutex);
}
static inline void intel_context_timeline_unlock(struct intel_context *ce)
__releases(&ce->ring->timeline->mutex)
{
mutex_unlock(&ce->ring->timeline->mutex);
}
struct i915_request *intel_context_create_request(struct intel_context *ce);
#endif /* __INTEL_CONTEXT_H__ */
......@@ -10,11 +10,11 @@
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/types.h>
#include "i915_active_types.h"
#include "intel_engine_types.h"
#include "intel_sseu.h"
struct i915_gem_context;
struct i915_vma;
......@@ -25,20 +25,13 @@ struct intel_context_ops {
int (*pin)(struct intel_context *ce);
void (*unpin)(struct intel_context *ce);
void (*enter)(struct intel_context *ce);
void (*exit)(struct intel_context *ce);
void (*reset)(struct intel_context *ce);
void (*destroy)(struct kref *kref);
};
/*
* Powergating configuration for a particular (context,engine).
*/
struct intel_sseu {
u8 slice_mask;
u8 subslice_mask;
u8 min_eus_per_subslice;
u8 max_eus_per_subslice;
};
struct intel_context {
struct kref ref;
......@@ -46,7 +39,6 @@ struct intel_context {
struct intel_engine_cs *engine;
struct intel_engine_cs *active;
struct list_head active_link;
struct list_head signal_link;
struct list_head signals;
......@@ -56,6 +48,8 @@ struct intel_context {
u32 *lrc_reg_state;
u64 lrc_desc;
unsigned int active_count; /* notionally protected by timeline->mutex */
atomic_t pin_count;
struct mutex pin_mutex; /* guards pinning and associated on-gpuing */
......@@ -68,7 +62,6 @@ struct intel_context {
struct i915_active_request active_tracker;
const struct intel_context_ops *ops;
struct rb_node node;
/** sseu: Control eu/slice partitioning */
struct intel_sseu sseu;
......
......@@ -106,24 +106,6 @@ hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
void intel_engines_set_scheduler_caps(struct drm_i915_private *i915);
static inline bool __execlists_need_preempt(int prio, int last)
{
/*
* Allow preemption of low -> normal -> high, but we do
* not allow low priority tasks to preempt other low priority
* tasks under the impression that latency for low priority
* tasks does not matter (as much as background throughput),
* so kiss.
*
* More naturally we would write
* prio >= max(0, last);
* except that we wish to prevent triggering preemption at the same
* priority level: the task that is running should remain running
* to preserve FIFO ordering of dependencies.
*/
return prio > max(I915_PRIORITY_NORMAL - 1, last);
}
static inline void
execlists_set_active(struct intel_engine_execlists *execlists,
unsigned int bit)
......@@ -233,8 +215,6 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
*/
#define I915_GEM_HWS_PREEMPT 0x32
#define I915_GEM_HWS_PREEMPT_ADDR (I915_GEM_HWS_PREEMPT * sizeof(u32))
#define I915_GEM_HWS_HANGCHECK 0x34
#define I915_GEM_HWS_HANGCHECK_ADDR (I915_GEM_HWS_HANGCHECK * sizeof(u32))
#define I915_GEM_HWS_SEQNO 0x40
#define I915_GEM_HWS_SEQNO_ADDR (I915_GEM_HWS_SEQNO * sizeof(u32))
#define I915_GEM_HWS_SCRATCH 0x80
......@@ -362,14 +342,16 @@ __intel_ring_space(unsigned int head, unsigned int tail, unsigned int size)
return (head - tail - CACHELINE_BYTES) & (size - 1);
}
int intel_engine_setup_common(struct intel_engine_cs *engine);
int intel_engines_init_mmio(struct drm_i915_private *i915);
int intel_engines_setup(struct drm_i915_private *i915);
int intel_engines_init(struct drm_i915_private *i915);
void intel_engines_cleanup(struct drm_i915_private *i915);
int intel_engine_init_common(struct intel_engine_cs *engine);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);
int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);
int intel_ring_submission_setup(struct intel_engine_cs *engine);
int intel_ring_submission_init(struct intel_engine_cs *engine);
int intel_engine_stop_cs(struct intel_engine_cs *engine);
void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine);
......@@ -382,6 +364,8 @@ u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine);
void intel_engine_get_instdone(struct intel_engine_cs *engine,
struct intel_instdone *instdone);
void intel_engine_init_execlists(struct intel_engine_cs *engine);
void intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
......@@ -458,19 +442,14 @@ static inline void intel_engine_reset(struct intel_engine_cs *engine,
{
if (engine->reset.reset)
engine->reset.reset(engine, stalled);
engine->serial++; /* contexts lost */
}
void intel_engines_sanitize(struct drm_i915_private *i915, bool force);
void intel_gt_resume(struct drm_i915_private *i915);
bool intel_engine_is_idle(struct intel_engine_cs *engine);
bool intel_engines_are_idle(struct drm_i915_private *dev_priv);
void intel_engine_lost_context(struct intel_engine_cs *engine);
void intel_engines_park(struct drm_i915_private *i915);
void intel_engines_unpark(struct drm_i915_private *i915);
void intel_engines_reset_default_submission(struct drm_i915_private *i915);
unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915);
......@@ -567,17 +546,4 @@ static inline bool inject_preempt_hang(struct intel_engine_execlists *execlists)
#endif
static inline u32
intel_engine_next_hangcheck_seqno(struct intel_engine_cs *engine)
{
return engine->hangcheck.next_seqno =
next_pseudo_random32(engine->hangcheck.next_seqno);
}
static inline u32
intel_engine_get_hangcheck_seqno(struct intel_engine_cs *engine)
{
return intel_read_status_page(engine, I915_GEM_HWS_HANGCHECK);
}
#endif /* _INTEL_RINGBUFFER_H_ */
......@@ -25,9 +25,11 @@
#include <drm/drm_print.h>
#include "i915_drv.h"
#include "i915_reset.h"
#include "intel_ringbuffer.h"
#include "intel_engine.h"
#include "intel_engine_pm.h"
#include "intel_lrc.h"
#include "intel_reset.h"
/* Haswell does have the CXT_SIZE register however it does not appear to be
* valid. Now, docs explain in dwords what is in the context object. The full
......@@ -48,35 +50,24 @@
struct engine_class_info {
const char *name;
int (*init_legacy)(struct intel_engine_cs *engine);
int (*init_execlists)(struct intel_engine_cs *engine);
u8 uabi_class;
};
static const struct engine_class_info intel_engine_classes[] = {
[RENDER_CLASS] = {
.name = "rcs",
.init_execlists = logical_render_ring_init,
.init_legacy = intel_init_render_ring_buffer,
.uabi_class = I915_ENGINE_CLASS_RENDER,
},
[COPY_ENGINE_CLASS] = {
.name = "bcs",
.init_execlists = logical_xcs_ring_init,
.init_legacy = intel_init_blt_ring_buffer,
.uabi_class = I915_ENGINE_CLASS_COPY,
},
[VIDEO_DECODE_CLASS] = {
.name = "vcs",
.init_execlists = logical_xcs_ring_init,
.init_legacy = intel_init_bsd_ring_buffer,
.uabi_class = I915_ENGINE_CLASS_VIDEO,
},
[VIDEO_ENHANCEMENT_CLASS] = {
.name = "vecs",
.init_execlists = logical_xcs_ring_init,
.init_legacy = intel_init_vebox_ring_buffer,
.uabi_class = I915_ENGINE_CLASS_VIDEO_ENHANCE,
},
};
......@@ -212,6 +203,22 @@ __intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
PAGE_SIZE);
case 5:
case 4:
/*
* There is a discrepancy here between the size reported
* by the register and the size of the context layout
* in the docs. Both are described as authoritative!
*
* The discrepancy is on the order of a few cachelines,
* but the total is under one page (4k), which is our
* minimum allocation anyway so it should all come
* out in the wash.
*/
cxt_size = I915_READ(CXT_SIZE) + 1;
DRM_DEBUG_DRIVER("gen%d CXT_SIZE = %d bytes [0x%08x]\n",
INTEL_GEN(dev_priv),
cxt_size * 64,
cxt_size - 1);
return round_up(cxt_size * 64, PAGE_SIZE);
case 3:
case 2:
/* For the special day when i810 gets merged. */
......@@ -312,6 +319,12 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
engine->class = info->class;
engine->instance = info->instance;
/*
* To be overridden by the backend on setup. However to facilitate
* cleanup on error during setup, we always provide the destroy vfunc.
*/
engine->destroy = (typeof(engine->destroy))kfree;
engine->uabi_class = intel_engine_classes[info->class].uabi_class;
engine->context_size = __intel_engine_context_size(dev_priv,
......@@ -336,18 +349,70 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
return 0;
}
static void __setup_engine_capabilities(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
if (engine->class == VIDEO_DECODE_CLASS) {
/*
* HEVC support is present on first engine instance
* before Gen11 and on all instances afterwards.
*/
if (INTEL_GEN(i915) >= 11 ||
(INTEL_GEN(i915) >= 9 && engine->instance == 0))
engine->uabi_capabilities |=
I915_VIDEO_CLASS_CAPABILITY_HEVC;
/*
* SFC block is present only on even logical engine
* instances.
*/
if ((INTEL_GEN(i915) >= 11 &&
RUNTIME_INFO(i915)->vdbox_sfc_access & engine->mask) ||
(INTEL_GEN(i915) >= 9 && engine->instance == 0))
engine->uabi_capabilities |=
I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC;
} else if (engine->class == VIDEO_ENHANCEMENT_CLASS) {
if (INTEL_GEN(i915) >= 9)
engine->uabi_capabilities |=
I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC;
}
}
static void intel_setup_engine_capabilities(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
for_each_engine(engine, i915, id)
__setup_engine_capabilities(engine);
}
/**
* intel_engines_cleanup() - free the resources allocated for Command Streamers
* @i915: the i915 device
*/
void intel_engines_cleanup(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
for_each_engine(engine, i915, id) {
engine->destroy(engine);
i915->engine[id] = NULL;
}
}
/**
* intel_engines_init_mmio() - allocate and prepare the Engine Command Streamers
* @dev_priv: i915 device private
* @i915: the i915 device
*
* Return: non-zero if the initialization failed.
*/
int intel_engines_init_mmio(struct drm_i915_private *dev_priv)
int intel_engines_init_mmio(struct drm_i915_private *i915)
{
struct intel_device_info *device_info = mkwrite_device_info(dev_priv);
const unsigned int engine_mask = INTEL_INFO(dev_priv)->engine_mask;
struct intel_engine_cs *engine;
enum intel_engine_id id;
struct intel_device_info *device_info = mkwrite_device_info(i915);
const unsigned int engine_mask = INTEL_INFO(i915)->engine_mask;
unsigned int mask = 0;
unsigned int i;
int err;
......@@ -360,10 +425,10 @@ int intel_engines_init_mmio(struct drm_i915_private *dev_priv)
return -ENODEV;
for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
if (!HAS_ENGINE(dev_priv, i))
if (!HAS_ENGINE(i915, i))
continue;
err = intel_engine_setup(dev_priv, i);
err = intel_engine_setup(i915, i);
if (err)
goto cleanup;
......@@ -379,69 +444,52 @@ int intel_engines_init_mmio(struct drm_i915_private *dev_priv)
device_info->engine_mask = mask;
/* We always presume we have at least RCS available for later probing */
if (WARN_ON(!HAS_ENGINE(dev_priv, RCS0))) {
if (WARN_ON(!HAS_ENGINE(i915, RCS0))) {
err = -ENODEV;
goto cleanup;
}
RUNTIME_INFO(dev_priv)->num_engines = hweight32(mask);
RUNTIME_INFO(i915)->num_engines = hweight32(mask);
i915_check_and_clear_faults(dev_priv);
i915_check_and_clear_faults(i915);
intel_setup_engine_capabilities(i915);
return 0;
cleanup:
for_each_engine(engine, dev_priv, id)
kfree(engine);
intel_engines_cleanup(i915);
return err;
}
/**
* intel_engines_init() - init the Engine Command Streamers
* @dev_priv: i915 device private
* @i915: i915 device private
*
* Return: non-zero if the initialization failed.
*/
int intel_engines_init(struct drm_i915_private *dev_priv)
int intel_engines_init(struct drm_i915_private *i915)
{
int (*init)(struct intel_engine_cs *engine);
struct intel_engine_cs *engine;
enum intel_engine_id id, err_id;
enum intel_engine_id id;
int err;
for_each_engine(engine, dev_priv, id) {
const struct engine_class_info *class_info =
&intel_engine_classes[engine->class];
int (*init)(struct intel_engine_cs *engine);
if (HAS_EXECLISTS(dev_priv))
init = class_info->init_execlists;
else
init = class_info->init_legacy;
err = -EINVAL;
err_id = id;
if (GEM_DEBUG_WARN_ON(!init))
goto cleanup;
if (HAS_EXECLISTS(i915))
init = intel_execlists_submission_init;
else
init = intel_ring_submission_init;
for_each_engine(engine, i915, id) {
err = init(engine);
if (err)
goto cleanup;
GEM_BUG_ON(!engine->submit_request);
}
return 0;
cleanup:
for_each_engine(engine, dev_priv, id) {
if (id >= err_id) {
kfree(engine);
dev_priv->engine[id] = NULL;
} else {
dev_priv->gt.cleanup_engine(engine);
}
}
intel_engines_cleanup(i915);
return err;
}
......@@ -450,7 +498,7 @@ static void intel_engine_init_batch_pool(struct intel_engine_cs *engine)
i915_gem_batch_pool_init(&engine->batch_pool, engine);
}
static void intel_engine_init_execlist(struct intel_engine_cs *engine)
void intel_engine_init_execlists(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
......@@ -557,16 +605,7 @@ static int init_status_page(struct intel_engine_cs *engine)
return ret;
}
/**
* intel_engines_setup_common - setup engine state not requiring hw access
* @engine: Engine to setup.
*
* Initializes @engine@ structure members shared between legacy and execlists
* submission modes which do not require hardware access.
*
* Typically done early in the submission mode specific engine setup stage.
*/
int intel_engine_setup_common(struct intel_engine_cs *engine)
static int intel_engine_setup_common(struct intel_engine_cs *engine)
{
int err;
......@@ -583,10 +622,15 @@ int intel_engine_setup_common(struct intel_engine_cs *engine)
i915_timeline_set_subclass(&engine->timeline, TIMELINE_ENGINE);
intel_engine_init_breadcrumbs(engine);
intel_engine_init_execlist(engine);
intel_engine_init_execlists(engine);
intel_engine_init_hangcheck(engine);
intel_engine_init_batch_pool(engine);
intel_engine_init_cmd_parser(engine);
intel_engine_init__pm(engine);
/* Use the whole device by default */
engine->sseu =
intel_sseu_from_device_info(&RUNTIME_INFO(engine->i915)->sseu);
return 0;
......@@ -595,6 +639,49 @@ int intel_engine_setup_common(struct intel_engine_cs *engine)
return err;
}
/**
* intel_engines_setup - setup engine state not requiring hw access
* @i915: Device to setup.
*
* Initializes engine structure members shared between legacy and execlists
* submission modes which do not require hardware access.
*
* Typically done early in the submission mode specific engine setup stage.
*/
int intel_engines_setup(struct drm_i915_private *i915)
{
int (*setup)(struct intel_engine_cs *engine);
struct intel_engine_cs *engine;
enum intel_engine_id id;
int err;
if (HAS_EXECLISTS(i915))
setup = intel_execlists_submission_setup;
else
setup = intel_ring_submission_setup;
for_each_engine(engine, i915, id) {
err = intel_engine_setup_common(engine);
if (err)
goto cleanup;
err = setup(engine);
if (err)
goto cleanup;
/* We expect the backend to take control over its state */
GEM_BUG_ON(engine->destroy == (typeof(engine->destroy))kfree);
GEM_BUG_ON(!engine->cops);
}
return 0;
cleanup:
intel_engines_cleanup(i915);
return err;
}
void intel_engines_set_scheduler_caps(struct drm_i915_private *i915)
{
static const struct {
......@@ -675,6 +762,7 @@ static int measure_breadcrumb_dw(struct intel_engine_cs *engine)
goto out_timeline;
dw = engine->emit_fini_breadcrumb(&frame->rq, frame->cs) - frame->cs;
GEM_BUG_ON(dw & 1); /* RING_TAIL must be qword aligned */
i915_timeline_unpin(&frame->timeline);
......@@ -690,11 +778,17 @@ static int pin_context(struct i915_gem_context *ctx,
struct intel_context **out)
{
struct intel_context *ce;
int err;
ce = intel_context_pin(ctx, engine);
ce = i915_gem_context_get_engine(ctx, engine->id);
if (IS_ERR(ce))
return PTR_ERR(ce);
err = intel_context_pin(ce);
intel_context_put(ce);
if (err)
return err;
*out = ce;
return 0;
}
......@@ -753,30 +847,6 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
return ret;
}
void intel_gt_resume(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
/*
* After resume, we may need to poke into the pinned kernel
* contexts to paper over any damage caused by the sudden suspend.
* Only the kernel contexts should remain pinned over suspend,
* allowing us to fixup the user contexts on their first pin.
*/
for_each_engine(engine, i915, id) {
struct intel_context *ce;
ce = engine->kernel_context;
if (ce)
ce->ops->reset(ce);
ce = engine->preempt_context;
if (ce)
ce->ops->reset(ce);
}
}
/**
* intel_engines_cleanup_common - cleans up the engine state created by
* the common initializers.
......@@ -1062,10 +1132,15 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
if (i915_reset_failed(engine->i915))
return true;
if (!intel_wakeref_active(&engine->wakeref))
return true;
/* Waiting to drain ELSP? */
if (READ_ONCE(engine->execlists.active)) {
struct tasklet_struct *t = &engine->execlists.tasklet;
synchronize_hardirq(engine->i915->drm.irq);
local_bh_disable();
if (tasklet_trylock(t)) {
/* Must wait for any GPU reset in progress. */
......@@ -1123,117 +1198,6 @@ void intel_engines_reset_default_submission(struct drm_i915_private *i915)
engine->set_default_submission(engine);
}
static bool reset_engines(struct drm_i915_private *i915)
{
if (INTEL_INFO(i915)->gpu_reset_clobbers_display)
return false;
return intel_gpu_reset(i915, ALL_ENGINES) == 0;
}
/**
* intel_engines_sanitize: called after the GPU has lost power
* @i915: the i915 device
* @force: ignore a failed reset and sanitize engine state anyway
*
* Anytime we reset the GPU, either with an explicit GPU reset or through a
* PCI power cycle, the GPU loses state and we must reset our state tracking
* to match. Note that calling intel_engines_sanitize() if the GPU has not
* been reset results in much confusion!
*/
void intel_engines_sanitize(struct drm_i915_private *i915, bool force)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
GEM_TRACE("\n");
if (!reset_engines(i915) && !force)
return;
for_each_engine(engine, i915, id)
intel_engine_reset(engine, false);
}
/**
* intel_engines_park: called when the GT is transitioning from busy->idle
* @i915: the i915 device
*
* The GT is now idle and about to go to sleep (maybe never to wake again?).
* Time for us to tidy and put away our toys (release resources back to the
* system).
*/
void intel_engines_park(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
for_each_engine(engine, i915, id) {
/* Flush the residual irq tasklets first. */
intel_engine_disarm_breadcrumbs(engine);
tasklet_kill(&engine->execlists.tasklet);
/*
* We are committed now to parking the engines, make sure there
* will be no more interrupts arriving later and the engines
* are truly idle.
*/
if (wait_for(intel_engine_is_idle(engine), 10)) {
struct drm_printer p = drm_debug_printer(__func__);
dev_err(i915->drm.dev,
"%s is not idle before parking\n",
engine->name);
intel_engine_dump(engine, &p, NULL);
}
/* Must be reset upon idling, or we may miss the busy wakeup. */
GEM_BUG_ON(engine->execlists.queue_priority_hint != INT_MIN);
if (engine->park)
engine->park(engine);
if (engine->pinned_default_state) {
i915_gem_object_unpin_map(engine->default_state);
engine->pinned_default_state = NULL;
}
i915_gem_batch_pool_fini(&engine->batch_pool);
engine->execlists.no_priolist = false;
}
i915->gt.active_engines = 0;
}
/**
* intel_engines_unpark: called when the GT is transitioning from idle->busy
* @i915: the i915 device
*
* The GT was idle and now about to fire up with some new user requests.
*/
void intel_engines_unpark(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
for_each_engine(engine, i915, id) {
void *map;
/* Pin the default state for fast resets from atomic context. */
map = NULL;
if (engine->default_state)
map = i915_gem_object_pin_map(engine->default_state,
I915_MAP_WB);
if (!IS_ERR_OR_NULL(map))
engine->pinned_default_state = map;
if (engine->unpark)
engine->unpark(engine);
intel_engine_init_hangcheck(engine);
}
}
/**
* intel_engine_lost_context: called when the GPU is reset into unknown state
* @engine: the engine
......@@ -1312,8 +1276,11 @@ static void print_request(struct drm_printer *m,
i915_request_completed(rq) ? "!" :
i915_request_started(rq) ? "*" :
"",
test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
&rq->fence.flags) ? "+" :
test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
&rq->fence.flags) ? "+" : "",
&rq->fence.flags) ? "-" :
"",
buf,
jiffies_to_msecs(jiffies - rq->emitted_jiffies),
name);
......@@ -1518,9 +1485,8 @@ void intel_engine_dump(struct intel_engine_cs *engine,
if (i915_reset_failed(engine->i915))
drm_printf(m, "*** WEDGED ***\n");
drm_printf(m, "\tHangcheck %x:%x [%d ms]\n",
engine->hangcheck.last_seqno,
engine->hangcheck.next_seqno,
drm_printf(m, "\tAwake? %d\n", atomic_read(&engine->wakeref.count));
drm_printf(m, "\tHangcheck: %d ms ago\n",
jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp));
drm_printf(m, "\tReset count: %d (global %d)\n",
i915_reset_engine_count(error, engine),
......@@ -1752,6 +1718,5 @@ intel_engine_find_active_request(struct intel_engine_cs *engine)
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_engine.c"
#include "selftests/intel_engine_cs.c"
#include "selftest_engine_cs.c"
#endif
/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2019 Intel Corporation
*/
#include "i915_drv.h"
#include "intel_engine.h"
#include "intel_engine_pm.h"
#include "intel_gt_pm.h"
static int __engine_unpark(struct intel_wakeref *wf)
{
struct intel_engine_cs *engine =
container_of(wf, typeof(*engine), wakeref);
void *map;
GEM_TRACE("%s\n", engine->name);
intel_gt_pm_get(engine->i915);
/* Pin the default state for fast resets from atomic context. */
map = NULL;
if (engine->default_state)
map = i915_gem_object_pin_map(engine->default_state,
I915_MAP_WB);
if (!IS_ERR_OR_NULL(map))
engine->pinned_default_state = map;
if (engine->unpark)
engine->unpark(engine);
intel_engine_init_hangcheck(engine);
return 0;
}
void intel_engine_pm_get(struct intel_engine_cs *engine)
{
intel_wakeref_get(engine->i915, &engine->wakeref, __engine_unpark);
}
void intel_engine_park(struct intel_engine_cs *engine)
{
/*
* We are committed now to parking this engine, make sure there
* will be no more interrupts arriving later and the engine
* is truly idle.
*/
if (wait_for(intel_engine_is_idle(engine), 10)) {
struct drm_printer p = drm_debug_printer(__func__);
dev_err(engine->i915->drm.dev,
"%s is not idle before parking\n",
engine->name);
intel_engine_dump(engine, &p, NULL);
}
}
static bool switch_to_kernel_context(struct intel_engine_cs *engine)
{
struct i915_request *rq;
/* Already inside the kernel context, safe to power down. */
if (engine->wakeref_serial == engine->serial)
return true;
/* GPU is pointing to the void, as good as in the kernel context. */
if (i915_reset_failed(engine->i915))
return true;
/*
* Note, we do this without taking the timeline->mutex. We cannot
* as we may be called while retiring the kernel context and so
* already underneath the timeline->mutex. Instead we rely on the
* exclusive property of the __engine_park that prevents anyone
* else from creating a request on this engine. This also requires
* that the ring is empty and we avoid any waits while constructing
* the context, as they assume protection by the timeline->mutex.
* This should hold true as we can only park the engine after
* retiring the last request, thus all rings should be empty and
* all timelines idle.
*/
rq = __i915_request_create(engine->kernel_context, GFP_NOWAIT);
if (IS_ERR(rq))
/* Context switch failed, hope for the best! Maybe reset? */
return true;
/* Check again on the next retirement. */
engine->wakeref_serial = engine->serial + 1;
__i915_request_commit(rq);
return false;
}
static int __engine_park(struct intel_wakeref *wf)
{
struct intel_engine_cs *engine =
container_of(wf, typeof(*engine), wakeref);
/*
* If one and only one request is completed between pm events,
* we know that we are inside the kernel context and it is
* safe to power down. (We are paranoid in case that runtime
* suspend causes corruption to the active context image, and
* want to avoid that impacting userspace.)
*/
if (!switch_to_kernel_context(engine))
return -EBUSY;
GEM_TRACE("%s\n", engine->name);
intel_engine_disarm_breadcrumbs(engine);
/* Must be reset upon idling, or we may miss the busy wakeup. */
GEM_BUG_ON(engine->execlists.queue_priority_hint != INT_MIN);
if (engine->park)
engine->park(engine);
if (engine->pinned_default_state) {
i915_gem_object_unpin_map(engine->default_state);
engine->pinned_default_state = NULL;
}
engine->execlists.no_priolist = false;
intel_gt_pm_put(engine->i915);
return 0;
}
void intel_engine_pm_put(struct intel_engine_cs *engine)
{
intel_wakeref_put(engine->i915, &engine->wakeref, __engine_park);
}
void intel_engine_init__pm(struct intel_engine_cs *engine)
{
intel_wakeref_init(&engine->wakeref);
}
int intel_engines_resume(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
int err = 0;
intel_gt_pm_get(i915);
for_each_engine(engine, i915, id) {
intel_engine_pm_get(engine);
engine->serial++; /* kernel context lost */
err = engine->resume(engine);
intel_engine_pm_put(engine);
if (err) {
dev_err(i915->drm.dev,
"Failed to restart %s (%d)\n",
engine->name, err);
break;
}
}
intel_gt_pm_put(i915);
return err;
}
/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2019 Intel Corporation
*/
#ifndef INTEL_ENGINE_PM_H
#define INTEL_ENGINE_PM_H
struct drm_i915_private;
struct intel_engine_cs;
void intel_engine_pm_get(struct intel_engine_cs *engine);
void intel_engine_pm_put(struct intel_engine_cs *engine);
void intel_engine_park(struct intel_engine_cs *engine);
void intel_engine_init__pm(struct intel_engine_cs *engine);
int intel_engines_resume(struct drm_i915_private *i915);
#endif /* INTEL_ENGINE_PM_H */
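As a usage note, the per-engine wakeref declared above follows the usual get/put discipline: the first get runs __engine_unpark(), the last put may run __engine_park(). A minimal sketch of a hypothetical caller (do_engine_work() and the elided submission step are illustrative placeholders, not part of this series):

/* Hypothetical sketch: bracket engine access with the new engine PM API. */
static int do_engine_work(struct intel_engine_cs *engine)
{
	int err = 0;

	intel_engine_pm_get(engine);	/* first get runs __engine_unpark() */

	/* ... submit or poke at the engine while it is held awake ... */

	intel_engine_pm_put(engine);	/* last put may trigger __engine_park() */
	return err;
}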
......@@ -14,14 +14,15 @@
#include <linux/types.h>
#include "i915_gem.h"
#include "i915_gem_batch_pool.h"
#include "i915_pmu.h"
#include "i915_priolist_types.h"
#include "i915_selftest.h"
#include "i915_timeline_types.h"
#include "intel_sseu.h"
#include "intel_wakeref.h"
#include "intel_workarounds_types.h"
#include "i915_gem_batch_pool.h"
#include "i915_pmu.h"
#define I915_MAX_SLICES 3
#define I915_MAX_SUBSLICES 8
......@@ -52,8 +53,8 @@ struct intel_instdone {
struct intel_engine_hangcheck {
u64 acthd;
u32 last_seqno;
u32 next_seqno;
u32 last_ring;
u32 last_head;
unsigned long action_timestamp;
struct intel_instdone instdone;
};
......@@ -226,6 +227,7 @@ struct intel_engine_execlists {
* @queue: queue of requests, in priority lists
*/
struct rb_root_cached queue;
struct rb_root_cached virtual;
/**
* @csb_write: control register for Context Switch buffer
......@@ -278,6 +280,10 @@ struct intel_engine_cs {
u32 context_size;
u32 mmio_base;
u32 uabi_capabilities;
struct intel_sseu sseu;
struct intel_ring *buffer;
struct i915_timeline timeline;
......@@ -285,6 +291,10 @@ struct intel_engine_cs {
struct intel_context *kernel_context; /* pinned */
struct intel_context *preempt_context; /* pinned; optional */
unsigned long serial;
unsigned long wakeref_serial;
struct intel_wakeref wakeref;
struct drm_i915_gem_object *default_state;
void *pinned_default_state;
......@@ -357,7 +367,7 @@ struct intel_engine_cs {
void (*irq_enable)(struct intel_engine_cs *engine);
void (*irq_disable)(struct intel_engine_cs *engine);
int (*init_hw)(struct intel_engine_cs *engine);
int (*resume)(struct intel_engine_cs *engine);
struct {
void (*prepare)(struct intel_engine_cs *engine);
......@@ -397,6 +407,13 @@ struct intel_engine_cs {
*/
void (*submit_request)(struct i915_request *rq);
/*
* Called on signaling of a SUBMIT_FENCE, passing along the signaling
* request down to the bonded pairs.
*/
void (*bond_execute)(struct i915_request *rq,
struct dma_fence *signal);
/*
* Call when the priority on a request has changed and it and its
* dependencies may need rescheduling. Note the request itself may
......@@ -413,7 +430,7 @@ struct intel_engine_cs {
*/
void (*cancel_requests)(struct intel_engine_cs *engine);
void (*cleanup)(struct intel_engine_cs *engine);
void (*destroy)(struct intel_engine_cs *engine);
struct intel_engine_execlists execlists;
......@@ -438,6 +455,7 @@ struct intel_engine_cs {
#define I915_ENGINE_HAS_PREEMPTION BIT(2)
#define I915_ENGINE_HAS_SEMAPHORES BIT(3)
#define I915_ENGINE_NEEDS_BREADCRUMB_TASKLET BIT(4)
#define I915_ENGINE_IS_VIRTUAL BIT(5)
unsigned int flags;
/*
......@@ -527,6 +545,12 @@ intel_engine_needs_breadcrumb_tasklet(const struct intel_engine_cs *engine)
return engine->flags & I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;
}
static inline bool
intel_engine_is_virtual(const struct intel_engine_cs *engine)
{
return engine->flags & I915_ENGINE_IS_VIRTUAL;
}
#define instdone_slice_mask(dev_priv__) \
(IS_GEN(dev_priv__, 7) ? \
1 : RUNTIME_INFO(dev_priv__)->sseu.slice_mask)
......
/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2019 Intel Corporation
*/
#include "i915_drv.h"
#include "intel_gt_pm.h"
#include "intel_pm.h"
#include "intel_wakeref.h"
static void pm_notify(struct drm_i915_private *i915, int state)
{
blocking_notifier_call_chain(&i915->gt.pm_notifications, state, i915);
}
static int intel_gt_unpark(struct intel_wakeref *wf)
{
struct drm_i915_private *i915 =
container_of(wf, typeof(*i915), gt.wakeref);
GEM_TRACE("\n");
/*
* It seems that the DMC likes to transition between the DC states a lot
* when there are no connected displays (no active power domains) during
* command submission.
*
* This activity has negative impact on the performance of the chip with
* huge latencies observed in the interrupt handler and elsewhere.
*
* Work around it by grabbing a GT IRQ power domain whilst there is any
* GT activity, preventing any DC state transitions.
*/
i915->gt.awake = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
GEM_BUG_ON(!i915->gt.awake);
intel_enable_gt_powersave(i915);
i915_update_gfx_val(i915);
if (INTEL_GEN(i915) >= 6)
gen6_rps_busy(i915);
i915_pmu_gt_unparked(i915);
i915_queue_hangcheck(i915);
pm_notify(i915, INTEL_GT_UNPARK);
return 0;
}
void intel_gt_pm_get(struct drm_i915_private *i915)
{
intel_wakeref_get(i915, &i915->gt.wakeref, intel_gt_unpark);
}
static int intel_gt_park(struct intel_wakeref *wf)
{
struct drm_i915_private *i915 =
container_of(wf, typeof(*i915), gt.wakeref);
intel_wakeref_t wakeref = fetch_and_zero(&i915->gt.awake);
GEM_TRACE("\n");
pm_notify(i915, INTEL_GT_PARK);
i915_pmu_gt_parked(i915);
if (INTEL_GEN(i915) >= 6)
gen6_rps_idle(i915);
GEM_BUG_ON(!wakeref);
intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ, wakeref);
return 0;
}
void intel_gt_pm_put(struct drm_i915_private *i915)
{
intel_wakeref_put(i915, &i915->gt.wakeref, intel_gt_park);
}
void intel_gt_pm_init(struct drm_i915_private *i915)
{
intel_wakeref_init(&i915->gt.wakeref);
BLOCKING_INIT_NOTIFIER_HEAD(&i915->gt.pm_notifications);
}
static bool reset_engines(struct drm_i915_private *i915)
{
if (INTEL_INFO(i915)->gpu_reset_clobbers_display)
return false;
return intel_gpu_reset(i915, ALL_ENGINES) == 0;
}
/**
* intel_gt_sanitize: called after the GPU has lost power
* @i915: the i915 device
* @force: ignore a failed reset and sanitize engine state anyway
*
* Anytime we reset the GPU, either with an explicit GPU reset or through a
* PCI power cycle, the GPU loses state and we must reset our state tracking
* to match. Note that calling intel_gt_sanitize() if the GPU has not
* been reset results in much confusion!
*/
void intel_gt_sanitize(struct drm_i915_private *i915, bool force)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
GEM_TRACE("\n");
if (!reset_engines(i915) && !force)
return;
for_each_engine(engine, i915, id)
intel_engine_reset(engine, false);
}
void intel_gt_resume(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
/*
* After resume, we may need to poke into the pinned kernel
* contexts to paper over any damage caused by the sudden suspend.
* Only the kernel contexts should remain pinned over suspend,
* allowing us to fixup the user contexts on their first pin.
*/
for_each_engine(engine, i915, id) {
struct intel_context *ce;
ce = engine->kernel_context;
if (ce)
ce->ops->reset(ce);
ce = engine->preempt_context;
if (ce)
ce->ops->reset(ce);
}
}
/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2019 Intel Corporation
*/
#ifndef INTEL_GT_PM_H
#define INTEL_GT_PM_H
#include <linux/types.h>
struct drm_i915_private;
enum {
INTEL_GT_UNPARK,
INTEL_GT_PARK,
};
void intel_gt_pm_get(struct drm_i915_private *i915);
void intel_gt_pm_put(struct drm_i915_private *i915);
void intel_gt_pm_init(struct drm_i915_private *i915);
void intel_gt_sanitize(struct drm_i915_private *i915, bool force);
void intel_gt_resume(struct drm_i915_private *i915);
#endif /* INTEL_GT_PM_H */
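For completeness, the INTEL_GT_UNPARK/INTEL_GT_PARK events above are broadcast over a standard blocking notifier chain (gt.pm_notifications), so a consumer registers an ordinary notifier block. A rough sketch under that assumption; the listener below is purely illustrative and not something this series adds:

/* Hypothetical listener for the GT park/unpark notifications. */
static int gt_pm_notifier(struct notifier_block *nb,
			  unsigned long state, void *data)
{
	struct drm_i915_private *i915 = data;	/* pm_notify() passes i915 */

	dev_dbg(i915->drm.dev, "GT %s\n",
		state == INTEL_GT_UNPARK ? "unparked" : "parked");
	return NOTIFY_OK;
}

static struct notifier_block gt_pm_nb = { .notifier_call = gt_pm_notifier };

/* e.g. at init time:
 *	blocking_notifier_chain_register(&i915->gt.pm_notifications, &gt_pm_nb);
 */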
......@@ -22,12 +22,13 @@
*
*/
#include "intel_reset.h"
#include "i915_drv.h"
#include "i915_reset.h"
struct hangcheck {
u64 acthd;
u32 seqno;
u32 ring;
u32 head;
enum intel_engine_hangcheck_action action;
unsigned long action_timestamp;
int deadlock;
......@@ -133,26 +134,31 @@ static void hangcheck_load_sample(struct intel_engine_cs *engine,
struct hangcheck *hc)
{
hc->acthd = intel_engine_get_active_head(engine);
hc->seqno = intel_engine_get_hangcheck_seqno(engine);
hc->ring = ENGINE_READ(engine, RING_START);
hc->head = ENGINE_READ(engine, RING_HEAD);
}
static void hangcheck_store_sample(struct intel_engine_cs *engine,
const struct hangcheck *hc)
{
engine->hangcheck.acthd = hc->acthd;
engine->hangcheck.last_seqno = hc->seqno;
engine->hangcheck.last_ring = hc->ring;
engine->hangcheck.last_head = hc->head;
}
static enum intel_engine_hangcheck_action
hangcheck_get_action(struct intel_engine_cs *engine,
const struct hangcheck *hc)
{
if (engine->hangcheck.last_seqno != hc->seqno)
return ENGINE_ACTIVE_SEQNO;
if (intel_engine_is_idle(engine))
return ENGINE_IDLE;
if (engine->hangcheck.last_ring != hc->ring)
return ENGINE_ACTIVE_SEQNO;
if (engine->hangcheck.last_head != hc->head)
return ENGINE_ACTIVE_SEQNO;
return engine_stuck(engine, hc->acthd);
}
......@@ -256,6 +262,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
struct intel_engine_cs *engine;
enum intel_engine_id id;
unsigned int hung = 0, stuck = 0, wedged = 0;
intel_wakeref_t wakeref;
if (!i915_modparams.enable_hangcheck)
return;
......@@ -266,6 +273,10 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
if (i915_terminally_wedged(dev_priv))
return;
wakeref = intel_runtime_pm_get_if_in_use(dev_priv);
if (!wakeref)
return;
/* As enabling the GPU requires fairly extensive mmio access,
* periodically arm the mmio checker to see if we are triggering
* any invalid access.
......@@ -313,6 +324,8 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
if (hung)
hangcheck_declare_hang(dev_priv, hung, stuck);
intel_runtime_pm_put(dev_priv, wakeref);
/* Reset timer in case GPU hangs without another request being added */
i915_queue_hangcheck(dev_priv);
}
......@@ -330,5 +343,5 @@ void intel_hangcheck_init(struct drm_i915_private *i915)
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/intel_hangcheck.c"
#include "selftest_hangcheck.c"
#endif
......@@ -24,8 +24,7 @@
#ifndef _INTEL_LRC_H_
#define _INTEL_LRC_H_
#include "intel_ringbuffer.h"
#include "i915_gem_context.h"
#include "intel_engine.h"
/* Execlists regs */
#define RING_ELSP(base) _MMIO((base) + 0x230)
......@@ -67,8 +66,9 @@ enum {
/* Logical Rings */
void intel_logical_ring_cleanup(struct intel_engine_cs *engine);
int logical_render_ring_init(struct intel_engine_cs *engine);
int logical_xcs_ring_init(struct intel_engine_cs *engine);
int intel_execlists_submission_setup(struct intel_engine_cs *engine);
int intel_execlists_submission_init(struct intel_engine_cs *engine);
/* Logical Ring Contexts */
......@@ -99,7 +99,6 @@ int logical_xcs_ring_init(struct intel_engine_cs *engine);
struct drm_printer;
struct drm_i915_private;
struct i915_gem_context;
void intel_execlists_set_default_submission(struct intel_engine_cs *engine);
......@@ -115,6 +114,17 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
const char *prefix),
unsigned int max);
u32 gen8_make_rpcs(struct drm_i915_private *i915, struct intel_sseu *ctx_sseu);
struct intel_context *
intel_execlists_create_virtual(struct i915_gem_context *ctx,
struct intel_engine_cs **siblings,
unsigned int count);
struct intel_context *
intel_execlists_clone_virtual(struct i915_gem_context *ctx,
struct intel_engine_cs *src);
int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
const struct intel_engine_cs *master,
const struct intel_engine_cs *sibling);
#endif /* _INTEL_LRC_H_ */
......@@ -20,9 +20,11 @@
* SOFTWARE.
*/
#include "i915_drv.h"
#include "intel_engine.h"
#include "intel_mocs.h"
#include "intel_lrc.h"
#include "intel_ringbuffer.h"
/* structures required */
struct drm_i915_mocs_entry {
......
......@@ -49,7 +49,9 @@
* context handling keep the MOCS in step.
*/
#include "i915_drv.h"
struct drm_i915_private;
struct i915_request;
struct intel_engine_cs;
int intel_rcs_context_init_mocs(struct i915_request *rq);
void intel_mocs_init_l3cc_table(struct drm_i915_private *dev_priv);
......
......@@ -9,9 +9,13 @@
#include "i915_drv.h"
#include "i915_gpu_error.h"
#include "i915_reset.h"
#include "i915_irq.h"
#include "intel_engine_pm.h"
#include "intel_gt_pm.h"
#include "intel_reset.h"
#include "intel_guc.h"
#include "intel_overlay.h"
#define RESET_MAX_RETRIES 3
......@@ -641,9 +645,6 @@ int intel_gpu_reset(struct drm_i915_private *i915,
bool intel_has_gpu_reset(struct drm_i915_private *i915)
{
if (USES_GUC(i915))
return false;
if (!i915_modparams.reset)
return NULL;
......@@ -683,6 +684,7 @@ static void reset_prepare_engine(struct intel_engine_cs *engine)
* written to the powercontext is undefined and so we may lose
* GPU state upon resume, i.e. fail to restart after a reset.
*/
intel_engine_pm_get(engine);
intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
engine->reset.prepare(engine);
}
......@@ -718,6 +720,7 @@ static void reset_prepare(struct drm_i915_private *i915)
struct intel_engine_cs *engine;
enum intel_engine_id id;
intel_gt_pm_get(i915);
for_each_engine(engine, i915, id)
reset_prepare_engine(engine);
......@@ -755,48 +758,10 @@ static int gt_reset(struct drm_i915_private *i915,
static void reset_finish_engine(struct intel_engine_cs *engine)
{
engine->reset.finish(engine);
intel_engine_pm_put(engine);
intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
}
struct i915_gpu_restart {
struct work_struct work;
struct drm_i915_private *i915;
};
static void restart_work(struct work_struct *work)
{
struct i915_gpu_restart *arg = container_of(work, typeof(*arg), work);
struct drm_i915_private *i915 = arg->i915;
struct intel_engine_cs *engine;
enum intel_engine_id id;
intel_wakeref_t wakeref;
wakeref = intel_runtime_pm_get(i915);
mutex_lock(&i915->drm.struct_mutex);
WRITE_ONCE(i915->gpu_error.restart, NULL);
for_each_engine(engine, i915, id) {
struct i915_request *rq;
/*
* Ostensibly, we always want a context loaded for powersaving,
* so if the engine is idle after the reset, send a request
* to load our scratch kernel_context.
*/
if (!intel_engine_is_idle(engine))
continue;
rq = i915_request_alloc(engine, i915->kernel_context);
if (!IS_ERR(rq))
i915_request_add(rq);
}
mutex_unlock(&i915->drm.struct_mutex);
intel_runtime_pm_put(i915, wakeref);
kfree(arg);
}
static void reset_finish(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
......@@ -806,29 +771,7 @@ static void reset_finish(struct drm_i915_private *i915)
reset_finish_engine(engine);
intel_engine_signal_breadcrumbs(engine);
}
}
static void reset_restart(struct drm_i915_private *i915)
{
struct i915_gpu_restart *arg;
/*
* Following the reset, ensure that we always reload context for
* powersaving, and to correct engine->last_retired_context. Since
* this requires us to submit a request, queue a worker to do that
* task for us to evade any locking here.
*/
if (READ_ONCE(i915->gpu_error.restart))
return;
arg = kmalloc(sizeof(*arg), GFP_KERNEL);
if (arg) {
arg->i915 = i915;
INIT_WORK(&arg->work, restart_work);
WRITE_ONCE(i915->gpu_error.restart, arg);
queue_work(i915->wq, &arg->work);
}
intel_gt_pm_put(i915);
}
static void nop_submit_request(struct i915_request *request)
......@@ -889,6 +832,7 @@ static void __i915_gem_set_wedged(struct drm_i915_private *i915)
* in nop_submit_request.
*/
synchronize_rcu_expedited();
set_bit(I915_WEDGED, &error->flags);
/* Mark all executing requests as skipped */
for_each_engine(engine, i915, id)
......@@ -896,9 +840,6 @@ static void __i915_gem_set_wedged(struct drm_i915_private *i915)
reset_finish(i915);
smp_mb__before_atomic();
set_bit(I915_WEDGED, &error->flags);
GEM_TRACE("end\n");
}
......@@ -956,7 +897,7 @@ static bool __i915_gem_unset_wedged(struct drm_i915_private *i915)
}
mutex_unlock(&i915->gt.timelines.mutex);
intel_engines_sanitize(i915, false);
intel_gt_sanitize(i915, false);
/*
* Undo nop_submit_request. We prevent all new i915 requests from
......@@ -1034,7 +975,6 @@ void i915_reset(struct drm_i915_private *i915,
GEM_TRACE("flags=%lx\n", error->flags);
might_sleep();
assert_rpm_wakelock_held(i915);
GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &error->flags));
/* Clear any previous failed attempts at recovery. Time to try again. */
......@@ -1087,8 +1027,6 @@ void i915_reset(struct drm_i915_private *i915,
finish:
reset_finish(i915);
if (!__i915_wedged(error))
reset_restart(i915);
return;
taint:
......@@ -1104,7 +1042,7 @@ void i915_reset(struct drm_i915_private *i915,
* rather than continue on into oblivion. For everyone else,
* the system should still plod along, but they have been warned!
*/
add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
add_taint_for_CI(TAINT_WARN);
error:
__i915_gem_set_wedged(i915);
goto finish;
......@@ -1137,6 +1075,9 @@ int i915_reset_engine(struct intel_engine_cs *engine, const char *msg)
GEM_TRACE("%s flags=%lx\n", engine->name, error->flags);
GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags));
if (!intel_wakeref_active(&engine->wakeref))
return 0;
reset_prepare_engine(engine);
if (msg)
......@@ -1168,7 +1109,7 @@ int i915_reset_engine(struct intel_engine_cs *engine, const char *msg)
* have been reset to their default values. Follow the init_ring
* process to program RING_MODE, HWSP and re-enable submission.
*/
ret = engine->init_hw(engine);
ret = engine->resume(engine);
if (ret)
goto out;
......@@ -1425,25 +1366,6 @@ int i915_terminally_wedged(struct drm_i915_private *i915)
return __i915_wedged(error) ? -EIO : 0;
}
bool i915_reset_flush(struct drm_i915_private *i915)
{
int err;
cancel_delayed_work_sync(&i915->gpu_error.hangcheck_work);
flush_workqueue(i915->wq);
GEM_BUG_ON(READ_ONCE(i915->gpu_error.restart));
mutex_lock(&i915->drm.struct_mutex);
err = i915_gem_wait_for_idle(i915,
I915_WAIT_LOCKED |
I915_WAIT_FOR_IDLE_BOOST,
MAX_SCHEDULE_TIMEOUT);
mutex_unlock(&i915->drm.struct_mutex);
return !err;
}
static void i915_wedge_me(struct work_struct *work)
{
struct i915_wedge_me *w = container_of(work, typeof(*w), work.work);
......@@ -1472,3 +1394,7 @@ void __i915_fini_wedge(struct i915_wedge_me *w)
destroy_delayed_work_on_stack(&w->work);
w->i915 = NULL;
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_reset.c"
#endif
......@@ -11,7 +11,7 @@
#include <linux/types.h>
#include <linux/srcu.h>
#include "intel_engine_types.h"
#include "gt/intel_engine_types.h"
struct drm_i915_private;
struct i915_request;
......@@ -34,7 +34,6 @@ int i915_reset_engine(struct intel_engine_cs *engine,
const char *reason);
void i915_reset_request(struct i915_request *rq, bool guilty);
bool i915_reset_flush(struct drm_i915_private *i915);
int __must_check i915_reset_trylock(struct drm_i915_private *i915);
void i915_reset_unlock(struct drm_i915_private *i915, int tag);
......
......@@ -33,9 +33,8 @@
#include "i915_drv.h"
#include "i915_gem_render_state.h"
#include "i915_reset.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_reset.h"
#include "intel_workarounds.h"
/* Rough estimate of the typical request size, performing a flush,
......@@ -310,11 +309,6 @@ static u32 *gen6_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
*cs++ = rq->timeline->hwsp_offset | PIPE_CONTROL_GLOBAL_GTT;
*cs++ = rq->fence.seqno;
*cs++ = GFX_OP_PIPE_CONTROL(4);
*cs++ = PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_STORE_DATA_INDEX;
*cs++ = I915_GEM_HWS_HANGCHECK_ADDR | PIPE_CONTROL_GLOBAL_GTT;
*cs++ = intel_engine_next_hangcheck_seqno(rq->engine);
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_NOOP;
......@@ -416,13 +410,6 @@ static u32 *gen7_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
*cs++ = rq->timeline->hwsp_offset;
*cs++ = rq->fence.seqno;
*cs++ = GFX_OP_PIPE_CONTROL(4);
*cs++ = (PIPE_CONTROL_QW_WRITE |
PIPE_CONTROL_STORE_DATA_INDEX |
PIPE_CONTROL_GLOBAL_GTT_IVB);
*cs++ = I915_GEM_HWS_HANGCHECK_ADDR;
*cs++ = intel_engine_next_hangcheck_seqno(rq->engine);
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_NOOP;
......@@ -441,12 +428,7 @@ static u32 *gen6_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
*cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT;
*cs++ = rq->fence.seqno;
*cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
*cs++ = I915_GEM_HWS_HANGCHECK_ADDR | MI_FLUSH_DW_USE_GTT;
*cs++ = intel_engine_next_hangcheck_seqno(rq->engine);
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_NOOP;
rq->tail = intel_ring_offset(rq, cs);
assert_ring_tail_valid(rq->ring, rq->tail);
......@@ -466,10 +448,6 @@ static u32 *gen7_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
*cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT;
*cs++ = rq->fence.seqno;
*cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
*cs++ = I915_GEM_HWS_HANGCHECK_ADDR | MI_FLUSH_DW_USE_GTT;
*cs++ = intel_engine_next_hangcheck_seqno(rq->engine);
for (i = 0; i < GEN7_XCS_WA; i++) {
*cs++ = MI_STORE_DWORD_INDEX;
*cs++ = I915_GEM_HWS_SEQNO_ADDR;
......@@ -481,6 +459,7 @@ static u32 *gen7_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
*cs++ = 0;
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_NOOP;
rq->tail = intel_ring_offset(rq, cs);
assert_ring_tail_valid(rq->ring, rq->tail);
......@@ -638,12 +617,15 @@ static bool stop_ring(struct intel_engine_cs *engine)
return (ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR) == 0;
}
static int init_ring_common(struct intel_engine_cs *engine)
static int xcs_resume(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
struct intel_ring *ring = engine->buffer;
int ret = 0;
GEM_TRACE("%s: ring:{HEAD:%04x, TAIL:%04x}\n",
engine->name, ring->head, ring->tail);
intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
if (!stop_ring(engine)) {
......@@ -828,12 +810,23 @@ static int intel_rcs_ctx_init(struct i915_request *rq)
return 0;
}
static int init_render_ring(struct intel_engine_cs *engine)
static int rcs_resume(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
int ret = init_ring_common(engine);
if (ret)
return ret;
/*
* Disable CONSTANT_BUFFER before it is loaded from the context
* image. As soon as it is loaded, it is executed and the stored
* address may no longer be valid, leading to a GPU hang.
*
* This imposes the requirement that userspace reload their
* CONSTANT_BUFFER on every batch, fortunately a requirement
* they are already accustomed to from before contexts were
* enabled.
*/
if (IS_GEN(dev_priv, 4))
I915_WRITE(ECOSKPD,
_MASKED_BIT_ENABLE(ECO_CONSTANT_BUFFER_SR_DISABLE));
/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
if (IS_GEN_RANGE(dev_priv, 4, 6))
......@@ -873,10 +866,7 @@ static int init_render_ring(struct intel_engine_cs *engine)
if (IS_GEN_RANGE(dev_priv, 6, 7))
I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
if (INTEL_GEN(dev_priv) >= 6)
ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask);
return 0;
return xcs_resume(engine);
}
static void cancel_requests(struct intel_engine_cs *engine)
......@@ -918,11 +908,8 @@ static u32 *i9xx_emit_breadcrumb(struct i915_request *rq, u32 *cs)
*cs++ = I915_GEM_HWS_SEQNO_ADDR;
*cs++ = rq->fence.seqno;
*cs++ = MI_STORE_DWORD_INDEX;
*cs++ = I915_GEM_HWS_HANGCHECK_ADDR;
*cs++ = intel_engine_next_hangcheck_seqno(rq->engine);
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_NOOP;
rq->tail = intel_ring_offset(rq, cs);
assert_ring_tail_valid(rq->ring, rq->tail);
......@@ -940,10 +927,6 @@ static u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs)
*cs++ = MI_FLUSH;
*cs++ = MI_STORE_DWORD_INDEX;
*cs++ = I915_GEM_HWS_HANGCHECK_ADDR;
*cs++ = intel_engine_next_hangcheck_seqno(rq->engine);
BUILD_BUG_ON(GEN5_WA_STORES < 1);
for (i = 0; i < GEN5_WA_STORES; i++) {
*cs++ = MI_STORE_DWORD_INDEX;
......@@ -952,7 +935,6 @@ static u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs)
}
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_NOOP;
rq->tail = intel_ring_offset(rq, cs);
assert_ring_tail_valid(rq->ring, rq->tail);
......@@ -1517,77 +1499,13 @@ static const struct intel_context_ops ring_context_ops = {
.pin = ring_context_pin,
.unpin = ring_context_unpin,
.enter = intel_context_enter_engine,
.exit = intel_context_exit_engine,
.reset = ring_context_reset,
.destroy = ring_context_destroy,
};
static int intel_init_ring_buffer(struct intel_engine_cs *engine)
{
struct i915_timeline *timeline;
struct intel_ring *ring;
int err;
err = intel_engine_setup_common(engine);
if (err)
return err;
timeline = i915_timeline_create(engine->i915, engine->status_page.vma);
if (IS_ERR(timeline)) {
err = PTR_ERR(timeline);
goto err;
}
GEM_BUG_ON(timeline->has_initial_breadcrumb);
ring = intel_engine_create_ring(engine, timeline, 32 * PAGE_SIZE);
i915_timeline_put(timeline);
if (IS_ERR(ring)) {
err = PTR_ERR(ring);
goto err;
}
err = intel_ring_pin(ring);
if (err)
goto err_ring;
GEM_BUG_ON(engine->buffer);
engine->buffer = ring;
err = intel_engine_init_common(engine);
if (err)
goto err_unpin;
GEM_BUG_ON(ring->timeline->hwsp_ggtt != engine->status_page.vma);
return 0;
err_unpin:
intel_ring_unpin(ring);
err_ring:
intel_ring_put(ring);
err:
intel_engine_cleanup_common(engine);
return err;
}
void intel_engine_cleanup(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
WARN_ON(INTEL_GEN(dev_priv) > 2 &&
(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
intel_ring_unpin(engine->buffer);
intel_ring_put(engine->buffer);
if (engine->cleanup)
engine->cleanup(engine);
intel_engine_cleanup_common(engine);
dev_priv->engine[engine->id] = NULL;
kfree(engine);
}
static int load_pd_dir(struct i915_request *rq,
const struct i915_hw_ppgtt *ppgtt)
{
......@@ -1646,11 +1564,14 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
/* These flags are for resource streamer on HSW+ */
flags |= HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN;
else
/* We need to save the extended state for powersaving modes */
flags |= MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN;
len = 4;
if (IS_GEN(i915, 7))
len += 2 + (num_engines ? 4 * num_engines + 6 : 0);
else if (IS_GEN(i915, 5))
len += 2;
if (flags & MI_FORCE_RESTORE) {
GEM_BUG_ON(flags & MI_RESTORE_INHIBIT);
flags &= ~MI_FORCE_RESTORE;
......@@ -1679,6 +1600,14 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
GEN6_PSMI_SLEEP_MSG_DISABLE);
}
}
} else if (IS_GEN(i915, 5)) {
/*
* This w/a is only listed for pre-production ilk a/b steppings,
* but is also mentioned for programming the powerctx. To be
* safe, just apply the workaround; we do not use SyncFlush so
* this should never take effect and so be a no-op!
*/
*cs++ = MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN;
}
if (force_restore) {
......@@ -1732,6 +1661,8 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
*cs++ = MI_NOOP;
}
*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
} else if (IS_GEN(i915, 5)) {
*cs++ = MI_SUSPEND_FLUSH;
}
intel_ring_advance(rq, cs);
......@@ -1776,7 +1707,6 @@ static int switch_context(struct i915_request *rq)
u32 hw_flags = 0;
int ret, i;
lockdep_assert_held(&rq->i915->drm.struct_mutex);
GEM_BUG_ON(HAS_EXECLISTS(rq->i915));
if (ppgtt) {
......@@ -1888,12 +1818,12 @@ static int ring_request_alloc(struct i915_request *request)
*/
request->reserved_space += LEGACY_REQUEST_SIZE;
ret = switch_context(request);
/* Unconditionally invalidate GPU caches and TLBs. */
ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
if (ret)
return ret;
/* Unconditionally invalidate GPU caches and TLBs. */
ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
ret = switch_context(request);
if (ret)
return ret;
......@@ -1906,8 +1836,6 @@ static noinline int wait_for_space(struct intel_ring *ring, unsigned int bytes)
struct i915_request *target;
long timeout;
lockdep_assert_held(&ring->vma->vm->i915->drm.struct_mutex);
if (intel_ring_update_space(ring) >= bytes)
return 0;
......@@ -2167,24 +2095,6 @@ static int gen6_ring_flush(struct i915_request *rq, u32 mode)
return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB);
}
static void intel_ring_init_irq(struct drm_i915_private *dev_priv,
struct intel_engine_cs *engine)
{
if (INTEL_GEN(dev_priv) >= 6) {
engine->irq_enable = gen6_irq_enable;
engine->irq_disable = gen6_irq_disable;
} else if (INTEL_GEN(dev_priv) >= 5) {
engine->irq_enable = gen5_irq_enable;
engine->irq_disable = gen5_irq_disable;
} else if (INTEL_GEN(dev_priv) >= 3) {
engine->irq_enable = i9xx_irq_enable;
engine->irq_disable = i9xx_irq_disable;
} else {
engine->irq_enable = i8xx_irq_enable;
engine->irq_disable = i8xx_irq_disable;
}
}
static void i9xx_set_default_submission(struct intel_engine_cs *engine)
{
engine->submit_request = i9xx_submit_request;
......@@ -2200,15 +2110,51 @@ static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine)
engine->submit_request = gen6_bsd_submit_request;
}
static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
struct intel_engine_cs *engine)
static void ring_destroy(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
WARN_ON(INTEL_GEN(dev_priv) > 2 &&
(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
intel_ring_unpin(engine->buffer);
intel_ring_put(engine->buffer);
intel_engine_cleanup_common(engine);
kfree(engine);
}
static void setup_irq(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
if (INTEL_GEN(i915) >= 6) {
engine->irq_enable = gen6_irq_enable;
engine->irq_disable = gen6_irq_disable;
} else if (INTEL_GEN(i915) >= 5) {
engine->irq_enable = gen5_irq_enable;
engine->irq_disable = gen5_irq_disable;
} else if (INTEL_GEN(i915) >= 3) {
engine->irq_enable = i9xx_irq_enable;
engine->irq_disable = i9xx_irq_disable;
} else {
engine->irq_enable = i8xx_irq_enable;
engine->irq_disable = i8xx_irq_disable;
}
}
static void setup_common(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
/* gen8+ are only supported with execlists */
GEM_BUG_ON(INTEL_GEN(dev_priv) >= 8);
GEM_BUG_ON(INTEL_GEN(i915) >= 8);
intel_ring_init_irq(dev_priv, engine);
setup_irq(engine);
engine->init_hw = init_ring_common;
engine->destroy = ring_destroy;
engine->resume = xcs_resume;
engine->reset.prepare = reset_prepare;
engine->reset.reset = reset_ring;
engine->reset.finish = reset_finish;
......@@ -2222,117 +2168,96 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
* engine->emit_init_breadcrumb().
*/
engine->emit_fini_breadcrumb = i9xx_emit_breadcrumb;
if (IS_GEN(dev_priv, 5))
if (IS_GEN(i915, 5))
engine->emit_fini_breadcrumb = gen5_emit_breadcrumb;
engine->set_default_submission = i9xx_set_default_submission;
if (INTEL_GEN(dev_priv) >= 6)
if (INTEL_GEN(i915) >= 6)
engine->emit_bb_start = gen6_emit_bb_start;
else if (INTEL_GEN(dev_priv) >= 4)
else if (INTEL_GEN(i915) >= 4)
engine->emit_bb_start = i965_emit_bb_start;
else if (IS_I830(dev_priv) || IS_I845G(dev_priv))
else if (IS_I830(i915) || IS_I845G(i915))
engine->emit_bb_start = i830_emit_bb_start;
else
engine->emit_bb_start = i915_emit_bb_start;
}
int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
static void setup_rcs(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
int ret;
intel_ring_default_vfuncs(dev_priv, engine);
struct drm_i915_private *i915 = engine->i915;
if (HAS_L3_DPF(dev_priv))
if (HAS_L3_DPF(i915))
engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
if (INTEL_GEN(dev_priv) >= 7) {
if (INTEL_GEN(i915) >= 7) {
engine->init_context = intel_rcs_ctx_init;
engine->emit_flush = gen7_render_ring_flush;
engine->emit_fini_breadcrumb = gen7_rcs_emit_breadcrumb;
} else if (IS_GEN(dev_priv, 6)) {
} else if (IS_GEN(i915, 6)) {
engine->init_context = intel_rcs_ctx_init;
engine->emit_flush = gen6_render_ring_flush;
engine->emit_fini_breadcrumb = gen6_rcs_emit_breadcrumb;
} else if (IS_GEN(dev_priv, 5)) {
} else if (IS_GEN(i915, 5)) {
engine->emit_flush = gen4_render_ring_flush;
} else {
if (INTEL_GEN(dev_priv) < 4)
if (INTEL_GEN(i915) < 4)
engine->emit_flush = gen2_render_ring_flush;
else
engine->emit_flush = gen4_render_ring_flush;
engine->irq_enable_mask = I915_USER_INTERRUPT;
}
if (IS_HASWELL(dev_priv))
if (IS_HASWELL(i915))
engine->emit_bb_start = hsw_emit_bb_start;
engine->init_hw = init_render_ring;
ret = intel_init_ring_buffer(engine);
if (ret)
return ret;
return 0;
engine->resume = rcs_resume;
}
int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine)
static void setup_vcs(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
intel_ring_default_vfuncs(dev_priv, engine);
struct drm_i915_private *i915 = engine->i915;
if (INTEL_GEN(dev_priv) >= 6) {
if (INTEL_GEN(i915) >= 6) {
/* gen6 bsd needs a special wa for tail updates */
if (IS_GEN(dev_priv, 6))
if (IS_GEN(i915, 6))
engine->set_default_submission = gen6_bsd_set_default_submission;
engine->emit_flush = gen6_bsd_ring_flush;
engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
if (IS_GEN(dev_priv, 6))
if (IS_GEN(i915, 6))
engine->emit_fini_breadcrumb = gen6_xcs_emit_breadcrumb;
else
engine->emit_fini_breadcrumb = gen7_xcs_emit_breadcrumb;
} else {
engine->emit_flush = bsd_ring_flush;
if (IS_GEN(dev_priv, 5))
if (IS_GEN(i915, 5))
engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
else
engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
}
return intel_init_ring_buffer(engine);
}
int intel_init_blt_ring_buffer(struct intel_engine_cs *engine)
static void setup_bcs(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
GEM_BUG_ON(INTEL_GEN(dev_priv) < 6);
intel_ring_default_vfuncs(dev_priv, engine);
struct drm_i915_private *i915 = engine->i915;
engine->emit_flush = gen6_ring_flush;
engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;
if (IS_GEN(dev_priv, 6))
if (IS_GEN(i915, 6))
engine->emit_fini_breadcrumb = gen6_xcs_emit_breadcrumb;
else
engine->emit_fini_breadcrumb = gen7_xcs_emit_breadcrumb;
return intel_init_ring_buffer(engine);
}
int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
static void setup_vecs(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
GEM_BUG_ON(INTEL_GEN(dev_priv) < 7);
struct drm_i915_private *i915 = engine->i915;
intel_ring_default_vfuncs(dev_priv, engine);
GEM_BUG_ON(INTEL_GEN(i915) < 7);
engine->emit_flush = gen6_ring_flush;
engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
......@@ -2340,6 +2265,73 @@ int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
engine->irq_disable = hsw_vebox_irq_disable;
engine->emit_fini_breadcrumb = gen7_xcs_emit_breadcrumb;
}
return intel_init_ring_buffer(engine);
int intel_ring_submission_setup(struct intel_engine_cs *engine)
{
setup_common(engine);
switch (engine->class) {
case RENDER_CLASS:
setup_rcs(engine);
break;
case VIDEO_DECODE_CLASS:
setup_vcs(engine);
break;
case COPY_ENGINE_CLASS:
setup_bcs(engine);
break;
case VIDEO_ENHANCEMENT_CLASS:
setup_vecs(engine);
break;
default:
MISSING_CASE(engine->class);
return -ENODEV;
}
return 0;
}
int intel_ring_submission_init(struct intel_engine_cs *engine)
{
struct i915_timeline *timeline;
struct intel_ring *ring;
int err;
timeline = i915_timeline_create(engine->i915, engine->status_page.vma);
if (IS_ERR(timeline)) {
err = PTR_ERR(timeline);
goto err;
}
GEM_BUG_ON(timeline->has_initial_breadcrumb);
ring = intel_engine_create_ring(engine, timeline, 32 * PAGE_SIZE);
i915_timeline_put(timeline);
if (IS_ERR(ring)) {
err = PTR_ERR(ring);
goto err;
}
err = intel_ring_pin(ring);
if (err)
goto err_ring;
GEM_BUG_ON(engine->buffer);
engine->buffer = ring;
err = intel_engine_init_common(engine);
if (err)
goto err_unpin;
GEM_BUG_ON(ring->timeline->hwsp_ggtt != engine->status_page.vma);
return 0;
err_unpin:
intel_ring_unpin(ring);
err_ring:
intel_ring_put(ring);
err:
intel_engine_cleanup_common(engine);
return err;
}
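Note that the legacy ring path now mirrors the execlists split declared in intel_lrc.h: a setup stage that picks vfuncs per engine class, followed by an init stage that creates and pins the timeline and ring. A minimal sketch of a caller invoking the two stages in order (the surrounding helper is hypothetical, error handling trimmed):

/* Illustrative two-stage bring-up of a legacy ringbuffer engine. */
static int bring_up_ring_engine(struct intel_engine_cs *engine)
{
	int err;

	err = intel_ring_submission_setup(engine);	/* per-class vfuncs */
	if (err)
		return err;

	return intel_ring_submission_init(engine);	/* timeline, ring, common init */
}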
/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2019 Intel Corporation
*/
#include "i915_drv.h"
#include "intel_lrc_reg.h"
#include "intel_sseu.h"
u32 intel_sseu_make_rpcs(struct drm_i915_private *i915,
const struct intel_sseu *req_sseu)
{
const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
bool subslice_pg = sseu->has_subslice_pg;
struct intel_sseu ctx_sseu;
u8 slices, subslices;
u32 rpcs = 0;
/*
* No explicit RPCS request is needed to ensure full
* slice/subslice/EU enablement prior to Gen9.
*/
if (INTEL_GEN(i915) < 9)
return 0;
/*
* If i915/perf is active, we want a stable powergating configuration
* on the system.
*
* We could choose full enablement, but on ICL we know there are use
* cases which disable slices for functional as well as performance
* reasons. So in this case we select a known stable subset.
*/
if (!i915->perf.oa.exclusive_stream) {
ctx_sseu = *req_sseu;
} else {
ctx_sseu = intel_sseu_from_device_info(sseu);
if (IS_GEN(i915, 11)) {
/*
* We only need subslice count so it doesn't matter
* which ones we select - just keep the low bits for half of all
* available subslices per slice.
*/
ctx_sseu.subslice_mask =
~(~0 << (hweight8(ctx_sseu.subslice_mask) / 2));
ctx_sseu.slice_mask = 0x1;
}
}
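	/*
	 * Worked example for the Icelake halving above (illustrative):
	 * with all eight subslices available, hweight8(subslice_mask)
	 * is 8, half of that is 4, and ~(~0 << 4) == 0xf, i.e. the
	 * stable subset becomes the four lowest subslices on slice 0.
	 */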
slices = hweight8(ctx_sseu.slice_mask);
subslices = hweight8(ctx_sseu.subslice_mask);
/*
* Since the SScount bitfield in GEN8_R_PWR_CLK_STATE is only three bits
* wide and Icelake has up to eight subslices, special programming is
* needed in order to correctly enable all subslices.
*
* According to documentation software must consider the configuration
* as 2x4x8 and hardware will translate this to 1x8x8.
*
* Furthermore, even though SScount is three bits, maximum documented
* value for it is four. From this some rules/restrictions follow:
*
* 1.
* If enabled subslice count is greater than four, two whole slices must
* be enabled instead.
*
* 2.
* When more than one slice is enabled, hardware ignores the subslice
* count altogether.
*
* From these restrictions it follows that it is not possible to enable
* a subslice count between the SScount maximum of four and the maximum
* number available on a particular SKU. Either all subslices are
* enabled, or a count between one and four on the first slice.
*/
if (IS_GEN(i915, 11) &&
slices == 1 &&
subslices > min_t(u8, 4, hweight8(sseu->subslice_mask[0]) / 2)) {
GEM_BUG_ON(subslices & 1);
subslice_pg = false;
slices *= 2;
}
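	/*
	 * Worked example (illustrative): a 1x8 request trips the check
	 * above, as 8 > min(4, 8 / 2); subslice powergating is dropped
	 * and the slice count doubled to two, so that, per the 2x4x8 ->
	 * 1x8x8 note in the comment block, hardware enables all eight
	 * subslices.
	 */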
/*
* Starting in Gen9, render power gating can leave
* slice/subslice/EU in a partially enabled state. We
* must make an explicit request through RPCS for full
* enablement.
*/
if (sseu->has_slice_pg) {
u32 mask, val = slices;
if (INTEL_GEN(i915) >= 11) {
mask = GEN11_RPCS_S_CNT_MASK;
val <<= GEN11_RPCS_S_CNT_SHIFT;
} else {
mask = GEN8_RPCS_S_CNT_MASK;
val <<= GEN8_RPCS_S_CNT_SHIFT;
}
GEM_BUG_ON(val & ~mask);
val &= mask;
rpcs |= GEN8_RPCS_ENABLE | GEN8_RPCS_S_CNT_ENABLE | val;
}
if (subslice_pg) {
u32 val = subslices;
val <<= GEN8_RPCS_SS_CNT_SHIFT;
GEM_BUG_ON(val & ~GEN8_RPCS_SS_CNT_MASK);
val &= GEN8_RPCS_SS_CNT_MASK;
rpcs |= GEN8_RPCS_ENABLE | GEN8_RPCS_SS_CNT_ENABLE | val;
}
if (sseu->has_eu_pg) {
u32 val;
val = ctx_sseu.min_eus_per_subslice << GEN8_RPCS_EU_MIN_SHIFT;
GEM_BUG_ON(val & ~GEN8_RPCS_EU_MIN_MASK);
val &= GEN8_RPCS_EU_MIN_MASK;
rpcs |= val;
val = ctx_sseu.max_eus_per_subslice << GEN8_RPCS_EU_MAX_SHIFT;
GEM_BUG_ON(val & ~GEN8_RPCS_EU_MAX_MASK);
val &= GEN8_RPCS_EU_MAX_MASK;
rpcs |= val;
rpcs |= GEN8_RPCS_ENABLE;
}
return rpcs;
}
......@@ -4,13 +4,17 @@
* Copyright © 2014-2018 Intel Corporation
*/
#ifndef _I915_WORKAROUNDS_H_
#define _I915_WORKAROUNDS_H_
#ifndef _INTEL_WORKAROUNDS_H_
#define _INTEL_WORKAROUNDS_H_
#include <linux/slab.h>
#include "intel_workarounds_types.h"
struct drm_i915_private;
struct i915_request;
struct intel_engine_cs;
static inline void intel_wa_list_free(struct i915_wa_list *wal)
{
kfree(wal->list);
......@@ -30,5 +34,7 @@ void intel_engine_apply_whitelist(struct intel_engine_cs *engine);
void intel_engine_init_workarounds(struct intel_engine_cs *engine);
void intel_engine_apply_workarounds(struct intel_engine_cs *engine);
int intel_engine_verify_workarounds(struct intel_engine_cs *engine,
const char *from);
#endif
......@@ -12,9 +12,10 @@
#include "i915_reg.h"
struct i915_wa {
i915_reg_t reg;
u32 mask;
u32 val;
i915_reg_t reg;
u32 mask;
u32 val;
u32 read;
};
struct i915_wa_list {
......
......@@ -29,7 +29,7 @@
#include <linux/spinlock.h>
#include <linux/timer.h>
#include "../intel_ringbuffer.h"
#include "gt/intel_engine.h"
struct mock_engine {
struct intel_engine_cs base;
......@@ -42,6 +42,8 @@ struct mock_engine {
struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
const char *name,
int id);
int mock_engine_init(struct intel_engine_cs *engine);
void mock_engine_flush(struct intel_engine_cs *engine);
void mock_engine_reset(struct intel_engine_cs *engine);
void mock_engine_free(struct intel_engine_cs *engine);
......
......@@ -149,9 +149,9 @@ struct intel_vgpu_submission_ops {
struct intel_vgpu_submission {
struct intel_vgpu_execlist execlist[I915_NUM_ENGINES];
struct list_head workload_q_head[I915_NUM_ENGINES];
struct intel_context *shadow[I915_NUM_ENGINES];
struct kmem_cache *workloads;
atomic_t running_workload_num;
struct i915_gem_context *shadow_ctx;
union {
u64 i915_context_pml4;
u64 i915_context_pdps[GEN8_3LVL_PDPES];
......