Unverified commit d2c20b5d authored by Maxime Ripard

Merge drm/drm-next into drm-misc-next

danvet needs a backmerge to ease the upcoming drmP.h rework
Signed-off-by: Maxime Ripard <maxime.ripard@bootlin.com>
......@@ -8,6 +8,7 @@ Required properties:
- compatible : Shall contain one of
- "renesas,r8a7743-lvds" for R8A7743 (RZ/G1M) compatible LVDS encoders
- "renesas,r8a774c0-lvds" for R8A774C0 (RZ/G2E) compatible LVDS encoders
- "renesas,r8a7790-lvds" for R8A7790 (R-Car H2) compatible LVDS encoders
- "renesas,r8a7791-lvds" for R8A7791 (R-Car M2-W) compatible LVDS encoders
- "renesas,r8a7793-lvds" for R8A7793 (R-Car M2-N) compatible LVDS encoders
......@@ -25,7 +26,7 @@ Required properties:
- clock-names: Name of the clocks. This property is model-dependent.
- The functional clock, which is mandatory for all models, shall be listed
first, and shall be named "fck".
- On R8A77990 and R8A77995, the LVDS encoder can use the EXTAL or
- On R8A77990, R8A77995 and R8A774C0, the LVDS encoder can use the EXTAL or
DU_DOTCLKINx clocks. Those clocks are optional. When supplied they must be
named "extal" and "dclkin.x" respectively, with "x" being the DU_DOTCLKIN
numerical index.
......
......@@ -7,6 +7,7 @@ Required Properties:
- "renesas,du-r8a7744" for R8A7744 (RZ/G1N) compatible DU
- "renesas,du-r8a7745" for R8A7745 (RZ/G1E) compatible DU
- "renesas,du-r8a77470" for R8A77470 (RZ/G1C) compatible DU
- "renesas,du-r8a774c0" for R8A774C0 (RZ/G2E) compatible DU
- "renesas,du-r8a7779" for R8A7779 (R-Car H1) compatible DU
- "renesas,du-r8a7790" for R8A7790 (R-Car H2) compatible DU
- "renesas,du-r8a7791" for R8A7791 (R-Car M2-W) compatible DU
......@@ -57,6 +58,7 @@ corresponding to each DU output.
R8A7744 (RZ/G1N) DPAD 0 LVDS 0 - -
R8A7745 (RZ/G1E) DPAD 0 DPAD 1 - -
R8A77470 (RZ/G1C) DPAD 0 DPAD 1 LVDS 0 -
R8A774C0 (RZ/G2E) DPAD 0 LVDS 0 LVDS 1 -
R8A7779 (R-Car H1) DPAD 0 DPAD 1 - -
R8A7790 (R-Car H2) DPAD 0 LVDS 0 LVDS 1 -
R8A7791 (R-Car M2-W) DPAD 0 LVDS 0 - -
......
......@@ -15,6 +15,7 @@
#include <linux/export.h>
#include <linux/acpi.h>
#include <linux/mfd/intel_soc_pmic.h>
#include <linux/regmap.h>
#include <acpi/acpi_lpat.h>
#include "intel_pmic.h"
......@@ -36,6 +37,8 @@ struct intel_pmic_opregion {
struct intel_pmic_regs_handler_ctx ctx;
};
static struct intel_pmic_opregion *intel_pmic_opregion;
static int pmic_get_reg_bit(int address, struct pmic_table *table,
int count, int *reg, int *bit)
{
......@@ -304,6 +307,7 @@ int intel_pmic_install_opregion_handler(struct device *dev, acpi_handle handle,
}
opregion->data = d;
intel_pmic_opregion = opregion;
return 0;
out_remove_thermal_handler:
......@@ -319,3 +323,60 @@ int intel_pmic_install_opregion_handler(struct device *dev, acpi_handle handle,
return ret;
}
EXPORT_SYMBOL_GPL(intel_pmic_install_opregion_handler);
/**
* intel_soc_pmic_exec_mipi_pmic_seq_element - Execute PMIC MIPI sequence
* @i2c_address: I2C client address for the PMIC
* @reg_address: PMIC register address
* @value: New value for the register bits to change
* @mask: Mask indicating which register bits to change
*
* DSI LCD panels describe an initialization sequence in the i915 VBT (Video
* BIOS Tables) using so-called MIPI sequences. One possible element in these
* sequences is a PMIC-specific element of 15 bytes.
*
* This function executes these PMIC-specific elements, sending the embedded
* commands to the PMIC.
*
* Return 0 on success, < 0 on failure.
*/
int intel_soc_pmic_exec_mipi_pmic_seq_element(u16 i2c_address, u32 reg_address,
u32 value, u32 mask)
{
struct intel_pmic_opregion_data *d;
int ret;
if (!intel_pmic_opregion) {
pr_warn("%s: No PMIC registered\n", __func__);
return -ENXIO;
}
d = intel_pmic_opregion->data;
mutex_lock(&intel_pmic_opregion->lock);
if (d->exec_mipi_pmic_seq_element) {
ret = d->exec_mipi_pmic_seq_element(intel_pmic_opregion->regmap,
i2c_address, reg_address,
value, mask);
} else if (d->pmic_i2c_address) {
if (i2c_address == d->pmic_i2c_address) {
ret = regmap_update_bits(intel_pmic_opregion->regmap,
reg_address, mask, value);
} else {
pr_err("%s: Unexpected i2c-addr: 0x%02x (reg-addr 0x%x value 0x%x mask 0x%x)\n",
__func__, i2c_address, reg_address, value, mask);
ret = -ENXIO;
}
} else {
pr_warn("%s: Not implemented\n", __func__);
pr_warn("%s: i2c-addr: 0x%x reg-addr 0x%x value 0x%x mask 0x%x\n",
__func__, i2c_address, reg_address, value, mask);
ret = -EOPNOTSUPP;
}
mutex_unlock(&intel_pmic_opregion->lock);
return ret;
}
EXPORT_SYMBOL_GPL(intel_soc_pmic_exec_mipi_pmic_seq_element);
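For reference, a caller pulls the element fields out of the parsed VBT MIPI sequence and hands them to this export. A minimal usage sketch, assuming the prototype lives in <linux/mfd/intel_soc_pmic.h> as added by this series, with made-up register values:

#include <linux/mfd/intel_soc_pmic.h>

/*
 * Hedged sketch: the addresses/values below are hypothetical, not from a
 * real VBT; a real caller parses them from the 15-byte PMIC element.
 */
static int example_exec_pmic_element(void)
{
	u16 i2c_address = 0x34;	/* e.g. the XPower PMIC address */
	u32 reg_address = 0x51;	/* hypothetical PMIC register */
	u32 value = 0x01, mask = 0x01;

	return intel_soc_pmic_exec_mipi_pmic_seq_element(i2c_address,
							 reg_address,
							 value, mask);
}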
......@@ -15,10 +15,14 @@ struct intel_pmic_opregion_data {
int (*update_aux)(struct regmap *r, int reg, int raw_temp);
int (*get_policy)(struct regmap *r, int reg, int bit, u64 *value);
int (*update_policy)(struct regmap *r, int reg, int bit, int enable);
int (*exec_mipi_pmic_seq_element)(struct regmap *r, u16 i2c_address,
u32 reg_address, u32 value, u32 mask);
struct pmic_table *power_table;
int power_table_count;
struct pmic_table *thermal_table;
int thermal_table_count;
/* For generic exec_mipi_pmic_seq_element handling */
int pmic_i2c_address;
};
int intel_pmic_install_opregion_handler(struct device *dev, acpi_handle handle, struct regmap *regmap, struct intel_pmic_opregion_data *d);
......
......@@ -231,6 +231,24 @@ static int intel_cht_wc_pmic_update_power(struct regmap *regmap, int reg,
return regmap_update_bits(regmap, reg, bitmask, on ? 1 : 0);
}
static int intel_cht_wc_exec_mipi_pmic_seq_element(struct regmap *regmap,
u16 i2c_client_address,
u32 reg_address,
u32 value, u32 mask)
{
u32 address;
if (i2c_client_address > 0xff || reg_address > 0xff) {
pr_warn("%s warning addresses too big client 0x%x reg 0x%x\n",
__func__, i2c_client_address, reg_address);
return -ERANGE;
}
address = (i2c_client_address << 8) | reg_address;
return regmap_update_bits(regmap, address, mask, value);
}
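The packed regmap address above puts the I2C client address in the high byte and the register offset in the low byte, which is how the Whiskey Cove regmap spans multiple clients. A worked example with hypothetical values:

/* Hypothetical values: client 0x6e, register 0x57. */
u32 address = (0x6e << 8) | 0x57;	/* regmap address 0x6e57 */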
/*
* The thermal table and ops are empty; we do not support the Thermal opregion
* (DPTF) due to lacking documentation.
......@@ -238,6 +256,7 @@ static int intel_cht_wc_pmic_update_power(struct regmap *regmap, int reg,
static struct intel_pmic_opregion_data intel_cht_wc_pmic_opregion_data = {
.get_power = intel_cht_wc_pmic_get_power,
.update_power = intel_cht_wc_pmic_update_power,
.exec_mipi_pmic_seq_element = intel_cht_wc_exec_mipi_pmic_seq_element,
.power_table = power_table,
.power_table_count = ARRAY_SIZE(power_table),
};
......
......@@ -240,6 +240,7 @@ static struct intel_pmic_opregion_data intel_xpower_pmic_opregion_data = {
.power_table_count = ARRAY_SIZE(power_table),
.thermal_table = thermal_table,
.thermal_table_count = ARRAY_SIZE(thermal_table),
.pmic_i2c_address = 0x34,
};
static acpi_status intel_xpower_pmic_gpio_handler(u32 function,
......
......@@ -395,7 +395,7 @@ static inline int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511)
#ifdef CONFIG_DRM_I2C_ADV7533
void adv7533_dsi_power_on(struct adv7511 *adv);
void adv7533_dsi_power_off(struct adv7511 *adv);
void adv7533_mode_set(struct adv7511 *adv, struct drm_display_mode *mode);
void adv7533_mode_set(struct adv7511 *adv, const struct drm_display_mode *mode);
int adv7533_patch_registers(struct adv7511 *adv);
int adv7533_patch_cec_registers(struct adv7511 *adv);
int adv7533_attach_dsi(struct adv7511 *adv);
......@@ -411,7 +411,7 @@ static inline void adv7533_dsi_power_off(struct adv7511 *adv)
}
static inline void adv7533_mode_set(struct adv7511 *adv,
struct drm_display_mode *mode)
const struct drm_display_mode *mode)
{
}
......
......@@ -676,8 +676,8 @@ static enum drm_mode_status adv7511_mode_valid(struct adv7511 *adv7511,
}
static void adv7511_mode_set(struct adv7511 *adv7511,
struct drm_display_mode *mode,
struct drm_display_mode *adj_mode)
const struct drm_display_mode *mode,
const struct drm_display_mode *adj_mode)
{
unsigned int low_refresh_rate;
unsigned int hsync_polarity = 0;
......@@ -839,8 +839,8 @@ static void adv7511_bridge_disable(struct drm_bridge *bridge)
}
static void adv7511_bridge_mode_set(struct drm_bridge *bridge,
struct drm_display_mode *mode,
struct drm_display_mode *adj_mode)
const struct drm_display_mode *mode,
const struct drm_display_mode *adj_mode)
{
struct adv7511 *adv = bridge_to_adv7511(bridge);
......
......@@ -108,7 +108,7 @@ void adv7533_dsi_power_off(struct adv7511 *adv)
regmap_write(adv->regmap_cec, 0x27, 0x0b);
}
void adv7533_mode_set(struct adv7511 *adv, struct drm_display_mode *mode)
void adv7533_mode_set(struct adv7511 *adv, const struct drm_display_mode *mode)
{
struct mipi_dsi_device *dsi = adv->dsi;
int lanes, ret;
......
......@@ -1082,8 +1082,8 @@ static void anx78xx_bridge_disable(struct drm_bridge *bridge)
}
static void anx78xx_bridge_mode_set(struct drm_bridge *bridge,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode)
{
struct anx78xx *anx78xx = bridge_to_anx78xx(bridge);
struct hdmi_avi_infoframe frame;
......
......@@ -1361,8 +1361,8 @@ static void analogix_dp_bridge_disable(struct drm_bridge *bridge)
}
static void analogix_dp_bridge_mode_set(struct drm_bridge *bridge,
struct drm_display_mode *orig_mode,
struct drm_display_mode *mode)
const struct drm_display_mode *orig_mode,
const struct drm_display_mode *mode)
{
struct analogix_dp_device *dp = bridge->driver_private;
struct drm_display_info *display_info = &dp->connector.display_info;
......
......@@ -232,8 +232,8 @@ static void sii902x_bridge_enable(struct drm_bridge *bridge)
}
static void sii902x_bridge_mode_set(struct drm_bridge *bridge,
struct drm_display_mode *mode,
struct drm_display_mode *adj)
const struct drm_display_mode *mode,
const struct drm_display_mode *adj)
{
struct sii902x *sii902x = bridge_to_sii902x(bridge);
struct regmap *regmap = sii902x->regmap;
......
......@@ -1999,8 +1999,8 @@ dw_hdmi_bridge_mode_valid(struct drm_bridge *bridge,
}
static void dw_hdmi_bridge_mode_set(struct drm_bridge *bridge,
struct drm_display_mode *orig_mode,
struct drm_display_mode *mode)
const struct drm_display_mode *orig_mode,
const struct drm_display_mode *mode)
{
struct dw_hdmi *hdmi = bridge->driver_private;
......
......@@ -248,7 +248,7 @@ static inline bool dw_mipi_is_dual_mode(struct dw_mipi_dsi *dsi)
* The controller should generate 2 frames before
* preparing the peripheral.
*/
static void dw_mipi_dsi_wait_for_two_frames(struct drm_display_mode *mode)
static void dw_mipi_dsi_wait_for_two_frames(const struct drm_display_mode *mode)
{
int refresh, two_frames;
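The body is truncated here; it converts the mode's refresh rate into a delay of two frame periods. A plausible completion, as a hedged sketch (mainline may differ in detail):

#include <linux/delay.h>
#include <linux/kernel.h>
#include <drm/drm_modes.h>

static void wait_for_two_frames_sketch(const struct drm_display_mode *mode)
{
	int refresh = drm_mode_vrefresh(mode);	/* frames per second */
	int two_frames = DIV_ROUND_UP(MSEC_PER_SEC, refresh) * 2;

	msleep(two_frames);	/* block for roughly two frame periods */
}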
......@@ -564,7 +564,7 @@ static void dw_mipi_dsi_init(struct dw_mipi_dsi *dsi)
}
static void dw_mipi_dsi_dpi_config(struct dw_mipi_dsi *dsi,
struct drm_display_mode *mode)
const struct drm_display_mode *mode)
{
u32 val = 0, color = 0;
......@@ -607,7 +607,7 @@ static void dw_mipi_dsi_packet_handler_config(struct dw_mipi_dsi *dsi)
}
static void dw_mipi_dsi_video_packet_config(struct dw_mipi_dsi *dsi,
struct drm_display_mode *mode)
const struct drm_display_mode *mode)
{
/*
* TODO dw drv improvements
......@@ -642,7 +642,7 @@ static void dw_mipi_dsi_command_mode_config(struct dw_mipi_dsi *dsi)
/* Get lane byte clock cycles. */
static u32 dw_mipi_dsi_get_hcomponent_lbcc(struct dw_mipi_dsi *dsi,
struct drm_display_mode *mode,
const struct drm_display_mode *mode,
u32 hcomponent)
{
u32 frac, lbcc;
......@@ -658,7 +658,7 @@ static u32 dw_mipi_dsi_get_hcomponent_lbcc(struct dw_mipi_dsi *dsi,
}
static void dw_mipi_dsi_line_timer_config(struct dw_mipi_dsi *dsi,
struct drm_display_mode *mode)
const struct drm_display_mode *mode)
{
u32 htotal, hsa, hbp, lbcc;
......@@ -681,7 +681,7 @@ static void dw_mipi_dsi_line_timer_config(struct dw_mipi_dsi *dsi,
}
static void dw_mipi_dsi_vertical_timing_config(struct dw_mipi_dsi *dsi,
struct drm_display_mode *mode)
const struct drm_display_mode *mode)
{
u32 vactive, vsa, vfp, vbp;
......@@ -818,7 +818,7 @@ static unsigned int dw_mipi_dsi_get_lanes(struct dw_mipi_dsi *dsi)
}
static void dw_mipi_dsi_mode_set(struct dw_mipi_dsi *dsi,
struct drm_display_mode *adjusted_mode)
const struct drm_display_mode *adjusted_mode)
{
const struct dw_mipi_dsi_phy_ops *phy_ops = dsi->plat_data->phy_ops;
void *priv_data = dsi->plat_data->priv_data;
......@@ -861,8 +861,8 @@ static void dw_mipi_dsi_mode_set(struct dw_mipi_dsi *dsi,
}
static void dw_mipi_dsi_bridge_mode_set(struct drm_bridge *bridge,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode)
{
struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);
......
......@@ -203,7 +203,7 @@ struct tc_data {
/* display edid */
struct edid *edid;
/* current mode */
struct drm_display_mode *mode;
const struct drm_display_mode *mode;
u32 rev;
u8 assr;
......@@ -648,7 +648,8 @@ static int tc_get_display_props(struct tc_data *tc)
return ret;
}
static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
static int tc_set_video_mode(struct tc_data *tc,
const struct drm_display_mode *mode)
{
int ret;
int vid_sync_dly;
......@@ -1113,8 +1114,8 @@ static enum drm_mode_status tc_connector_mode_valid(struct drm_connector *connec
}
static void tc_bridge_mode_set(struct drm_bridge *bridge,
struct drm_display_mode *mode,
struct drm_display_mode *adj)
const struct drm_display_mode *mode,
const struct drm_display_mode *adj)
{
struct tc_data *tc = bridge_to_tc(bridge);
......
......@@ -294,8 +294,8 @@ EXPORT_SYMBOL(drm_bridge_post_disable);
* Note: the bridge passed should be the one closest to the encoder
*/
void drm_bridge_mode_set(struct drm_bridge *bridge,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode)
{
if (!bridge)
return;
......
......@@ -1277,6 +1277,8 @@ static const struct dpcd_quirk dpcd_quirk_list[] = {
{ OUI(0x00, 0x22, 0xb9), DEVICE_ID_ANY, true, BIT(DP_DPCD_QUIRK_CONSTANT_N) },
/* LG LP140WF6-SPM1 eDP panel */
{ OUI(0x00, 0x22, 0xb9), DEVICE_ID('s', 'i', 'v', 'a', 'r', 'T'), false, BIT(DP_DPCD_QUIRK_CONSTANT_N) },
/* Apple panels need some additional handling to support PSR */
{ OUI(0x00, 0x10, 0xfa), DEVICE_ID_ANY, false, BIT(DP_DPCD_QUIRK_NO_PSR) }
};
#undef OUI
......
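The new entry applies DP_DPCD_QUIRK_NO_PSR to every sink with the Apple OUI (00:10:fa). A consumer would gate PSR on that bit, roughly as below; this is a hedged sketch assuming the drm_dp_read_desc()/drm_dp_has_quirk() helpers of this era, not code from this commit:

#include <drm/drm_dp_helper.h>

static bool sink_blocks_psr(struct drm_dp_aux *aux,
			    const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
	struct drm_dp_desc desc;

	if (drm_dp_read_desc(aux, &desc, drm_dp_is_branch(dpcd)))
		return false;	/* descriptor unreadable; assume no quirk */

	return drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_NO_PSR);
}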
......@@ -246,8 +246,8 @@ static void mic_post_disable(struct drm_bridge *bridge)
}
static void mic_mode_set(struct drm_bridge *bridge,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode)
{
struct exynos_mic *mic = bridge->driver_private;
......
......@@ -845,7 +845,7 @@ static int tda998x_write_aif(struct tda998x_priv *priv,
}
static void
tda998x_write_avi(struct tda998x_priv *priv, struct drm_display_mode *mode)
tda998x_write_avi(struct tda998x_priv *priv, const struct drm_display_mode *mode)
{
union hdmi_infoframe frame;
......@@ -1339,8 +1339,8 @@ static void tda998x_bridge_disable(struct drm_bridge *bridge)
}
static void tda998x_bridge_mode_set(struct drm_bridge *bridge,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode)
{
struct tda998x_priv *priv = bridge_to_tda998x_priv(bridge);
unsigned long tmds_clock;
......
......@@ -40,7 +40,7 @@ i915-y := i915_drv.o \
i915_mm.o \
i915_params.o \
i915_pci.o \
i915_suspend.o \
i915_suspend.o \
i915_syncmap.o \
i915_sw_fence.o \
i915_sysfs.o \
......
......@@ -24,7 +24,6 @@
#define _INTEL_DVO_H
#include <linux/i2c.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include "intel_drv.h"
......
......@@ -148,10 +148,10 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
gvt->types[i].avail_instance = min(low_avail / vgpu_types[i].low_mm,
high_avail / vgpu_types[i].high_mm);
if (IS_GEN8(gvt->dev_priv))
if (IS_GEN(gvt->dev_priv, 8))
sprintf(gvt->types[i].name, "GVTg_V4_%s",
vgpu_types[i].name);
else if (IS_GEN9(gvt->dev_priv))
else if (IS_GEN(gvt->dev_priv, 9))
sprintf(gvt->types[i].name, "GVTg_V5_%s",
vgpu_types[i].name);
......
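The bulk of the i915 changes in this merge are mechanical conversions from the per-generation IS_GENn() helpers to the parameterised IS_GEN(dev_priv, n), plus IS_GEN_RANGE() for spans (seen later). A minimal sketch of the idea; the real definitions in i915_drv.h differ (they build on a compile-time generation mask):

/* Hedged sketch only; see i915_drv.h for the real macros. */
#define IS_GEN(dev_priv, n)	(INTEL_GEN(dev_priv) == (n))
#define IS_GEN_RANGE(dev_priv, s, e) \
	(INTEL_GEN(dev_priv) >= (s) && INTEL_GEN(dev_priv) <= (e))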
......@@ -865,7 +865,7 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
int cmd_table_count;
int ret;
if (!IS_GEN7(engine->i915))
if (!IS_GEN(engine->i915, 7))
return;
switch (engine->id) {
......
This diff is collapsed.
......@@ -41,7 +41,6 @@
#include <linux/vt.h>
#include <acpi/video.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/i915_drm.h>
......@@ -132,15 +131,15 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
switch (id) {
case INTEL_PCH_IBX_DEVICE_ID_TYPE:
DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
WARN_ON(!IS_GEN5(dev_priv));
WARN_ON(!IS_GEN(dev_priv, 5));
return PCH_IBX;
case INTEL_PCH_CPT_DEVICE_ID_TYPE:
DRM_DEBUG_KMS("Found CougarPoint PCH\n");
WARN_ON(!IS_GEN6(dev_priv) && !IS_IVYBRIDGE(dev_priv));
WARN_ON(!IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv));
return PCH_CPT;
case INTEL_PCH_PPT_DEVICE_ID_TYPE:
DRM_DEBUG_KMS("Found PantherPoint PCH\n");
WARN_ON(!IS_GEN6(dev_priv) && !IS_IVYBRIDGE(dev_priv));
WARN_ON(!IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv));
/* PantherPoint is CPT compatible */
return PCH_CPT;
case INTEL_PCH_LPT_DEVICE_ID_TYPE:
......@@ -217,9 +216,9 @@ intel_virt_detect_pch(const struct drm_i915_private *dev_priv)
* make an educated guess as to which PCH is really there.
*/
if (IS_GEN5(dev_priv))
if (IS_GEN(dev_priv, 5))
id = INTEL_PCH_IBX_DEVICE_ID_TYPE;
else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
id = INTEL_PCH_CPT_DEVICE_ID_TYPE;
else if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
id = INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
......@@ -349,7 +348,7 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
value = min_t(int, INTEL_PPGTT(dev_priv), I915_GEM_PPGTT_FULL);
break;
case I915_PARAM_HAS_SEMAPHORES:
value = HAS_LEGACY_SEMAPHORES(dev_priv);
value = 0;
break;
case I915_PARAM_HAS_SECURE_BATCHES:
value = capable(CAP_SYS_ADMIN);
......@@ -358,12 +357,12 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
value = i915_cmd_parser_get_version(dev_priv);
break;
case I915_PARAM_SUBSLICE_TOTAL:
value = sseu_subslice_total(&INTEL_INFO(dev_priv)->sseu);
value = sseu_subslice_total(&RUNTIME_INFO(dev_priv)->sseu);
if (!value)
return -ENODEV;
break;
case I915_PARAM_EU_TOTAL:
value = INTEL_INFO(dev_priv)->sseu.eu_total;
value = RUNTIME_INFO(dev_priv)->sseu.eu_total;
if (!value)
return -ENODEV;
break;
......@@ -380,7 +379,7 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
value = HAS_POOLED_EU(dev_priv);
break;
case I915_PARAM_MIN_EU_IN_POOL:
value = INTEL_INFO(dev_priv)->sseu.min_eu_in_pool;
value = RUNTIME_INFO(dev_priv)->sseu.min_eu_in_pool;
break;
case I915_PARAM_HUC_STATUS:
value = intel_huc_check_status(&dev_priv->huc);
......@@ -430,17 +429,17 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
value = intel_engines_has_context_isolation(dev_priv);
break;
case I915_PARAM_SLICE_MASK:
value = INTEL_INFO(dev_priv)->sseu.slice_mask;
value = RUNTIME_INFO(dev_priv)->sseu.slice_mask;
if (!value)
return -ENODEV;
break;
case I915_PARAM_SUBSLICE_MASK:
value = INTEL_INFO(dev_priv)->sseu.subslice_mask[0];
value = RUNTIME_INFO(dev_priv)->sseu.subslice_mask[0];
if (!value)
return -ENODEV;
break;
case I915_PARAM_CS_TIMESTAMP_FREQUENCY:
value = 1000 * INTEL_INFO(dev_priv)->cs_timestamp_frequency_khz;
value = 1000 * RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz;
break;
case I915_PARAM_MMAP_GTT_COHERENT:
value = INTEL_INFO(dev_priv)->has_coherent_ggtt;
......@@ -966,7 +965,7 @@ static int i915_mmio_setup(struct drm_i915_private *dev_priv)
int mmio_bar;
int mmio_size;
mmio_bar = IS_GEN2(dev_priv) ? 1 : 0;
mmio_bar = IS_GEN(dev_priv, 2) ? 1 : 0;
/*
* Before gen4, the registers and the GTT are behind different BARs.
* However, from gen4 onwards, the registers and the GTT are shared
......@@ -1341,7 +1340,7 @@ intel_get_dram_info(struct drm_i915_private *dev_priv)
/* Need to calculate bandwidth only for Gen9 */
if (IS_BROXTON(dev_priv))
ret = bxt_get_dram_info(dev_priv);
else if (IS_GEN9(dev_priv))
else if (IS_GEN(dev_priv, 9))
ret = skl_get_dram_info(dev_priv);
else
ret = skl_dram_get_channels_info(dev_priv);
......@@ -1374,7 +1373,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
if (i915_inject_load_failure())
return -ENODEV;
intel_device_info_runtime_init(mkwrite_device_info(dev_priv));
intel_device_info_runtime_init(dev_priv);
if (HAS_PPGTT(dev_priv)) {
if (intel_vgpu_active(dev_priv) &&
......@@ -1436,7 +1435,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
pci_set_master(pdev);
/* overlay on gen2 is broken and can't address above 1G */
if (IS_GEN2(dev_priv)) {
if (IS_GEN(dev_priv, 2)) {
ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30));
if (ret) {
DRM_ERROR("failed to set DMA mask\n");
......@@ -1574,7 +1573,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
acpi_video_register();
}
if (IS_GEN5(dev_priv))
if (IS_GEN(dev_priv, 5))
intel_gpu_ips_init(dev_priv);
intel_audio_init(dev_priv);
......@@ -1636,8 +1635,14 @@ static void i915_welcome_messages(struct drm_i915_private *dev_priv)
if (drm_debug & DRM_UT_DRIVER) {
struct drm_printer p = drm_debug_printer("i915 device info:");
intel_device_info_dump(&dev_priv->info, &p);
intel_device_info_dump_runtime(&dev_priv->info, &p);
drm_printf(&p, "pciid=0x%04x rev=0x%02x platform=%s gen=%i\n",
INTEL_DEVID(dev_priv),
INTEL_REVID(dev_priv),
intel_platform_name(INTEL_INFO(dev_priv)->platform),
INTEL_GEN(dev_priv));
intel_device_info_dump_flags(INTEL_INFO(dev_priv), &p);
intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
}
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
......@@ -1674,7 +1679,7 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Setup the write-once "constant" device info */
device_info = mkwrite_device_info(i915);
memcpy(device_info, match_info, sizeof(*device_info));
device_info->device_id = pdev->device;
RUNTIME_INFO(i915)->device_id = pdev->device;
BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
BITS_PER_TYPE(device_info->platform_mask));
......@@ -2174,7 +2179,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
intel_power_domains_resume(dev_priv);
intel_engines_sanitize(dev_priv);
intel_engines_sanitize(dev_priv, true);
enable_rpm_wakeref_asserts(dev_priv);
......@@ -2226,6 +2231,7 @@ void i915_reset(struct drm_i915_private *i915,
might_sleep();
lockdep_assert_held(&i915->drm.struct_mutex);
assert_rpm_wakelock_held(i915);
GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &error->flags));
if (!test_bit(I915_RESET_HANDOFF, &error->flags))
......
This diff is collapsed.
......@@ -25,7 +25,6 @@
*
*/
#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
......@@ -859,58 +858,6 @@ flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
obj->write_domain = 0;
}
static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
const char *gpu_vaddr, int gpu_offset,
int length)
{
int ret, cpu_offset = 0;
while (length > 0) {
int cacheline_end = ALIGN(gpu_offset + 1, 64);
int this_length = min(cacheline_end - gpu_offset, length);
int swizzled_gpu_offset = gpu_offset ^ 64;
ret = __copy_to_user(cpu_vaddr + cpu_offset,
gpu_vaddr + swizzled_gpu_offset,
this_length);
if (ret)
return ret + length;
cpu_offset += this_length;
gpu_offset += this_length;
length -= this_length;
}
return 0;
}
static inline int
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
const char __user *cpu_vaddr,
int length)
{
int ret, cpu_offset = 0;
while (length > 0) {
int cacheline_end = ALIGN(gpu_offset + 1, 64);
int this_length = min(cacheline_end - gpu_offset, length);
int swizzled_gpu_offset = gpu_offset ^ 64;
ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
cpu_vaddr + cpu_offset,
this_length);
if (ret)
return ret + length;
cpu_offset += this_length;
gpu_offset += this_length;
length -= this_length;
}
return 0;
}
/*
* Pins the specified object's pages and synchronizes the object with
* GPU accesses. Sets needs_clflush to non-zero if the caller should
......@@ -1030,72 +977,23 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
return ret;
}
static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
bool swizzled)
{
if (unlikely(swizzled)) {
unsigned long start = (unsigned long) addr;
unsigned long end = (unsigned long) addr + length;
/* For swizzling simply ensure that we always flush both
* channels. Lame, but simple and it works. Swizzled
* pwrite/pread is far from a hotpath - current userspace
* doesn't use it at all. */
start = round_down(start, 128);
end = round_up(end, 128);
drm_clflush_virt_range((void *)start, end - start);
} else {
drm_clflush_virt_range(addr, length);
}
}
/* Only difference to the fast-path function is that this can handle bit17
* and uses non-atomic copy and kmap functions. */
static int
shmem_pread_slow(struct page *page, int offset, int length,
char __user *user_data,
bool page_do_bit17_swizzling, bool needs_clflush)
shmem_pread(struct page *page, int offset, int len, char __user *user_data,
bool needs_clflush)
{
char *vaddr;
int ret;
vaddr = kmap(page);
if (needs_clflush)
shmem_clflush_swizzled_range(vaddr + offset, length,
page_do_bit17_swizzling);
if (page_do_bit17_swizzling)
ret = __copy_to_user_swizzled(user_data, vaddr, offset, length);
else
ret = __copy_to_user(user_data, vaddr + offset, length);
kunmap(page);
if (needs_clflush)
drm_clflush_virt_range(vaddr + offset, len);
return ret ? - EFAULT : 0;
}
ret = __copy_to_user(user_data, vaddr + offset, len);
static int
shmem_pread(struct page *page, int offset, int length, char __user *user_data,
bool page_do_bit17_swizzling, bool needs_clflush)
{
int ret;
ret = -ENODEV;
if (!page_do_bit17_swizzling) {
char *vaddr = kmap_atomic(page);
if (needs_clflush)
drm_clflush_virt_range(vaddr + offset, length);
ret = __copy_to_user_inatomic(user_data, vaddr + offset, length);
kunmap_atomic(vaddr);
}
if (ret == 0)
return 0;
kunmap(page);
return shmem_pread_slow(page, offset, length, user_data,
page_do_bit17_swizzling, needs_clflush);
return ret ? -EFAULT : 0;
}
static int
......@@ -1104,15 +1002,10 @@ i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
{
char __user *user_data;
u64 remain;
unsigned int obj_do_bit17_swizzling;
unsigned int needs_clflush;
unsigned int idx, offset;
int ret;
obj_do_bit17_swizzling = 0;
if (i915_gem_object_needs_bit17_swizzle(obj))
obj_do_bit17_swizzling = BIT(17);
ret = mutex_lock_interruptible(&obj->base.dev->struct_mutex);
if (ret)
return ret;
......@@ -1130,7 +1023,6 @@ i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);
ret = shmem_pread(page, offset, length, user_data,
page_to_phys(page) & obj_do_bit17_swizzling,
needs_clflush);
if (ret)
break;
......@@ -1470,33 +1362,6 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
return ret;
}
static int
shmem_pwrite_slow(struct page *page, int offset, int length,
char __user *user_data,
bool page_do_bit17_swizzling,
bool needs_clflush_before,
bool needs_clflush_after)
{
char *vaddr;
int ret;
vaddr = kmap(page);
if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
shmem_clflush_swizzled_range(vaddr + offset, length,
page_do_bit17_swizzling);
if (page_do_bit17_swizzling)
ret = __copy_from_user_swizzled(vaddr, offset, user_data,
length);
else
ret = __copy_from_user(vaddr + offset, user_data, length);
if (needs_clflush_after)
shmem_clflush_swizzled_range(vaddr + offset, length,
page_do_bit17_swizzling);
kunmap(page);
return ret ? -EFAULT : 0;
}
/* Per-page copy function for the shmem pwrite fastpath.
* Flushes invalid cachelines before writing to the target if
* needs_clflush_before is set and flushes out any written cachelines after
......@@ -1504,31 +1369,24 @@ shmem_pwrite_slow(struct page *page, int offset, int length,
*/
static int
shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
bool page_do_bit17_swizzling,
bool needs_clflush_before,
bool needs_clflush_after)
{
char *vaddr;
int ret;
ret = -ENODEV;
if (!page_do_bit17_swizzling) {
char *vaddr = kmap_atomic(page);
vaddr = kmap(page);
if (needs_clflush_before)
drm_clflush_virt_range(vaddr + offset, len);
ret = __copy_from_user_inatomic(vaddr + offset, user_data, len);
if (needs_clflush_after)
drm_clflush_virt_range(vaddr + offset, len);
if (needs_clflush_before)
drm_clflush_virt_range(vaddr + offset, len);
kunmap_atomic(vaddr);
}
if (ret == 0)
return ret;
ret = __copy_from_user(vaddr + offset, user_data, len);
if (!ret && needs_clflush_after)
drm_clflush_virt_range(vaddr + offset, len);
return shmem_pwrite_slow(page, offset, len, user_data,
page_do_bit17_swizzling,
needs_clflush_before,
needs_clflush_after);
kunmap(page);
return ret ? -EFAULT : 0;
}
static int
......@@ -1538,7 +1396,6 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
struct drm_i915_private *i915 = to_i915(obj->base.dev);
void __user *user_data;
u64 remain;
unsigned int obj_do_bit17_swizzling;
unsigned int partial_cacheline_write;
unsigned int needs_clflush;
unsigned int offset, idx;
......@@ -1553,10 +1410,6 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
if (ret)
return ret;
obj_do_bit17_swizzling = 0;
if (i915_gem_object_needs_bit17_swizzle(obj))
obj_do_bit17_swizzling = BIT(17);
/* If we don't overwrite a cacheline completely we need to be
* careful to have up-to-date data by first clflushing. Don't
* overcomplicate things and flush the entire page.
......@@ -1573,7 +1426,6 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);
ret = shmem_pwrite(page, offset, length, user_data,
page_to_phys(page) & obj_do_bit17_swizzling,
(offset | length) & partial_cacheline_write,
needs_clflush & CLFLUSH_AFTER);
if (ret)
......@@ -3227,13 +3079,6 @@ void i915_gem_reset_engine(struct intel_engine_cs *engine,
struct i915_request *request,
bool stalled)
{
/*
* Make sure this write is visible before we re-enable the interrupt
* handlers on another CPU, as tasklet_enable() resolves to just
* a compiler barrier which is insufficient for our purpose here.
*/
smp_store_mb(engine->irq_posted, 0);
if (request)
request = i915_gem_reset_request(engine, request, stalled);
......@@ -3315,7 +3160,7 @@ static void nop_submit_request(struct i915_request *request)
spin_lock_irqsave(&request->engine->timeline.lock, flags);
__i915_request_submit(request);
intel_engine_init_global_seqno(request->engine, request->global_seqno);
intel_engine_write_global_seqno(request->engine, request->global_seqno);
spin_unlock_irqrestore(&request->engine->timeline.lock, flags);
}
......@@ -3356,7 +3201,7 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
/*
* Make sure no request can slip through without getting completed by
* either this call here to intel_engine_init_global_seqno, or the one
* either this call here to intel_engine_write_global_seqno, or the one
* in nop_submit_request.
*/
synchronize_rcu();
......@@ -3384,6 +3229,9 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
if (!test_bit(I915_WEDGED, &i915->gpu_error.flags))
return true;
if (!i915->gt.scratch) /* Never full initialised, recovery impossible */
return false;
GEM_TRACE("start\n");
/*
......@@ -3422,8 +3270,7 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
i915_retire_requests(i915);
GEM_BUG_ON(i915->gt.active_requests);
if (!intel_gpu_reset(i915, ALL_ENGINES))
intel_engines_sanitize(i915);
intel_engines_sanitize(i915, false);
/*
* Undo nop_submit_request. We prevent all new i915 requests from
......@@ -5027,8 +4874,6 @@ void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
void i915_gem_sanitize(struct drm_i915_private *i915)
{
int err;
GEM_TRACE("\n");
mutex_lock(&i915->drm.struct_mutex);
......@@ -5053,11 +4898,7 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
* it may impact the display and we are uncertain about the stability
* of the reset, so this could be applied to even earlier gen.
*/
err = -ENODEV;
if (INTEL_GEN(i915) >= 5 && intel_has_gpu_reset(i915))
err = WARN_ON(intel_gpu_reset(i915, ALL_ENGINES));
if (!err)
intel_engines_sanitize(i915);
intel_engines_sanitize(i915, false);
intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
intel_runtime_pm_put(i915);
......@@ -5223,15 +5064,15 @@ void i915_gem_init_swizzling(struct drm_i915_private *dev_priv)
I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
DISP_TILE_SURFACE_SWIZZLING);
if (IS_GEN5(dev_priv))
if (IS_GEN(dev_priv, 5))
return;
I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
if (IS_GEN6(dev_priv))
if (IS_GEN(dev_priv, 6))
I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
else if (IS_GEN7(dev_priv))
else if (IS_GEN(dev_priv, 7))
I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
else if (IS_GEN8(dev_priv))
else if (IS_GEN(dev_priv, 8))
I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
else
BUG();
......@@ -5253,10 +5094,10 @@ static void init_unused_rings(struct drm_i915_private *dev_priv)
init_unused_ring(dev_priv, SRB1_BASE);
init_unused_ring(dev_priv, SRB2_BASE);
init_unused_ring(dev_priv, SRB3_BASE);
} else if (IS_GEN2(dev_priv)) {
} else if (IS_GEN(dev_priv, 2)) {
init_unused_ring(dev_priv, SRB0_BASE);
init_unused_ring(dev_priv, SRB1_BASE);
} else if (IS_GEN3(dev_priv)) {
} else if (IS_GEN(dev_priv, 3)) {
init_unused_ring(dev_priv, PRB1_BASE);
init_unused_ring(dev_priv, PRB2_BASE);
}
......@@ -5580,7 +5421,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
}
ret = i915_gem_init_scratch(dev_priv,
IS_GEN2(dev_priv) ? SZ_256K : PAGE_SIZE);
IS_GEN(dev_priv, 2) ? SZ_256K : PAGE_SIZE);
if (ret) {
GEM_BUG_ON(ret == -EIO);
goto err_ggtt;
......
......@@ -86,7 +86,6 @@
*/
#include <linux/log2.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
......@@ -311,7 +310,7 @@ static u32 default_desc_template(const struct drm_i915_private *i915,
address_mode = INTEL_LEGACY_64B_CONTEXT;
desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT;
if (IS_GEN8(i915))
if (IS_GEN(i915, 8))
desc |= GEN8_CTX_L3LLC_COHERENT;
/* TODO: WaDisableLiteRestore when we start using semaphore
......
......@@ -27,7 +27,6 @@
#include <linux/dma-buf.h>
#include <linux/reservation.h>
#include <drm/drmP.h>
#include "i915_drv.h"
......
......@@ -26,7 +26,6 @@
*
*/
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
......
......@@ -31,7 +31,6 @@
#include <linux/sync_file.h>
#include <linux/uaccess.h>
#include <drm/drmP.h>
#include <drm/drm_syncobj.h>
#include <drm/i915_drm.h>
......@@ -1380,7 +1379,7 @@ eb_relocate_entry(struct i915_execbuffer *eb,
* batchbuffers.
*/
if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
IS_GEN6(eb->i915)) {
IS_GEN(eb->i915, 6)) {
err = i915_vma_bind(target, target->obj->cache_level,
PIN_GLOBAL);
if (WARN_ONCE(err,
......@@ -1896,7 +1895,7 @@ static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
u32 *cs;
int i;
if (!IS_GEN7(rq->i915) || rq->engine->id != RCS) {
if (!IS_GEN(rq->i915, 7) || rq->engine->id != RCS) {
DRM_DEBUG("sol reset is gen7/rcs only\n");
return -EINVAL;
}
......
......@@ -21,7 +21,6 @@
* IN THE SOFTWARE.
*/
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
......@@ -193,9 +192,9 @@ static void fence_write(struct drm_i915_fence_reg *fence,
* and explicitly managed for internal users.
*/
if (IS_GEN2(fence->i915))
if (IS_GEN(fence->i915, 2))
i830_write_fence_reg(fence, vma);
else if (IS_GEN3(fence->i915))
else if (IS_GEN(fence->i915, 3))
i915_write_fence_reg(fence, vma);
else
i965_write_fence_reg(fence, vma);
......@@ -596,13 +595,13 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
}
}
} else if (IS_GEN5(dev_priv)) {
} else if (IS_GEN(dev_priv, 5)) {
/* On Ironlake, whatever the DRAM config, the GPU always does the
* same swizzling setup.
*/
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
} else if (IS_GEN2(dev_priv)) {
} else if (IS_GEN(dev_priv, 2)) {
/* As far as we know, the 865 doesn't have these bit 6
* swizzling issues.
*/
......@@ -647,7 +646,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
}
/* check for L-shaped memory aka modified enhanced addressing */
if (IS_GEN4(dev_priv) &&
if (IS_GEN(dev_priv, 4) &&
!(I915_READ(DCC2) & DCC2_MODIFIED_ENHANCED_DISABLE)) {
swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
......
......@@ -33,7 +33,6 @@
#include <asm/set_memory.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
......@@ -483,7 +482,7 @@ static void i915_address_space_init(struct i915_address_space *vm,
* attempt holding the lock is immediately reported by lockdep.
*/
mutex_init(&vm->mutex);
i915_gem_shrinker_taints_mutex(&vm->mutex);
i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex);
GEM_BUG_ON(!vm->total);
drm_mm_init(&vm->mm, 0, vm->total);
......@@ -1423,8 +1422,6 @@ static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
gen8_initialize_pd(vm, pd);
gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
GEM_BUG_ON(pdp->used_pdpes > i915_pdpes_per_pdp(vm));
mark_tlbs_dirty(i915_vm_to_ppgtt(vm));
}
ret = gen8_ppgtt_alloc_pd(vm, pd, start, length);
......@@ -1490,84 +1487,6 @@ static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm,
return -ENOMEM;
}
static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
struct i915_page_directory_pointer *pdp,
u64 start, u64 length,
gen8_pte_t scratch_pte,
struct seq_file *m)
{
struct i915_address_space *vm = &ppgtt->vm;
struct i915_page_directory *pd;
u32 pdpe;
gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
struct i915_page_table *pt;
u64 pd_len = length;
u64 pd_start = start;
u32 pde;
if (pdp->page_directory[pdpe] == ppgtt->vm.scratch_pd)
continue;
seq_printf(m, "\tPDPE #%d\n", pdpe);
gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) {
u32 pte;
gen8_pte_t *pt_vaddr;
if (pd->page_table[pde] == ppgtt->vm.scratch_pt)
continue;
pt_vaddr = kmap_atomic_px(pt);
for (pte = 0; pte < GEN8_PTES; pte += 4) {
u64 va = (pdpe << GEN8_PDPE_SHIFT |
pde << GEN8_PDE_SHIFT |
pte << GEN8_PTE_SHIFT);
int i;
bool found = false;
for (i = 0; i < 4; i++)
if (pt_vaddr[pte + i] != scratch_pte)
found = true;
if (!found)
continue;
seq_printf(m, "\t\t0x%llx [%03d,%03d,%04d]: =", va, pdpe, pde, pte);
for (i = 0; i < 4; i++) {
if (pt_vaddr[pte + i] != scratch_pte)
seq_printf(m, " %llx", pt_vaddr[pte + i]);
else
seq_puts(m, " SCRATCH ");
}
seq_puts(m, "\n");
}
kunmap_atomic(pt_vaddr);
}
}
}
static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
{
struct i915_address_space *vm = &ppgtt->vm;
const gen8_pte_t scratch_pte = vm->scratch_pte;
u64 start = 0, length = ppgtt->vm.total;
if (use_4lvl(vm)) {
u64 pml4e;
struct i915_pml4 *pml4 = &ppgtt->pml4;
struct i915_page_directory_pointer *pdp;
gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
if (pml4->pdps[pml4e] == ppgtt->vm.scratch_pdp)
continue;
seq_printf(m, " PML4E #%llu\n", pml4e);
gen8_dump_pdp(ppgtt, pdp, start, length, scratch_pte, m);
}
} else {
gen8_dump_pdp(ppgtt, &ppgtt->pdp, start, length, scratch_pte, m);
}
}
static int gen8_preallocate_top_level_pdp(struct i915_hw_ppgtt *ppgtt)
{
struct i915_address_space *vm = &ppgtt->vm;
......@@ -1672,7 +1591,6 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
gen8_ppgtt_notify_vgt(ppgtt, true);
ppgtt->vm.cleanup = gen8_ppgtt_cleanup;
ppgtt->debug_dump = gen8_dump_ppgtt;
ppgtt->vm.vma_ops.bind_vma = ppgtt_bind_vma;
ppgtt->vm.vma_ops.unbind_vma = ppgtt_unbind_vma;
......@@ -1688,60 +1606,6 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
return ERR_PTR(err);
}
static void gen6_dump_ppgtt(struct i915_hw_ppgtt *base, struct seq_file *m)
{
struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
const gen6_pte_t scratch_pte = base->vm.scratch_pte;
struct i915_page_table *pt;
u32 pte, pde;
gen6_for_all_pdes(pt, &base->pd, pde) {
gen6_pte_t *vaddr;
if (pt == base->vm.scratch_pt)
continue;
if (i915_vma_is_bound(ppgtt->vma, I915_VMA_GLOBAL_BIND)) {
u32 expected =
GEN6_PDE_ADDR_ENCODE(px_dma(pt)) |
GEN6_PDE_VALID;
u32 pd_entry = readl(ppgtt->pd_addr + pde);
if (pd_entry != expected)
seq_printf(m,
"\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
pde,
pd_entry,
expected);
seq_printf(m, "\tPDE: %x\n", pd_entry);
}
vaddr = kmap_atomic_px(base->pd.page_table[pde]);
for (pte = 0; pte < GEN6_PTES; pte += 4) {
int i;
for (i = 0; i < 4; i++)
if (vaddr[pte + i] != scratch_pte)
break;
if (i == 4)
continue;
seq_printf(m, "\t\t(%03d, %04d) %08llx: ",
pde, pte,
(pde * GEN6_PTES + pte) * I915_GTT_PAGE_SIZE);
for (i = 0; i < 4; i++) {
if (vaddr[pte + i] != scratch_pte)
seq_printf(m, " %08x", vaddr[pte + i]);
else
seq_puts(m, " SCRATCH");
}
seq_puts(m, "\n");
}
kunmap_atomic(vaddr);
}
}
/* Write pde (index) from the page directory @pd to the page table @pt */
static inline void gen6_write_pde(const struct gen6_hw_ppgtt *ppgtt,
const unsigned int pde,
......@@ -2075,6 +1939,7 @@ static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size)
int gen6_ppgtt_pin(struct i915_hw_ppgtt *base)
{
struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
int err;
/*
* Workaround the limited maximum vma->pin_count and the aliasing_ppgtt
......@@ -2090,9 +1955,17 @@ int gen6_ppgtt_pin(struct i915_hw_ppgtt *base)
* allocator works in address space sizes, so it's multiplied by page
* size. We allocate at the top of the GTT to avoid fragmentation.
*/
return i915_vma_pin(ppgtt->vma,
0, GEN6_PD_ALIGN,
PIN_GLOBAL | PIN_HIGH);
err = i915_vma_pin(ppgtt->vma,
0, GEN6_PD_ALIGN,
PIN_GLOBAL | PIN_HIGH);
if (err)
goto unpin;
return 0;
unpin:
ppgtt->pin_count = 0;
return err;
}
void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base)
......@@ -2129,7 +2002,6 @@ static struct i915_hw_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range;
ppgtt->base.vm.insert_entries = gen6_ppgtt_insert_entries;
ppgtt->base.vm.cleanup = gen6_ppgtt_cleanup;
ppgtt->base.debug_dump = gen6_dump_ppgtt;
ppgtt->base.vm.vma_ops.bind_vma = ppgtt_bind_vma;
ppgtt->base.vm.vma_ops.unbind_vma = ppgtt_unbind_vma;
......@@ -2195,9 +2067,9 @@ int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
{
gtt_write_workarounds(dev_priv);
if (IS_GEN6(dev_priv))
if (IS_GEN(dev_priv, 6))
gen6_ppgtt_enable(dev_priv);
else if (IS_GEN7(dev_priv))
else if (IS_GEN(dev_priv, 7))
gen7_ppgtt_enable(dev_priv);
return 0;
......@@ -2279,7 +2151,7 @@ static bool needs_idle_maps(struct drm_i915_private *dev_priv)
/* Query intel_iommu to see if we need the workaround. Presumably that
* was loaded first.
*/
return IS_GEN5(dev_priv) && IS_MOBILE(dev_priv) && intel_vtd_active();
return IS_GEN(dev_priv, 5) && IS_MOBILE(dev_priv) && intel_vtd_active();
}
static void gen6_check_faults(struct drm_i915_private *dev_priv)
......@@ -2372,7 +2244,8 @@ int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
DMA_ATTR_NO_WARN))
return 0;
/* If the DMA remap fails, one cause can be that we have
/*
* If the DMA remap fails, one cause can be that we have
* too many objects pinned in a small remapping table,
* such as swiotlb. Incrementally purge all other objects and
* try again - if there are no more pages to remove from
......@@ -2382,8 +2255,7 @@ int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
} while (i915_gem_shrink(to_i915(obj->base.dev),
obj->base.size >> PAGE_SHIFT, NULL,
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND |
I915_SHRINK_ACTIVE));
I915_SHRINK_UNBOUND));
return -ENOSPC;
}
......
......@@ -413,8 +413,6 @@ struct i915_hw_ppgtt {
struct i915_page_directory_pointer pdp; /* GEN8+ */
struct i915_page_directory pd; /* GEN6-7 */
};
void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
};
struct gen6_hw_ppgtt {
......
......@@ -22,7 +22,6 @@
*
*/
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
......
......@@ -29,7 +29,8 @@
#include <drm/drm_vma_manager.h>
#include <drm/drm_gem.h>
#include <drm/drmP.h>
#include <drm/drm_file.h>
#include <drm/drm_device.h>
#include <drm/i915_drm.h>
......
......@@ -30,30 +30,27 @@
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/vmalloc.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
static bool shrinker_lock(struct drm_i915_private *i915, bool *unlock)
static bool shrinker_lock(struct drm_i915_private *i915,
unsigned int flags,
bool *unlock)
{
switch (mutex_trylock_recursive(&i915->drm.struct_mutex)) {
struct mutex *m = &i915->drm.struct_mutex;
switch (mutex_trylock_recursive(m)) {
case MUTEX_TRYLOCK_RECURSIVE:
*unlock = false;
return true;
case MUTEX_TRYLOCK_FAILED:
*unlock = false;
preempt_disable();
do {
cpu_relax();
if (mutex_trylock(&i915->drm.struct_mutex)) {
*unlock = true;
break;
}
} while (!need_resched());
preempt_enable();
if (flags & I915_SHRINK_ACTIVE &&
mutex_lock_killable_nested(m, I915_MM_SHRINKER) == 0)
*unlock = true;
return *unlock;
case MUTEX_TRYLOCK_SUCCESS:
......@@ -160,7 +157,7 @@ i915_gem_shrink(struct drm_i915_private *i915,
unsigned long scanned = 0;
bool unlock;
if (!shrinker_lock(i915, &unlock))
if (!shrinker_lock(i915, flags, &unlock))
return 0;
/*
......@@ -357,7 +354,7 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
sc->nr_scanned = 0;
if (!shrinker_lock(i915, &unlock))
if (!shrinker_lock(i915, 0, &unlock))
return SHRINK_STOP;
freed = i915_gem_shrink(i915,
......@@ -388,31 +385,6 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
return sc->nr_scanned ? freed : SHRINK_STOP;
}
static bool
shrinker_lock_uninterruptible(struct drm_i915_private *i915, bool *unlock,
int timeout_ms)
{
unsigned long timeout = jiffies + msecs_to_jiffies_timeout(timeout_ms);
do {
if (i915_gem_wait_for_idle(i915,
0, MAX_SCHEDULE_TIMEOUT) == 0 &&
shrinker_lock(i915, unlock))
break;
schedule_timeout_killable(1);
if (fatal_signal_pending(current))
return false;
if (time_after(jiffies, timeout)) {
pr_err("Unable to lock GPU to purge memory.\n");
return false;
}
} while (1);
return true;
}
static int
i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
{
......@@ -421,7 +393,11 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
struct drm_i915_gem_object *obj;
unsigned long unevictable, bound, unbound, freed_pages;
freed_pages = i915_gem_shrink_all(i915);
intel_runtime_pm_get(i915);
freed_pages = i915_gem_shrink(i915, -1UL, NULL,
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND);
intel_runtime_pm_put(i915);
/* Because we may be allocating inside our own driver, we cannot
* assert that there are no objects with pinned pages that are not
......@@ -447,10 +423,6 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
pr_info("Purging GPU memory, %lu pages freed, "
"%lu pages still pinned.\n",
freed_pages, unevictable);
if (unbound || bound)
pr_err("%lu and %lu pages still available in the "
"bound and unbound GPU page lists.\n",
bound, unbound);
*(unsigned long *)ptr += freed_pages;
return NOTIFY_DONE;
......@@ -464,23 +436,20 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
struct i915_vma *vma, *next;
unsigned long freed_pages = 0;
bool unlock;
int ret;
if (!shrinker_lock_uninterruptible(i915, &unlock, 5000))
if (!shrinker_lock(i915, 0, &unlock))
return NOTIFY_DONE;
/* Force everything onto the inactive lists */
ret = i915_gem_wait_for_idle(i915,
I915_WAIT_LOCKED,
MAX_SCHEDULE_TIMEOUT);
if (ret)
if (i915_gem_wait_for_idle(i915,
I915_WAIT_LOCKED,
MAX_SCHEDULE_TIMEOUT))
goto out;
intel_runtime_pm_get(i915);
freed_pages += i915_gem_shrink(i915, -1UL, NULL,
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND |
I915_SHRINK_ACTIVE |
I915_SHRINK_VMAPS);
intel_runtime_pm_put(i915);
......@@ -533,13 +502,40 @@ void i915_gem_shrinker_unregister(struct drm_i915_private *i915)
unregister_shrinker(&i915->mm.shrinker);
}
void i915_gem_shrinker_taints_mutex(struct mutex *mutex)
void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
struct mutex *mutex)
{
bool unlock = false;
if (!IS_ENABLED(CONFIG_LOCKDEP))
return;
if (!lockdep_is_held_type(&i915->drm.struct_mutex, -1)) {
mutex_acquire(&i915->drm.struct_mutex.dep_map,
I915_MM_NORMAL, 0, _RET_IP_);
unlock = true;
}
fs_reclaim_acquire(GFP_KERNEL);
mutex_lock(mutex);
mutex_unlock(mutex);
/*
* As we invariably rely on the struct_mutex within the shrinker,
* but have a complicated recursion dance, taint all the mutexes used
* within the shrinker with the struct_mutex. For completeness, we
* taint with all subclass of struct_mutex, even though we should
* only need tainting by I915_MM_NORMAL to catch possible ABBA
* deadlocks from using struct_mutex inside @mutex.
*/
mutex_acquire(&i915->drm.struct_mutex.dep_map,
I915_MM_SHRINKER, 0, _RET_IP_);
mutex_acquire(&mutex->dep_map, 0, 0, _RET_IP_);
mutex_release(&mutex->dep_map, 0, _RET_IP_);
mutex_release(&i915->drm.struct_mutex.dep_map, 0, _RET_IP_);
fs_reclaim_release(GFP_KERNEL);
if (unlock)
mutex_release(&i915->drm.struct_mutex.dep_map, 0, _RET_IP_);
}
......@@ -26,7 +26,6 @@
*
*/
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
......@@ -102,7 +101,7 @@ static int i915_adjust_stolen(struct drm_i915_private *dev_priv,
resource_size_t ggtt_start;
ggtt_start = I915_READ(PGTBL_CTL);
if (IS_GEN4(dev_priv))
if (IS_GEN(dev_priv, 4))
ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
(ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
else
......@@ -156,7 +155,7 @@ static int i915_adjust_stolen(struct drm_i915_private *dev_priv,
* GEN3 firmware likes to smash pci bridges into the stolen
* range. Apparently this works.
*/
if (r == NULL && !IS_GEN3(dev_priv)) {
if (r == NULL && !IS_GEN(dev_priv, 3)) {
DRM_ERROR("conflict detected with stolen region: %pR\n",
dsm);
......@@ -194,7 +193,8 @@ static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
* Whether ILK really reuses the ELK register for this is unclear.
* Let's see if we catch anyone with this supposedly enabled on ILK.
*/
WARN(IS_GEN5(dev_priv), "ILK stolen reserved found? 0x%08x\n", reg_val);
WARN(IS_GEN(dev_priv, 5), "ILK stolen reserved found? 0x%08x\n",
reg_val);
if (!(reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK))
return;
......
......@@ -27,7 +27,6 @@
#include <linux/string.h>
#include <linux/bitops.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
......@@ -87,7 +86,7 @@ u32 i915_gem_fence_size(struct drm_i915_private *i915,
}
/* Previous chips need a power-of-two fence region when tiling */
if (IS_GEN3(i915))
if (IS_GEN(i915, 3))
ggtt_size = 1024*1024;
else
ggtt_size = 512*1024;
......@@ -162,7 +161,7 @@ i915_tiling_ok(struct drm_i915_gem_object *obj,
return false;
}
if (IS_GEN2(i915) ||
if (IS_GEN(i915, 2) ||
(tiling == I915_TILING_Y && HAS_128_BYTE_Y_TILING(i915)))
tile_width = 128;
else
......
......@@ -22,7 +22,6 @@
*
*/
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
......
......@@ -594,13 +594,14 @@ static void print_error_obj(struct drm_i915_error_state_buf *m,
static void err_print_capabilities(struct drm_i915_error_state_buf *m,
const struct intel_device_info *info,
const struct intel_runtime_info *runtime,
const struct intel_driver_caps *caps)
{
struct drm_printer p = i915_error_printer(m);
intel_device_info_dump_flags(info, &p);
intel_driver_caps_print(caps, &p);
intel_device_info_dump_topology(&info->sseu, &p);
intel_device_info_dump_topology(&runtime->sseu, &p);
}
static void err_print_params(struct drm_i915_error_state_buf *m,
......@@ -664,7 +665,9 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
if (*error->error_msg)
err_printf(m, "%s\n", error->error_msg);
err_printf(m, "Kernel: %s\n", init_utsname()->release);
err_printf(m, "Kernel: %s %s\n",
init_utsname()->release,
init_utsname()->machine);
ts = ktime_to_timespec64(error->time);
err_printf(m, "Time: %lld s %ld us\n",
(s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
......@@ -735,7 +738,7 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
}
if (IS_GEN7(m->i915))
if (IS_GEN(m->i915, 7))
err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
......@@ -844,7 +847,8 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
if (error->display)
intel_display_print_error_state(m, error->display);
err_print_capabilities(m, &error->device_info, &error->driver_caps);
err_print_capabilities(m, &error->device_info, &error->runtime_info,
&error->driver_caps);
err_print_params(m, &error->params);
err_print_uc(m, &error->uc);
}
......@@ -963,17 +967,10 @@ static void i915_error_object_free(struct drm_i915_error_object *obj)
kfree(obj);
}
static __always_inline void free_param(const char *type, void *x)
{
if (!__builtin_strcmp(type, "char *"))
kfree(*(void **)x);
}
static void cleanup_params(struct i915_gpu_state *error)
{
#define FREE(T, x, ...) free_param(#T, &error->params.x);
I915_PARAMS_FOR_EACH(FREE);
#undef FREE
i915_params_free(&error->params);
}
static void cleanup_uc_state(struct i915_gpu_state *error)
......@@ -1037,7 +1034,7 @@ i915_error_object_create(struct drm_i915_private *i915,
dma_addr_t dma;
int ret;
if (!vma)
if (!vma || !vma->pages)
return NULL;
num_pages = min_t(u64, vma->size, vma->obj->base.size) >> PAGE_SHIFT;
......@@ -1314,7 +1311,7 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
if (!HWS_NEEDS_PHYSICAL(dev_priv)) {
i915_reg_t mmio;
if (IS_GEN7(dev_priv)) {
if (IS_GEN(dev_priv, 7)) {
switch (engine->id) {
default:
case RCS:
......@@ -1330,7 +1327,7 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
mmio = VEBOX_HWS_PGA_GEN7;
break;
}
} else if (IS_GEN6(engine->i915)) {
} else if (IS_GEN(engine->i915, 6)) {
mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
} else {
/* XXX: gen8 returns to sanity */
......@@ -1352,10 +1349,10 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
ee->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine));
if (IS_GEN6(dev_priv))
if (IS_GEN(dev_priv, 6))
ee->vm_info.pp_dir_base =
I915_READ(RING_PP_DIR_BASE_READ(engine));
else if (IS_GEN7(dev_priv))
else if (IS_GEN(dev_priv, 7))
ee->vm_info.pp_dir_base =
I915_READ(RING_PP_DIR_BASE(engine));
else if (INTEL_GEN(dev_priv) >= 8)
......@@ -1725,7 +1722,7 @@ static void capture_reg_state(struct i915_gpu_state *error)
error->forcewake = I915_READ_FW(FORCEWAKE_VLV);
}
if (IS_GEN7(dev_priv))
if (IS_GEN(dev_priv, 7))
error->err_int = I915_READ(GEN7_ERR_INT);
if (INTEL_GEN(dev_priv) >= 8) {
......@@ -1733,7 +1730,7 @@ static void capture_reg_state(struct i915_gpu_state *error)
error->fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
}
if (IS_GEN6(dev_priv)) {
if (IS_GEN(dev_priv, 6)) {
error->forcewake = I915_READ_FW(FORCEWAKE);
error->gab_ctl = I915_READ(GAB_CTL);
error->gfx_mode = I915_READ(GFX_MODE);
......@@ -1753,7 +1750,7 @@ static void capture_reg_state(struct i915_gpu_state *error)
error->ccid = I915_READ(CCID);
/* 3: Feature specific registers */
if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
if (IS_GEN_RANGE(dev_priv, 6, 7)) {
error->gam_ecochk = I915_READ(GAM_ECOCHK);
error->gac_eco = I915_READ(GAC_ECO_BITS);
}
......@@ -1777,7 +1774,7 @@ static void capture_reg_state(struct i915_gpu_state *error)
error->ier = I915_READ(DEIER);
error->gtier[0] = I915_READ(GTIER);
error->ngtier = 1;
} else if (IS_GEN2(dev_priv)) {
} else if (IS_GEN(dev_priv, 2)) {
error->ier = I915_READ16(IER);
} else if (!IS_VALLEYVIEW(dev_priv)) {
error->ier = I915_READ(IER);
......@@ -1831,21 +1828,15 @@ static void capture_gen_state(struct i915_gpu_state *error)
memcpy(&error->device_info,
INTEL_INFO(i915),
sizeof(error->device_info));
memcpy(&error->runtime_info,
RUNTIME_INFO(i915),
sizeof(error->runtime_info));
error->driver_caps = i915->caps;
}
static __always_inline void dup_param(const char *type, void *x)
{
if (!__builtin_strcmp(type, "char *"))
*(void **)x = kstrdup(*(void **)x, GFP_ATOMIC);
}
static void capture_params(struct i915_gpu_state *error)
{
error->params = i915_modparams;
#define DUP(T, x, ...) dup_param(#T, &error->params.x);
I915_PARAMS_FOR_EACH(DUP);
#undef DUP
i915_params_copy(&error->params, &i915_modparams);
}
static unsigned long capture_find_epoch(const struct i915_gpu_state *error)
......@@ -1907,9 +1898,16 @@ i915_capture_gpu_state(struct drm_i915_private *i915)
{
struct i915_gpu_state *error;
/* Check if GPU capture has been disabled */
error = READ_ONCE(i915->gpu_error.first_error);
if (IS_ERR(error))
return error;
error = kzalloc(sizeof(*error), GFP_ATOMIC);
if (!error)
return NULL;
if (!error) {
i915_disable_error_state(i915, -ENOMEM);
return ERR_PTR(-ENOMEM);
}
kref_init(&error->ref);
error->i915 = i915;
......@@ -1945,11 +1943,8 @@ void i915_capture_error_state(struct drm_i915_private *i915,
return;
error = i915_capture_gpu_state(i915);
if (!error) {
DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
i915_disable_error_state(i915, -ENOMEM);
if (IS_ERR(error))
return;
}
i915_error_capture_msg(i915, error, engine_mask, error_msg);
DRM_INFO("%s\n", error->error_msg);
......@@ -1987,7 +1982,7 @@ i915_first_error_state(struct drm_i915_private *i915)
spin_lock_irq(&i915->gpu_error.lock);
error = i915->gpu_error.first_error;
if (error)
if (!IS_ERR_OR_NULL(error))
i915_gpu_state_get(error);
spin_unlock_irq(&i915->gpu_error.lock);
......@@ -2000,10 +1995,11 @@ void i915_reset_error_state(struct drm_i915_private *i915)
spin_lock_irq(&i915->gpu_error.lock);
error = i915->gpu_error.first_error;
i915->gpu_error.first_error = NULL;
if (error != ERR_PTR(-ENODEV)) /* if disabled, always disabled */
i915->gpu_error.first_error = NULL;
spin_unlock_irq(&i915->gpu_error.lock);
if (!IS_ERR(error))
if (!IS_ERR_OR_NULL(error))
i915_gpu_state_put(error);
}
......
......@@ -45,6 +45,7 @@ struct i915_gpu_state {
u32 reset_count;
u32 suspend_count;
struct intel_device_info device_info;
struct intel_runtime_info runtime_info;
struct intel_driver_caps driver_caps;
struct i915_params params;
......
......@@ -28,7 +28,6 @@
*/
#include <linux/compat.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
......
......@@ -31,7 +31,6 @@
#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
......@@ -950,7 +949,7 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
vtotal /= 2;
if (IS_GEN2(dev_priv))
if (IS_GEN(dev_priv, 2))
position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
else
position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
......@@ -1030,7 +1029,7 @@ static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
if (stime)
*stime = ktime_get();
if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
if (IS_GEN(dev_priv, 2) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
/* No obvious pixelcount register. Only query vertical
* scanout position from Display scan line register.
*/
......@@ -1090,7 +1089,7 @@ static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
else
position += vtotal - vbl_end;
if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
if (IS_GEN(dev_priv, 2) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
*vpos = position;
*hpos = 0;
} else {
......@@ -1189,13 +1188,6 @@ static void notify_ring(struct intel_engine_cs *engine)
rq = i915_request_get(waiter);
tsk = wait->tsk;
} else {
if (engine->irq_seqno_barrier &&
i915_seqno_passed(seqno, wait->seqno - 1)) {
set_bit(ENGINE_IRQ_BREADCRUMB,
&engine->irq_posted);
tsk = wait->tsk;
}
}
engine->breadcrumbs.irq_count++;
......@@ -2547,7 +2539,7 @@ static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
I915_WRITE(SDEIIR, pch_iir);
}
if (IS_GEN5(dev_priv) && de_iir & DE_PCU_EVENT)
if (IS_GEN(dev_priv, 5) && de_iir & DE_PCU_EVENT)
ironlake_rps_change_irq_handler(dev_priv);
}
......@@ -3243,7 +3235,7 @@ void i915_clear_error_registers(struct drm_i915_private *dev_priv)
{
u32 eir;
if (!IS_GEN2(dev_priv))
if (!IS_GEN(dev_priv, 2))
I915_WRITE(PGTBL_ER, I915_READ(PGTBL_ER));
if (INTEL_GEN(dev_priv) < 4)
......@@ -3586,11 +3578,8 @@ static void ironlake_irq_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
if (IS_GEN5(dev_priv))
I915_WRITE(HWSTAM, 0xffffffff);
GEN3_IRQ_RESET(DE);
if (IS_GEN7(dev_priv))
if (IS_GEN(dev_priv, 7))
I915_WRITE(GEN7_ERR_INT, 0xffffffff);
if (IS_HASWELL(dev_priv)) {
......@@ -4045,7 +4034,7 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
}
gt_irqs |= GT_RENDER_USER_INTERRUPT;
if (IS_GEN5(dev_priv)) {
if (IS_GEN(dev_priv, 5)) {
gt_irqs |= ILK_BSD_USER_INTERRUPT;
} else {
gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
......@@ -4183,9 +4172,6 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
};
if (HAS_L3_DPF(dev_priv))
gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
dev_priv->pm_ier = 0x0;
dev_priv->pm_imr = ~dev_priv->pm_ier;
GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
......@@ -4368,8 +4354,6 @@ static void i8xx_irq_reset(struct drm_device *dev)
i9xx_pipestat_irq_reset(dev_priv);
I915_WRITE16(HWSTAM, 0xffff);
GEN2_IRQ_RESET();
}
......@@ -4537,8 +4521,6 @@ static void i915_irq_reset(struct drm_device *dev)
i9xx_pipestat_irq_reset(dev_priv);
I915_WRITE(HWSTAM, 0xffffffff);
GEN3_IRQ_RESET();
}
......@@ -4648,8 +4630,6 @@ static void i965_irq_reset(struct drm_device *dev)
i9xx_pipestat_irq_reset(dev_priv);
I915_WRITE(HWSTAM, 0xffffffff);
GEN3_IRQ_RESET();
}
......@@ -4836,7 +4816,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
if (INTEL_GEN(dev_priv) >= 8)
rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
if (IS_GEN2(dev_priv)) {
if (IS_GEN(dev_priv, 2)) {
/* Gen2 doesn't have a hardware frame counter */
dev->max_vblank_count = 0;
} else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
......@@ -4852,7 +4832,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
* Gen2 doesn't have a hardware frame counter and so depends on
* vblank interrupts to produce sane vblank sequence numbers.
*/
if (!IS_GEN2(dev_priv))
if (!IS_GEN(dev_priv, 2))
dev->vblank_disable_immediate = true;
/* Most platforms treat the display irq block as an always-on
......@@ -4924,14 +4904,14 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev->driver->disable_vblank = ironlake_disable_vblank;
dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
} else {
if (IS_GEN2(dev_priv)) {
if (IS_GEN(dev_priv, 2)) {
dev->driver->irq_preinstall = i8xx_irq_reset;
dev->driver->irq_postinstall = i8xx_irq_postinstall;
dev->driver->irq_handler = i8xx_irq_handler;
dev->driver->irq_uninstall = i8xx_irq_reset;
dev->driver->enable_vblank = i8xx_enable_vblank;
dev->driver->disable_vblank = i8xx_disable_vblank;
} else if (IS_GEN3(dev_priv)) {
} else if (IS_GEN(dev_priv, 3)) {
dev->driver->irq_preinstall = i915_irq_reset;
dev->driver->irq_postinstall = i915_irq_postinstall;
dev->driver->irq_uninstall = i915_irq_reset;
......
......@@ -77,7 +77,7 @@ i915_param_named(error_capture, bool, 0600,
"triaging and debugging hangs.");
#endif
i915_param_named_unsafe(enable_hangcheck, bool, 0644,
i915_param_named_unsafe(enable_hangcheck, bool, 0600,
"Periodically check GPU activity for detecting hangs. "
"WARNING: Disabling this can cause system wide hangs. "
"(default: true)");
......@@ -203,3 +203,33 @@ void i915_params_dump(const struct i915_params *params, struct drm_printer *p)
I915_PARAMS_FOR_EACH(PRINT);
#undef PRINT
}
static __always_inline void dup_param(const char *type, void *x)
{
if (!__builtin_strcmp(type, "char *"))
*(void **)x = kstrdup(*(void **)x, GFP_ATOMIC);
}
void i915_params_copy(struct i915_params *dest, const struct i915_params *src)
{
*dest = *src;
#define DUP(T, x, ...) dup_param(#T, &dest->x);
I915_PARAMS_FOR_EACH(DUP);
#undef DUP
}
static __always_inline void free_param(const char *type, void *x)
{
if (!__builtin_strcmp(type, "char *")) {
kfree(*(void **)x);
*(void **)x = NULL;
}
}
/* free the allocated members, *not* the passed in params itself */
void i915_params_free(struct i915_params *params)
{
#define FREE(T, x, ...) free_param(#T, &params->x);
I915_PARAMS_FOR_EACH(FREE);
#undef FREE
}
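The dup_param()/free_param() helpers and i915_params_copy()/i915_params_free() lean on the I915_PARAMS_FOR_EACH() X-macro: every member is visited once, and only "char *" members are deep-copied or freed. A self-contained sketch of the pattern with a two-entry illustrative param list (the real list covers every module parameter):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PARAMS_FOR_EACH(param) \
	param(char *, vbt_firmware, NULL) \
	param(int, modeset, -1)

struct params {
#define MEMBER(T, x, ...) T x;
	PARAMS_FOR_EACH(MEMBER)
#undef MEMBER
};

static void dup_param(const char *type, void *x)
{
	/* kstrdup(NULL) returns NULL in the kernel; guard strdup() here. */
	if (!strcmp(type, "char *") && *(char **)x)
		*(char **)x = strdup(*(char **)x);
}

static void free_param(const char *type, void *x)
{
	if (!strcmp(type, "char *")) {
		free(*(void **)x);
		*(void **)x = NULL;
	}
}

static void params_copy(struct params *dst, const struct params *src)
{
	*dst = *src;			/* shallow copy of every member */
#define DUP(T, x, ...) dup_param(#T, &dst->x);
	PARAMS_FOR_EACH(DUP)		/* deep-copy only the strings */
#undef DUP
}

static void params_free(struct params *p)
{
#define FREE(T, x, ...) free_param(#T, &p->x);
	PARAMS_FOR_EACH(FREE)
#undef FREE
}

int main(void)
{
	struct params a = { .vbt_firmware = strdup("vbt.bin"), .modeset = 1 };
	struct params b;

	params_copy(&b, &a);
	printf("%s %d\n", b.vbt_firmware, b.modeset);	/* vbt.bin 1 */
	params_free(&a);
	params_free(&b);
	return 0;
}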
......@@ -33,6 +33,15 @@ struct drm_printer;
#define ENABLE_GUC_SUBMISSION BIT(0)
#define ENABLE_GUC_LOAD_HUC BIT(1)
/*
* Invoke param, a function-like macro, for each i915 param, with arguments:
*
* param(type, name, value)
*
* type: parameter type, one of {bool, int, unsigned int, char *}
* name: name of the parameter
* value: initial/default value of the parameter
*/
#define I915_PARAMS_FOR_EACH(param) \
param(char *, vbt_firmware, NULL) \
param(int, modeset, -1) \
......@@ -78,6 +87,8 @@ struct i915_params {
extern struct i915_params i915_modparams __read_mostly;
void i915_params_dump(const struct i915_params *params, struct drm_printer *p);
void i915_params_copy(struct i915_params *dest, const struct i915_params *src);
void i915_params_free(struct i915_params *params);
#endif
......@@ -82,6 +82,7 @@
.display.has_overlay = 1, \
.display.overlay_needs_physical = 1, \
.display.has_gmch_display = 1, \
.gpu_reset_clobbers_display = true, \
.hws_needs_physical = 1, \
.unfenced_needs_alignment = 1, \
.ring_mask = RENDER_RING, \
......@@ -122,6 +123,7 @@ static const struct intel_device_info intel_i865g_info = {
GEN(3), \
.num_pipes = 2, \
.display.has_gmch_display = 1, \
.gpu_reset_clobbers_display = true, \
.ring_mask = RENDER_RING, \
.has_snoop = true, \
.has_coherent_ggtt = true, \
......@@ -198,6 +200,7 @@ static const struct intel_device_info intel_pineview_info = {
.num_pipes = 2, \
.display.has_hotplug = 1, \
.display.has_gmch_display = 1, \
.gpu_reset_clobbers_display = true, \
.ring_mask = RENDER_RING, \
.has_snoop = true, \
.has_coherent_ggtt = true, \
......@@ -228,6 +231,7 @@ static const struct intel_device_info intel_g45_info = {
GEN4_FEATURES,
PLATFORM(INTEL_G45),
.ring_mask = RENDER_RING | BSD_RING,
.gpu_reset_clobbers_display = false,
};
static const struct intel_device_info intel_gm45_info = {
......@@ -237,6 +241,7 @@ static const struct intel_device_info intel_gm45_info = {
.display.has_fbc = 1,
.display.supports_tv = 1,
.ring_mask = RENDER_RING | BSD_RING,
.gpu_reset_clobbers_display = false,
};
#define GEN5_FEATURES \
......@@ -532,7 +537,6 @@ static const struct intel_device_info intel_skylake_gt4_info = {
.display.has_fbc = 1, \
.display.has_psr = 1, \
.has_runtime_pm = 1, \
.has_pooled_eu = 0, \
.display.has_csr = 1, \
.has_rc6 = 1, \
.display.has_dp_mst = 1, \
......
......@@ -1796,7 +1796,7 @@ static int gen8_enable_metric_set(struct i915_perf_stream *stream)
* be read back from automatically triggered reports, as part of the
* RPT_ID field.
*/
if (IS_GEN(dev_priv, 9, 11)) {
if (IS_GEN_RANGE(dev_priv, 9, 11)) {
I915_WRITE(GEN8_OA_DEBUG,
_MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
GEN9_OA_DEBUG_INCLUDE_CLK_RATIO));
......@@ -2646,7 +2646,7 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
static u64 oa_exponent_to_ns(struct drm_i915_private *dev_priv, int exponent)
{
return div64_u64(1000000000ULL * (2ULL << exponent),
1000ULL * INTEL_INFO(dev_priv)->cs_timestamp_frequency_khz);
1000ULL * RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);
}
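Plugging numbers into oa_exponent_to_ns(): with a 12000 kHz command-stream timestamp clock and exponent 5, the period is 1e9 * (2 << 5) / (1000 * 12000) = 64e9 / 12e6 = 5333 ns. A standalone restatement of the same arithmetic; the clock frequency is an assumed example, not taken from any particular SKU:

#include <stdio.h>
#include <stdint.h>

static uint64_t oa_exponent_to_ns(uint64_t freq_khz, int exponent)
{
	return 1000000000ULL * (2ULL << exponent) / (1000ULL * freq_khz);
}

int main(void)
{
	/* One OA report roughly every 5.3 us at this rate. */
	printf("%llu ns\n",
	       (unsigned long long)oa_exponent_to_ns(12000, 5));
	return 0;
}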
/**
......@@ -3415,7 +3415,7 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
dev_priv->perf.oa.ops.read = gen8_oa_read;
dev_priv->perf.oa.ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
if (IS_GEN8(dev_priv) || IS_GEN9(dev_priv)) {
if (IS_GEN_RANGE(dev_priv, 8, 9)) {
dev_priv->perf.oa.ops.is_valid_b_counter_reg =
gen7_is_valid_b_counter_addr;
dev_priv->perf.oa.ops.is_valid_mux_reg =
......@@ -3431,7 +3431,7 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
dev_priv->perf.oa.ops.enable_metric_set = gen8_enable_metric_set;
dev_priv->perf.oa.ops.disable_metric_set = gen8_disable_metric_set;
if (IS_GEN8(dev_priv)) {
if (IS_GEN(dev_priv, 8)) {
dev_priv->perf.oa.ctx_oactxctrl_offset = 0x120;
dev_priv->perf.oa.ctx_flexeu0_offset = 0x2ce;
......@@ -3442,7 +3442,7 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<16);
}
} else if (IS_GEN(dev_priv, 10, 11)) {
} else if (IS_GEN_RANGE(dev_priv, 10, 11)) {
dev_priv->perf.oa.ops.is_valid_b_counter_reg =
gen7_is_valid_b_counter_addr;
dev_priv->perf.oa.ops.is_valid_mux_reg =
......@@ -3471,7 +3471,7 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
spin_lock_init(&dev_priv->perf.oa.oa_buffer.ptr_lock);
oa_sample_rate_hard_limit = 1000 *
(INTEL_INFO(dev_priv)->cs_timestamp_frequency_khz / 2);
(RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz / 2);
dev_priv->perf.sysctl_header = register_sysctl_table(dev_root);
mutex_init(&dev_priv->perf.metrics_lock);
......
......@@ -13,7 +13,7 @@
static int query_topology_info(struct drm_i915_private *dev_priv,
struct drm_i915_query_item *query_item)
{
const struct sseu_dev_info *sseu = &INTEL_INFO(dev_priv)->sseu;
const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
struct drm_i915_query_topology_info topo;
u32 slice_length, subslice_length, eu_length, total_length;
......
......@@ -111,99 +111,10 @@ i915_request_remove_from_client(struct i915_request *request)
spin_unlock(&file_priv->mm.lock);
}
static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
static void reserve_gt(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
struct i915_timeline *timeline;
enum intel_engine_id id;
int ret;
/* Carefully retire all requests without writing to the rings */
ret = i915_gem_wait_for_idle(i915,
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_LOCKED,
MAX_SCHEDULE_TIMEOUT);
if (ret)
return ret;
GEM_BUG_ON(i915->gt.active_requests);
/* If the seqno wraps around, we need to clear the breadcrumb rbtree */
for_each_engine(engine, i915, id) {
GEM_TRACE("%s seqno %d (current %d) -> %d\n",
engine->name,
engine->timeline.seqno,
intel_engine_get_seqno(engine),
seqno);
if (seqno == engine->timeline.seqno)
continue;
kthread_park(engine->breadcrumbs.signaler);
if (!i915_seqno_passed(seqno, engine->timeline.seqno)) {
/* Flush any waiters before we reuse the seqno */
intel_engine_disarm_breadcrumbs(engine);
intel_engine_init_hangcheck(engine);
GEM_BUG_ON(!list_empty(&engine->breadcrumbs.signals));
}
/* Check we are idle before we fiddle with hw state! */
GEM_BUG_ON(!intel_engine_is_idle(engine));
GEM_BUG_ON(i915_gem_active_isset(&engine->timeline.last_request));
/* Finally reset hw state */
intel_engine_init_global_seqno(engine, seqno);
engine->timeline.seqno = seqno;
kthread_unpark(engine->breadcrumbs.signaler);
}
list_for_each_entry(timeline, &i915->gt.timelines, link)
memset(timeline->global_sync, 0, sizeof(timeline->global_sync));
i915->gt.request_serial = seqno;
return 0;
}
int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno)
{
struct drm_i915_private *i915 = to_i915(dev);
lockdep_assert_held(&i915->drm.struct_mutex);
if (seqno == 0)
return -EINVAL;
/* HWS page needs to be set less than what we will inject to ring */
return reset_all_global_seqno(i915, seqno - 1);
}
static int reserve_gt(struct drm_i915_private *i915)
{
int ret;
/*
* Reservation is fine until we may need to wrap around
*
* By incrementing the serial for every request, we know that no
* individual engine may exceed that serial (as each is reset to 0
* on any wrap). This protects even the most pessimistic of migrations
* of every request from all engines onto just one.
*/
while (unlikely(++i915->gt.request_serial == 0)) {
ret = reset_all_global_seqno(i915, 0);
if (ret) {
i915->gt.request_serial--;
return ret;
}
}
if (!i915->gt.active_requests++)
i915_gem_unpark(i915);
return 0;
}
static void unreserve_gt(struct drm_i915_private *i915)
......@@ -566,6 +477,38 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
return NOTIFY_DONE;
}
static void ring_retire_requests(struct intel_ring *ring)
{
struct i915_request *rq, *rn;
list_for_each_entry_safe(rq, rn, &ring->request_list, ring_link) {
if (!i915_request_completed(rq))
break;
i915_request_retire(rq);
}
}
static noinline struct i915_request *
i915_request_alloc_slow(struct intel_context *ce)
{
struct intel_ring *ring = ce->ring;
struct i915_request *rq;
if (list_empty(&ring->request_list))
goto out;
/* Ratelimit ourselves to prevent oom from malicious clients */
rq = list_last_entry(&ring->request_list, typeof(*rq), ring_link);
cond_synchronize_rcu(rq->rcustate);
/* Retire our old requests in the hope that we free some */
ring_retire_requests(ring);
out:
return kmem_cache_alloc(ce->gem_context->i915->requests, GFP_KERNEL);
}
/**
* i915_request_alloc - allocate a request structure
*
......@@ -608,13 +551,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
if (IS_ERR(ce))
return ERR_CAST(ce);
ret = reserve_gt(i915);
if (ret)
goto err_unpin;
ret = intel_ring_wait_for_space(ce->ring, MIN_SPACE_FOR_ADD_REQUEST);
if (ret)
goto err_unreserve;
reserve_gt(i915);
/* Move our oldest request to the slab-cache (if not in use!) */
rq = list_first_entry(&ce->ring->request_list, typeof(*rq), ring_link);
......@@ -654,15 +591,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
rq = kmem_cache_alloc(i915->requests,
GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
if (unlikely(!rq)) {
i915_retire_requests(i915);
/* Ratelimit ourselves to prevent oom from malicious clients */
rq = i915_gem_active_raw(&ce->ring->timeline->last_request,
&i915->drm.struct_mutex);
if (rq)
cond_synchronize_rcu(rq->rcustate);
rq = kmem_cache_alloc(i915->requests, GFP_KERNEL);
rq = i915_request_alloc_slow(ce);
if (!rq) {
ret = -ENOMEM;
goto err_unreserve;
......@@ -707,9 +636,13 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
* i915_request_add() call can't fail. Note that the reserve may need
* to be redone if the request is not actually submitted straight
* away, e.g. because a GPU scheduler has deferred it.
*
* Note that due to how we add reserved_space to intel_ring_begin()
* we need to double our request to ensure that if we need to wrap
* around inside i915_request_add() there is sufficient space at
* the beginning of the ring as well.
*/
rq->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
GEM_BUG_ON(rq->reserved_space < engine->emit_breadcrumb_sz);
rq->reserved_space = 2 * engine->emit_breadcrumb_sz * sizeof(u32);
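For concreteness, with a hypothetical 6-dword breadcrumb (emit_breadcrumb_sz = 6) the request now reserves 2 * 6 * sizeof(u32) = 48 bytes, enough for the breadcrumb to be emitted a second time if i915_request_add() has to wrap back to the start of the ring.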
/*
* Record the position of the start of the request so that
......@@ -719,11 +652,6 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
*/
rq->head = rq->ring->emit;
/* Unconditionally invalidate GPU caches and TLBs. */
ret = engine->emit_flush(rq, EMIT_INVALIDATE);
if (ret)
goto err_unwind;
ret = engine->request_alloc(rq);
if (ret)
goto err_unwind;
......@@ -748,7 +676,6 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
kmem_cache_free(i915->requests, rq);
err_unreserve:
unreserve_gt(i915);
err_unpin:
intel_context_unpin(ce);
return ERR_PTR(ret);
}
......@@ -776,34 +703,12 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from)
ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
&from->submit,
I915_FENCE_GFP);
return ret < 0 ? ret : 0;
}
if (to->engine->semaphore.sync_to) {
u32 seqno;
GEM_BUG_ON(!from->engine->semaphore.signal);
seqno = i915_request_global_seqno(from);
if (!seqno)
goto await_dma_fence;
if (seqno <= to->timeline->global_sync[from->engine->id])
return 0;
trace_i915_gem_ring_sync_to(to, from);
ret = to->engine->semaphore.sync_to(to, from);
if (ret)
return ret;
to->timeline->global_sync[from->engine->id] = seqno;
return 0;
} else {
ret = i915_sw_fence_await_dma_fence(&to->submit,
&from->fence, 0,
I915_FENCE_GFP);
}
await_dma_fence:
ret = i915_sw_fence_await_dma_fence(&to->submit,
&from->fence, 0,
I915_FENCE_GFP);
return ret < 0 ? ret : 0;
}
......@@ -979,8 +884,8 @@ void i915_request_add(struct i915_request *request)
* should already have been reserved in the ring buffer. Let the ring
* know that it is time to use that space up.
*/
GEM_BUG_ON(request->reserved_space > request->ring->space);
request->reserved_space = 0;
engine->emit_flush(request, EMIT_FLUSH);
/*
* Record the position of the start of the breadcrumb so that
......@@ -1298,13 +1203,7 @@ long i915_request_wait(struct i915_request *rq,
set_current_state(state);
wakeup:
/*
* Carefully check if the request is complete, giving time
* for the seqno to be visible following the interrupt.
* We also have to check in case we are kicked by the GPU
* reset in order to drop the struct_mutex.
*/
if (__i915_request_irq_complete(rq))
if (i915_request_completed(rq))
break;
/*
......@@ -1343,19 +1242,6 @@ long i915_request_wait(struct i915_request *rq,
return timeout;
}
static void ring_retire_requests(struct intel_ring *ring)
{
struct i915_request *request, *next;
list_for_each_entry_safe(request, next,
&ring->request_list, ring_link) {
if (!i915_request_completed(request))
break;
i915_request_retire(request);
}
}
void i915_retire_requests(struct drm_i915_private *i915)
{
struct intel_ring *ring, *tmp;
......
......@@ -30,7 +30,6 @@
#include "i915_gem.h"
#include "i915_scheduler.h"
#include "i915_sw_fence.h"
#include "i915_scheduler.h"
#include <uapi/drm/i915_drm.h>
......
......@@ -24,7 +24,6 @@
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "intel_drv.h"
#include "i915_reg.h"
......@@ -65,7 +64,7 @@ int i915_save_state(struct drm_i915_private *dev_priv)
i915_save_display(dev_priv);
if (IS_GEN4(dev_priv))
if (IS_GEN(dev_priv, 4))
pci_read_config_word(pdev, GCDGMBUS,
&dev_priv->regfile.saveGCDGMBUS);
......@@ -77,14 +76,14 @@ int i915_save_state(struct drm_i915_private *dev_priv)
dev_priv->regfile.saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
/* Scratch space */
if (IS_GEN2(dev_priv) && IS_MOBILE(dev_priv)) {
if (IS_GEN(dev_priv, 2) && IS_MOBILE(dev_priv)) {
for (i = 0; i < 7; i++) {
dev_priv->regfile.saveSWF0[i] = I915_READ(SWF0(i));
dev_priv->regfile.saveSWF1[i] = I915_READ(SWF1(i));
}
for (i = 0; i < 3; i++)
dev_priv->regfile.saveSWF3[i] = I915_READ(SWF3(i));
} else if (IS_GEN2(dev_priv)) {
} else if (IS_GEN(dev_priv, 2)) {
for (i = 0; i < 7; i++)
dev_priv->regfile.saveSWF1[i] = I915_READ(SWF1(i));
} else if (HAS_GMCH_DISPLAY(dev_priv)) {
......@@ -108,7 +107,7 @@ int i915_restore_state(struct drm_i915_private *dev_priv)
mutex_lock(&dev_priv->drm.struct_mutex);
if (IS_GEN4(dev_priv))
if (IS_GEN(dev_priv, 4))
pci_write_config_word(pdev, GCDGMBUS,
dev_priv->regfile.saveGCDGMBUS);
i915_restore_display(dev_priv);
......@@ -122,14 +121,14 @@ int i915_restore_state(struct drm_i915_private *dev_priv)
I915_WRITE(MI_ARB_STATE, dev_priv->regfile.saveMI_ARB_STATE | 0xffff0000);
/* Scratch space */
if (IS_GEN2(dev_priv) && IS_MOBILE(dev_priv)) {
if (IS_GEN(dev_priv, 2) && IS_MOBILE(dev_priv)) {
for (i = 0; i < 7; i++) {
I915_WRITE(SWF0(i), dev_priv->regfile.saveSWF0[i]);
I915_WRITE(SWF1(i), dev_priv->regfile.saveSWF1[i]);
}
for (i = 0; i < 3; i++)
I915_WRITE(SWF3(i), dev_priv->regfile.saveSWF3[i]);
} else if (IS_GEN2(dev_priv)) {
} else if (IS_GEN(dev_priv, 2)) {
for (i = 0; i < 7; i++)
I915_WRITE(SWF1(i), dev_priv->regfile.saveSWF1[i]);
} else if (HAS_GMCH_DISPLAY(dev_priv)) {
......
......@@ -521,7 +521,9 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
ssize_t ret;
gpu = i915_first_error_state(i915);
if (gpu) {
if (IS_ERR(gpu)) {
ret = PTR_ERR(gpu);
} else if (gpu) {
ret = i915_gpu_state_copy_to_buffer(gpu, buf, off, count);
i915_gpu_state_put(gpu);
} else {
......
......@@ -63,14 +63,6 @@ struct i915_timeline {
* redundant and we can discard it without loss of generality.
*/
struct i915_syncmap *sync;
/**
* Separately to the inter-context seqno map above, we track the last
* barrier (e.g. semaphore wait) to the global engine timelines. Note
* that this tracks global_seqno rather than the context.seqno, and
* so it is subject to the limitations of hw wraparound and that we
* may need to revoke global_seqno (on pre-emption).
*/
u32 global_sync[I915_NUM_ENGINES];
struct list_head link;
const char *name;
......
......@@ -6,7 +6,6 @@
#include <linux/types.h>
#include <linux/tracepoint.h>
#include <drm/drmP.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_ringbuffer.h"
......@@ -585,35 +584,6 @@ TRACE_EVENT(i915_gem_evict_vm,
TP_printk("dev=%d, vm=%p", __entry->dev, __entry->vm)
);
TRACE_EVENT(i915_gem_ring_sync_to,
TP_PROTO(struct i915_request *to, struct i915_request *from),
TP_ARGS(to, from),
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, from_class)
__field(u32, from_instance)
__field(u32, to_class)
__field(u32, to_instance)
__field(u32, seqno)
),
TP_fast_assign(
__entry->dev = from->i915->drm.primary->index;
__entry->from_class = from->engine->uabi_class;
__entry->from_instance = from->engine->instance;
__entry->to_class = to->engine->uabi_class;
__entry->to_instance = to->engine->instance;
__entry->seqno = from->global_seqno;
),
TP_printk("dev=%u, sync-from=%u:%u, sync-to=%u:%u, seqno=%u",
__entry->dev,
__entry->from_class, __entry->from_instance,
__entry->to_class, __entry->to_instance,
__entry->seqno)
);
TRACE_EVENT(i915_request_queue,
TP_PROTO(struct i915_request *rq, u32 flags),
TP_ARGS(rq, flags),
......
......@@ -6,7 +6,6 @@
*/
#include <linux/pci.h>
#include <linux/acpi.h>
#include <drm/drmP.h>
#include "i915_drv.h"
#define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... */
......
......@@ -29,7 +29,6 @@
* See intel_atomic_plane.c for the plane-specific atomic functionality.
*/
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>
......@@ -233,7 +232,7 @@ static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_sta
if (plane_state && plane_state->base.fb &&
plane_state->base.fb->format->is_yuv &&
plane_state->base.fb->format->num_planes > 1) {
if (IS_GEN9(dev_priv) &&
if (IS_GEN(dev_priv, 9) &&
!IS_GEMINILAKE(dev_priv)) {
mode = SKL_PS_SCALER_MODE_NV12;
} else if (icl_is_hdr_plane(to_intel_plane(plane_state->base.plane))) {
......
......@@ -31,7 +31,6 @@
* prepare/check/commit/cleanup steps.
*/
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>
#include "intel_drv.h"
......
......@@ -27,7 +27,6 @@
#include <drm/intel_lpe_audio.h>
#include "intel_drv.h"
#include <drm/drmP.h>
#include <drm/drm_edid.h>
#include "i915_drv.h"
......@@ -758,7 +757,7 @@ static void i915_audio_component_codec_wake_override(struct device *kdev,
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
u32 tmp;
if (!IS_GEN9(dev_priv))
if (!IS_GEN(dev_priv, 9))
return;
i915_audio_component_get_power(kdev);
......
......@@ -26,7 +26,6 @@
*/
#include <drm/drm_dp_helper.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
......@@ -453,7 +452,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, u8 bdb_version)
* Only parse SDVO mappings on gens that could have SDVO. This isn't
* accurate and doesn't have to be, as long as it's not too strict.
*/
if (!IS_GEN(dev_priv, 3, 7)) {
if (!IS_GEN_RANGE(dev_priv, 3, 7)) {
DRM_DEBUG_KMS("Skipping SDVO device mapping\n");
return;
}
......@@ -1386,8 +1385,15 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
info->supports_dp = is_dp;
info->supports_edp = is_edp;
DRM_DEBUG_KMS("Port %c VBT info: DP:%d HDMI:%d DVI:%d EDP:%d CRT:%d\n",
port_name(port), is_dp, is_hdmi, is_dvi, is_edp, is_crt);
if (bdb_version >= 195)
info->supports_typec_usb = child->dp_usb_type_c;
if (bdb_version >= 209)
info->supports_tbt = child->tbt;
DRM_DEBUG_KMS("Port %c VBT info: DP:%d HDMI:%d DVI:%d EDP:%d CRT:%d TCUSB:%d TBT:%d\n",
port_name(port), is_dp, is_hdmi, is_dvi, is_edp, is_crt,
info->supports_typec_usb, info->supports_tbt);
if (is_edp && is_dvi)
DRM_DEBUG_KMS("Internal DP port %c is TMDS compatible\n",
......
......@@ -166,12 +166,6 @@ static void irq_enable(struct intel_engine_cs *engine)
*/
GEM_BUG_ON(!intel_irqs_enabled(engine->i915));
/* Enabling the IRQ may miss the generation of the interrupt, but
* we still need to force the barrier before reading the seqno,
* just in case.
*/
set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
/* Caller disables interrupts */
if (engine->irq_enable) {
spin_lock(&engine->i915->irq_lock);
......@@ -683,16 +677,6 @@ static int intel_breadcrumbs_signaler(void *arg)
}
if (unlikely(do_schedule)) {
/* Before we sleep, check for a missed seqno */
if (current->state & TASK_NORMAL &&
!list_empty(&b->signals) &&
engine->irq_seqno_barrier &&
test_and_clear_bit(ENGINE_IRQ_BREADCRUMB,
&engine->irq_posted)) {
engine->irq_seqno_barrier(engine);
intel_engine_wakeup(engine);
}
sleep:
if (kthread_should_park())
kthread_parkme();
......@@ -859,16 +843,6 @@ void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
else
irq_disable(engine);
/*
* We set the IRQ_BREADCRUMB bit when we enable the irq presuming the
* GPU is active and may have already executed the MI_USER_INTERRUPT
* before the CPU is ready to receive. However, the engine is currently
* idle (we haven't started it yet), there is no possibility for a
* missed interrupt as we enabled the irq and so we can clear the
* immediate wakeup (until a real interrupt arrives for the waiter).
*/
clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
spin_unlock_irqrestore(&b->irq_lock, flags);
}
......
......@@ -2140,7 +2140,7 @@ static int intel_pixel_rate_to_cdclk(struct drm_i915_private *dev_priv,
{
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
return DIV_ROUND_UP(pixel_rate, 2);
else if (IS_GEN9(dev_priv) ||
else if (IS_GEN(dev_priv, 9) ||
IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
return pixel_rate;
else if (IS_CHERRYVIEW(dev_priv))
......@@ -2176,7 +2176,7 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) {
/* Display WA #1145: glk,cnl */
min_cdclk = max(316800, min_cdclk);
} else if (IS_GEN9(dev_priv) || IS_BROADWELL(dev_priv)) {
} else if (IS_GEN(dev_priv, 9) || IS_BROADWELL(dev_priv)) {
/* Display WA #1144: skl,bxt */
min_cdclk = max(432000, min_cdclk);
}
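The per-platform branches in intel_pixel_rate_to_cdclk() above collapse to a pixels-per-cdclk-cycle ratio: 2 on glk/gen10+, 1 on hsw/bdw/gen9, and so on. A standalone sketch with the platform checks folded into that single parameter; the 594000 kHz pixel rate is an example value, not from the patch:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static int pixel_rate_to_cdclk(int pixel_rate_khz, int pixels_per_clock)
{
	return DIV_ROUND_UP(pixel_rate_khz, pixels_per_clock);
}

int main(void)
{
	printf("%d\n", pixel_rate_to_cdclk(594000, 2)); /* 297000: glk/gen10+ */
	printf("%d\n", pixel_rate_to_cdclk(594000, 1)); /* 594000: hsw/bdw/gen9 */
	return 0;
}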
......@@ -2537,7 +2537,7 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
return 2 * max_cdclk_freq;
else if (IS_GEN9(dev_priv) ||
else if (IS_GEN(dev_priv, 9) ||
IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
return max_cdclk_freq;
else if (IS_CHERRYVIEW(dev_priv))
......@@ -2785,9 +2785,9 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
dev_priv->display.get_cdclk = hsw_get_cdclk;
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
dev_priv->display.get_cdclk = vlv_get_cdclk;
else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
dev_priv->display.get_cdclk = fixed_400mhz_get_cdclk;
else if (IS_GEN5(dev_priv))
else if (IS_GEN(dev_priv, 5))
dev_priv->display.get_cdclk = fixed_450mhz_get_cdclk;
else if (IS_GM45(dev_priv))
dev_priv->display.get_cdclk = gm45_get_cdclk;
......
......@@ -74,12 +74,17 @@
#define ILK_CSC_COEFF_1_0 \
((7 << 12) | ILK_CSC_COEFF_FP(CTM_COEFF_1_0, 8))
static bool crtc_state_is_legacy_gamma(struct drm_crtc_state *state)
static bool lut_is_legacy(struct drm_property_blob *lut)
{
return !state->degamma_lut &&
!state->ctm &&
state->gamma_lut &&
drm_color_lut_size(state->gamma_lut) == LEGACY_LUT_LENGTH;
return drm_color_lut_size(lut) == LEGACY_LUT_LENGTH;
}
static bool crtc_state_is_legacy_gamma(struct intel_crtc_state *crtc_state)
{
return !crtc_state->base.degamma_lut &&
!crtc_state->base.ctm &&
crtc_state->base.gamma_lut &&
lut_is_legacy(crtc_state->base.gamma_lut);
}
/*
......@@ -108,10 +113,10 @@ static u64 *ctm_mult_by_limited(u64 *result, const u64 *input)
return result;
}
static void ilk_load_ycbcr_conversion_matrix(struct intel_crtc *intel_crtc)
static void ilk_load_ycbcr_conversion_matrix(struct intel_crtc *crtc)
{
int pipe = intel_crtc->pipe;
struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
int pipe = crtc->pipe;
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
......@@ -132,14 +137,12 @@ static void ilk_load_ycbcr_conversion_matrix(struct intel_crtc *intel_crtc)
I915_WRITE(PIPE_CSC_MODE(pipe), 0);
}
static void ilk_load_csc_matrix(struct drm_crtc_state *crtc_state)
static void ilk_load_csc_matrix(struct intel_crtc_state *crtc_state)
{
struct drm_crtc *crtc = crtc_state->crtc;
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int i, pipe = intel_crtc->pipe;
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
int i, pipe = crtc->pipe;
uint16_t coeffs[9] = { 0, };
struct intel_crtc_state *intel_crtc_state = to_intel_crtc_state(crtc_state);
bool limited_color_range = false;
/*
......@@ -147,14 +150,14 @@ static void ilk_load_csc_matrix(struct drm_crtc_state *crtc_state)
* do the range compression using the gamma LUT instead.
*/
if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv))
limited_color_range = intel_crtc_state->limited_color_range;
limited_color_range = crtc_state->limited_color_range;
if (intel_crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
intel_crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) {
ilk_load_ycbcr_conversion_matrix(intel_crtc);
if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) {
ilk_load_ycbcr_conversion_matrix(crtc);
return;
} else if (crtc_state->ctm) {
struct drm_color_ctm *ctm = crtc_state->ctm->data;
} else if (crtc_state->base.ctm) {
struct drm_color_ctm *ctm = crtc_state->base.ctm->data;
const u64 *input;
u64 temp[9];
......@@ -253,16 +256,15 @@ static void ilk_load_csc_matrix(struct drm_crtc_state *crtc_state)
/*
* Set up the pipe CSC unit on CherryView.
*/
static void cherryview_load_csc_matrix(struct drm_crtc_state *state)
static void cherryview_load_csc_matrix(struct intel_crtc_state *crtc_state)
{
struct drm_crtc *crtc = state->crtc;
struct drm_device *dev = crtc->dev;
struct drm_device *dev = crtc_state->base.crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
int pipe = to_intel_crtc(crtc)->pipe;
int pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
uint32_t mode;
if (state->ctm) {
struct drm_color_ctm *ctm = state->ctm->data;
if (crtc_state->base.ctm) {
struct drm_color_ctm *ctm = crtc_state->base.ctm->data;
uint16_t coeffs[9] = { 0, };
int i;
......@@ -293,17 +295,17 @@ static void cherryview_load_csc_matrix(struct drm_crtc_state *state)
I915_WRITE(CGM_PIPE_CSC_COEFF8(pipe), coeffs[8]);
}
mode = (state->ctm ? CGM_PIPE_MODE_CSC : 0);
if (!crtc_state_is_legacy_gamma(state)) {
mode |= (state->degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) |
(state->gamma_lut ? CGM_PIPE_MODE_GAMMA : 0);
mode = (crtc_state->base.ctm ? CGM_PIPE_MODE_CSC : 0);
if (!crtc_state_is_legacy_gamma(crtc_state)) {
mode |= (crtc_state->base.degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) |
(crtc_state->base.gamma_lut ? CGM_PIPE_MODE_GAMMA : 0);
}
I915_WRITE(CGM_PIPE_MODE(pipe), mode);
}
void intel_color_set_csc(struct drm_crtc_state *crtc_state)
void intel_color_set_csc(struct intel_crtc_state *crtc_state)
{
struct drm_device *dev = crtc_state->crtc->dev;
struct drm_device *dev = crtc_state->base.crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
if (dev_priv->display.load_csc_matrix)
......@@ -311,14 +313,12 @@ void intel_color_set_csc(struct drm_crtc_state *crtc_state)
}
/* Loads the legacy palette/gamma unit for the CRTC. */
static void i9xx_load_luts_internal(struct drm_crtc *crtc,
struct drm_property_blob *blob,
struct intel_crtc_state *crtc_state)
static void i9xx_load_luts_internal(struct intel_crtc_state *crtc_state,
struct drm_property_blob *blob)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
int i;
if (HAS_GMCH_DISPLAY(dev_priv)) {
......@@ -353,53 +353,48 @@ static void i9xx_load_luts_internal(struct drm_crtc *crtc,
}
}
static void i9xx_load_luts(struct drm_crtc_state *crtc_state)
static void i9xx_load_luts(struct intel_crtc_state *crtc_state)
{
i9xx_load_luts_internal(crtc_state->crtc, crtc_state->gamma_lut,
to_intel_crtc_state(crtc_state));
i9xx_load_luts_internal(crtc_state, crtc_state->base.gamma_lut);
}
/* Loads the legacy palette/gamma unit for the CRTC on Haswell. */
static void haswell_load_luts(struct drm_crtc_state *crtc_state)
static void haswell_load_luts(struct intel_crtc_state *crtc_state)
{
struct drm_crtc *crtc = crtc_state->crtc;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_crtc_state *intel_crtc_state =
to_intel_crtc_state(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
bool reenable_ips = false;
/*
* Workaround : Do not read or write the pipe palette/gamma data while
* GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
*/
if (IS_HASWELL(dev_priv) && intel_crtc_state->ips_enabled &&
(intel_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)) {
hsw_disable_ips(intel_crtc_state);
if (IS_HASWELL(dev_priv) && crtc_state->ips_enabled &&
(crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)) {
hsw_disable_ips(crtc_state);
reenable_ips = true;
}
intel_crtc_state->gamma_mode = GAMMA_MODE_MODE_8BIT;
I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
crtc_state->gamma_mode = GAMMA_MODE_MODE_8BIT;
I915_WRITE(GAMMA_MODE(crtc->pipe), GAMMA_MODE_MODE_8BIT);
i9xx_load_luts(crtc_state);
if (reenable_ips)
hsw_enable_ips(intel_crtc_state);
hsw_enable_ips(crtc_state);
}
static void bdw_load_degamma_lut(struct drm_crtc_state *state)
static void bdw_load_degamma_lut(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(state->crtc->dev);
enum pipe pipe = to_intel_crtc(state->crtc)->pipe;
struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
uint32_t i, lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
I915_WRITE(PREC_PAL_INDEX(pipe),
PAL_PREC_SPLIT_MODE | PAL_PREC_AUTO_INCREMENT);
if (state->degamma_lut) {
struct drm_color_lut *lut = state->degamma_lut->data;
if (crtc_state->base.degamma_lut) {
struct drm_color_lut *lut = crtc_state->base.degamma_lut->data;
for (i = 0; i < lut_size; i++) {
uint32_t word =
......@@ -419,10 +414,10 @@ static void bdw_load_degamma_lut(struct drm_crtc_state *state)
}
}
static void bdw_load_gamma_lut(struct drm_crtc_state *state, u32 offset)
static void bdw_load_gamma_lut(struct intel_crtc_state *crtc_state, u32 offset)
{
struct drm_i915_private *dev_priv = to_i915(state->crtc->dev);
enum pipe pipe = to_intel_crtc(state->crtc)->pipe;
struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
uint32_t i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
WARN_ON(offset & ~PAL_PREC_INDEX_VALUE_MASK);
......@@ -432,8 +427,8 @@ static void bdw_load_gamma_lut(struct drm_crtc_state *state, u32 offset)
PAL_PREC_AUTO_INCREMENT |
offset);
if (state->gamma_lut) {
struct drm_color_lut *lut = state->gamma_lut->data;
if (crtc_state->base.gamma_lut) {
struct drm_color_lut *lut = crtc_state->base.gamma_lut->data;
for (i = 0; i < lut_size; i++) {
uint32_t word =
......@@ -467,22 +462,21 @@ static void bdw_load_gamma_lut(struct drm_crtc_state *state, u32 offset)
}
/* Loads the palette/gamma unit for the CRTC on Broadwell+. */
static void broadwell_load_luts(struct drm_crtc_state *state)
static void broadwell_load_luts(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(state->crtc->dev);
struct intel_crtc_state *intel_state = to_intel_crtc_state(state);
enum pipe pipe = to_intel_crtc(state->crtc)->pipe;
struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
if (crtc_state_is_legacy_gamma(state)) {
haswell_load_luts(state);
if (crtc_state_is_legacy_gamma(crtc_state)) {
haswell_load_luts(crtc_state);
return;
}
bdw_load_degamma_lut(state);
bdw_load_gamma_lut(state,
bdw_load_degamma_lut(crtc_state);
bdw_load_gamma_lut(crtc_state,
INTEL_INFO(dev_priv)->color.degamma_lut_size);
intel_state->gamma_mode = GAMMA_MODE_MODE_SPLIT;
crtc_state->gamma_mode = GAMMA_MODE_MODE_SPLIT;
I915_WRITE(GAMMA_MODE(pipe), GAMMA_MODE_MODE_SPLIT);
POSTING_READ(GAMMA_MODE(pipe));
......@@ -493,10 +487,10 @@ static void broadwell_load_luts(struct drm_crtc_state *state)
I915_WRITE(PREC_PAL_INDEX(pipe), 0);
}
static void glk_load_degamma_lut(struct drm_crtc_state *state)
static void glk_load_degamma_lut(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(state->crtc->dev);
enum pipe pipe = to_intel_crtc(state->crtc)->pipe;
struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
const uint32_t lut_size = 33;
uint32_t i;
......@@ -523,49 +517,46 @@ static void glk_load_degamma_lut(struct drm_crtc_state *state)
I915_WRITE(PRE_CSC_GAMC_DATA(pipe), (1 << 16));
}
static void glk_load_luts(struct drm_crtc_state *state)
static void glk_load_luts(struct intel_crtc_state *crtc_state)
{
struct drm_crtc *crtc = state->crtc;
struct drm_device *dev = crtc->dev;
struct drm_device *dev = crtc_state->base.crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc_state *intel_state = to_intel_crtc_state(state);
enum pipe pipe = to_intel_crtc(crtc)->pipe;
enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
glk_load_degamma_lut(state);
glk_load_degamma_lut(crtc_state);
if (crtc_state_is_legacy_gamma(state)) {
haswell_load_luts(state);
if (crtc_state_is_legacy_gamma(crtc_state)) {
haswell_load_luts(crtc_state);
return;
}
bdw_load_gamma_lut(state, 0);
bdw_load_gamma_lut(crtc_state, 0);
intel_state->gamma_mode = GAMMA_MODE_MODE_10BIT;
crtc_state->gamma_mode = GAMMA_MODE_MODE_10BIT;
I915_WRITE(GAMMA_MODE(pipe), GAMMA_MODE_MODE_10BIT);
POSTING_READ(GAMMA_MODE(pipe));
}
/* Loads the palette/gamma unit for the CRTC on CherryView. */
static void cherryview_load_luts(struct drm_crtc_state *state)
static void cherryview_load_luts(struct intel_crtc_state *crtc_state)
{
struct drm_crtc *crtc = state->crtc;
struct drm_crtc *crtc = crtc_state->base.crtc;
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
enum pipe pipe = to_intel_crtc(crtc)->pipe;
struct drm_color_lut *lut;
uint32_t i, lut_size;
uint32_t word0, word1;
if (crtc_state_is_legacy_gamma(state)) {
if (crtc_state_is_legacy_gamma(crtc_state)) {
/* Turn off degamma/gamma on CGM block. */
I915_WRITE(CGM_PIPE_MODE(pipe),
(state->ctm ? CGM_PIPE_MODE_CSC : 0));
i9xx_load_luts_internal(crtc, state->gamma_lut,
to_intel_crtc_state(state));
(crtc_state->base.ctm ? CGM_PIPE_MODE_CSC : 0));
i9xx_load_luts_internal(crtc_state, crtc_state->base.gamma_lut);
return;
}
if (state->degamma_lut) {
lut = state->degamma_lut->data;
if (crtc_state->base.degamma_lut) {
lut = crtc_state->base.degamma_lut->data;
lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
for (i = 0; i < lut_size; i++) {
/* Write LUT in U0.14 format. */
......@@ -579,8 +570,8 @@ static void cherryview_load_luts(struct drm_crtc_state *state)
}
}
if (state->gamma_lut) {
lut = state->gamma_lut->data;
if (crtc_state->base.gamma_lut) {
lut = crtc_state->base.gamma_lut->data;
lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
for (i = 0; i < lut_size; i++) {
/* Write LUT in U0.10 format. */
......@@ -595,29 +586,28 @@ static void cherryview_load_luts(struct drm_crtc_state *state)
}
I915_WRITE(CGM_PIPE_MODE(pipe),
(state->ctm ? CGM_PIPE_MODE_CSC : 0) |
(state->degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) |
(state->gamma_lut ? CGM_PIPE_MODE_GAMMA : 0));
(crtc_state->base.ctm ? CGM_PIPE_MODE_CSC : 0) |
(crtc_state->base.degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) |
(crtc_state->base.gamma_lut ? CGM_PIPE_MODE_GAMMA : 0));
/*
* Also program a linear LUT in the legacy block (behind the
* CGM block).
*/
i9xx_load_luts_internal(crtc, NULL, to_intel_crtc_state(state));
i9xx_load_luts_internal(crtc_state, NULL);
}
void intel_color_load_luts(struct drm_crtc_state *crtc_state)
void intel_color_load_luts(struct intel_crtc_state *crtc_state)
{
struct drm_device *dev = crtc_state->crtc->dev;
struct drm_device *dev = crtc_state->base.crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
dev_priv->display.load_luts(crtc_state);
}
int intel_color_check(struct drm_crtc *crtc,
struct drm_crtc_state *crtc_state)
int intel_color_check(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
size_t gamma_length, degamma_length;
degamma_length = INTEL_INFO(dev_priv)->color.degamma_lut_size;
......@@ -627,10 +617,10 @@ int intel_color_check(struct drm_crtc *crtc,
* We allow both degamma & gamma luts at the right size or
* NULL.
*/
if ((!crtc_state->degamma_lut ||
drm_color_lut_size(crtc_state->degamma_lut) == degamma_length) &&
(!crtc_state->gamma_lut ||
drm_color_lut_size(crtc_state->gamma_lut) == gamma_length))
if ((!crtc_state->base.degamma_lut ||
drm_color_lut_size(crtc_state->base.degamma_lut) == degamma_length) &&
(!crtc_state->base.gamma_lut ||
drm_color_lut_size(crtc_state->base.gamma_lut) == gamma_length))
return 0;
/*
......@@ -643,11 +633,11 @@ int intel_color_check(struct drm_crtc *crtc,
return -EINVAL;
}
void intel_color_init(struct drm_crtc *crtc)
void intel_color_init(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
drm_mode_crtc_set_gamma_size(crtc, 256);
drm_mode_crtc_set_gamma_size(&crtc->base, 256);
if (IS_CHERRYVIEW(dev_priv)) {
dev_priv->display.load_csc_matrix = cherryview_load_csc_matrix;
......@@ -669,7 +659,7 @@ void intel_color_init(struct drm_crtc *crtc)
/* Enable color management support when we have degamma & gamma LUTs. */
if (INTEL_INFO(dev_priv)->color.degamma_lut_size != 0 &&
INTEL_INFO(dev_priv)->color.gamma_lut_size != 0)
drm_crtc_enable_color_mgmt(crtc,
drm_crtc_enable_color_mgmt(&crtc->base,
INTEL_INFO(dev_priv)->color.degamma_lut_size,
true,
INTEL_INFO(dev_priv)->color.gamma_lut_size);
......
......@@ -27,7 +27,6 @@
#include <linux/i2c.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drmP.h>
#include "intel_drv.h"
#include "i915_drv.h"
......
......@@ -27,7 +27,6 @@
#include <linux/dmi.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
......@@ -322,7 +321,7 @@ intel_crt_mode_valid(struct drm_connector *connector,
* DAC limit supposedly 355 MHz.
*/
max_clock = 270000;
else if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv))
else if (IS_GEN_RANGE(dev_priv, 3, 4))
max_clock = 400000;
else
max_clock = 350000;
......@@ -667,7 +666,7 @@ intel_crt_load_detect(struct intel_crt *crt, uint32_t pipe)
/* Set the border color to purple. */
I915_WRITE(bclrpat_reg, 0x500050);
if (!IS_GEN2(dev_priv)) {
if (!IS_GEN(dev_priv, 2)) {
uint32_t pipeconf = I915_READ(pipeconf_reg);
I915_WRITE(pipeconf_reg, pipeconf | PIPECONF_FORCE_BORDER);
POSTING_READ(pipeconf_reg);
......@@ -982,7 +981,7 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
else
crt->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
if (IS_GEN2(dev_priv))
if (IS_GEN(dev_priv, 2))
connector->interlace_allowed = 0;
else
connector->interlace_allowed = 1;
......
......@@ -104,7 +104,7 @@ static void sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p)
drm_printf(p, "has EU power gating: %s\n", yesno(sseu->has_eu_pg));
}
void intel_device_info_dump_runtime(const struct intel_device_info *info,
void intel_device_info_dump_runtime(const struct intel_runtime_info *info,
struct drm_printer *p)
{
sseu_dump(&info->sseu, p);
......@@ -113,21 +113,6 @@ void intel_device_info_dump_runtime(const struct intel_device_info *info,
info->cs_timestamp_frequency_khz);
}
void intel_device_info_dump(const struct intel_device_info *info,
struct drm_printer *p)
{
struct drm_i915_private *dev_priv =
container_of(info, struct drm_i915_private, info);
drm_printf(p, "pciid=0x%04x rev=0x%02x platform=%s gen=%i\n",
INTEL_DEVID(dev_priv),
INTEL_REVID(dev_priv),
intel_platform_name(info->platform),
info->gen);
intel_device_info_dump_flags(info, p);
}
void intel_device_info_dump_topology(const struct sseu_dev_info *sseu,
struct drm_printer *p)
{
......@@ -164,7 +149,7 @@ static u16 compute_eu_total(const struct sseu_dev_info *sseu)
static void gen11_sseu_info_init(struct drm_i915_private *dev_priv)
{
struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
u8 s_en;
u32 ss_en, ss_en_mask;
u8 eu_en;
......@@ -203,7 +188,7 @@ static void gen11_sseu_info_init(struct drm_i915_private *dev_priv)
static void gen10_sseu_info_init(struct drm_i915_private *dev_priv)
{
struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
const u32 fuse2 = I915_READ(GEN8_FUSE2);
int s, ss;
const int eu_mask = 0xff;
......@@ -280,7 +265,7 @@ static void gen10_sseu_info_init(struct drm_i915_private *dev_priv)
static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
{
struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
u32 fuse;
fuse = I915_READ(CHV_FUSE_GT);
......@@ -334,7 +319,7 @@ static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
{
struct intel_device_info *info = mkwrite_device_info(dev_priv);
struct sseu_dev_info *sseu = &info->sseu;
struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
int s, ss;
u32 fuse2, eu_disable, subslice_mask;
const u8 eu_mask = 0xff;
......@@ -437,7 +422,7 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
{
struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
int s, ss;
u32 fuse2, subslice_mask, eu_disable[3]; /* s_max */
......@@ -519,8 +504,7 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
static void haswell_sseu_info_init(struct drm_i915_private *dev_priv)
{
struct intel_device_info *info = mkwrite_device_info(dev_priv);
struct sseu_dev_info *sseu = &info->sseu;
struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
u32 fuse1;
int s, ss;
......@@ -528,9 +512,9 @@ static void haswell_sseu_info_init(struct drm_i915_private *dev_priv)
* There isn't a register to tell us how many slices/subslices. We
* work off the PCI-ids here.
*/
switch (info->gt) {
switch (INTEL_INFO(dev_priv)->gt) {
default:
MISSING_CASE(info->gt);
MISSING_CASE(INTEL_INFO(dev_priv)->gt);
/* fall through */
case 1:
sseu->slice_mask = BIT(0);
......@@ -725,7 +709,7 @@ static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
/**
* intel_device_info_runtime_init - initialize runtime info
* @info: intel device info struct
* @dev_priv: the i915 device
*
* Determine various intel_device_info fields at runtime.
*
......@@ -739,29 +723,29 @@ static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
* - after the PCH has been detected,
* - before the first usage of the fields it can tweak.
*/
void intel_device_info_runtime_init(struct intel_device_info *info)
void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
{
struct drm_i915_private *dev_priv =
container_of(info, struct drm_i915_private, info);
struct intel_device_info *info = mkwrite_device_info(dev_priv);
struct intel_runtime_info *runtime = RUNTIME_INFO(dev_priv);
enum pipe pipe;
if (INTEL_GEN(dev_priv) >= 10) {
for_each_pipe(dev_priv, pipe)
info->num_scalers[pipe] = 2;
} else if (IS_GEN9(dev_priv)) {
info->num_scalers[PIPE_A] = 2;
info->num_scalers[PIPE_B] = 2;
info->num_scalers[PIPE_C] = 1;
runtime->num_scalers[pipe] = 2;
} else if (IS_GEN(dev_priv, 9)) {
runtime->num_scalers[PIPE_A] = 2;
runtime->num_scalers[PIPE_B] = 2;
runtime->num_scalers[PIPE_C] = 1;
}
BUILD_BUG_ON(I915_NUM_ENGINES > BITS_PER_TYPE(intel_ring_mask_t));
if (IS_GEN11(dev_priv))
if (IS_GEN(dev_priv, 11))
for_each_pipe(dev_priv, pipe)
info->num_sprites[pipe] = 6;
else if (IS_GEN10(dev_priv) || IS_GEMINILAKE(dev_priv))
runtime->num_sprites[pipe] = 6;
else if (IS_GEN(dev_priv, 10) || IS_GEMINILAKE(dev_priv))
for_each_pipe(dev_priv, pipe)
info->num_sprites[pipe] = 3;
runtime->num_sprites[pipe] = 3;
else if (IS_BROXTON(dev_priv)) {
/*
* Skylake and Broxton currently don't expose the topmost plane as its
......@@ -772,22 +756,22 @@ void intel_device_info_runtime_init(struct intel_device_info *info)
* down the line.
*/
info->num_sprites[PIPE_A] = 2;
info->num_sprites[PIPE_B] = 2;
info->num_sprites[PIPE_C] = 1;
runtime->num_sprites[PIPE_A] = 2;
runtime->num_sprites[PIPE_B] = 2;
runtime->num_sprites[PIPE_C] = 1;
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
for_each_pipe(dev_priv, pipe)
info->num_sprites[pipe] = 2;
runtime->num_sprites[pipe] = 2;
} else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) {
for_each_pipe(dev_priv, pipe)
info->num_sprites[pipe] = 1;
runtime->num_sprites[pipe] = 1;
}
if (i915_modparams.disable_display) {
DRM_INFO("Display disabled (module parameter)\n");
info->num_pipes = 0;
} else if (HAS_DISPLAY(dev_priv) &&
(IS_GEN7(dev_priv) || IS_GEN8(dev_priv)) &&
(IS_GEN_RANGE(dev_priv, 7, 8)) &&
HAS_PCH_SPLIT(dev_priv)) {
u32 fuse_strap = I915_READ(FUSE_STRAP);
u32 sfuse_strap = I915_READ(SFUSE_STRAP);
......@@ -811,7 +795,7 @@ void intel_device_info_runtime_init(struct intel_device_info *info)
DRM_INFO("PipeC fused off\n");
info->num_pipes -= 1;
}
} else if (HAS_DISPLAY(dev_priv) && IS_GEN9(dev_priv)) {
} else if (HAS_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 9) {
u32 dfsm = I915_READ(SKL_DFSM);
u8 disabled_mask = 0;
bool invalid;
......@@ -851,20 +835,20 @@ void intel_device_info_runtime_init(struct intel_device_info *info)
cherryview_sseu_info_init(dev_priv);
else if (IS_BROADWELL(dev_priv))
broadwell_sseu_info_init(dev_priv);
else if (IS_GEN9(dev_priv))
else if (IS_GEN(dev_priv, 9))
gen9_sseu_info_init(dev_priv);
else if (IS_GEN10(dev_priv))
else if (IS_GEN(dev_priv, 10))
gen10_sseu_info_init(dev_priv);
else if (INTEL_GEN(dev_priv) >= 11)
gen11_sseu_info_init(dev_priv);
if (IS_GEN6(dev_priv) && intel_vtd_active()) {
if (IS_GEN(dev_priv, 6) && intel_vtd_active()) {
DRM_INFO("Disabling ppGTT for VT-d support\n");
info->ppgtt = INTEL_PPGTT_NONE;
}
/* Initialize command stream timestamp frequency */
info->cs_timestamp_frequency_khz = read_timestamp_frequency(dev_priv);
runtime->cs_timestamp_frequency_khz = read_timestamp_frequency(dev_priv);
}
void intel_driver_caps_print(const struct intel_driver_caps *caps,
......@@ -884,35 +868,44 @@ void intel_driver_caps_print(const struct intel_driver_caps *caps,
void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
{
struct intel_device_info *info = mkwrite_device_info(dev_priv);
u32 media_fuse;
unsigned int logical_vdbox = 0;
unsigned int i;
u32 media_fuse;
if (INTEL_GEN(dev_priv) < 11)
return;
media_fuse = ~I915_READ(GEN11_GT_VEBOX_VDBOX_DISABLE);
info->vdbox_enable = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
info->vebox_enable = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
GEN11_GT_VEBOX_DISABLE_SHIFT;
RUNTIME_INFO(dev_priv)->vdbox_enable = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
RUNTIME_INFO(dev_priv)->vebox_enable = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
GEN11_GT_VEBOX_DISABLE_SHIFT;
DRM_DEBUG_DRIVER("vdbox enable: %04x\n", info->vdbox_enable);
DRM_DEBUG_DRIVER("vdbox enable: %04x\n", RUNTIME_INFO(dev_priv)->vdbox_enable);
for (i = 0; i < I915_MAX_VCS; i++) {
if (!HAS_ENGINE(dev_priv, _VCS(i)))
continue;
if (!(BIT(i) & info->vdbox_enable)) {
if (!(BIT(i) & RUNTIME_INFO(dev_priv)->vdbox_enable)) {
info->ring_mask &= ~ENGINE_MASK(_VCS(i));
DRM_DEBUG_DRIVER("vcs%u fused off\n", i);
continue;
}
/*
* In Gen11, only even numbered logical VDBOXes are
* hooked up to an SFC (Scaler & Format Converter) unit.
*/
if (logical_vdbox++ % 2 == 0)
RUNTIME_INFO(dev_priv)->vdbox_sfc_access |= BIT(i);
}
DRM_DEBUG_DRIVER("vebox enable: %04x\n", info->vebox_enable);
DRM_DEBUG_DRIVER("vebox enable: %04x\n", RUNTIME_INFO(dev_priv)->vebox_enable);
for (i = 0; i < I915_MAX_VECS; i++) {
if (!HAS_ENGINE(dev_priv, _VECS(i)))
continue;
if (!(BIT(i) & info->vebox_enable)) {
if (!(BIT(i) & RUNTIME_INFO(dev_priv)->vebox_enable)) {
info->ring_mask &= ~ENGINE_MASK(_VECS(i));
DRM_DEBUG_DRIVER("vecs%u fused off\n", i);
}
......
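The Gen11 loop above derives vdbox_sfc_access by walking the fuse-derived enable mask and tagging every even logical (not physical) instance as having SFC access. A standalone walk-through with a hypothetical fuse value where physical vcs0 and vcs2 are enabled:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t vdbox_enable = 0x5;	/* hypothetical: vcs0 and vcs2 fused in */
	uint8_t vdbox_sfc_access = 0;
	unsigned int i, logical_vdbox = 0;

	for (i = 0; i < 4; i++) {
		if (!(vdbox_enable & (1u << i)))
			continue;	/* fused off */
		/* Even logical instances (0, 2, ...) get an SFC. */
		if (logical_vdbox++ % 2 == 0)
			vdbox_sfc_access |= 1u << i;
	}
	printf("sfc access mask = 0x%x\n", vdbox_sfc_access);	/* 0x1 */
	return 0;
}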
......@@ -89,6 +89,7 @@ enum intel_ppgtt {
func(is_alpha_support); \
/* Keep has_* in alphabetical order */ \
func(has_64bit_reloc); \
func(gpu_reset_clobbers_display); \
func(has_reset_engine); \
func(has_fpga_dbg); \
func(has_guc); \
......@@ -152,12 +153,10 @@ struct sseu_dev_info {
typedef u8 intel_ring_mask_t;
struct intel_device_info {
u16 device_id;
u16 gen_mask;
u8 gen;
u8 gt; /* GT number, 0 if undefined */
u8 num_rings;
intel_ring_mask_t ring_mask; /* Rings supported by the HW */
enum intel_platform platform;
......@@ -169,8 +168,6 @@ struct intel_device_info {
u32 display_mmio_offset;
u8 num_pipes;
u8 num_sprites[I915_MAX_PIPES];
u8 num_scalers[I915_MAX_PIPES];
#define DEFINE_FLAG(name) u8 name:1
DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG);
......@@ -189,6 +186,20 @@ struct intel_device_info {
int trans_offsets[I915_MAX_TRANSCODERS];
int cursor_offsets[I915_MAX_PIPES];
struct color_luts {
u16 degamma_lut_size;
u16 gamma_lut_size;
} color;
};
struct intel_runtime_info {
u16 device_id;
u8 num_sprites[I915_MAX_PIPES];
u8 num_scalers[I915_MAX_PIPES];
u8 num_rings;
/* Slice/subslice/EU info */
struct sseu_dev_info sseu;
......@@ -198,10 +209,8 @@ struct intel_device_info {
u8 vdbox_enable;
u8 vebox_enable;
struct color_luts {
u16 degamma_lut_size;
u16 gamma_lut_size;
} color;
/* Media engine access to SFC per instance */
u8 vdbox_sfc_access;
};
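The new intel_runtime_info struct carries the split driven through this series: invariant per-PCI-id data stays in const intel_device_info, while probed values move to a mutable runtime struct behind RUNTIME_INFO(). A toy model of that layout, with all names illustrative:

#include <stdio.h>

struct device_info {		/* fixed per PCI id, can live in rodata */
	int gen;
};

struct runtime_info {		/* filled in by *_runtime_init() at probe */
	int num_rings;
};

struct device {
	const struct device_info *info;
	struct runtime_info runtime;
};

#define INTEL_INFO(d)	((d)->info)
#define RUNTIME_INFO(d)	(&(d)->runtime)

int main(void)
{
	static const struct device_info skl = { .gen = 9 };
	struct device dev = { .info = &skl };

	RUNTIME_INFO(&dev)->num_rings = 3;	/* probe result */
	printf("gen%d, %d rings\n",
	       INTEL_INFO(&dev)->gen, RUNTIME_INFO(&dev)->num_rings);
	return 0;
}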
struct intel_driver_caps {
......@@ -258,12 +267,10 @@ static inline void sseu_set_eus(struct sseu_dev_info *sseu,
const char *intel_platform_name(enum intel_platform platform);
void intel_device_info_runtime_init(struct intel_device_info *info);
void intel_device_info_dump(const struct intel_device_info *info,
struct drm_printer *p);
void intel_device_info_runtime_init(struct drm_i915_private *dev_priv);
void intel_device_info_dump_flags(const struct intel_device_info *info,
struct drm_printer *p);
void intel_device_info_dump_runtime(const struct intel_device_info *info,
void intel_device_info_dump_runtime(const struct intel_runtime_info *info,
struct drm_printer *p);
void intel_device_info_dump_topology(const struct sseu_dev_info *sseu,
struct drm_printer *p);
......
......@@ -23,7 +23,6 @@
*
*/
#include <drm/drmP.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include <drm/drm_atomic_helper.h>
......