Commit 66fd7a66 authored by: Dave Airlie

Merge branch 'drm-intel-next' of git://anongit.freedesktop.org/drm-intel into drm-next

drm-intel-next-2016-05-22:
- cmd-parser support for direct reg->reg loads (Ken Graunke)
- better handle DP++ smart dongles (Ville)
- bxt guc fw loading support (Nick Hoathe)
- remove a bunch of struct typedefs from dpll code (Ander)
- tons of small work all over to avoid casting between drm_device and the i915
  dev struct (Tvrtko&Chris)
- untangle request retiring from other operations, also fixes reset stat corner
  cases (Chris)
- skl atomic watermark support from Matt Roper, yay!
- various wm handling bugfixes from Ville
- big pile of cdclk rework for bxt/skl (Ville)
- CABC (Content Adaptive Brightness Control) for dsi panels (Jani&Deepak M)
- nonblocking atomic commits for plane-only updates (Maarten Lankhorst)
- bunch of PSR fixes&improvements
- untangle our map/pin/sg_iter code a bit (Dave Gordon)
drm-intel-next-2016-05-08:
- refactor stolen quirks to share code between early quirks and i915 (Joonas)
- refactor gem BO/vma functions (Tvrtko&Dave)
- backlight over DPCD support (Yetunde Abedisi)
- more dsi panel sequence support (Jani)
- lots of refactoring around handling iomaps, vma, ring access and related
  topics, culminating in removing the duplicated request tracking in the execlist
  code (Chris & Tvrtko); includes a small patch for core iomapping code
- hw state readout for bxt dsi (Ramalingam C)
- cdclk cleanups (Ville)
- dedupe chv pll code a bit (Ander)
- enable semaphores on gen8+ for legacy submission, to be able to have a direct
  comparison against execlist on the same platform (Chris) Not meant to be used
  for anything else but performance tuning
- lvds border bit hw state checker fix (Jani)
- rpm vs. shrinker/oom-notifier fixes (Praveen Paneri)
- l3 tuning (Imre)
- revert mst dp audio, it's totally non-functional and crash-y (Lyude)
- first official dmc for kbl (Rodrigo)
- and tons of small things all over as usual

* 'drm-intel-next' of git://anongit.freedesktop.org/drm-intel: (194 commits)
  drm/i915: Revert async unpin and nonblocking atomic commit
  drm/i915: Update DRIVER_DATE to 20160522
  drm/i915: Inline sg_next() for the optimised SGL iterator
  drm/i915: Introduce & use new lightweight SGL iterators
  drm/i915: optimise i915_gem_object_map() for small objects
  drm/i915: refactor i915_gem_object_pin_map()
  drm/i915/psr: Implement PSR2 w/a for gen9
  drm/i915/psr: Use ->get_aux_send_ctl functions
  drm/i915/psr: Order DP aux transactions correctly
  drm/i915/psr: Make idle_frames sensible again
  drm/i915/psr: Try to program link training times correctly
  drm/i915/userptr: Convert to drm_i915_private
  drm/i915: Allow nonblocking update of pageflips.
  drm/i915: Check for unpin correctness.
  Reapply "drm/i915: Avoid stalling on pending flips for legacy cursor updates"
  drm/i915: Make unpin async.
  drm/i915: Prepare connectors for nonblocking checks.
  drm/i915: Pass atomic states to fbc update functions.
  drm/i915: Remove reset_counter from intel_crtc.
  drm/i915: Remove queue_flip pointer.
  ...
@@ -223,36 +223,19 @@ static void __init intel_remapping_check(int num, int slot, int func)
 * despite the efforts of the "RAM buffer" approach, which simply rounds
 * memory boundaries up to 64M to try to catch space that may decode
 * as RAM and so is not suitable for MMIO.
- *
- * And yes, so far on current devices the base addr is always under 4G.
 */
-static u32 __init intel_stolen_base(int num, int slot, int func, size_t stolen_size)
-{
-    u32 base;
-
-    /*
-     * For the PCI IDs in this quirk, the stolen base is always
-     * in 0x5c, aka the BDSM register (yes that's really what
-     * it's called).
-     */
-    base = read_pci_config(num, slot, func, 0x5c);
-    base &= ~((1<<20) - 1);
-
-    return base;
-}
-
#define KB(x) ((x) * 1024UL)
#define MB(x) (KB (KB (x)))
-#define GB(x) (MB (KB (x)))

static size_t __init i830_tseg_size(void)
{
-    u8 tmp = read_pci_config_byte(0, 0, 0, I830_ESMRAMC);
+    u8 esmramc = read_pci_config_byte(0, 0, 0, I830_ESMRAMC);

-    if (!(tmp & TSEG_ENABLE))
+    if (!(esmramc & TSEG_ENABLE))
        return 0;

-    if (tmp & I830_TSEG_SIZE_1M)
+    if (esmramc & I830_TSEG_SIZE_1M)
        return MB(1);
    else
        return KB(512);
@@ -260,27 +243,26 @@ static size_t __init i830_tseg_size(void)
static size_t __init i845_tseg_size(void)
{
-    u8 tmp = read_pci_config_byte(0, 0, 0, I845_ESMRAMC);
+    u8 esmramc = read_pci_config_byte(0, 0, 0, I845_ESMRAMC);
+    u8 tseg_size = esmramc & I845_TSEG_SIZE_MASK;

-    if (!(tmp & TSEG_ENABLE))
+    if (!(esmramc & TSEG_ENABLE))
        return 0;

-    switch (tmp & I845_TSEG_SIZE_MASK) {
-    case I845_TSEG_SIZE_512K:
-        return KB(512);
-    case I845_TSEG_SIZE_1M:
-        return MB(1);
+    switch (tseg_size) {
+    case I845_TSEG_SIZE_512K: return KB(512);
+    case I845_TSEG_SIZE_1M:   return MB(1);
    default:
-        WARN_ON(1);
-        return 0;
+        WARN(1, "Unknown ESMRAMC value: %x!\n", esmramc);
    }
+
+    return 0;
}

static size_t __init i85x_tseg_size(void)
{
-    u8 tmp = read_pci_config_byte(0, 0, 0, I85X_ESMRAMC);
+    u8 esmramc = read_pci_config_byte(0, 0, 0, I85X_ESMRAMC);

-    if (!(tmp & TSEG_ENABLE))
+    if (!(esmramc & TSEG_ENABLE))
        return 0;

    return MB(1);
@@ -300,285 +282,287 @@ static size_t __init i85x_mem_size(void)
 * On 830/845/85x the stolen memory base isn't available in any
 * register. We need to calculate it as TOM-TSEG_SIZE-stolen_size.
 */
-static u32 __init i830_stolen_base(int num, int slot, int func, size_t stolen_size)
+static phys_addr_t __init i830_stolen_base(int num, int slot, int func,
+                                           size_t stolen_size)
{
-    return i830_mem_size() - i830_tseg_size() - stolen_size;
+    return (phys_addr_t)i830_mem_size() - i830_tseg_size() - stolen_size;
}

-static u32 __init i845_stolen_base(int num, int slot, int func, size_t stolen_size)
+static phys_addr_t __init i845_stolen_base(int num, int slot, int func,
+                                           size_t stolen_size)
{
-    return i830_mem_size() - i845_tseg_size() - stolen_size;
+    return (phys_addr_t)i830_mem_size() - i845_tseg_size() - stolen_size;
}

-static u32 __init i85x_stolen_base(int num, int slot, int func, size_t stolen_size)
+static phys_addr_t __init i85x_stolen_base(int num, int slot, int func,
+                                           size_t stolen_size)
{
-    return i85x_mem_size() - i85x_tseg_size() - stolen_size;
+    return (phys_addr_t)i85x_mem_size() - i85x_tseg_size() - stolen_size;
}
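On these early chipsets the stolen base is pure arithmetic on three sizes, as the functions above show. A standalone sketch of the same TOM - TSEG - stolen calculation, using made-up example values rather than anything read from hardware:

/*
 * Standalone illustration (not kernel code) of the TOM-TSEG-stolen
 * arithmetic used by i830/i845/i85x_stolen_base() above. The numbers
 * below are hypothetical example values, not values read from hardware.
 */
#include <stdio.h>

int main(void)
{
        unsigned long long tom         = 512ULL << 20; /* top of memory: 512 MB */
        unsigned long long tseg_size   = 1ULL   << 20; /* TSEG carved out: 1 MB */
        unsigned long long stolen_size = 8ULL   << 20; /* stolen graphics memory: 8 MB */

        /* stolen memory sits just below TSEG, which sits just below TOM */
        unsigned long long stolen_base = tom - tseg_size - stolen_size;

        printf("stolen base = 0x%llx\n", stolen_base); /* prints 0x1f700000 */
        return 0;
}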
-static u32 __init i865_stolen_base(int num, int slot, int func, size_t stolen_size)
+static phys_addr_t __init i865_stolen_base(int num, int slot, int func,
+                                           size_t stolen_size)
{
+    u16 toud;
+
    /*
     * FIXME is the graphics stolen memory region
     * always at TOUD? Ie. is it always the last
     * one to be allocated by the BIOS?
     */
-    return read_pci_config_16(0, 0, 0, I865_TOUD) << 16;
+    toud = read_pci_config_16(0, 0, 0, I865_TOUD);
+
+    return (phys_addr_t)toud << 16;
}

+static phys_addr_t __init gen3_stolen_base(int num, int slot, int func,
+                                           size_t stolen_size)
+{
+    u32 bsm;
+
+    /* Almost universally we can find the Graphics Base of Stolen Memory
+     * at register BSM (0x5c) in the igfx configuration space. On a few
+     * (desktop) machines this is also mirrored in the bridge device at
+     * different locations, or in the MCHBAR.
+     */
+    bsm = read_pci_config(num, slot, func, INTEL_BSM);
+
+    return (phys_addr_t)bsm & INTEL_BSM_MASK;
+}
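gen3_stolen_base() reads the base straight out of the BSM dword at offset 0x5c and masks off the low bits, since the stolen region is 1 MiB aligned. A small standalone illustration of that masking, with an invented raw register value (it mirrors the ~((1<<20) - 1) mask the removed quirk code used):

/*
 * Illustrative decode of a BSM-style register value (not kernel code).
 * The raw dword below is a made-up example; only the masking matters.
 */
#include <stdio.h>

int main(void)
{
        unsigned int bsm  = 0x7b000004;                /* hypothetical dword read at 0x5c */
        unsigned int base = bsm & ~((1u << 20) - 1);   /* keep bits 31:20, drop the rest */

        printf("stolen base = 0x%08x\n", base);        /* prints 0x7b000000 */
        return 0;
}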
static size_t __init i830_stolen_size(int num, int slot, int func)
{
-    size_t stolen_size;
    u16 gmch_ctrl;
+    u16 gms;

    gmch_ctrl = read_pci_config_16(0, 0, 0, I830_GMCH_CTRL);
+    gms = gmch_ctrl & I830_GMCH_GMS_MASK;

-    switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
-    case I830_GMCH_GMS_STOLEN_512:
-        stolen_size = KB(512);
-        break;
-    case I830_GMCH_GMS_STOLEN_1024:
-        stolen_size = MB(1);
-        break;
-    case I830_GMCH_GMS_STOLEN_8192:
-        stolen_size = MB(8);
-        break;
-    case I830_GMCH_GMS_LOCAL:
-        /* local memory isn't part of the normal address space */
-        stolen_size = 0;
-        break;
+    switch (gms) {
+    case I830_GMCH_GMS_STOLEN_512:  return KB(512);
+    case I830_GMCH_GMS_STOLEN_1024: return MB(1);
+    case I830_GMCH_GMS_STOLEN_8192: return MB(8);
+    /* local memory isn't part of the normal address space */
+    case I830_GMCH_GMS_LOCAL:       return 0;
    default:
-        return 0;
+        WARN(1, "Unknown GMCH_CTRL value: %x!\n", gmch_ctrl);
    }

-    return stolen_size;
+    return 0;
}
static size_t __init gen3_stolen_size(int num, int slot, int func)
{
-    size_t stolen_size;
    u16 gmch_ctrl;
+    u16 gms;

    gmch_ctrl = read_pci_config_16(0, 0, 0, I830_GMCH_CTRL);
+    gms = gmch_ctrl & I855_GMCH_GMS_MASK;

-    switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
-    case I855_GMCH_GMS_STOLEN_1M:
-        stolen_size = MB(1);
-        break;
-    case I855_GMCH_GMS_STOLEN_4M:
-        stolen_size = MB(4);
-        break;
-    case I855_GMCH_GMS_STOLEN_8M:
-        stolen_size = MB(8);
-        break;
-    case I855_GMCH_GMS_STOLEN_16M:
-        stolen_size = MB(16);
-        break;
-    case I855_GMCH_GMS_STOLEN_32M:
-        stolen_size = MB(32);
-        break;
-    case I915_GMCH_GMS_STOLEN_48M:
-        stolen_size = MB(48);
-        break;
-    case I915_GMCH_GMS_STOLEN_64M:
-        stolen_size = MB(64);
-        break;
-    case G33_GMCH_GMS_STOLEN_128M:
-        stolen_size = MB(128);
-        break;
-    case G33_GMCH_GMS_STOLEN_256M:
-        stolen_size = MB(256);
-        break;
-    case INTEL_GMCH_GMS_STOLEN_96M:
-        stolen_size = MB(96);
-        break;
-    case INTEL_GMCH_GMS_STOLEN_160M:
-        stolen_size = MB(160);
-        break;
-    case INTEL_GMCH_GMS_STOLEN_224M:
-        stolen_size = MB(224);
-        break;
-    case INTEL_GMCH_GMS_STOLEN_352M:
-        stolen_size = MB(352);
-        break;
+    switch (gms) {
+    case I855_GMCH_GMS_STOLEN_1M:    return MB(1);
+    case I855_GMCH_GMS_STOLEN_4M:    return MB(4);
+    case I855_GMCH_GMS_STOLEN_8M:    return MB(8);
+    case I855_GMCH_GMS_STOLEN_16M:   return MB(16);
+    case I855_GMCH_GMS_STOLEN_32M:   return MB(32);
+    case I915_GMCH_GMS_STOLEN_48M:   return MB(48);
+    case I915_GMCH_GMS_STOLEN_64M:   return MB(64);
+    case G33_GMCH_GMS_STOLEN_128M:   return MB(128);
+    case G33_GMCH_GMS_STOLEN_256M:   return MB(256);
+    case INTEL_GMCH_GMS_STOLEN_96M:  return MB(96);
+    case INTEL_GMCH_GMS_STOLEN_160M: return MB(160);
+    case INTEL_GMCH_GMS_STOLEN_224M: return MB(224);
+    case INTEL_GMCH_GMS_STOLEN_352M: return MB(352);
    default:
-        stolen_size = 0;
-        break;
+        WARN(1, "Unknown GMCH_CTRL value: %x!\n", gmch_ctrl);
    }

-    return stolen_size;
+    return 0;
}
static size_t __init gen6_stolen_size(int num, int slot, int func)
{
    u16 gmch_ctrl;
+    u16 gms;

    gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
-    gmch_ctrl >>= SNB_GMCH_GMS_SHIFT;
-    gmch_ctrl &= SNB_GMCH_GMS_MASK;
+    gms = (gmch_ctrl >> SNB_GMCH_GMS_SHIFT) & SNB_GMCH_GMS_MASK;

-    return gmch_ctrl << 25; /* 32 MB units */
+    return (size_t)gms * MB(32);
}

static size_t __init gen8_stolen_size(int num, int slot, int func)
{
    u16 gmch_ctrl;
+    u16 gms;

    gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
-    gmch_ctrl >>= BDW_GMCH_GMS_SHIFT;
-    gmch_ctrl &= BDW_GMCH_GMS_MASK;
+    gms = (gmch_ctrl >> BDW_GMCH_GMS_SHIFT) & BDW_GMCH_GMS_MASK;

-    return gmch_ctrl << 25; /* 32 MB units */
+    return (size_t)gms * MB(32);
}

static size_t __init chv_stolen_size(int num, int slot, int func)
{
    u16 gmch_ctrl;
+    u16 gms;

    gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
-    gmch_ctrl >>= SNB_GMCH_GMS_SHIFT;
-    gmch_ctrl &= SNB_GMCH_GMS_MASK;
+    gms = (gmch_ctrl >> SNB_GMCH_GMS_SHIFT) & SNB_GMCH_GMS_MASK;

    /*
     * 0x0  to 0x10: 32MB increments starting at 0MB
     * 0x11 to 0x16: 4MB increments starting at 8MB
     * 0x17 to 0x1d: 4MB increments start at 36MB
     */
-    if (gmch_ctrl < 0x11)
-        return gmch_ctrl << 25;
-    else if (gmch_ctrl < 0x17)
-        return (gmch_ctrl - 0x11 + 2) << 22;
+    if (gms < 0x11)
+        return (size_t)gms * MB(32);
+    else if (gms < 0x17)
+        return (size_t)(gms - 0x11 + 2) * MB(4);
    else
-        return (gmch_ctrl - 0x17 + 9) << 22;
+        return (size_t)(gms - 0x17 + 9) * MB(4);
}
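The decode in chv_stolen_size() maps the three GMS ranges in the comment onto 32 MB and 4 MB step sizes. A quick standalone check of that mapping (the example GMS values are arbitrary):

/*
 * Standalone check of the CHV GMS decode shown above (not kernel code).
 */
#include <stdio.h>
#include <stddef.h>

#define MB(x) ((size_t)(x) << 20)

static size_t chv_decode_gms(unsigned int gms)
{
        if (gms < 0x11)                          /* 32 MB steps from 0 MB */
                return (size_t)gms * MB(32);
        else if (gms < 0x17)                     /* 4 MB steps from 8 MB */
                return (size_t)(gms - 0x11 + 2) * MB(4);
        else                                     /* 4 MB steps from 36 MB */
                return (size_t)(gms - 0x17 + 9) * MB(4);
}

int main(void)
{
        printf("%zu MB\n", chv_decode_gms(0x01) >> 20); /* 32 MB */
        printf("%zu MB\n", chv_decode_gms(0x11) >> 20); /* 8 MB  */
        printf("%zu MB\n", chv_decode_gms(0x17) >> 20); /* 36 MB */
        return 0;
}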
-struct intel_stolen_funcs {
-    size_t (*size)(int num, int slot, int func);
-    u32 (*base)(int num, int slot, int func, size_t size);
-};
-
static size_t __init gen9_stolen_size(int num, int slot, int func)
{
    u16 gmch_ctrl;
+    u16 gms;

    gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
-    gmch_ctrl >>= BDW_GMCH_GMS_SHIFT;
-    gmch_ctrl &= BDW_GMCH_GMS_MASK;
+    gms = (gmch_ctrl >> BDW_GMCH_GMS_SHIFT) & BDW_GMCH_GMS_MASK;

-    if (gmch_ctrl < 0xf0)
-        return gmch_ctrl << 25; /* 32 MB units */
+    /* 0x0  to 0xef: 32MB increments starting at 0MB */
+    /* 0xf0 to 0xfe: 4MB increments starting at 4MB */
+    if (gms < 0xf0)
+        return (size_t)gms * MB(32);
    else
-        /* 4MB increments starting at 0xf0 for 4MB */
-        return (gmch_ctrl - 0xf0 + 1) << 22;
+        return (size_t)(gms - 0xf0 + 1) * MB(4);
}

-typedef size_t (*stolen_size_fn)(int num, int slot, int func);
+struct intel_early_ops {
+    size_t (*stolen_size)(int num, int slot, int func);
+    phys_addr_t (*stolen_base)(int num, int slot, int func, size_t size);
+};
-static const struct intel_stolen_funcs i830_stolen_funcs __initconst = {
-    .base = i830_stolen_base,
-    .size = i830_stolen_size,
+static const struct intel_early_ops i830_early_ops __initconst = {
+    .stolen_base = i830_stolen_base,
+    .stolen_size = i830_stolen_size,
};

-static const struct intel_stolen_funcs i845_stolen_funcs __initconst = {
-    .base = i845_stolen_base,
-    .size = i830_stolen_size,
+static const struct intel_early_ops i845_early_ops __initconst = {
+    .stolen_base = i845_stolen_base,
+    .stolen_size = i830_stolen_size,
};

-static const struct intel_stolen_funcs i85x_stolen_funcs __initconst = {
-    .base = i85x_stolen_base,
-    .size = gen3_stolen_size,
+static const struct intel_early_ops i85x_early_ops __initconst = {
+    .stolen_base = i85x_stolen_base,
+    .stolen_size = gen3_stolen_size,
};

-static const struct intel_stolen_funcs i865_stolen_funcs __initconst = {
-    .base = i865_stolen_base,
-    .size = gen3_stolen_size,
+static const struct intel_early_ops i865_early_ops __initconst = {
+    .stolen_base = i865_stolen_base,
+    .stolen_size = gen3_stolen_size,
};

-static const struct intel_stolen_funcs gen3_stolen_funcs __initconst = {
-    .base = intel_stolen_base,
-    .size = gen3_stolen_size,
+static const struct intel_early_ops gen3_early_ops __initconst = {
+    .stolen_base = gen3_stolen_base,
+    .stolen_size = gen3_stolen_size,
};

-static const struct intel_stolen_funcs gen6_stolen_funcs __initconst = {
-    .base = intel_stolen_base,
-    .size = gen6_stolen_size,
+static const struct intel_early_ops gen6_early_ops __initconst = {
+    .stolen_base = gen3_stolen_base,
+    .stolen_size = gen6_stolen_size,
};

-static const struct intel_stolen_funcs gen8_stolen_funcs __initconst = {
-    .base = intel_stolen_base,
-    .size = gen8_stolen_size,
+static const struct intel_early_ops gen8_early_ops __initconst = {
+    .stolen_base = gen3_stolen_base,
+    .stolen_size = gen8_stolen_size,
};

-static const struct intel_stolen_funcs gen9_stolen_funcs __initconst = {
-    .base = intel_stolen_base,
-    .size = gen9_stolen_size,
+static const struct intel_early_ops gen9_early_ops __initconst = {
+    .stolen_base = gen3_stolen_base,
+    .stolen_size = gen9_stolen_size,
};

-static const struct intel_stolen_funcs chv_stolen_funcs __initconst = {
-    .base = intel_stolen_base,
-    .size = chv_stolen_size,
+static const struct intel_early_ops chv_early_ops __initconst = {
+    .stolen_base = gen3_stolen_base,
+    .stolen_size = chv_stolen_size,
};
-static const struct pci_device_id intel_stolen_ids[] __initconst = {
-    INTEL_I830_IDS(&i830_stolen_funcs),
-    INTEL_I845G_IDS(&i845_stolen_funcs),
-    INTEL_I85X_IDS(&i85x_stolen_funcs),
-    INTEL_I865G_IDS(&i865_stolen_funcs),
-    INTEL_I915G_IDS(&gen3_stolen_funcs),
-    INTEL_I915GM_IDS(&gen3_stolen_funcs),
-    INTEL_I945G_IDS(&gen3_stolen_funcs),
-    INTEL_I945GM_IDS(&gen3_stolen_funcs),
-    INTEL_VLV_M_IDS(&gen6_stolen_funcs),
-    INTEL_VLV_D_IDS(&gen6_stolen_funcs),
-    INTEL_PINEVIEW_IDS(&gen3_stolen_funcs),
-    INTEL_I965G_IDS(&gen3_stolen_funcs),
-    INTEL_G33_IDS(&gen3_stolen_funcs),
-    INTEL_I965GM_IDS(&gen3_stolen_funcs),
-    INTEL_GM45_IDS(&gen3_stolen_funcs),
-    INTEL_G45_IDS(&gen3_stolen_funcs),
-    INTEL_IRONLAKE_D_IDS(&gen3_stolen_funcs),
-    INTEL_IRONLAKE_M_IDS(&gen3_stolen_funcs),
-    INTEL_SNB_D_IDS(&gen6_stolen_funcs),
-    INTEL_SNB_M_IDS(&gen6_stolen_funcs),
-    INTEL_IVB_M_IDS(&gen6_stolen_funcs),
-    INTEL_IVB_D_IDS(&gen6_stolen_funcs),
-    INTEL_HSW_D_IDS(&gen6_stolen_funcs),
-    INTEL_HSW_M_IDS(&gen6_stolen_funcs),
-    INTEL_BDW_M_IDS(&gen8_stolen_funcs),
-    INTEL_BDW_D_IDS(&gen8_stolen_funcs),
-    INTEL_CHV_IDS(&chv_stolen_funcs),
-    INTEL_SKL_IDS(&gen9_stolen_funcs),
-    INTEL_BXT_IDS(&gen9_stolen_funcs),
-    INTEL_KBL_IDS(&gen9_stolen_funcs),
+static const struct pci_device_id intel_early_ids[] __initconst = {
+    INTEL_I830_IDS(&i830_early_ops),
+    INTEL_I845G_IDS(&i845_early_ops),
+    INTEL_I85X_IDS(&i85x_early_ops),
+    INTEL_I865G_IDS(&i865_early_ops),
+    INTEL_I915G_IDS(&gen3_early_ops),
+    INTEL_I915GM_IDS(&gen3_early_ops),
+    INTEL_I945G_IDS(&gen3_early_ops),
+    INTEL_I945GM_IDS(&gen3_early_ops),
+    INTEL_VLV_M_IDS(&gen6_early_ops),
+    INTEL_VLV_D_IDS(&gen6_early_ops),
+    INTEL_PINEVIEW_IDS(&gen3_early_ops),
+    INTEL_I965G_IDS(&gen3_early_ops),
+    INTEL_G33_IDS(&gen3_early_ops),
+    INTEL_I965GM_IDS(&gen3_early_ops),
+    INTEL_GM45_IDS(&gen3_early_ops),
+    INTEL_G45_IDS(&gen3_early_ops),
+    INTEL_IRONLAKE_D_IDS(&gen3_early_ops),
+    INTEL_IRONLAKE_M_IDS(&gen3_early_ops),
+    INTEL_SNB_D_IDS(&gen6_early_ops),
+    INTEL_SNB_M_IDS(&gen6_early_ops),
+    INTEL_IVB_M_IDS(&gen6_early_ops),
+    INTEL_IVB_D_IDS(&gen6_early_ops),
+    INTEL_HSW_D_IDS(&gen6_early_ops),
+    INTEL_HSW_M_IDS(&gen6_early_ops),
+    INTEL_BDW_M_IDS(&gen8_early_ops),
+    INTEL_BDW_D_IDS(&gen8_early_ops),
+    INTEL_CHV_IDS(&chv_early_ops),
+    INTEL_SKL_IDS(&gen9_early_ops),
+    INTEL_BXT_IDS(&gen9_early_ops),
+    INTEL_KBL_IDS(&gen9_early_ops),
};
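Each PCI ID macro stashes a pointer to the matching ops structure in the entry's driver_data field, which the quirk function below casts back and calls through. A stripped-down, non-kernel illustration of the same lookup pattern, with an invented device ID and size:

/*
 * Miniature illustration of the driver_data dispatch used above
 * (not kernel code; the device ID and size are invented).
 */
#include <stdio.h>
#include <stddef.h>

struct early_ops {
        size_t (*stolen_size)(void);
};

static size_t fake_gen9_size(void) { return (size_t)64 << 20; }

static const struct early_ops fake_gen9_ops = { .stolen_size = fake_gen9_size };

struct id_entry {
        unsigned short device;
        const struct early_ops *ops;    /* plays the role of driver_data */
};

static const struct id_entry ids[] = {
        { 0x1916, &fake_gen9_ops },     /* hypothetical device ID */
};

int main(void)
{
        unsigned short device = 0x1916; /* pretend this came from PCI config space */

        for (size_t i = 0; i < sizeof(ids) / sizeof(ids[0]); i++) {
                if (ids[i].device != device)
                        continue;
                printf("stolen size = %zu MB\n", ids[i].ops->stolen_size() >> 20);
                return 0;
        }
        return 1;
}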
-static void __init intel_graphics_stolen(int num, int slot, int func)
-{
-    size_t size;
-    int i;
-    u32 start;
-    u16 device, subvendor, subdevice;
-
-    device = read_pci_config_16(num, slot, func, PCI_DEVICE_ID);
-    subvendor = read_pci_config_16(num, slot, func,
-                                   PCI_SUBSYSTEM_VENDOR_ID);
-    subdevice = read_pci_config_16(num, slot, func, PCI_SUBSYSTEM_ID);
-
-    for (i = 0; i < ARRAY_SIZE(intel_stolen_ids); i++) {
-        if (intel_stolen_ids[i].device == device) {
-            const struct intel_stolen_funcs *stolen_funcs =
-                (const struct intel_stolen_funcs *)intel_stolen_ids[i].driver_data;
-            size = stolen_funcs->size(num, slot, func);
-            start = stolen_funcs->base(num, slot, func, size);
-            if (size && start) {
-                printk(KERN_INFO "Reserving Intel graphics stolen memory at 0x%x-0x%x\n",
-                       start, start + (u32)size - 1);
-                /* Mark this space as reserved */
-                e820_add_region(start, size, E820_RESERVED);
-                sanitize_e820_map(e820.map,
-                                  ARRAY_SIZE(e820.map),
-                                  &e820.nr_map);
-            }
-            return;
-        }
-    }
-}
+static void __init
+intel_graphics_stolen(int num, int slot, int func,
+                      const struct intel_early_ops *early_ops)
+{
+    phys_addr_t base, end;
+    size_t size;
+
+    size = early_ops->stolen_size(num, slot, func);
+    base = early_ops->stolen_base(num, slot, func, size);
+
+    if (!size || !base)
+        return;
+
+    end = base + size - 1;
+    printk(KERN_INFO "Reserving Intel graphics memory at %pa-%pa\n",
+           &base, &end);
+
+    /* Mark this space as reserved */
+    e820_add_region(base, size, E820_RESERVED);
+    sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
+}
+
+static void __init intel_graphics_quirks(int num, int slot, int func)
+{
+    const struct intel_early_ops *early_ops;
+    u16 device;
+    int i;
+
+    device = read_pci_config_16(num, slot, func, PCI_DEVICE_ID);
+
+    for (i = 0; i < ARRAY_SIZE(intel_early_ids); i++) {
+        kernel_ulong_t driver_data = intel_early_ids[i].driver_data;
+
+        if (intel_early_ids[i].device != device)
+            continue;
+
+        early_ops = (typeof(early_ops))driver_data;
+
+        intel_graphics_stolen(num, slot, func, early_ops);
+
+        return;
+    }
+}
@@ -627,7 +611,7 @@ static struct chipset early_qrk[] __initdata = {
    { PCI_VENDOR_ID_INTEL, 0x3406, PCI_CLASS_BRIDGE_HOST,
      PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
    { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA, PCI_ANY_ID,
-      QFLAG_APPLY_ONCE, intel_graphics_stolen },
+      QFLAG_APPLY_ONCE, intel_graphics_quirks },
    /*
     * HPET on the current version of the Baytrail platform has accuracy
     * problems: it will halt in deep idle state - so we disable it.
......
@@ -242,6 +242,37 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
    store_vblank(dev, pipe, diff, &t_vblank, cur_vblank);
}
/**
* drm_accurate_vblank_count - retrieve the master vblank counter
* @crtc: which counter to retrieve
*
* This function is similar to @drm_crtc_vblank_count but this
* function interpolates to handle a race with vblank irq's.
*
* This is mostly useful for hardware that can obtain the scanout
* position, but doesn't have a frame counter.
*/
u32 drm_accurate_vblank_count(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
unsigned int pipe = drm_crtc_index(crtc);
u32 vblank;
unsigned long flags;
WARN(!dev->driver->get_vblank_timestamp,
"This function requires support for accurate vblank timestamps.");
spin_lock_irqsave(&dev->vblank_time_lock, flags);
drm_update_vblank_count(dev, pipe, 0);
vblank = drm_vblank_count(dev, pipe);
spin_unlock_irqrestore(&dev->vblank_time_lock, flags);
return vblank;
}
EXPORT_SYMBOL(drm_accurate_vblank_count);
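A hedged sketch of how a driver without a hardware frame counter might use the new helper when stamping a queued flip; the surrounding structure and field names are hypothetical:

/*
 * Hypothetical driver snippet (not from this patch set): sample the
 * interpolated vblank count when a flip is queued, so the completion event
 * still carries a sensible sequence number. The declaration lives in drmP.h
 * on kernels of this vintage; later kernels moved it to drm_vblank.h.
 */
#include <drm/drmP.h>

struct foo_flip {                       /* hypothetical per-flip bookkeeping */
        u32 queued_vblank;
};

static void foo_queue_flip(struct drm_crtc *crtc, struct foo_flip *flip)
{
        /* interpolates from the scanout position under vblank_time_lock */
        flip->queued_vblank = drm_accurate_vblank_count(crtc);
}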
/*
 * Disable vblank irq's on crtc, make sure that last vblank count
 * of hardware and corresponding consistent software vblank counter
......
@@ -59,6 +59,7 @@ i915-y += intel_audio.o \
      intel_bios.o \
      intel_color.o \
      intel_display.o \
+     intel_dpio_phy.o \
      intel_dpll_mgr.o \
      intel_fbc.o \
      intel_fifo_underrun.o \
@@ -81,10 +82,12 @@ i915-y += dvo_ch7017.o \
      dvo_tfp410.o \
      intel_crt.o \
      intel_ddi.o \
+     intel_dp_aux_backlight.o \
      intel_dp_link_training.o \
      intel_dp_mst.o \
      intel_dp.o \
      intel_dsi.o \
+     intel_dsi_dcs_backlight.o \
      intel_dsi_panel_vbt.o \
      intel_dsi_pll.o \
      intel_dvo.o \
......
@@ -215,7 +215,8 @@ static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = {
    CMD( MI_RS_CONTEXT, SMI, F, 1, S ),
    CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, M ),
    CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ),
-   CMD( MI_LOAD_REGISTER_REG, SMI, !F, 0xFF, R ),
+   CMD( MI_LOAD_REGISTER_REG, SMI, !F, 0xFF, W,
+        .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 1 } ),
    CMD( MI_RS_STORE_DATA_IMM, SMI, !F, 0xFF, S ),
    CMD( MI_LOAD_URB_MEM, SMI, !F, 0xFF, S ),
    CMD( MI_STORE_URB_MEM, SMI, !F, 0xFF, S ),
...@@ -750,12 +751,12 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine) ...@@ -750,12 +751,12 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
int cmd_table_count; int cmd_table_count;
int ret; int ret;
if (!IS_GEN7(engine->dev)) if (!IS_GEN7(engine->i915))
return 0; return 0;
switch (engine->id) { switch (engine->id) {
case RCS: case RCS:
if (IS_HASWELL(engine->dev)) { if (IS_HASWELL(engine->i915)) {
cmd_tables = hsw_render_ring_cmds; cmd_tables = hsw_render_ring_cmds;
cmd_table_count = cmd_table_count =
ARRAY_SIZE(hsw_render_ring_cmds); ARRAY_SIZE(hsw_render_ring_cmds);
...@@ -764,7 +765,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine) ...@@ -764,7 +765,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
cmd_table_count = ARRAY_SIZE(gen7_render_cmds); cmd_table_count = ARRAY_SIZE(gen7_render_cmds);
} }
if (IS_HASWELL(engine->dev)) { if (IS_HASWELL(engine->i915)) {
engine->reg_tables = hsw_render_reg_tables; engine->reg_tables = hsw_render_reg_tables;
engine->reg_table_count = ARRAY_SIZE(hsw_render_reg_tables); engine->reg_table_count = ARRAY_SIZE(hsw_render_reg_tables);
} else { } else {
...@@ -780,7 +781,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine) ...@@ -780,7 +781,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask; engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
break; break;
case BCS: case BCS:
if (IS_HASWELL(engine->dev)) { if (IS_HASWELL(engine->i915)) {
cmd_tables = hsw_blt_ring_cmds; cmd_tables = hsw_blt_ring_cmds;
cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds); cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds);
} else { } else {
...@@ -788,7 +789,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine) ...@@ -788,7 +789,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
cmd_table_count = ARRAY_SIZE(gen7_blt_cmds); cmd_table_count = ARRAY_SIZE(gen7_blt_cmds);
} }
if (IS_HASWELL(engine->dev)) { if (IS_HASWELL(engine->i915)) {
engine->reg_tables = hsw_blt_reg_tables; engine->reg_tables = hsw_blt_reg_tables;
engine->reg_table_count = ARRAY_SIZE(hsw_blt_reg_tables); engine->reg_table_count = ARRAY_SIZE(hsw_blt_reg_tables);
} else { } else {
...@@ -1035,7 +1036,7 @@ bool i915_needs_cmd_parser(struct intel_engine_cs *engine) ...@@ -1035,7 +1036,7 @@ bool i915_needs_cmd_parser(struct intel_engine_cs *engine)
if (!engine->needs_cmd_parser) if (!engine->needs_cmd_parser)
return false; return false;
if (!USES_PPGTT(engine->dev)) if (!USES_PPGTT(engine->i915))
return false; return false;
return (i915.enable_cmd_parser == 1); return (i915.enable_cmd_parser == 1);
...@@ -1098,6 +1099,11 @@ static bool check_cmd(const struct intel_engine_cs *engine, ...@@ -1098,6 +1099,11 @@ static bool check_cmd(const struct intel_engine_cs *engine,
return false; return false;
} }
if (desc->cmd.value == MI_LOAD_REGISTER_REG) {
DRM_DEBUG_DRIVER("CMD: Rejected LRR to OACONTROL\n");
return false;
}
if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1)) if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1))
*oacontrol_set = (cmd[offset + 1] != 0); *oacontrol_set = (cmd[offset + 1] != 0);
} }
...@@ -1113,6 +1119,12 @@ static bool check_cmd(const struct intel_engine_cs *engine, ...@@ -1113,6 +1119,12 @@ static bool check_cmd(const struct intel_engine_cs *engine,
return false; return false;
} }
if (desc->cmd.value == MI_LOAD_REGISTER_REG) {
DRM_DEBUG_DRIVER("CMD: Rejected LRR to masked register 0x%08X\n",
reg_addr);
return false;
}
if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1) && if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1) &&
(offset + 2 > length || (offset + 2 > length ||
(cmd[offset + 1] & reg->mask) != reg->value)) { (cmd[offset + 1] & reg->mask) != reg->value)) {
@@ -1275,8 +1287,21 @@ int i915_parse_cmds(struct intel_engine_cs *engine,
 *
 * Return: the current version number of the cmd parser
 */
-int i915_cmd_parser_get_version(void)
+int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv)
{
+    struct intel_engine_cs *engine;
+    bool active = false;
+
+    /* If the command parser is not enabled, report 0 - unsupported */
+    for_each_engine(engine, dev_priv) {
+        if (i915_needs_cmd_parser(engine)) {
+            active = true;
+            break;
+        }
+    }
+    if (!active)
+        return 0;
+
    /*
     * Command parser version history
     *
@@ -1288,6 +1313,7 @@ int i915_cmd_parser_get_version(void)
     * 4. L3 atomic chicken bits of HSW_SCRATCH1 and HSW_ROW_CHICKEN3.
     * 5. GPGPU dispatch compute indirect registers.
     * 6. TIMESTAMP register and Haswell CS GPR registers
+    * 7. Allow MI_LOAD_REGISTER_REG between whitelisted registers.
     */
-    return 6;
+    return 7;
}
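Userspace can read the advertised parser version back through the i915 GETPARAM ioctl; a minimal sketch, assuming libdrm and an example render-node path:

/*
 * Query I915_PARAM_CMD_PARSER_VERSION from userspace (illustrative sketch).
 * Error handling is minimal; /dev/dri/renderD128 is just an example path,
 * and the header paths assume libdrm's pkg-config include directory.
 */
#include <fcntl.h>
#include <stdio.h>
#include <xf86drm.h>
#include <i915_drm.h>

int main(void)
{
        int fd = open("/dev/dri/renderD128", O_RDWR);
        int version = 0;
        struct drm_i915_getparam gp = {
                .param = I915_PARAM_CMD_PARSER_VERSION,
                .value = &version,
        };

        if (fd < 0 || drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
                return 1;

        printf("cmd parser version: %d\n", version); /* 0 means the parser is disabled */
        return 0;
}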
...@@ -89,17 +89,17 @@ static int i915_capabilities(struct seq_file *m, void *data) ...@@ -89,17 +89,17 @@ static int i915_capabilities(struct seq_file *m, void *data)
return 0; return 0;
} }
static const char get_active_flag(struct drm_i915_gem_object *obj) static char get_active_flag(struct drm_i915_gem_object *obj)
{ {
return obj->active ? '*' : ' '; return obj->active ? '*' : ' ';
} }
static const char get_pin_flag(struct drm_i915_gem_object *obj) static char get_pin_flag(struct drm_i915_gem_object *obj)
{ {
return obj->pin_display ? 'p' : ' '; return obj->pin_display ? 'p' : ' ';
} }
static const char get_tiling_flag(struct drm_i915_gem_object *obj) static char get_tiling_flag(struct drm_i915_gem_object *obj)
{ {
switch (obj->tiling_mode) { switch (obj->tiling_mode) {
default: default:
...@@ -109,12 +109,12 @@ static const char get_tiling_flag(struct drm_i915_gem_object *obj) ...@@ -109,12 +109,12 @@ static const char get_tiling_flag(struct drm_i915_gem_object *obj)
} }
} }
static inline const char get_global_flag(struct drm_i915_gem_object *obj) static char get_global_flag(struct drm_i915_gem_object *obj)
{ {
return i915_gem_obj_to_ggtt(obj) ? 'g' : ' '; return i915_gem_obj_to_ggtt(obj) ? 'g' : ' ';
} }
static inline const char get_pin_mapped_flag(struct drm_i915_gem_object *obj) static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
{ {
return obj->mapping ? 'M' : ' '; return obj->mapping ? 'M' : ' ';
} }
...@@ -607,18 +607,20 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data) ...@@ -607,18 +607,20 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
for_each_intel_crtc(dev, crtc) { for_each_intel_crtc(dev, crtc) {
const char pipe = pipe_name(crtc->pipe); const char pipe = pipe_name(crtc->pipe);
const char plane = plane_name(crtc->plane); const char plane = plane_name(crtc->plane);
struct intel_unpin_work *work; struct intel_flip_work *work;
spin_lock_irq(&dev->event_lock); spin_lock_irq(&dev->event_lock);
work = crtc->unpin_work; work = crtc->flip_work;
if (work == NULL) { if (work == NULL) {
seq_printf(m, "No flip due on pipe %c (plane %c)\n", seq_printf(m, "No flip due on pipe %c (plane %c)\n",
pipe, plane); pipe, plane);
} else { } else {
u32 pending;
u32 addr; u32 addr;
if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) { pending = atomic_read(&work->pending);
seq_printf(m, "Flip queued on pipe %c (plane %c)\n", if (pending) {
seq_printf(m, "Flip ioctl preparing on pipe %c (plane %c)\n",
pipe, plane); pipe, plane);
} else { } else {
seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n", seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
...@@ -638,11 +640,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data) ...@@ -638,11 +640,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n", seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
work->flip_queued_vblank, work->flip_queued_vblank,
work->flip_ready_vblank, work->flip_ready_vblank,
drm_crtc_vblank_count(&crtc->base)); intel_crtc_get_vblank_counter(crtc));
if (work->enable_stall_check)
seq_puts(m, "Stall check enabled, ");
else
seq_puts(m, "Stall check waiting for page flip ioctl, ");
seq_printf(m, "%d prepares\n", atomic_read(&work->pending)); seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
if (INTEL_INFO(dev)->gen >= 4) if (INTEL_INFO(dev)->gen >= 4)
...@@ -1383,7 +1381,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused) ...@@ -1383,7 +1381,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
seqno[id] = engine->get_seqno(engine); seqno[id] = engine->get_seqno(engine);
} }
i915_get_extra_instdone(dev, instdone); i915_get_extra_instdone(dev_priv, instdone);
intel_runtime_pm_put(dev_priv); intel_runtime_pm_put(dev_priv);
...@@ -2004,7 +2002,7 @@ static int i915_context_status(struct seq_file *m, void *unused) ...@@ -2004,7 +2002,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
ctx->legacy_hw_ctx.rcs_state == NULL) ctx->legacy_hw_ctx.rcs_state == NULL)
continue; continue;
seq_puts(m, "HW context "); seq_printf(m, "HW context %u ", ctx->hw_id);
describe_ctx(m, ctx); describe_ctx(m, ctx);
if (ctx == dev_priv->kernel_context) if (ctx == dev_priv->kernel_context)
seq_printf(m, "(kernel context) "); seq_printf(m, "(kernel context) ");
...@@ -2046,15 +2044,13 @@ static void i915_dump_lrc_obj(struct seq_file *m, ...@@ -2046,15 +2044,13 @@ static void i915_dump_lrc_obj(struct seq_file *m,
struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state; struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
unsigned long ggtt_offset = 0; unsigned long ggtt_offset = 0;
seq_printf(m, "CONTEXT: %s %u\n", engine->name, ctx->hw_id);
if (ctx_obj == NULL) { if (ctx_obj == NULL) {
seq_printf(m, "Context on %s with no gem object\n", seq_puts(m, "\tNot allocated\n");
engine->name);
return; return;
} }
seq_printf(m, "CONTEXT: %s %u\n", engine->name,
intel_execlists_ctx_id(ctx, engine));
if (!i915_gem_obj_ggtt_bound(ctx_obj)) if (!i915_gem_obj_ggtt_bound(ctx_obj))
seq_puts(m, "\tNot bound in GGTT\n"); seq_puts(m, "\tNot bound in GGTT\n");
else else
...@@ -2100,9 +2096,8 @@ static int i915_dump_lrc(struct seq_file *m, void *unused) ...@@ -2100,9 +2096,8 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
return ret; return ret;
list_for_each_entry(ctx, &dev_priv->context_list, link) list_for_each_entry(ctx, &dev_priv->context_list, link)
if (ctx != dev_priv->kernel_context) for_each_engine(engine, dev_priv)
for_each_engine(engine, dev_priv) i915_dump_lrc_obj(m, ctx, engine);
i915_dump_lrc_obj(m, ctx, engine);
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
...@@ -2173,8 +2168,8 @@ static int i915_execlists(struct seq_file *m, void *data) ...@@ -2173,8 +2168,8 @@ static int i915_execlists(struct seq_file *m, void *data)
seq_printf(m, "\t%d requests in queue\n", count); seq_printf(m, "\t%d requests in queue\n", count);
if (head_req) { if (head_req) {
seq_printf(m, "\tHead request id: %u\n", seq_printf(m, "\tHead request context: %u\n",
intel_execlists_ctx_id(head_req->ctx, engine)); head_req->ctx->hw_id);
seq_printf(m, "\tHead request tail: %u\n", seq_printf(m, "\tHead request tail: %u\n",
head_req->tail); head_req->tail);
} }
...@@ -2313,12 +2308,12 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev) ...@@ -2313,12 +2308,12 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *engine; struct intel_engine_cs *engine;
if (INTEL_INFO(dev)->gen == 6) if (IS_GEN6(dev_priv))
seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE)); seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
for_each_engine(engine, dev_priv) { for_each_engine(engine, dev_priv) {
seq_printf(m, "%s\n", engine->name); seq_printf(m, "%s\n", engine->name);
if (INTEL_INFO(dev)->gen == 7) if (IS_GEN7(dev_priv))
seq_printf(m, "GFX_MODE: 0x%08x\n", seq_printf(m, "GFX_MODE: 0x%08x\n",
I915_READ(RING_MODE_GEN7(engine))); I915_READ(RING_MODE_GEN7(engine)));
seq_printf(m, "PP_DIR_BASE: 0x%08x\n", seq_printf(m, "PP_DIR_BASE: 0x%08x\n",
...@@ -3168,7 +3163,7 @@ static int i915_semaphore_status(struct seq_file *m, void *unused) ...@@ -3168,7 +3163,7 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
enum intel_engine_id id; enum intel_engine_id id;
int j, ret; int j, ret;
if (!i915_semaphore_is_enabled(dev)) { if (!i915_semaphore_is_enabled(dev_priv)) {
seq_puts(m, "Semaphores are disabled\n"); seq_puts(m, "Semaphores are disabled\n");
return 0; return 0;
} }
...@@ -4769,7 +4764,7 @@ i915_wedged_set(void *data, u64 val) ...@@ -4769,7 +4764,7 @@ i915_wedged_set(void *data, u64 val)
intel_runtime_pm_get(dev_priv); intel_runtime_pm_get(dev_priv);
i915_handle_error(dev, val, i915_handle_error(dev_priv, val,
"Manually setting wedged to %llu", val); "Manually setting wedged to %llu", val);
intel_runtime_pm_put(dev_priv); intel_runtime_pm_put(dev_priv);
...@@ -4919,7 +4914,7 @@ i915_drop_caches_set(void *data, u64 val) ...@@ -4919,7 +4914,7 @@ i915_drop_caches_set(void *data, u64 val)
} }
if (val & (DROP_RETIRE | DROP_ACTIVE)) if (val & (DROP_RETIRE | DROP_ACTIVE))
i915_gem_retire_requests(dev); i915_gem_retire_requests(dev_priv);
if (val & DROP_BOUND) if (val & DROP_BOUND)
i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND); i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND);
...@@ -4993,7 +4988,7 @@ i915_max_freq_set(void *data, u64 val) ...@@ -4993,7 +4988,7 @@ i915_max_freq_set(void *data, u64 val)
dev_priv->rps.max_freq_softlimit = val; dev_priv->rps.max_freq_softlimit = val;
intel_set_rps(dev, val); intel_set_rps(dev_priv, val);
mutex_unlock(&dev_priv->rps.hw_lock); mutex_unlock(&dev_priv->rps.hw_lock);
...@@ -5060,7 +5055,7 @@ i915_min_freq_set(void *data, u64 val) ...@@ -5060,7 +5055,7 @@ i915_min_freq_set(void *data, u64 val)
dev_priv->rps.min_freq_softlimit = val; dev_priv->rps.min_freq_softlimit = val;
intel_set_rps(dev, val); intel_set_rps(dev_priv, val);
mutex_unlock(&dev_priv->rps.hw_lock); mutex_unlock(&dev_priv->rps.hw_lock);
......
...@@ -186,7 +186,7 @@ static int i915_getparam(struct drm_device *dev, void *data, ...@@ -186,7 +186,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
value = 1; value = 1;
break; break;
case I915_PARAM_HAS_SEMAPHORES: case I915_PARAM_HAS_SEMAPHORES:
value = i915_semaphore_is_enabled(dev); value = i915_semaphore_is_enabled(dev_priv);
break; break;
case I915_PARAM_HAS_PRIME_VMAP_FLUSH: case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
value = 1; value = 1;
...@@ -204,7 +204,7 @@ static int i915_getparam(struct drm_device *dev, void *data, ...@@ -204,7 +204,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
value = 1; value = 1;
break; break;
case I915_PARAM_CMD_PARSER_VERSION: case I915_PARAM_CMD_PARSER_VERSION:
value = i915_cmd_parser_get_version(); value = i915_cmd_parser_get_version(dev_priv);
break; break;
case I915_PARAM_HAS_COHERENT_PHYS_GTT: case I915_PARAM_HAS_COHERENT_PHYS_GTT:
value = 1; value = 1;
...@@ -223,8 +223,7 @@ static int i915_getparam(struct drm_device *dev, void *data, ...@@ -223,8 +223,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
return -ENODEV; return -ENODEV;
break; break;
case I915_PARAM_HAS_GPU_RESET: case I915_PARAM_HAS_GPU_RESET:
value = i915.enable_hangcheck && value = i915.enable_hangcheck && intel_has_gpu_reset(dev_priv);
intel_has_gpu_reset(dev);
break; break;
case I915_PARAM_HAS_RESOURCE_STREAMER: case I915_PARAM_HAS_RESOURCE_STREAMER:
value = HAS_RESOURCE_STREAMER(dev); value = HAS_RESOURCE_STREAMER(dev);
...@@ -425,6 +424,43 @@ static const struct vga_switcheroo_client_ops i915_switcheroo_ops = { ...@@ -425,6 +424,43 @@ static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
.can_switch = i915_switcheroo_can_switch, .can_switch = i915_switcheroo_can_switch,
}; };
static void i915_gem_fini(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
/*
* Neither the BIOS, ourselves or any other kernel
* expects the system to be in execlists mode on startup,
* so we need to reset the GPU back to legacy mode. And the only
* known way to disable logical contexts is through a GPU reset.
*
* So in order to leave the system in a known default configuration,
* always reset the GPU upon unload. Afterwards we then clean up the
* GEM state tracking, flushing off the requests and leaving the
* system in a known idle state.
*
* Note that is of the upmost importance that the GPU is idle and
* all stray writes are flushed *before* we dismantle the backing
* storage for the pinned objects.
*
* However, since we are uncertain that reseting the GPU on older
* machines is a good idea, we don't - just in case it leaves the
* machine in an unusable condition.
*/
if (HAS_HW_CONTEXTS(dev)) {
int reset = intel_gpu_reset(dev_priv, ALL_ENGINES);
WARN_ON(reset && reset != -ENODEV);
}
mutex_lock(&dev->struct_mutex);
i915_gem_reset(dev);
i915_gem_cleanup_engines(dev);
i915_gem_context_fini(dev);
mutex_unlock(&dev->struct_mutex);
WARN_ON(!list_empty(&to_i915(dev)->context_list));
}
static int i915_load_modeset_init(struct drm_device *dev) static int i915_load_modeset_init(struct drm_device *dev)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
...@@ -454,6 +490,9 @@ static int i915_load_modeset_init(struct drm_device *dev) ...@@ -454,6 +490,9 @@ static int i915_load_modeset_init(struct drm_device *dev)
if (ret) if (ret)
goto cleanup_vga_client; goto cleanup_vga_client;
/* must happen before intel_power_domains_init_hw() on VLV/CHV */
intel_update_rawclk(dev_priv);
intel_power_domains_init_hw(dev_priv, false); intel_power_domains_init_hw(dev_priv, false);
intel_csr_ucode_init(dev_priv); intel_csr_ucode_init(dev_priv);
...@@ -503,10 +542,7 @@ static int i915_load_modeset_init(struct drm_device *dev) ...@@ -503,10 +542,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
return 0; return 0;
cleanup_gem: cleanup_gem:
mutex_lock(&dev->struct_mutex); i915_gem_fini(dev);
i915_gem_cleanup_engines(dev);
i915_gem_context_fini(dev);
mutex_unlock(&dev->struct_mutex);
cleanup_irq: cleanup_irq:
intel_guc_ucode_fini(dev); intel_guc_ucode_fini(dev);
drm_irq_uninstall(dev); drm_irq_uninstall(dev);
...@@ -850,7 +886,7 @@ static void intel_device_info_runtime_init(struct drm_device *dev) ...@@ -850,7 +886,7 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
DRM_INFO("Display disabled (module parameter)\n"); DRM_INFO("Display disabled (module parameter)\n");
info->num_pipes = 0; info->num_pipes = 0;
} else if (info->num_pipes > 0 && } else if (info->num_pipes > 0 &&
(INTEL_INFO(dev)->gen == 7 || INTEL_INFO(dev)->gen == 8) && (IS_GEN7(dev_priv) || IS_GEN8(dev_priv)) &&
HAS_PCH_SPLIT(dev)) { HAS_PCH_SPLIT(dev)) {
u32 fuse_strap = I915_READ(FUSE_STRAP); u32 fuse_strap = I915_READ(FUSE_STRAP);
u32 sfuse_strap = I915_READ(SFUSE_STRAP); u32 sfuse_strap = I915_READ(SFUSE_STRAP);
...@@ -874,7 +910,7 @@ static void intel_device_info_runtime_init(struct drm_device *dev) ...@@ -874,7 +910,7 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
DRM_INFO("PipeC fused off\n"); DRM_INFO("PipeC fused off\n");
info->num_pipes -= 1; info->num_pipes -= 1;
} }
} else if (info->num_pipes > 0 && INTEL_INFO(dev)->gen == 9) { } else if (info->num_pipes > 0 && IS_GEN9(dev_priv)) {
u32 dfsm = I915_READ(SKL_DFSM); u32 dfsm = I915_READ(SKL_DFSM);
u8 disabled_mask = 0; u8 disabled_mask = 0;
bool invalid; bool invalid;
...@@ -915,9 +951,11 @@ static void intel_device_info_runtime_init(struct drm_device *dev) ...@@ -915,9 +951,11 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
else if (INTEL_INFO(dev)->gen >= 9) else if (INTEL_INFO(dev)->gen >= 9)
gen9_sseu_info_init(dev); gen9_sseu_info_init(dev);
/* Snooping is broken on BXT A stepping. */
info->has_snoop = !info->has_llc; info->has_snoop = !info->has_llc;
info->has_snoop &= !IS_BXT_REVID(dev, 0, BXT_REVID_A1);
/* Snooping is broken on BXT A stepping. */
if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
info->has_snoop = false;
DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total); DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total);
DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total); DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total);
...@@ -930,6 +968,20 @@ static void intel_device_info_runtime_init(struct drm_device *dev) ...@@ -930,6 +968,20 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
info->has_subslice_pg ? "y" : "n"); info->has_subslice_pg ? "y" : "n");
DRM_DEBUG_DRIVER("has EU power gating: %s\n", DRM_DEBUG_DRIVER("has EU power gating: %s\n",
info->has_eu_pg ? "y" : "n"); info->has_eu_pg ? "y" : "n");
i915.enable_execlists =
intel_sanitize_enable_execlists(dev_priv,
i915.enable_execlists);
/*
* i915.enable_ppgtt is read-only, so do an early pass to validate the
* user's requested state against the hardware/driver capabilities. We
* do this now so that we can print out any log messages once rather
* than every time we check intel_enable_ppgtt().
*/
i915.enable_ppgtt =
intel_sanitize_enable_ppgtt(dev_priv, i915.enable_ppgtt);
DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
} }
static void intel_init_dpio(struct drm_i915_private *dev_priv) static void intel_init_dpio(struct drm_i915_private *dev_priv)
...@@ -1020,6 +1072,9 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv, ...@@ -1020,6 +1072,9 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
memcpy(device_info, info, sizeof(dev_priv->info)); memcpy(device_info, info, sizeof(dev_priv->info));
device_info->device_id = dev->pdev->device; device_info->device_id = dev->pdev->device;
BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE);
device_info->gen_mask = BIT(device_info->gen - 1);
spin_lock_init(&dev_priv->irq_lock); spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->gpu_error.lock); spin_lock_init(&dev_priv->gpu_error.lock);
mutex_init(&dev_priv->backlight_lock); mutex_init(&dev_priv->backlight_lock);
...@@ -1137,7 +1192,7 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv) ...@@ -1137,7 +1192,7 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
if (ret < 0) if (ret < 0)
goto put_bridge; goto put_bridge;
intel_uncore_init(dev); intel_uncore_init(dev_priv);
return 0; return 0;
...@@ -1155,7 +1210,7 @@ static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv) ...@@ -1155,7 +1210,7 @@ static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
{ {
struct drm_device *dev = dev_priv->dev; struct drm_device *dev = dev_priv->dev;
intel_uncore_fini(dev); intel_uncore_fini(dev_priv);
i915_mmio_cleanup(dev); i915_mmio_cleanup(dev);
pci_dev_put(dev_priv->bridge_dev); pci_dev_put(dev_priv->bridge_dev);
} }
...@@ -1206,8 +1261,15 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) ...@@ -1206,8 +1261,15 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
pci_set_master(dev->pdev); pci_set_master(dev->pdev);
/* overlay on gen2 is broken and can't address above 1G */ /* overlay on gen2 is broken and can't address above 1G */
if (IS_GEN2(dev)) if (IS_GEN2(dev)) {
dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30)); ret = dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
if (ret) {
DRM_ERROR("failed to set DMA mask\n");
goto out_ggtt;
}
}
/* 965GM sometimes incorrectly writes to hardware status page (HWS) /* 965GM sometimes incorrectly writes to hardware status page (HWS)
* using 32bit addressing, overwriting memory if HWS is located * using 32bit addressing, overwriting memory if HWS is located
...@@ -1217,8 +1279,15 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) ...@@ -1217,8 +1279,15 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
* behaviour if any general state is accessed within a page above 4GB, * behaviour if any general state is accessed within a page above 4GB,
* which also needs to be handled carefully. * which also needs to be handled carefully.
*/ */
if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) {
dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32)); ret = dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
if (ret) {
DRM_ERROR("failed to set DMA mask\n");
goto out_ggtt;
}
}
aperture_size = ggtt->mappable_end; aperture_size = ggtt->mappable_end;
...@@ -1236,7 +1305,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) ...@@ -1236,7 +1305,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
PM_QOS_DEFAULT_VALUE); PM_QOS_DEFAULT_VALUE);
intel_uncore_sanitize(dev); intel_uncore_sanitize(dev_priv);
intel_opregion_setup(dev); intel_opregion_setup(dev);
...@@ -1300,7 +1369,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv) ...@@ -1300,7 +1369,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
* Notify a valid surface after modesetting, * Notify a valid surface after modesetting,
* when running inside a VM. * when running inside a VM.
*/ */
if (intel_vgpu_active(dev)) if (intel_vgpu_active(dev_priv))
I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY); I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);
i915_setup_sysfs(dev); i915_setup_sysfs(dev);
...@@ -1459,10 +1528,7 @@ int i915_driver_unload(struct drm_device *dev) ...@@ -1459,10 +1528,7 @@ int i915_driver_unload(struct drm_device *dev)
flush_workqueue(dev_priv->wq); flush_workqueue(dev_priv->wq);
intel_guc_ucode_fini(dev); intel_guc_ucode_fini(dev);
mutex_lock(&dev->struct_mutex); i915_gem_fini(dev);
i915_gem_cleanup_engines(dev);
i915_gem_context_fini(dev);
mutex_unlock(&dev->struct_mutex);
intel_fbc_cleanup_cfb(dev_priv); intel_fbc_cleanup_cfb(dev_priv);
intel_power_domains_fini(dev_priv); intel_power_domains_fini(dev_priv);
...@@ -1570,15 +1636,15 @@ const struct drm_ioctl_desc i915_ioctls[] = { ...@@ -1570,15 +1636,15 @@ const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0), DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW), DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW), DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW), DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW), DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
......
...@@ -298,22 +298,26 @@ static const struct intel_device_info intel_haswell_m_info = { ...@@ -298,22 +298,26 @@ static const struct intel_device_info intel_haswell_m_info = {
static const struct intel_device_info intel_broadwell_d_info = { static const struct intel_device_info intel_broadwell_d_info = {
BDW_FEATURES, BDW_FEATURES,
.gen = 8, .gen = 8,
.is_broadwell = 1,
}; };
static const struct intel_device_info intel_broadwell_m_info = { static const struct intel_device_info intel_broadwell_m_info = {
BDW_FEATURES, BDW_FEATURES,
.gen = 8, .is_mobile = 1, .gen = 8, .is_mobile = 1,
.is_broadwell = 1,
}; };
static const struct intel_device_info intel_broadwell_gt3d_info = { static const struct intel_device_info intel_broadwell_gt3d_info = {
BDW_FEATURES, BDW_FEATURES,
.gen = 8, .gen = 8,
.is_broadwell = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
}; };
static const struct intel_device_info intel_broadwell_gt3m_info = { static const struct intel_device_info intel_broadwell_gt3m_info = {
BDW_FEATURES, BDW_FEATURES,
.gen = 8, .is_mobile = 1, .gen = 8, .is_mobile = 1,
.is_broadwell = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
}; };
...@@ -528,9 +532,9 @@ void intel_detect_pch(struct drm_device *dev) ...@@ -528,9 +532,9 @@ void intel_detect_pch(struct drm_device *dev)
pci_dev_put(pch); pci_dev_put(pch);
} }
bool i915_semaphore_is_enabled(struct drm_device *dev) bool i915_semaphore_is_enabled(struct drm_i915_private *dev_priv)
{ {
if (INTEL_INFO(dev)->gen < 6) if (INTEL_GEN(dev_priv) < 6)
return false; return false;
if (i915.semaphores >= 0) if (i915.semaphores >= 0)
...@@ -540,13 +544,9 @@ bool i915_semaphore_is_enabled(struct drm_device *dev) ...@@ -540,13 +544,9 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
if (i915.enable_execlists) if (i915.enable_execlists)
return false; return false;
/* Until we get further testing... */
if (IS_GEN8(dev))
return false;
#ifdef CONFIG_INTEL_IOMMU #ifdef CONFIG_INTEL_IOMMU
/* Enable semaphores on SNB when IO remapping is off */ /* Enable semaphores on SNB when IO remapping is off */
if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) if (IS_GEN6(dev_priv) && intel_iommu_gfx_mapped)
return false; return false;
#endif #endif
...@@ -608,7 +608,7 @@ static int i915_drm_suspend(struct drm_device *dev) ...@@ -608,7 +608,7 @@ static int i915_drm_suspend(struct drm_device *dev)
intel_guc_suspend(dev); intel_guc_suspend(dev);
intel_suspend_gt_powersave(dev); intel_suspend_gt_powersave(dev_priv);
intel_display_suspend(dev); intel_display_suspend(dev);
...@@ -628,7 +628,7 @@ static int i915_drm_suspend(struct drm_device *dev) ...@@ -628,7 +628,7 @@ static int i915_drm_suspend(struct drm_device *dev)
opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold; opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
intel_opregion_notify_adapter(dev, opregion_target_state); intel_opregion_notify_adapter(dev, opregion_target_state);
intel_uncore_forcewake_reset(dev, false); intel_uncore_forcewake_reset(dev_priv, false);
intel_opregion_fini(dev); intel_opregion_fini(dev);
intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true); intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
...@@ -775,7 +775,7 @@ static int i915_drm_resume(struct drm_device *dev) ...@@ -775,7 +775,7 @@ static int i915_drm_resume(struct drm_device *dev)
spin_lock_irq(&dev_priv->irq_lock); spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display.hpd_irq_setup) if (dev_priv->display.hpd_irq_setup)
dev_priv->display.hpd_irq_setup(dev); dev_priv->display.hpd_irq_setup(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock); spin_unlock_irq(&dev_priv->irq_lock);
intel_dp_mst_resume(dev); intel_dp_mst_resume(dev);
...@@ -868,9 +868,9 @@ static int i915_drm_resume_early(struct drm_device *dev) ...@@ -868,9 +868,9 @@ static int i915_drm_resume_early(struct drm_device *dev)
DRM_ERROR("Resume prepare failed: %d, continuing anyway\n", DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
ret); ret);
intel_uncore_early_sanitize(dev, true); intel_uncore_early_sanitize(dev_priv, true);
if (IS_BROXTON(dev)) { if (IS_BROXTON(dev_priv)) {
if (!dev_priv->suspended_to_idle) if (!dev_priv->suspended_to_idle)
gen9_sanitize_dc_state(dev_priv); gen9_sanitize_dc_state(dev_priv);
bxt_disable_dc9(dev_priv); bxt_disable_dc9(dev_priv);
...@@ -878,7 +878,7 @@ static int i915_drm_resume_early(struct drm_device *dev) ...@@ -878,7 +878,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
hsw_disable_pc8(dev_priv); hsw_disable_pc8(dev_priv);
} }
intel_uncore_sanitize(dev); intel_uncore_sanitize(dev_priv);
if (IS_BROXTON(dev_priv) || if (IS_BROXTON(dev_priv) ||
!(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload)) !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
...@@ -921,14 +921,14 @@ int i915_resume_switcheroo(struct drm_device *dev) ...@@ -921,14 +921,14 @@ int i915_resume_switcheroo(struct drm_device *dev)
* - re-init interrupt state * - re-init interrupt state
* - re-init display * - re-init display
*/ */
int i915_reset(struct drm_device *dev) int i915_reset(struct drm_i915_private *dev_priv)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_device *dev = dev_priv->dev;
struct i915_gpu_error *error = &dev_priv->gpu_error; struct i915_gpu_error *error = &dev_priv->gpu_error;
unsigned reset_counter; unsigned reset_counter;
int ret; int ret;
intel_reset_gt_powersave(dev); intel_reset_gt_powersave(dev_priv);
mutex_lock(&dev->struct_mutex); mutex_lock(&dev->struct_mutex);
...@@ -944,7 +944,7 @@ int i915_reset(struct drm_device *dev) ...@@ -944,7 +944,7 @@ int i915_reset(struct drm_device *dev)
i915_gem_reset(dev); i915_gem_reset(dev);
ret = intel_gpu_reset(dev, ALL_ENGINES); ret = intel_gpu_reset(dev_priv, ALL_ENGINES);
/* Also reset the gpu hangman. */ /* Also reset the gpu hangman. */
if (error->stop_rings != 0) { if (error->stop_rings != 0) {
...@@ -999,7 +999,7 @@ int i915_reset(struct drm_device *dev) ...@@ -999,7 +999,7 @@ int i915_reset(struct drm_device *dev)
* of re-init after reset. * of re-init after reset.
*/ */
if (INTEL_INFO(dev)->gen > 5) if (INTEL_INFO(dev)->gen > 5)
intel_enable_gt_powersave(dev); intel_enable_gt_powersave(dev_priv);
return 0; return 0;
...@@ -1107,6 +1107,49 @@ static int i915_pm_resume(struct device *dev) ...@@ -1107,6 +1107,49 @@ static int i915_pm_resume(struct device *dev)
return i915_drm_resume(drm_dev); return i915_drm_resume(drm_dev);
} }
/* freeze: before creating the hibernation_image */
static int i915_pm_freeze(struct device *dev)
{
return i915_pm_suspend(dev);
}
static int i915_pm_freeze_late(struct device *dev)
{
int ret;
ret = i915_pm_suspend_late(dev);
if (ret)
return ret;
ret = i915_gem_freeze_late(dev_to_i915(dev));
if (ret)
return ret;
return 0;
}
/* thaw: called after creating the hibernation image, but before turning off. */
static int i915_pm_thaw_early(struct device *dev)
{
return i915_pm_resume_early(dev);
}
static int i915_pm_thaw(struct device *dev)
{
return i915_pm_resume(dev);
}
/* restore: called after loading the hibernation image. */
static int i915_pm_restore_early(struct device *dev)
{
return i915_pm_resume_early(dev);
}
static int i915_pm_restore(struct device *dev)
{
return i915_pm_resume(dev);
}
/* /*
* Save all Gunit registers that may be lost after a D3 and a subsequent * Save all Gunit registers that may be lost after a D3 and a subsequent
* S0i[R123] transition. The list of registers needing a save/restore is * S0i[R123] transition. The list of registers needing a save/restore is
...@@ -1470,7 +1513,7 @@ static int intel_runtime_suspend(struct device *device) ...@@ -1470,7 +1513,7 @@ static int intel_runtime_suspend(struct device *device)
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
int ret; int ret;
if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev)))) if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6())))
return -ENODEV; return -ENODEV;
if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev))) if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
...@@ -1509,7 +1552,7 @@ static int intel_runtime_suspend(struct device *device) ...@@ -1509,7 +1552,7 @@ static int intel_runtime_suspend(struct device *device)
intel_guc_suspend(dev); intel_guc_suspend(dev);
intel_suspend_gt_powersave(dev); intel_suspend_gt_powersave(dev_priv);
intel_runtime_pm_disable_interrupts(dev_priv); intel_runtime_pm_disable_interrupts(dev_priv);
ret = 0; ret = 0;
...@@ -1531,7 +1574,7 @@ static int intel_runtime_suspend(struct device *device) ...@@ -1531,7 +1574,7 @@ static int intel_runtime_suspend(struct device *device)
return ret; return ret;
} }
intel_uncore_forcewake_reset(dev, false); intel_uncore_forcewake_reset(dev_priv, false);
enable_rpm_wakeref_asserts(dev_priv); enable_rpm_wakeref_asserts(dev_priv);
WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count)); WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
...@@ -1612,7 +1655,7 @@ static int intel_runtime_resume(struct device *device) ...@@ -1612,7 +1655,7 @@ static int intel_runtime_resume(struct device *device)
* we can do is to hope that things will still work (and disable RPM). * we can do is to hope that things will still work (and disable RPM).
*/ */
i915_gem_init_swizzling(dev); i915_gem_init_swizzling(dev);
gen6_update_ring_freq(dev); gen6_update_ring_freq(dev_priv);
intel_runtime_pm_enable_interrupts(dev_priv); intel_runtime_pm_enable_interrupts(dev_priv);
...@@ -1624,7 +1667,7 @@ static int intel_runtime_resume(struct device *device) ...@@ -1624,7 +1667,7 @@ static int intel_runtime_resume(struct device *device)
if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
intel_hpd_init(dev_priv); intel_hpd_init(dev_priv);
intel_enable_gt_powersave(dev); intel_enable_gt_powersave(dev_priv);
enable_rpm_wakeref_asserts(dev_priv); enable_rpm_wakeref_asserts(dev_priv);
...@@ -1661,14 +1704,14 @@ static const struct dev_pm_ops i915_pm_ops = { ...@@ -1661,14 +1704,14 @@ static const struct dev_pm_ops i915_pm_ops = {
* @restore, @restore_early : called after rebooting and restoring the * @restore, @restore_early : called after rebooting and restoring the
* hibernation image [PMSG_RESTORE] * hibernation image [PMSG_RESTORE]
*/ */
.freeze = i915_pm_suspend, .freeze = i915_pm_freeze,
.freeze_late = i915_pm_suspend_late, .freeze_late = i915_pm_freeze_late,
.thaw_early = i915_pm_resume_early, .thaw_early = i915_pm_thaw_early,
.thaw = i915_pm_resume, .thaw = i915_pm_thaw,
.poweroff = i915_pm_suspend, .poweroff = i915_pm_suspend,
.poweroff_late = i915_pm_poweroff_late, .poweroff_late = i915_pm_poweroff_late,
.restore_early = i915_pm_resume_early, .restore_early = i915_pm_restore_early,
.restore = i915_pm_resume, .restore = i915_pm_restore,
/* S0ix (via runtime suspend) event handlers */ /* S0ix (via runtime suspend) event handlers */
.runtime_suspend = intel_runtime_suspend, .runtime_suspend = intel_runtime_suspend,
......
This diff has been collapsed.
This diff has been collapsed.
...@@ -134,9 +134,9 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool, ...@@ -134,9 +134,9 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
if (obj == NULL) { if (obj == NULL) {
int ret; int ret;
obj = i915_gem_alloc_object(pool->dev, size); obj = i915_gem_object_create(pool->dev, size);
if (obj == NULL) if (IS_ERR(obj))
return ERR_PTR(-ENOMEM); return obj;
ret = i915_gem_object_get_pages(obj); ret = i915_gem_object_get_pages(obj);
if (ret) if (ret)
......
...@@ -90,6 +90,8 @@ ...@@ -90,6 +90,8 @@
#include "i915_drv.h" #include "i915_drv.h"
#include "i915_trace.h" #include "i915_trace.h"
#define ALL_L3_SLICES(dev) (1 << NUM_L3_SLICES(dev)) - 1
/* This is a HW constraint. The value below is the largest known requirement /* This is a HW constraint. The value below is the largest known requirement
* I've seen in a spec to date, and that was a workaround for a non-shipping * I've seen in a spec to date, and that was a workaround for a non-shipping
* part. It should be safe to decrease this, but it's more future proof as is. * part. It should be safe to decrease this, but it's more future proof as is.
...@@ -97,28 +99,27 @@ ...@@ -97,28 +99,27 @@
#define GEN6_CONTEXT_ALIGN (64<<10) #define GEN6_CONTEXT_ALIGN (64<<10)
#define GEN7_CONTEXT_ALIGN 4096 #define GEN7_CONTEXT_ALIGN 4096
static size_t get_context_alignment(struct drm_device *dev) static size_t get_context_alignment(struct drm_i915_private *dev_priv)
{ {
if (IS_GEN6(dev)) if (IS_GEN6(dev_priv))
return GEN6_CONTEXT_ALIGN; return GEN6_CONTEXT_ALIGN;
return GEN7_CONTEXT_ALIGN; return GEN7_CONTEXT_ALIGN;
} }
static int get_context_size(struct drm_device *dev) static int get_context_size(struct drm_i915_private *dev_priv)
{ {
struct drm_i915_private *dev_priv = dev->dev_private;
int ret; int ret;
u32 reg; u32 reg;
switch (INTEL_INFO(dev)->gen) { switch (INTEL_GEN(dev_priv)) {
case 6: case 6:
reg = I915_READ(CXT_SIZE); reg = I915_READ(CXT_SIZE);
ret = GEN6_CXT_TOTAL_SIZE(reg) * 64; ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
break; break;
case 7: case 7:
reg = I915_READ(GEN7_CXT_SIZE); reg = I915_READ(GEN7_CXT_SIZE);
if (IS_HASWELL(dev)) if (IS_HASWELL(dev_priv))
ret = HSW_CXT_TOTAL_SIZE; ret = HSW_CXT_TOTAL_SIZE;
else else
ret = GEN7_CXT_TOTAL_SIZE(reg) * 64; ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
...@@ -169,6 +170,8 @@ void i915_gem_context_free(struct kref *ctx_ref) ...@@ -169,6 +170,8 @@ void i915_gem_context_free(struct kref *ctx_ref)
if (ctx->legacy_hw_ctx.rcs_state) if (ctx->legacy_hw_ctx.rcs_state)
drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base); drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base);
list_del(&ctx->link); list_del(&ctx->link);
ida_simple_remove(&ctx->i915->context_hw_ida, ctx->hw_id);
kfree(ctx); kfree(ctx);
} }
...@@ -178,9 +181,9 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size) ...@@ -178,9 +181,9 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
struct drm_i915_gem_object *obj; struct drm_i915_gem_object *obj;
int ret; int ret;
obj = i915_gem_alloc_object(dev, size); obj = i915_gem_object_create(dev, size);
if (obj == NULL) if (IS_ERR(obj))
return ERR_PTR(-ENOMEM); return obj;
/* /*
* Try to make the context utilize L3 as well as LLC. * Try to make the context utilize L3 as well as LLC.
...@@ -209,6 +212,28 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size) ...@@ -209,6 +212,28 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
return obj; return obj;
} }
static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out)
{
int ret;
ret = ida_simple_get(&dev_priv->context_hw_ida,
0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
if (ret < 0) {
/* Contexts are only released when no longer active.
* Flush any pending retires to hopefully release some
* stale contexts and try again.
*/
i915_gem_retire_requests(dev_priv);
ret = ida_simple_get(&dev_priv->context_hw_ida,
0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
if (ret < 0)
return ret;
}
*out = ret;
return 0;
}
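For context on the retry in assign_hw_id(): the hw_id space is managed with the kernel's simple ida allocator, initialised in i915_gem_context_init() and torn down in i915_gem_context_fini() further down in this file. A minimal sketch of that lifecycle, condensed from the hunks in this diff — only the ida_* calls and MAX_CONTEXT_HW_ID come from the patch, the surrounding names are illustrative:

	#include <linux/idr.h>

	static struct ida ctx_ida;	/* ida_init() at driver init, ida_destroy() at fini */

	/* Hypothetical helpers mirroring assign_hw_id()/i915_gem_context_free() above. */
	static int example_hw_id_get(unsigned int *out)
	{
		int id = ida_simple_get(&ctx_ida, 0, MAX_CONTEXT_HW_ID, GFP_KERNEL);

		if (id < 0)
			return id;	/* caller retires requests and retries, as above */
		*out = id;
		return 0;
	}

	static void example_hw_id_put(unsigned int id)
	{
		ida_simple_remove(&ctx_ida, id);
	}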
static struct intel_context * static struct intel_context *
__create_hw_context(struct drm_device *dev, __create_hw_context(struct drm_device *dev,
struct drm_i915_file_private *file_priv) struct drm_i915_file_private *file_priv)
...@@ -221,6 +246,12 @@ __create_hw_context(struct drm_device *dev, ...@@ -221,6 +246,12 @@ __create_hw_context(struct drm_device *dev,
if (ctx == NULL) if (ctx == NULL)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
ret = assign_hw_id(dev_priv, &ctx->hw_id);
if (ret) {
kfree(ctx);
return ERR_PTR(ret);
}
kref_init(&ctx->ref); kref_init(&ctx->ref);
list_add_tail(&ctx->link, &dev_priv->context_list); list_add_tail(&ctx->link, &dev_priv->context_list);
ctx->i915 = dev_priv; ctx->i915 = dev_priv;
...@@ -249,7 +280,7 @@ __create_hw_context(struct drm_device *dev, ...@@ -249,7 +280,7 @@ __create_hw_context(struct drm_device *dev,
/* NB: Mark all slices as needing a remap so that when the context first /* NB: Mark all slices as needing a remap so that when the context first
* loads it will restore whatever remap state already exists. If there * loads it will restore whatever remap state already exists. If there
* is no remap info, it will be a NOP. */ * is no remap info, it will be a NOP. */
ctx->remap_slice = (1 << NUM_L3_SLICES(dev)) - 1; ctx->remap_slice = ALL_L3_SLICES(dev_priv);
ctx->hang_stats.ban_period_seconds = DRM_I915_CTX_BAN_PERIOD; ctx->hang_stats.ban_period_seconds = DRM_I915_CTX_BAN_PERIOD;
...@@ -288,7 +319,7 @@ i915_gem_create_context(struct drm_device *dev, ...@@ -288,7 +319,7 @@ i915_gem_create_context(struct drm_device *dev,
* context. * context.
*/ */
ret = i915_gem_obj_ggtt_pin(ctx->legacy_hw_ctx.rcs_state, ret = i915_gem_obj_ggtt_pin(ctx->legacy_hw_ctx.rcs_state,
get_context_alignment(dev), 0); get_context_alignment(to_i915(dev)), 0);
if (ret) { if (ret) {
DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret); DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
goto err_destroy; goto err_destroy;
...@@ -336,7 +367,6 @@ static void i915_gem_context_unpin(struct intel_context *ctx, ...@@ -336,7 +367,6 @@ static void i915_gem_context_unpin(struct intel_context *ctx,
void i915_gem_context_reset(struct drm_device *dev) void i915_gem_context_reset(struct drm_device *dev)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
int i;
if (i915.enable_execlists) { if (i915.enable_execlists) {
struct intel_context *ctx; struct intel_context *ctx;
...@@ -345,17 +375,7 @@ void i915_gem_context_reset(struct drm_device *dev) ...@@ -345,17 +375,7 @@ void i915_gem_context_reset(struct drm_device *dev)
intel_lr_context_reset(dev_priv, ctx); intel_lr_context_reset(dev_priv, ctx);
} }
for (i = 0; i < I915_NUM_ENGINES; i++) { i915_gem_context_lost(dev_priv);
struct intel_engine_cs *engine = &dev_priv->engine[i];
if (engine->last_context) {
i915_gem_context_unpin(engine->last_context, engine);
engine->last_context = NULL;
}
}
/* Force the GPU state to be reinitialised on enabling */
dev_priv->kernel_context->legacy_hw_ctx.initialized = false;
} }
int i915_gem_context_init(struct drm_device *dev) int i915_gem_context_init(struct drm_device *dev)
...@@ -368,19 +388,25 @@ int i915_gem_context_init(struct drm_device *dev) ...@@ -368,19 +388,25 @@ int i915_gem_context_init(struct drm_device *dev)
if (WARN_ON(dev_priv->kernel_context)) if (WARN_ON(dev_priv->kernel_context))
return 0; return 0;
if (intel_vgpu_active(dev) && HAS_LOGICAL_RING_CONTEXTS(dev)) { if (intel_vgpu_active(dev_priv) &&
HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
if (!i915.enable_execlists) { if (!i915.enable_execlists) {
DRM_INFO("Only EXECLIST mode is supported in vgpu.\n"); DRM_INFO("Only EXECLIST mode is supported in vgpu.\n");
return -EINVAL; return -EINVAL;
} }
} }
/* Using the simple ida interface, the max is limited by sizeof(int) */
BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
ida_init(&dev_priv->context_hw_ida);
if (i915.enable_execlists) { if (i915.enable_execlists) {
/* NB: intentionally left blank. We will allocate our own /* NB: intentionally left blank. We will allocate our own
* backing objects as we need them, thank you very much */ * backing objects as we need them, thank you very much */
dev_priv->hw_context_size = 0; dev_priv->hw_context_size = 0;
} else if (HAS_HW_CONTEXTS(dev)) { } else if (HAS_HW_CONTEXTS(dev_priv)) {
dev_priv->hw_context_size = round_up(get_context_size(dev), 4096); dev_priv->hw_context_size =
round_up(get_context_size(dev_priv), 4096);
if (dev_priv->hw_context_size > (1<<20)) { if (dev_priv->hw_context_size > (1<<20)) {
DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n", DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
dev_priv->hw_context_size); dev_priv->hw_context_size);
...@@ -403,61 +429,35 @@ int i915_gem_context_init(struct drm_device *dev) ...@@ -403,61 +429,35 @@ int i915_gem_context_init(struct drm_device *dev)
return 0; return 0;
} }
void i915_gem_context_fini(struct drm_device *dev) void i915_gem_context_lost(struct drm_i915_private *dev_priv)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine;
struct intel_context *dctx = dev_priv->kernel_context;
int i;
if (dctx->legacy_hw_ctx.rcs_state) {
/* The only known way to stop the gpu from accessing the hw context is
* to reset it. Do this as the very last operation to avoid confusing
* other code, leading to spurious errors. */
intel_gpu_reset(dev, ALL_ENGINES);
/* When default context is created and switched to, base object refcount
* will be 2 (+1 from object creation and +1 from do_switch()).
* i915_gem_context_fini() will be called after gpu_idle() has switched
* to default context. So we need to unreference the base object once
* to offset the do_switch part, so that i915_gem_context_unreference()
* can then free the base object correctly. */
WARN_ON(!dev_priv->engine[RCS].last_context);
i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state); for_each_engine(engine, dev_priv) {
} if (engine->last_context == NULL)
continue;
for (i = I915_NUM_ENGINES; --i >= 0;) {
struct intel_engine_cs *engine = &dev_priv->engine[i];
if (engine->last_context) { i915_gem_context_unpin(engine->last_context, engine);
i915_gem_context_unpin(engine->last_context, engine); engine->last_context = NULL;
engine->last_context = NULL;
}
} }
i915_gem_context_unreference(dctx); /* Force the GPU state to be reinitialised on enabling */
dev_priv->kernel_context = NULL; dev_priv->kernel_context->legacy_hw_ctx.initialized = false;
dev_priv->kernel_context->remap_slice = ALL_L3_SLICES(dev_priv);
} }
int i915_gem_context_enable(struct drm_i915_gem_request *req) void i915_gem_context_fini(struct drm_device *dev)
{ {
struct intel_engine_cs *engine = req->engine; struct drm_i915_private *dev_priv = dev->dev_private;
int ret; struct intel_context *dctx = dev_priv->kernel_context;
if (i915.enable_execlists) {
if (engine->init_context == NULL)
return 0;
ret = engine->init_context(req); if (dctx->legacy_hw_ctx.rcs_state)
} else i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
ret = i915_switch_context(req);
if (ret) { i915_gem_context_unreference(dctx);
DRM_ERROR("ring init context: %d\n", ret); dev_priv->kernel_context = NULL;
return ret;
}
return 0; ida_destroy(&dev_priv->context_hw_ida);
} }
static int context_idr_cleanup(int id, void *p, void *data) static int context_idr_cleanup(int id, void *p, void *data)
...@@ -510,12 +510,13 @@ i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id) ...@@ -510,12 +510,13 @@ i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
static inline int static inline int
mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags) mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
{ {
struct drm_i915_private *dev_priv = req->i915;
struct intel_engine_cs *engine = req->engine; struct intel_engine_cs *engine = req->engine;
u32 flags = hw_flags | MI_MM_SPACE_GTT; u32 flags = hw_flags | MI_MM_SPACE_GTT;
const int num_rings = const int num_rings =
/* Use an extended w/a on ivb+ if signalling from other rings */ /* Use an extended w/a on ivb+ if signalling from other rings */
i915_semaphore_is_enabled(engine->dev) ? i915_semaphore_is_enabled(dev_priv) ?
hweight32(INTEL_INFO(engine->dev)->ring_mask) - 1 : hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1 :
0; 0;
int len, ret; int len, ret;
...@@ -524,21 +525,21 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags) ...@@ -524,21 +525,21 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
* explicitly, so we rely on the value at ring init, stored in * explicitly, so we rely on the value at ring init, stored in
* itlb_before_ctx_switch. * itlb_before_ctx_switch.
*/ */
if (IS_GEN6(engine->dev)) { if (IS_GEN6(dev_priv)) {
ret = engine->flush(req, I915_GEM_GPU_DOMAINS, 0); ret = engine->flush(req, I915_GEM_GPU_DOMAINS, 0);
if (ret) if (ret)
return ret; return ret;
} }
/* These flags are for resource streamer on HSW+ */ /* These flags are for resource streamer on HSW+ */
if (IS_HASWELL(engine->dev) || INTEL_INFO(engine->dev)->gen >= 8) if (IS_HASWELL(dev_priv) || INTEL_GEN(dev_priv) >= 8)
flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN); flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN);
else if (INTEL_INFO(engine->dev)->gen < 8) else if (INTEL_GEN(dev_priv) < 8)
flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN); flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);
len = 4; len = 4;
if (INTEL_INFO(engine->dev)->gen >= 7) if (INTEL_GEN(dev_priv) >= 7)
len += 2 + (num_rings ? 4*num_rings + 6 : 0); len += 2 + (num_rings ? 4*num_rings + 6 : 0);
ret = intel_ring_begin(req, len); ret = intel_ring_begin(req, len);
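To make the ring-space arithmetic above concrete: on a hypothetical gen7+ part with semaphores enabled and five engines in ring_mask (the Broadwell GT3 configuration shown earlier in this diff), num_rings = hweight32(ring_mask) - 1 = 4, so intel_ring_begin() reserves len = 4 + 2 + (4*4 + 6) = 28 dwords for the MI_SET_CONTEXT sequence; with semaphores disabled num_rings is 0 and only 4 + 2 = 6 dwords are needed.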
...@@ -546,14 +547,14 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags) ...@@ -546,14 +547,14 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
return ret; return ret;
/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */ /* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
if (INTEL_INFO(engine->dev)->gen >= 7) { if (INTEL_GEN(dev_priv) >= 7) {
intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_DISABLE); intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_DISABLE);
if (num_rings) { if (num_rings) {
struct intel_engine_cs *signaller; struct intel_engine_cs *signaller;
intel_ring_emit(engine, intel_ring_emit(engine,
MI_LOAD_REGISTER_IMM(num_rings)); MI_LOAD_REGISTER_IMM(num_rings));
for_each_engine(signaller, to_i915(engine->dev)) { for_each_engine(signaller, dev_priv) {
if (signaller == engine) if (signaller == engine)
continue; continue;
...@@ -576,14 +577,14 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags) ...@@ -576,14 +577,14 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
*/ */
intel_ring_emit(engine, MI_NOOP); intel_ring_emit(engine, MI_NOOP);
if (INTEL_INFO(engine->dev)->gen >= 7) { if (INTEL_GEN(dev_priv) >= 7) {
if (num_rings) { if (num_rings) {
struct intel_engine_cs *signaller; struct intel_engine_cs *signaller;
i915_reg_t last_reg = {}; /* keep gcc quiet */ i915_reg_t last_reg = {}; /* keep gcc quiet */
intel_ring_emit(engine, intel_ring_emit(engine,
MI_LOAD_REGISTER_IMM(num_rings)); MI_LOAD_REGISTER_IMM(num_rings));
for_each_engine(signaller, to_i915(engine->dev)) { for_each_engine(signaller, dev_priv) {
if (signaller == engine) if (signaller == engine)
continue; continue;
...@@ -609,7 +610,37 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags) ...@@ -609,7 +610,37 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
return ret; return ret;
} }
static inline bool skip_rcs_switch(struct intel_engine_cs *engine, static int remap_l3(struct drm_i915_gem_request *req, int slice)
{
u32 *remap_info = req->i915->l3_parity.remap_info[slice];
struct intel_engine_cs *engine = req->engine;
int i, ret;
if (!remap_info)
return 0;
ret = intel_ring_begin(req, GEN7_L3LOG_SIZE/4 * 2 + 2);
if (ret)
return ret;
/*
* Note: We do not worry about the concurrent register cacheline hang
* here because no other code should access these registers other than
* at initialization time.
*/
intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4));
for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) {
intel_ring_emit_reg(engine, GEN7_L3LOG(slice, i));
intel_ring_emit(engine, remap_info[i]);
}
intel_ring_emit(engine, MI_NOOP);
intel_ring_advance(engine);
return 0;
}
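The intel_ring_begin() reservation in remap_l3() follows directly from the loop it precedes: one MI_LOAD_REGISTER_IMM header, GEN7_L3LOG_SIZE/4 register/value pairs at two dwords each, and a closing MI_NOOP, i.e. GEN7_L3LOG_SIZE/4 * 2 + 2 dwords in total.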
static inline bool skip_rcs_switch(struct i915_hw_ppgtt *ppgtt,
struct intel_engine_cs *engine,
struct intel_context *to) struct intel_context *to)
{ {
if (to->remap_slice) if (to->remap_slice)
...@@ -618,36 +649,44 @@ static inline bool skip_rcs_switch(struct intel_engine_cs *engine, ...@@ -618,36 +649,44 @@ static inline bool skip_rcs_switch(struct intel_engine_cs *engine,
if (!to->legacy_hw_ctx.initialized) if (!to->legacy_hw_ctx.initialized)
return false; return false;
if (to->ppgtt && if (ppgtt && (intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
!(intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings))
return false; return false;
return to == engine->last_context; return to == engine->last_context;
} }
static bool static bool
needs_pd_load_pre(struct intel_engine_cs *engine, struct intel_context *to) needs_pd_load_pre(struct i915_hw_ppgtt *ppgtt,
struct intel_engine_cs *engine,
struct intel_context *to)
{ {
if (!to->ppgtt) if (!ppgtt)
return false; return false;
/* Always load the ppgtt on first use */
if (!engine->last_context)
return true;
/* Same context without new entries, skip */
if (engine->last_context == to && if (engine->last_context == to &&
!(intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings)) !(intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
return false; return false;
if (engine->id != RCS) if (engine->id != RCS)
return true; return true;
if (INTEL_INFO(engine->dev)->gen < 8) if (INTEL_GEN(engine->i915) < 8)
return true; return true;
return false; return false;
} }
static bool static bool
needs_pd_load_post(struct intel_context *to, u32 hw_flags) needs_pd_load_post(struct i915_hw_ppgtt *ppgtt,
struct intel_context *to,
u32 hw_flags)
{ {
if (!to->ppgtt) if (!ppgtt)
return false; return false;
if (!IS_GEN8(to->i915)) if (!IS_GEN8(to->i915))
...@@ -663,16 +702,17 @@ static int do_rcs_switch(struct drm_i915_gem_request *req) ...@@ -663,16 +702,17 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
{ {
struct intel_context *to = req->ctx; struct intel_context *to = req->ctx;
struct intel_engine_cs *engine = req->engine; struct intel_engine_cs *engine = req->engine;
struct i915_hw_ppgtt *ppgtt = to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
struct intel_context *from; struct intel_context *from;
u32 hw_flags; u32 hw_flags;
int ret, i; int ret, i;
if (skip_rcs_switch(engine, to)) if (skip_rcs_switch(ppgtt, engine, to))
return 0; return 0;
/* Trying to pin first makes error handling easier. */ /* Trying to pin first makes error handling easier. */
ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state, ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
get_context_alignment(engine->dev), get_context_alignment(engine->i915),
0); 0);
if (ret) if (ret)
return ret; return ret;
...@@ -698,13 +738,13 @@ static int do_rcs_switch(struct drm_i915_gem_request *req) ...@@ -698,13 +738,13 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
if (ret) if (ret)
goto unpin_out; goto unpin_out;
if (needs_pd_load_pre(engine, to)) { if (needs_pd_load_pre(ppgtt, engine, to)) {
/* Older GENs and non render rings still want the load first, /* Older GENs and non render rings still want the load first,
* "PP_DCLV followed by PP_DIR_BASE register through Load * "PP_DCLV followed by PP_DIR_BASE register through Load
* Register Immediate commands in Ring Buffer before submitting * Register Immediate commands in Ring Buffer before submitting
* a context."*/ * a context."*/
trace_switch_mm(engine, to); trace_switch_mm(engine, to);
ret = to->ppgtt->switch_mm(to->ppgtt, req); ret = ppgtt->switch_mm(ppgtt, req);
if (ret) if (ret)
goto unpin_out; goto unpin_out;
} }
...@@ -715,16 +755,11 @@ static int do_rcs_switch(struct drm_i915_gem_request *req) ...@@ -715,16 +755,11 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
* space. This means we must enforce that a page table load * space. This means we must enforce that a page table load
* occur when this occurs. */ * occur when this occurs. */
hw_flags = MI_RESTORE_INHIBIT; hw_flags = MI_RESTORE_INHIBIT;
else if (to->ppgtt && else if (ppgtt && intel_engine_flag(engine) & ppgtt->pd_dirty_rings)
intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings)
hw_flags = MI_FORCE_RESTORE; hw_flags = MI_FORCE_RESTORE;
else else
hw_flags = 0; hw_flags = 0;
/* We should never emit switch_mm more than once */
WARN_ON(needs_pd_load_pre(engine, to) &&
needs_pd_load_post(to, hw_flags));
if (to != from || (hw_flags & MI_FORCE_RESTORE)) { if (to != from || (hw_flags & MI_FORCE_RESTORE)) {
ret = mi_set_context(req, hw_flags); ret = mi_set_context(req, hw_flags);
if (ret) if (ret)
...@@ -759,9 +794,9 @@ static int do_rcs_switch(struct drm_i915_gem_request *req) ...@@ -759,9 +794,9 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
/* GEN8 does *not* require an explicit reload if the PDPs have been /* GEN8 does *not* require an explicit reload if the PDPs have been
* setup, and we do not wish to move them. * setup, and we do not wish to move them.
*/ */
if (needs_pd_load_post(to, hw_flags)) { if (needs_pd_load_post(ppgtt, to, hw_flags)) {
trace_switch_mm(engine, to); trace_switch_mm(engine, to);
ret = to->ppgtt->switch_mm(to->ppgtt, req); ret = ppgtt->switch_mm(ppgtt, req);
/* The hardware context switch is emitted, but we haven't /* The hardware context switch is emitted, but we haven't
* actually changed the state - so it's probably safe to bail * actually changed the state - so it's probably safe to bail
* here. Still, let the user know something dangerous has * here. Still, let the user know something dangerous has
...@@ -771,14 +806,14 @@ static int do_rcs_switch(struct drm_i915_gem_request *req) ...@@ -771,14 +806,14 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
return ret; return ret;
} }
if (to->ppgtt) if (ppgtt)
to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine); ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
for (i = 0; i < MAX_L3_SLICES; i++) { for (i = 0; i < MAX_L3_SLICES; i++) {
if (!(to->remap_slice & (1<<i))) if (!(to->remap_slice & (1<<i)))
continue; continue;
ret = i915_gem_l3_remap(req, i); ret = remap_l3(req, i);
if (ret) if (ret)
return ret; return ret;
...@@ -825,17 +860,18 @@ int i915_switch_context(struct drm_i915_gem_request *req) ...@@ -825,17 +860,18 @@ int i915_switch_context(struct drm_i915_gem_request *req)
if (engine->id != RCS || if (engine->id != RCS ||
req->ctx->legacy_hw_ctx.rcs_state == NULL) { req->ctx->legacy_hw_ctx.rcs_state == NULL) {
struct intel_context *to = req->ctx; struct intel_context *to = req->ctx;
struct i915_hw_ppgtt *ppgtt =
to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
if (needs_pd_load_pre(engine, to)) { if (needs_pd_load_pre(ppgtt, engine, to)) {
int ret; int ret;
trace_switch_mm(engine, to); trace_switch_mm(engine, to);
ret = to->ppgtt->switch_mm(to->ppgtt, req); ret = ppgtt->switch_mm(ppgtt, req);
if (ret) if (ret)
return ret; return ret;
/* Doing a PD load always reloads the page dirs */ ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
} }
if (to != engine->last_context) { if (to != engine->last_context) {
...@@ -1004,3 +1040,42 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, ...@@ -1004,3 +1040,42 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
return ret; return ret;
} }
int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
void *data, struct drm_file *file)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_reset_stats *args = data;
struct i915_ctx_hang_stats *hs;
struct intel_context *ctx;
int ret;
if (args->flags || args->pad)
return -EINVAL;
if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
return -EPERM;
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
ctx = i915_gem_context_get(file->driver_priv, args->ctx_id);
if (IS_ERR(ctx)) {
mutex_unlock(&dev->struct_mutex);
return PTR_ERR(ctx);
}
hs = &ctx->hang_stats;
if (capable(CAP_SYS_ADMIN))
args->reset_count = i915_reset_count(&dev_priv->gpu_error);
else
args->reset_count = 0;
args->batch_active = hs->batch_active;
args->batch_pending = hs->batch_pending;
mutex_unlock(&dev->struct_mutex);
return 0;
}
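For completeness, the handler above services the existing I915_GET_RESET_STATS uapi, re-routed to this file by the ioctl-table change at the top of this diff. A minimal, hypothetical userspace caller — the field names are taken from the handler above, while the header paths and error handling are assumptions:

	#include <errno.h>
	#include <stdio.h>
	#include <string.h>
	#include <xf86drm.h>	/* drmIoctl(), from libdrm */
	#include <i915_drm.h>	/* struct drm_i915_reset_stats; include path depends on the install */

	static int query_reset_stats(int fd, unsigned int ctx_id)
	{
		struct drm_i915_reset_stats stats;

		memset(&stats, 0, sizeof(stats));	/* flags and pad must be zero, or the ioctl returns -EINVAL */
		stats.ctx_id = ctx_id;

		if (drmIoctl(fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats))
			return -errno;

		/* reset_count stays zero unless the caller has CAP_SYS_ADMIN */
		printf("resets=%u active=%u pending=%u\n",
		       stats.reset_count, stats.batch_active, stats.batch_pending);
		return 0;
	}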
...@@ -154,7 +154,7 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm, ...@@ -154,7 +154,7 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
if (ret) if (ret)
return ret; return ret;
i915_gem_retire_requests(dev); i915_gem_retire_requests(to_i915(dev));
goto search_again; goto search_again;
} }
...@@ -265,7 +265,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle) ...@@ -265,7 +265,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
if (ret) if (ret)
return ret; return ret;
i915_gem_retire_requests(vm->dev); i915_gem_retire_requests(to_i915(vm->dev));
WARN_ON(!list_empty(&vm->active_list)); WARN_ON(!list_empty(&vm->active_list));
} }
......
...@@ -722,7 +722,7 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *engine, ...@@ -722,7 +722,7 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
struct i915_address_space *vm; struct i915_address_space *vm;
struct list_head ordered_vmas; struct list_head ordered_vmas;
struct list_head pinned_vmas; struct list_head pinned_vmas;
bool has_fenced_gpu_access = INTEL_INFO(engine->dev)->gen < 4; bool has_fenced_gpu_access = INTEL_GEN(engine->i915) < 4;
int retry; int retry;
i915_gem_retire_requests_ring(engine); i915_gem_retire_requests_ring(engine);
...@@ -963,7 +963,7 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req, ...@@ -963,7 +963,7 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
} }
if (flush_chipset) if (flush_chipset)
i915_gem_chipset_flush(req->engine->dev); i915_gem_chipset_flush(req->engine->i915);
if (flush_domains & I915_GEM_DOMAIN_GTT) if (flush_domains & I915_GEM_DOMAIN_GTT)
wmb(); wmb();
...@@ -1083,14 +1083,6 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file, ...@@ -1083,14 +1083,6 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
return ERR_PTR(-EIO); return ERR_PTR(-EIO);
} }
if (i915.enable_execlists && !ctx->engine[engine->id].state) {
int ret = intel_lr_context_deferred_alloc(ctx, engine);
if (ret) {
DRM_DEBUG("Could not create LRC %u: %d\n", ctx_id, ret);
return ERR_PTR(ret);
}
}
return ctx; return ctx;
} }
...@@ -1125,7 +1117,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas, ...@@ -1125,7 +1117,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) { if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
i915_gem_request_assign(&obj->last_fenced_req, req); i915_gem_request_assign(&obj->last_fenced_req, req);
if (entry->flags & __EXEC_OBJECT_HAS_FENCE) { if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
struct drm_i915_private *dev_priv = to_i915(engine->dev); struct drm_i915_private *dev_priv = engine->i915;
list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list, list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
&dev_priv->mm.fence_list); &dev_priv->mm.fence_list);
} }
......
...@@ -745,15 +745,15 @@ i915_gem_swizzle_page(struct page *page) ...@@ -745,15 +745,15 @@ i915_gem_swizzle_page(struct page *page)
void void
i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj) i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
{ {
struct sg_page_iter sg_iter; struct sgt_iter sgt_iter;
struct page *page;
int i; int i;
if (obj->bit_17 == NULL) if (obj->bit_17 == NULL)
return; return;
i = 0; i = 0;
for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) { for_each_sgt_page(page, sgt_iter, obj->pages) {
struct page *page = sg_page_iter_page(&sg_iter);
char new_bit_17 = page_to_phys(page) >> 17; char new_bit_17 = page_to_phys(page) >> 17;
if ((new_bit_17 & 0x1) != if ((new_bit_17 & 0x1) !=
(test_bit(i, obj->bit_17) != 0)) { (test_bit(i, obj->bit_17) != 0)) {
...@@ -775,7 +775,8 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj) ...@@ -775,7 +775,8 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
void void
i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj) i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
{ {
struct sg_page_iter sg_iter; struct sgt_iter sgt_iter;
struct page *page;
int page_count = obj->base.size >> PAGE_SHIFT; int page_count = obj->base.size >> PAGE_SHIFT;
int i; int i;
...@@ -790,8 +791,9 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj) ...@@ -790,8 +791,9 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
} }
i = 0; i = 0;
for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
if (page_to_phys(sg_page_iter_page(&sg_iter)) & (1 << 17)) for_each_sgt_page(page, sgt_iter, obj->pages) {
if (page_to_phys(page) & (1 << 17))
__set_bit(i, obj->bit_17); __set_bit(i, obj->bit_17);
else else
__clear_bit(i, obj->bit_17); __clear_bit(i, obj->bit_17);
......
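The two hunks above are part of the series-wide switch from sg_page_iter to the new lightweight sgt_iter helpers. A minimal sketch of the two usage shapes, using only the macro signatures visible in this diff — the per-page and per-address work is a placeholder:

	struct sgt_iter sgt_iter;
	struct page *page;
	dma_addr_t addr;

	/* CPU-side walk over the struct pages backing an object. */
	for_each_sgt_page(page, sgt_iter, obj->pages)
		consume_page(page);		/* placeholder for real per-page work */

	/* DMA-side walk over the addresses of an sg_table. */
	for_each_sgt_dma(addr, sgt_iter, st)
		write_pte(addr);		/* placeholder for real PTE writes */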
...@@ -93,6 +93,13 @@ ...@@ -93,6 +93,13 @@
* *
*/ */
static inline struct i915_ggtt *
i915_vm_to_ggtt(struct i915_address_space *vm)
{
GEM_BUG_ON(!i915_is_ggtt(vm));
return container_of(vm, struct i915_ggtt, base);
}
static int static int
i915_get_ggtt_vma_pages(struct i915_vma *vma); i915_get_ggtt_vma_pages(struct i915_vma *vma);
...@@ -103,25 +110,29 @@ const struct i915_ggtt_view i915_ggtt_view_rotated = { ...@@ -103,25 +110,29 @@ const struct i915_ggtt_view i915_ggtt_view_rotated = {
.type = I915_GGTT_VIEW_ROTATED, .type = I915_GGTT_VIEW_ROTATED,
}; };
static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt) int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
int enable_ppgtt)
{ {
bool has_aliasing_ppgtt; bool has_aliasing_ppgtt;
bool has_full_ppgtt; bool has_full_ppgtt;
bool has_full_48bit_ppgtt; bool has_full_48bit_ppgtt;
has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6; has_aliasing_ppgtt = INTEL_GEN(dev_priv) >= 6;
has_full_ppgtt = INTEL_INFO(dev)->gen >= 7; has_full_ppgtt = INTEL_GEN(dev_priv) >= 7;
has_full_48bit_ppgtt = IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9; has_full_48bit_ppgtt =
IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9;
if (intel_vgpu_active(dev)) if (intel_vgpu_active(dev_priv))
has_full_ppgtt = false; /* emulation is too hard */ has_full_ppgtt = false; /* emulation is too hard */
if (!has_aliasing_ppgtt)
return 0;
/* /*
* We don't allow disabling PPGTT for gen9+ as it's a requirement for * We don't allow disabling PPGTT for gen9+ as it's a requirement for
* execlists, the sole mechanism available to submit work. * execlists, the sole mechanism available to submit work.
*/ */
if (INTEL_INFO(dev)->gen < 9 && if (enable_ppgtt == 0 && INTEL_GEN(dev_priv) < 9)
(enable_ppgtt == 0 || !has_aliasing_ppgtt))
return 0; return 0;
if (enable_ppgtt == 1) if (enable_ppgtt == 1)
...@@ -135,19 +146,19 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt) ...@@ -135,19 +146,19 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
#ifdef CONFIG_INTEL_IOMMU #ifdef CONFIG_INTEL_IOMMU
/* Disable ppgtt on SNB if VT-d is on. */ /* Disable ppgtt on SNB if VT-d is on. */
if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) { if (IS_GEN6(dev_priv) && intel_iommu_gfx_mapped) {
DRM_INFO("Disabling PPGTT because VT-d is on\n"); DRM_INFO("Disabling PPGTT because VT-d is on\n");
return 0; return 0;
} }
#endif #endif
/* Early VLV doesn't have this */ /* Early VLV doesn't have this */
if (IS_VALLEYVIEW(dev) && dev->pdev->revision < 0xb) { if (IS_VALLEYVIEW(dev_priv) && dev_priv->dev->pdev->revision < 0xb) {
DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n"); DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
return 0; return 0;
} }
if (INTEL_INFO(dev)->gen >= 8 && i915.enable_execlists) if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists)
return has_full_48bit_ppgtt ? 3 : 2; return has_full_48bit_ppgtt ? 3 : 2;
else else
return has_aliasing_ppgtt ? 1 : 0; return has_aliasing_ppgtt ? 1 : 0;
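Reading the return values of intel_sanitize_enable_ppgtt() back from the code above, the sanitized i915.enable_ppgtt ends up meaning:

	0 - PPGTT disabled (no gen6+ aliasing support, VT-d active on SNB, pre-B3 VLV, or requested off)
	1 - aliasing PPGTT only
	2 - full PPGTT
	3 - full 48-bit PPGTT (Broadwell or gen9+ with execlists)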
...@@ -866,6 +877,7 @@ static void gen8_free_page_tables(struct drm_device *dev, ...@@ -866,6 +877,7 @@ static void gen8_free_page_tables(struct drm_device *dev,
static int gen8_init_scratch(struct i915_address_space *vm) static int gen8_init_scratch(struct i915_address_space *vm)
{ {
struct drm_device *dev = vm->dev; struct drm_device *dev = vm->dev;
int ret;
vm->scratch_page = alloc_scratch_page(dev); vm->scratch_page = alloc_scratch_page(dev);
if (IS_ERR(vm->scratch_page)) if (IS_ERR(vm->scratch_page))
...@@ -873,24 +885,21 @@ static int gen8_init_scratch(struct i915_address_space *vm) ...@@ -873,24 +885,21 @@ static int gen8_init_scratch(struct i915_address_space *vm)
vm->scratch_pt = alloc_pt(dev); vm->scratch_pt = alloc_pt(dev);
if (IS_ERR(vm->scratch_pt)) { if (IS_ERR(vm->scratch_pt)) {
free_scratch_page(dev, vm->scratch_page); ret = PTR_ERR(vm->scratch_pt);
return PTR_ERR(vm->scratch_pt); goto free_scratch_page;
} }
vm->scratch_pd = alloc_pd(dev); vm->scratch_pd = alloc_pd(dev);
if (IS_ERR(vm->scratch_pd)) { if (IS_ERR(vm->scratch_pd)) {
free_pt(dev, vm->scratch_pt); ret = PTR_ERR(vm->scratch_pd);
free_scratch_page(dev, vm->scratch_page); goto free_pt;
return PTR_ERR(vm->scratch_pd);
} }
if (USES_FULL_48BIT_PPGTT(dev)) { if (USES_FULL_48BIT_PPGTT(dev)) {
vm->scratch_pdp = alloc_pdp(dev); vm->scratch_pdp = alloc_pdp(dev);
if (IS_ERR(vm->scratch_pdp)) { if (IS_ERR(vm->scratch_pdp)) {
free_pd(dev, vm->scratch_pd); ret = PTR_ERR(vm->scratch_pdp);
free_pt(dev, vm->scratch_pt); goto free_pd;
free_scratch_page(dev, vm->scratch_page);
return PTR_ERR(vm->scratch_pdp);
} }
} }
...@@ -900,6 +909,15 @@ static int gen8_init_scratch(struct i915_address_space *vm) ...@@ -900,6 +909,15 @@ static int gen8_init_scratch(struct i915_address_space *vm)
gen8_initialize_pdp(vm, vm->scratch_pdp); gen8_initialize_pdp(vm, vm->scratch_pdp);
return 0; return 0;
free_pd:
free_pd(dev, vm->scratch_pd);
free_pt:
free_pt(dev, vm->scratch_pt);
free_scratch_page:
free_scratch_page(dev, vm->scratch_page);
return ret;
} }
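The reworked error path above is the standard kernel unwind idiom: allocate in order, jump to the label that frees everything allocated so far, and fall through the labels in reverse allocation order. A generic minimal sketch of that shape — all names here are placeholders:

	a = alloc_a(dev);
	if (IS_ERR(a))
		return PTR_ERR(a);

	b = alloc_b(dev);
	if (IS_ERR(b)) {
		ret = PTR_ERR(b);
		goto free_a;
	}

	c = alloc_c(dev);
	if (IS_ERR(c)) {
		ret = PTR_ERR(c);
		goto free_b;
	}
	return 0;

	free_b:
		free_b(dev, b);
	free_a:
		free_a(dev, a);
		return ret;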
static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create) static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
...@@ -978,7 +996,7 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm) ...@@ -978,7 +996,7 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{ {
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
if (intel_vgpu_active(vm->dev)) if (intel_vgpu_active(to_i915(vm->dev)))
gen8_ppgtt_notify_vgt(ppgtt, false); gen8_ppgtt_notify_vgt(ppgtt, false);
if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
...@@ -1529,14 +1547,14 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt) ...@@ -1529,14 +1547,14 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
0, 0, 0, 0,
GEN8_PML4E_SHIFT); GEN8_PML4E_SHIFT);
if (intel_vgpu_active(ppgtt->base.dev)) { if (intel_vgpu_active(to_i915(ppgtt->base.dev))) {
ret = gen8_preallocate_top_level_pdps(ppgtt); ret = gen8_preallocate_top_level_pdps(ppgtt);
if (ret) if (ret)
goto free_scratch; goto free_scratch;
} }
} }
if (intel_vgpu_active(ppgtt->base.dev)) if (intel_vgpu_active(to_i915(ppgtt->base.dev)))
gen8_ppgtt_notify_vgt(ppgtt, true); gen8_ppgtt_notify_vgt(ppgtt, true);
return 0; return 0;
...@@ -1821,20 +1839,19 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm, ...@@ -1821,20 +1839,19 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
enum i915_cache_level cache_level, u32 flags) enum i915_cache_level cache_level, u32 flags)
{ {
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
gen6_pte_t *pt_vaddr;
unsigned first_entry = start >> PAGE_SHIFT; unsigned first_entry = start >> PAGE_SHIFT;
unsigned act_pt = first_entry / GEN6_PTES; unsigned act_pt = first_entry / GEN6_PTES;
unsigned act_pte = first_entry % GEN6_PTES; unsigned act_pte = first_entry % GEN6_PTES;
struct sg_page_iter sg_iter; gen6_pte_t *pt_vaddr = NULL;
struct sgt_iter sgt_iter;
dma_addr_t addr;
pt_vaddr = NULL; for_each_sgt_dma(addr, sgt_iter, pages) {
for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
if (pt_vaddr == NULL) if (pt_vaddr == NULL)
pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]); pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);
pt_vaddr[act_pte] = pt_vaddr[act_pte] =
vm->pte_encode(sg_page_iter_dma_address(&sg_iter), vm->pte_encode(addr, cache_level, true, flags);
cache_level, true, flags);
if (++act_pte == GEN6_PTES) { if (++act_pte == GEN6_PTES) {
kunmap_px(ppgtt, pt_vaddr); kunmap_px(ppgtt, pt_vaddr);
...@@ -1843,6 +1860,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm, ...@@ -1843,6 +1860,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
act_pte = 0; act_pte = 0;
} }
} }
if (pt_vaddr) if (pt_vaddr)
kunmap_px(ppgtt, pt_vaddr); kunmap_px(ppgtt, pt_vaddr);
} }
...@@ -2064,7 +2082,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) ...@@ -2064,7 +2082,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
} else } else
BUG(); BUG();
if (intel_vgpu_active(dev)) if (intel_vgpu_active(dev_priv))
ppgtt->switch_mm = vgpu_mm_switch; ppgtt->switch_mm = vgpu_mm_switch;
ret = gen6_ppgtt_alloc(ppgtt); ret = gen6_ppgtt_alloc(ppgtt);
...@@ -2140,7 +2158,7 @@ static void gtt_write_workarounds(struct drm_device *dev) ...@@ -2140,7 +2158,7 @@ static void gtt_write_workarounds(struct drm_device *dev)
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT); I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
} }
int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt) static int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
int ret = 0; int ret = 0;
...@@ -2179,20 +2197,6 @@ int i915_ppgtt_init_hw(struct drm_device *dev) ...@@ -2179,20 +2197,6 @@ int i915_ppgtt_init_hw(struct drm_device *dev)
return 0; return 0;
} }
int i915_ppgtt_init_ring(struct drm_i915_gem_request *req)
{
struct drm_i915_private *dev_priv = req->i915;
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
if (i915.enable_execlists)
return 0;
if (!ppgtt)
return 0;
return ppgtt->switch_mm(ppgtt, req);
}
struct i915_hw_ppgtt * struct i915_hw_ppgtt *
i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv) i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv)
{ {
...@@ -2275,12 +2279,11 @@ static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible) ...@@ -2275,12 +2279,11 @@ static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
dev_priv->mm.interruptible = interruptible; dev_priv->mm.interruptible = interruptible;
} }
void i915_check_and_clear_faults(struct drm_device *dev) void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
{ {
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *engine; struct intel_engine_cs *engine;
if (INTEL_INFO(dev)->gen < 6) if (INTEL_INFO(dev_priv)->gen < 6)
return; return;
for_each_engine(engine, dev_priv) { for_each_engine(engine, dev_priv) {
...@@ -2324,7 +2327,7 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev) ...@@ -2324,7 +2327,7 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
if (INTEL_INFO(dev)->gen < 6) if (INTEL_INFO(dev)->gen < 6)
return; return;
i915_check_and_clear_faults(dev); i915_check_and_clear_faults(dev_priv);
ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total, ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total,
true); true);
...@@ -2358,23 +2361,21 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm, ...@@ -2358,23 +2361,21 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
enum i915_cache_level level, u32 unused) enum i915_cache_level level, u32 unused)
{ {
struct drm_i915_private *dev_priv = to_i915(vm->dev); struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt; struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
unsigned first_entry = start >> PAGE_SHIFT; struct sgt_iter sgt_iter;
gen8_pte_t __iomem *gtt_entries = gen8_pte_t __iomem *gtt_entries;
(gen8_pte_t __iomem *)ggtt->gsm + first_entry; gen8_pte_t gtt_entry;
int i = 0; dma_addr_t addr;
struct sg_page_iter sg_iter;
dma_addr_t addr = 0; /* shut up gcc */
int rpm_atomic_seq; int rpm_atomic_seq;
int i = 0;
rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv); rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) { gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT);
addr = sg_dma_address(sg_iter.sg) +
(sg_iter.sg_pgoffset << PAGE_SHIFT); for_each_sgt_dma(addr, sgt_iter, st) {
gen8_set_pte(&gtt_entries[i], gtt_entry = gen8_pte_encode(addr, level, true);
gen8_pte_encode(addr, level, true)); gen8_set_pte(&gtt_entries[i++], gtt_entry);
i++;
} }
/* /*
...@@ -2385,8 +2386,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm, ...@@ -2385,8 +2386,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
* hardware should work, we must keep this posting read for paranoia. * hardware should work, we must keep this posting read for paranoia.
*/ */
if (i != 0) if (i != 0)
WARN_ON(readq(&gtt_entries[i-1]) WARN_ON(readq(&gtt_entries[i-1]) != gtt_entry);
!= gen8_pte_encode(addr, level, true));
/* This next bit makes the above posting read even more important. We /* This next bit makes the above posting read even more important. We
* want to flush the TLBs only after we're certain all the PTE updates * want to flush the TLBs only after we're certain all the PTE updates
...@@ -2436,21 +2436,21 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm, ...@@ -2436,21 +2436,21 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
enum i915_cache_level level, u32 flags) enum i915_cache_level level, u32 flags)
{ {
struct drm_i915_private *dev_priv = to_i915(vm->dev); struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt; struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
unsigned first_entry = start >> PAGE_SHIFT; struct sgt_iter sgt_iter;
gen6_pte_t __iomem *gtt_entries = gen6_pte_t __iomem *gtt_entries;
(gen6_pte_t __iomem *)ggtt->gsm + first_entry; gen6_pte_t gtt_entry;
int i = 0; dma_addr_t addr;
struct sg_page_iter sg_iter;
dma_addr_t addr = 0;
int rpm_atomic_seq; int rpm_atomic_seq;
int i = 0;
rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv); rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) { gtt_entries = (gen6_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT);
addr = sg_page_iter_dma_address(&sg_iter);
iowrite32(vm->pte_encode(addr, level, true, flags), &gtt_entries[i]); for_each_sgt_dma(addr, sgt_iter, st) {
i++; gtt_entry = vm->pte_encode(addr, level, true, flags);
iowrite32(gtt_entry, &gtt_entries[i++]);
} }
/* XXX: This serves as a posting read to make sure that the PTE has /* XXX: This serves as a posting read to make sure that the PTE has
...@@ -2459,10 +2459,8 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm, ...@@ -2459,10 +2459,8 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
* of NUMA access patterns. Therefore, even with the way we assume * of NUMA access patterns. Therefore, even with the way we assume
* hardware should work, we must keep this posting read for paranoia. * hardware should work, we must keep this posting read for paranoia.
*/ */
if (i != 0) { if (i != 0)
unsigned long gtt = readl(&gtt_entries[i-1]); WARN_ON(readl(&gtt_entries[i-1]) != gtt_entry);
WARN_ON(gtt != vm->pte_encode(addr, level, true, flags));
}
/* This next bit makes the above posting read even more important. We /* This next bit makes the above posting read even more important. We
* want to flush the TLBs only after we're certain all the PTE updates * want to flush the TLBs only after we're certain all the PTE updates
...@@ -2474,13 +2472,20 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm, ...@@ -2474,13 +2472,20 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
assert_rpm_atomic_end(dev_priv, rpm_atomic_seq); assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
} }
static void nop_clear_range(struct i915_address_space *vm,
uint64_t start,
uint64_t length,
bool use_scratch)
{
}
static void gen8_ggtt_clear_range(struct i915_address_space *vm, static void gen8_ggtt_clear_range(struct i915_address_space *vm,
uint64_t start, uint64_t start,
uint64_t length, uint64_t length,
bool use_scratch) bool use_scratch)
{ {
struct drm_i915_private *dev_priv = to_i915(vm->dev); struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt; struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
unsigned first_entry = start >> PAGE_SHIFT; unsigned first_entry = start >> PAGE_SHIFT;
unsigned num_entries = length >> PAGE_SHIFT; unsigned num_entries = length >> PAGE_SHIFT;
gen8_pte_t scratch_pte, __iomem *gtt_base = gen8_pte_t scratch_pte, __iomem *gtt_base =
...@@ -2512,7 +2517,7 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm, ...@@ -2512,7 +2517,7 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
bool use_scratch) bool use_scratch)
{ {
struct drm_i915_private *dev_priv = to_i915(vm->dev); struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt; struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
unsigned first_entry = start >> PAGE_SHIFT; unsigned first_entry = start >> PAGE_SHIFT;
unsigned num_entries = length >> PAGE_SHIFT; unsigned num_entries = length >> PAGE_SHIFT;
gen6_pte_t scratch_pte, __iomem *gtt_base = gen6_pte_t scratch_pte, __iomem *gtt_base =
...@@ -2727,7 +2732,7 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev, ...@@ -2727,7 +2732,7 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
i915_address_space_init(&ggtt->base, dev_priv); i915_address_space_init(&ggtt->base, dev_priv);
ggtt->base.total += PAGE_SIZE; ggtt->base.total += PAGE_SIZE;
if (intel_vgpu_active(dev)) { if (intel_vgpu_active(dev_priv)) {
ret = intel_vgt_balloon(dev); ret = intel_vgt_balloon(dev);
if (ret) if (ret)
return ret; return ret;
...@@ -2831,7 +2836,7 @@ void i915_ggtt_cleanup_hw(struct drm_device *dev) ...@@ -2831,7 +2836,7 @@ void i915_ggtt_cleanup_hw(struct drm_device *dev)
i915_gem_cleanup_stolen(dev); i915_gem_cleanup_stolen(dev);
if (drm_mm_initialized(&ggtt->base.mm)) { if (drm_mm_initialized(&ggtt->base.mm)) {
if (intel_vgpu_active(dev)) if (intel_vgpu_active(dev_priv))
intel_vgt_deballoon(); intel_vgt_deballoon();
drm_mm_takedown(&ggtt->base.mm); drm_mm_takedown(&ggtt->base.mm);
...@@ -3069,14 +3074,17 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt) ...@@ -3069,14 +3074,17 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
ret = ggtt_probe_common(dev, ggtt->size); ret = ggtt_probe_common(dev, ggtt->size);
ggtt->base.clear_range = gen8_ggtt_clear_range;
if (IS_CHERRYVIEW(dev_priv))
ggtt->base.insert_entries = gen8_ggtt_insert_entries__BKL;
else
ggtt->base.insert_entries = gen8_ggtt_insert_entries;
ggtt->base.bind_vma = ggtt_bind_vma; ggtt->base.bind_vma = ggtt_bind_vma;
ggtt->base.unbind_vma = ggtt_unbind_vma; ggtt->base.unbind_vma = ggtt_unbind_vma;
ggtt->base.clear_range = nop_clear_range;
if (!USES_FULL_PPGTT(dev_priv))
ggtt->base.clear_range = gen8_ggtt_clear_range;
ggtt->base.insert_entries = gen8_ggtt_insert_entries;
if (IS_CHERRYVIEW(dev_priv))
ggtt->base.insert_entries = gen8_ggtt_insert_entries__BKL;
return ret; return ret;
} }
...@@ -3219,14 +3227,6 @@ int i915_ggtt_init_hw(struct drm_device *dev) ...@@ -3219,14 +3227,6 @@ int i915_ggtt_init_hw(struct drm_device *dev)
if (intel_iommu_gfx_mapped) if (intel_iommu_gfx_mapped)
DRM_INFO("VT-d active for gfx access\n"); DRM_INFO("VT-d active for gfx access\n");
#endif #endif
/*
* i915.enable_ppgtt is read-only, so do an early pass to validate the
* user's requested state against the hardware/driver capabilities. We
* do this now so that we can print out any log messages once rather
* than every time we check intel_enable_ppgtt().
*/
i915.enable_ppgtt = sanitize_enable_ppgtt(dev, i915.enable_ppgtt);
DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
return 0; return 0;
...@@ -3250,9 +3250,8 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev) ...@@ -3250,9 +3250,8 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
struct i915_ggtt *ggtt = &dev_priv->ggtt; struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct drm_i915_gem_object *obj; struct drm_i915_gem_object *obj;
struct i915_vma *vma; struct i915_vma *vma;
bool flush;
i915_check_and_clear_faults(dev); i915_check_and_clear_faults(dev_priv);
/* First fill our portion of the GTT with scratch pages */ /* First fill our portion of the GTT with scratch pages */
ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total, ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total,
...@@ -3260,19 +3259,16 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev) ...@@ -3260,19 +3259,16 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
/* Cache flush objects bound into GGTT and rebind them. */ /* Cache flush objects bound into GGTT and rebind them. */
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
flush = false;
list_for_each_entry(vma, &obj->vma_list, obj_link) { list_for_each_entry(vma, &obj->vma_list, obj_link) {
if (vma->vm != &ggtt->base) if (vma->vm != &ggtt->base)
continue; continue;
WARN_ON(i915_vma_bind(vma, obj->cache_level, WARN_ON(i915_vma_bind(vma, obj->cache_level,
PIN_UPDATE)); PIN_UPDATE));
flush = true;
} }
if (flush) if (obj->pin_display)
i915_gem_clflush_object(obj, obj->pin_display); WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
} }
if (INTEL_INFO(dev)->gen >= 8) { if (INTEL_INFO(dev)->gen >= 8) {
@@ -3398,9 +3394,11 @@ static struct sg_table *
 intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
 			  struct drm_i915_gem_object *obj)
 {
+	const size_t n_pages = obj->base.size / PAGE_SIZE;
 	unsigned int size_pages = rot_info->plane[0].width * rot_info->plane[0].height;
 	unsigned int size_pages_uv;
-	struct sg_page_iter sg_iter;
+	struct sgt_iter sgt_iter;
+	dma_addr_t dma_addr;
 	unsigned long i;
 	dma_addr_t *page_addr_list;
 	struct sg_table *st;
@@ -3409,7 +3407,7 @@ intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
 	int ret = -ENOMEM;
 
 	/* Allocate a temporary list of source pages for random access. */
-	page_addr_list = drm_malloc_gfp(obj->base.size / PAGE_SIZE,
+	page_addr_list = drm_malloc_gfp(n_pages,
 					sizeof(dma_addr_t),
 					GFP_TEMPORARY);
 	if (!page_addr_list)
@@ -3432,11 +3430,10 @@ intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
 	/* Populate source page list from the object. */
 	i = 0;
-	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
-		page_addr_list[i] = sg_page_iter_dma_address(&sg_iter);
-		i++;
-	}
+	for_each_sgt_dma(dma_addr, sgt_iter, obj->pages)
+		page_addr_list[i++] = dma_addr;
+
+	GEM_BUG_ON(i != n_pages);
 
 	st->nents = 0;
 	sg = st->sgl;
...@@ -3634,3 +3631,29 @@ i915_ggtt_view_size(struct drm_i915_gem_object *obj, ...@@ -3634,3 +3631,29 @@ i915_ggtt_view_size(struct drm_i915_gem_object *obj,
return obj->base.size; return obj->base.size;
} }
} }
void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{
void __iomem *ptr;
lockdep_assert_held(&vma->vm->dev->struct_mutex);
if (WARN_ON(!vma->obj->map_and_fenceable))
return ERR_PTR(-ENODEV);
GEM_BUG_ON(!vma->is_ggtt);
GEM_BUG_ON((vma->bound & GLOBAL_BIND) == 0);
ptr = vma->iomap;
if (ptr == NULL) {
ptr = io_mapping_map_wc(i915_vm_to_ggtt(vma->vm)->mappable,
vma->node.start,
vma->node.size);
if (ptr == NULL)
return ERR_PTR(-ENOMEM);
vma->iomap = ptr;
}
vma->pin_count++;
return ptr;
}
...@@ -34,6 +34,8 @@ ...@@ -34,6 +34,8 @@
#ifndef __I915_GEM_GTT_H__ #ifndef __I915_GEM_GTT_H__
#define __I915_GEM_GTT_H__ #define __I915_GEM_GTT_H__
#include <linux/io-mapping.h>
struct drm_i915_file_private; struct drm_i915_file_private;
typedef uint32_t gen6_pte_t; typedef uint32_t gen6_pte_t;
...@@ -175,6 +177,7 @@ struct i915_vma { ...@@ -175,6 +177,7 @@ struct i915_vma {
struct drm_mm_node node; struct drm_mm_node node;
struct drm_i915_gem_object *obj; struct drm_i915_gem_object *obj;
struct i915_address_space *vm; struct i915_address_space *vm;
void __iomem *iomap;
/** Flags and address space this VMA is bound to */ /** Flags and address space this VMA is bound to */
#define GLOBAL_BIND (1<<0) #define GLOBAL_BIND (1<<0)
...@@ -518,9 +521,7 @@ int i915_ggtt_enable_hw(struct drm_device *dev); ...@@ -518,9 +521,7 @@ int i915_ggtt_enable_hw(struct drm_device *dev);
void i915_gem_init_ggtt(struct drm_device *dev); void i915_gem_init_ggtt(struct drm_device *dev);
void i915_ggtt_cleanup_hw(struct drm_device *dev); void i915_ggtt_cleanup_hw(struct drm_device *dev);
int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
int i915_ppgtt_init_hw(struct drm_device *dev); int i915_ppgtt_init_hw(struct drm_device *dev);
int i915_ppgtt_init_ring(struct drm_i915_gem_request *req);
void i915_ppgtt_release(struct kref *kref); void i915_ppgtt_release(struct kref *kref);
struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_device *dev, struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_device *dev,
struct drm_i915_file_private *fpriv); struct drm_i915_file_private *fpriv);
...@@ -535,7 +536,7 @@ static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt) ...@@ -535,7 +536,7 @@ static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
kref_put(&ppgtt->ref, i915_ppgtt_release); kref_put(&ppgtt->ref, i915_ppgtt_release);
} }
void i915_check_and_clear_faults(struct drm_device *dev); void i915_check_and_clear_faults(struct drm_i915_private *dev_priv);
void i915_gem_suspend_gtt_mappings(struct drm_device *dev); void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
void i915_gem_restore_gtt_mappings(struct drm_device *dev); void i915_gem_restore_gtt_mappings(struct drm_device *dev);
...@@ -560,4 +561,36 @@ size_t ...@@ -560,4 +561,36 @@ size_t
i915_ggtt_view_size(struct drm_i915_gem_object *obj, i915_ggtt_view_size(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view); const struct i915_ggtt_view *view);
/**
* i915_vma_pin_iomap - calls ioremap_wc to map the GGTT VMA via the aperture
* @vma: VMA to iomap
*
* The passed in VMA has to be pinned in the global GTT mappable region.
* An extra pinning of the VMA is acquired for the return iomapping,
* the caller must call i915_vma_unpin_iomap to relinquish the pinning
* after the iomapping is no longer required.
*
* Callers must hold the struct_mutex.
*
* Returns a valid iomapped pointer or ERR_PTR.
*/
void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
/**
* i915_vma_unpin_iomap - unpins the mapping returned from i915_vma_iomap
* @vma: VMA to unpin
*
* Unpins the previously iomapped VMA from i915_vma_pin_iomap().
*
* Callers must hold the struct_mutex. This function is only valid to be
* called on a VMA previously iomapped by the caller with i915_vma_pin_iomap().
*/
static inline void i915_vma_unpin_iomap(struct i915_vma *vma)
{
lockdep_assert_held(&vma->vm->dev->struct_mutex);
GEM_BUG_ON(vma->pin_count == 0);
GEM_BUG_ON(vma->iomap == NULL);
vma->pin_count--;
}
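As a reading aid, here is a minimal caller sketch for the two helpers documented above; the function name, the offset and the writel() poke are illustrative assumptions, not code from this series, while the locking and pin/unpin pairing follow the kernel-doc.

/* Hypothetical example: write one dword through the GGTT aperture.
 * Caller must hold struct_mutex; the VMA must be bound map_and_fenceable. */
static int example_poke_through_aperture(struct i915_vma *vma,
					 u32 offset, u32 value)
{
	void __iomem *ptr;

	ptr = i915_vma_pin_iomap(vma);	/* takes an extra pin on success */
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	writel(value, ptr + offset);	/* WC mapping of the whole node */

	i915_vma_unpin_iomap(vma);	/* drops the pin; the iomap stays cached */
	return 0;
}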
#endif #endif
...@@ -29,7 +29,7 @@ ...@@ -29,7 +29,7 @@
#include "intel_renderstate.h" #include "intel_renderstate.h"
static const struct intel_renderstate_rodata * static const struct intel_renderstate_rodata *
render_state_get_rodata(struct drm_device *dev, const int gen) render_state_get_rodata(const int gen)
{ {
switch (gen) { switch (gen) {
case 6: case 6:
...@@ -45,21 +45,22 @@ render_state_get_rodata(struct drm_device *dev, const int gen) ...@@ -45,21 +45,22 @@ render_state_get_rodata(struct drm_device *dev, const int gen)
return NULL; return NULL;
} }
-static int render_state_init(struct render_state *so, struct drm_device *dev)
+static int render_state_init(struct render_state *so,
+			     struct drm_i915_private *dev_priv)
 {
 	int ret;
 
-	so->gen = INTEL_INFO(dev)->gen;
-	so->rodata = render_state_get_rodata(dev, so->gen);
+	so->gen = INTEL_GEN(dev_priv);
+	so->rodata = render_state_get_rodata(so->gen);
 	if (so->rodata == NULL)
 		return 0;
 
 	if (so->rodata->batch_items * 4 > 4096)
 		return -EINVAL;
 
-	so->obj = i915_gem_alloc_object(dev, 4096);
-	if (so->obj == NULL)
-		return -ENOMEM;
+	so->obj = i915_gem_object_create(dev_priv->dev, 4096);
+	if (IS_ERR(so->obj))
+		return PTR_ERR(so->obj);
ret = i915_gem_obj_ggtt_pin(so->obj, 4096, 0); ret = i915_gem_obj_ggtt_pin(so->obj, 4096, 0);
if (ret) if (ret)
...@@ -177,7 +178,7 @@ int i915_gem_render_state_prepare(struct intel_engine_cs *engine, ...@@ -177,7 +178,7 @@ int i915_gem_render_state_prepare(struct intel_engine_cs *engine,
if (WARN_ON(engine->id != RCS)) if (WARN_ON(engine->id != RCS))
return -ENOENT; return -ENOENT;
ret = render_state_init(so, engine->dev); ret = render_state_init(so, engine->i915);
if (ret) if (ret)
return ret; return ret;
......
...@@ -131,7 +131,16 @@ i915_gem_shrink(struct drm_i915_private *dev_priv, ...@@ -131,7 +131,16 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
unsigned long count = 0; unsigned long count = 0;
trace_i915_gem_shrink(dev_priv, target, flags); trace_i915_gem_shrink(dev_priv, target, flags);
i915_gem_retire_requests(dev_priv->dev); i915_gem_retire_requests(dev_priv);
/*
* Unbinding of objects will require HW access; Let us not wake the
* device just to recover a little memory. If absolutely necessary,
* we will force the wake during oom-notifier.
*/
if ((flags & I915_SHRINK_BOUND) &&
!intel_runtime_pm_get_if_in_use(dev_priv))
flags &= ~I915_SHRINK_BOUND;
/* /*
* As we may completely rewrite the (un)bound list whilst unbinding * As we may completely rewrite the (un)bound list whilst unbinding
...@@ -197,7 +206,10 @@ i915_gem_shrink(struct drm_i915_private *dev_priv, ...@@ -197,7 +206,10 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
list_splice(&still_in_list, phase->list); list_splice(&still_in_list, phase->list);
} }
i915_gem_retire_requests(dev_priv->dev); if (flags & I915_SHRINK_BOUND)
intel_runtime_pm_put(dev_priv);
i915_gem_retire_requests(dev_priv);
return count; return count;
} }
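Taken together, the two hunks above give i915_gem_shrink() roughly the following shape; this is a simplified sketch of the resulting flow, not a verbatim excerpt.

	/* Only scan bound objects if the device is already awake; unbinding
	 * needs HW access and the shrinker should not wake the GPU itself. */
	if ((flags & I915_SHRINK_BOUND) &&
	    !intel_runtime_pm_get_if_in_use(dev_priv))
		flags &= ~I915_SHRINK_BOUND;

	/* ... walk the (un)bound lists and unbind/purge what we can ... */

	if (flags & I915_SHRINK_BOUND)
		intel_runtime_pm_put(dev_priv);

	i915_gem_retire_requests(dev_priv);
	return count;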
...@@ -345,7 +357,9 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr) ...@@ -345,7 +357,9 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000)) if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
return NOTIFY_DONE; return NOTIFY_DONE;
intel_runtime_pm_get(dev_priv);
freed_pages = i915_gem_shrink_all(dev_priv); freed_pages = i915_gem_shrink_all(dev_priv);
intel_runtime_pm_put(dev_priv);
/* Because we may be allocating inside our own driver, we cannot /* Because we may be allocating inside our own driver, we cannot
* assert that there are no objects with pinned pages that are not * assert that there are no objects with pinned pages that are not
...@@ -386,17 +400,35 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr ...@@ -386,17 +400,35 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
struct drm_i915_private *dev_priv = struct drm_i915_private *dev_priv =
container_of(nb, struct drm_i915_private, mm.vmap_notifier); container_of(nb, struct drm_i915_private, mm.vmap_notifier);
struct shrinker_lock_uninterruptible slu; struct shrinker_lock_uninterruptible slu;
unsigned long freed_pages; struct i915_vma *vma, *next;
unsigned long freed_pages = 0;
int ret;
if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000)) if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
return NOTIFY_DONE; return NOTIFY_DONE;
freed_pages = i915_gem_shrink(dev_priv, -1UL, /* Force everything onto the inactive lists */
I915_SHRINK_BOUND | ret = i915_gpu_idle(dev_priv->dev);
I915_SHRINK_UNBOUND | if (ret)
I915_SHRINK_ACTIVE | goto out;
I915_SHRINK_VMAPS);
intel_runtime_pm_get(dev_priv);
freed_pages += i915_gem_shrink(dev_priv, -1UL,
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND |
I915_SHRINK_ACTIVE |
I915_SHRINK_VMAPS);
intel_runtime_pm_put(dev_priv);
/* We also want to clear any cached iomaps as they wrap vmap */
list_for_each_entry_safe(vma, next,
&dev_priv->ggtt.base.inactive_list, vm_link) {
unsigned long count = vma->node.size >> PAGE_SHIFT;
if (vma->iomap && i915_vma_unbind(vma) == 0)
freed_pages += count;
}
out:
i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu); i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu);
*(unsigned long *)ptr += freed_pages; *(unsigned long *)ptr += freed_pages;
......
...@@ -56,7 +56,7 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv, ...@@ -56,7 +56,7 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
/* See the comment at the drm_mm_init() call for more about this check. /* See the comment at the drm_mm_init() call for more about this check.
* WaSkipStolenMemoryFirstPage:bdw,chv (incomplete) */ * WaSkipStolenMemoryFirstPage:bdw,chv (incomplete) */
if (INTEL_INFO(dev_priv)->gen == 8 && start < 4096) if (IS_GEN8(dev_priv) && start < 4096)
start = 4096; start = 4096;
mutex_lock(&dev_priv->mm.stolen_lock); mutex_lock(&dev_priv->mm.stolen_lock);
...@@ -109,9 +109,9 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev) ...@@ -109,9 +109,9 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
if (INTEL_INFO(dev)->gen >= 3) { if (INTEL_INFO(dev)->gen >= 3) {
u32 bsm; u32 bsm;
pci_read_config_dword(dev->pdev, BSM, &bsm); pci_read_config_dword(dev->pdev, INTEL_BSM, &bsm);
base = bsm & BSM_MASK; base = bsm & INTEL_BSM_MASK;
} else if (IS_I865G(dev)) { } else if (IS_I865G(dev)) {
u16 toud = 0; u16 toud = 0;
......
...@@ -125,7 +125,7 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode) ...@@ -125,7 +125,7 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
if (INTEL_INFO(obj->base.dev)->gen >= 4) if (INTEL_INFO(obj->base.dev)->gen >= 4)
return true; return true;
if (INTEL_INFO(obj->base.dev)->gen == 3) { if (IS_GEN3(obj->base.dev)) {
if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK)
return false; return false;
} else { } else {
...@@ -229,7 +229,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, ...@@ -229,7 +229,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
*/ */
if (obj->map_and_fenceable && if (obj->map_and_fenceable &&
!i915_gem_object_fence_ok(obj, args->tiling_mode)) !i915_gem_object_fence_ok(obj, args->tiling_mode))
ret = i915_gem_object_ggtt_unbind(obj); ret = i915_vma_unbind(i915_gem_obj_to_ggtt(obj));
if (ret == 0) { if (ret == 0) {
if (obj->pages && if (obj->pages &&
......
...@@ -706,7 +706,8 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj) ...@@ -706,7 +706,8 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
static void static void
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj) i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
{ {
struct sg_page_iter sg_iter; struct sgt_iter sgt_iter;
struct page *page;
BUG_ON(obj->userptr.work != NULL); BUG_ON(obj->userptr.work != NULL);
__i915_gem_userptr_set_active(obj, false); __i915_gem_userptr_set_active(obj, false);
...@@ -716,9 +717,7 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj) ...@@ -716,9 +717,7 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
i915_gem_gtt_finish_object(obj); i915_gem_gtt_finish_object(obj);
for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) { for_each_sgt_page(page, sgt_iter, obj->pages) {
struct page *page = sg_page_iter_page(&sg_iter);
if (obj->dirty) if (obj->dirty)
set_page_dirty(page); set_page_dirty(page);
...@@ -855,11 +854,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file ...@@ -855,11 +854,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
return 0; return 0;
} }
int void i915_gem_init_userptr(struct drm_i915_private *dev_priv)
i915_gem_init_userptr(struct drm_device *dev)
{ {
struct drm_i915_private *dev_priv = to_i915(dev);
mutex_init(&dev_priv->mm_lock); mutex_init(&dev_priv->mm_lock);
hash_init(dev_priv->mm_structs); hash_init(dev_priv->mm_structs);
return 0;
} }
...@@ -411,7 +411,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, ...@@ -411,7 +411,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg); err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
} }
if (INTEL_INFO(dev)->gen == 7) if (IS_GEN7(dev))
err_printf(m, "ERR_INT: 0x%08x\n", error->err_int); err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
for (i = 0; i < ARRAY_SIZE(error->ring); i++) for (i = 0; i < ARRAY_SIZE(error->ring); i++)
...@@ -824,19 +824,18 @@ static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv, ...@@ -824,19 +824,18 @@ static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
return error_code; return error_code;
} }
static void i915_gem_record_fences(struct drm_device *dev, static void i915_gem_record_fences(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error) struct drm_i915_error_state *error)
{ {
struct drm_i915_private *dev_priv = dev->dev_private;
int i; int i;
if (IS_GEN3(dev) || IS_GEN2(dev)) { if (IS_GEN3(dev_priv) || IS_GEN2(dev_priv)) {
for (i = 0; i < dev_priv->num_fence_regs; i++) for (i = 0; i < dev_priv->num_fence_regs; i++)
error->fence[i] = I915_READ(FENCE_REG(i)); error->fence[i] = I915_READ(FENCE_REG(i));
} else if (IS_GEN5(dev) || IS_GEN4(dev)) { } else if (IS_GEN5(dev_priv) || IS_GEN4(dev_priv)) {
for (i = 0; i < dev_priv->num_fence_regs; i++) for (i = 0; i < dev_priv->num_fence_regs; i++)
error->fence[i] = I915_READ64(FENCE_REG_965_LO(i)); error->fence[i] = I915_READ64(FENCE_REG_965_LO(i));
} else if (INTEL_INFO(dev)->gen >= 6) { } else if (INTEL_GEN(dev_priv) >= 6) {
for (i = 0; i < dev_priv->num_fence_regs; i++) for (i = 0; i < dev_priv->num_fence_regs; i++)
error->fence[i] = I915_READ64(FENCE_REG_GEN6_LO(i)); error->fence[i] = I915_READ64(FENCE_REG_GEN6_LO(i));
} }
...@@ -851,7 +850,7 @@ static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv, ...@@ -851,7 +850,7 @@ static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
struct intel_engine_cs *to; struct intel_engine_cs *to;
enum intel_engine_id id; enum intel_engine_id id;
if (!i915_semaphore_is_enabled(dev_priv->dev)) if (!i915_semaphore_is_enabled(dev_priv))
return; return;
if (!error->semaphore_obj) if (!error->semaphore_obj)
...@@ -893,31 +892,29 @@ static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv, ...@@ -893,31 +892,29 @@ static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv,
} }
} }
static void i915_record_ring_state(struct drm_device *dev, static void i915_record_ring_state(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error, struct drm_i915_error_state *error,
struct intel_engine_cs *engine, struct intel_engine_cs *engine,
struct drm_i915_error_ring *ering) struct drm_i915_error_ring *ering)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; if (INTEL_GEN(dev_priv) >= 6) {
if (INTEL_INFO(dev)->gen >= 6) {
ering->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base)); ering->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base));
ering->fault_reg = I915_READ(RING_FAULT_REG(engine)); ering->fault_reg = I915_READ(RING_FAULT_REG(engine));
if (INTEL_INFO(dev)->gen >= 8) if (INTEL_GEN(dev_priv) >= 8)
gen8_record_semaphore_state(dev_priv, error, engine, gen8_record_semaphore_state(dev_priv, error, engine,
ering); ering);
else else
gen6_record_semaphore_state(dev_priv, engine, ering); gen6_record_semaphore_state(dev_priv, engine, ering);
} }
if (INTEL_INFO(dev)->gen >= 4) { if (INTEL_GEN(dev_priv) >= 4) {
ering->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base)); ering->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base));
ering->ipeir = I915_READ(RING_IPEIR(engine->mmio_base)); ering->ipeir = I915_READ(RING_IPEIR(engine->mmio_base));
ering->ipehr = I915_READ(RING_IPEHR(engine->mmio_base)); ering->ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
ering->instdone = I915_READ(RING_INSTDONE(engine->mmio_base)); ering->instdone = I915_READ(RING_INSTDONE(engine->mmio_base));
ering->instps = I915_READ(RING_INSTPS(engine->mmio_base)); ering->instps = I915_READ(RING_INSTPS(engine->mmio_base));
ering->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base)); ering->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
if (INTEL_INFO(dev)->gen >= 8) { if (INTEL_GEN(dev_priv) >= 8) {
ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(engine->mmio_base)) << 32; ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(engine->mmio_base)) << 32;
ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(engine->mmio_base)) << 32; ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(engine->mmio_base)) << 32;
} }
...@@ -939,10 +936,10 @@ static void i915_record_ring_state(struct drm_device *dev, ...@@ -939,10 +936,10 @@ static void i915_record_ring_state(struct drm_device *dev,
ering->tail = I915_READ_TAIL(engine); ering->tail = I915_READ_TAIL(engine);
ering->ctl = I915_READ_CTL(engine); ering->ctl = I915_READ_CTL(engine);
if (I915_NEED_GFX_HWS(dev)) { if (I915_NEED_GFX_HWS(dev_priv)) {
i915_reg_t mmio; i915_reg_t mmio;
if (IS_GEN7(dev)) { if (IS_GEN7(dev_priv)) {
switch (engine->id) { switch (engine->id) {
default: default:
case RCS: case RCS:
...@@ -958,7 +955,7 @@ static void i915_record_ring_state(struct drm_device *dev, ...@@ -958,7 +955,7 @@ static void i915_record_ring_state(struct drm_device *dev,
mmio = VEBOX_HWS_PGA_GEN7; mmio = VEBOX_HWS_PGA_GEN7;
break; break;
} }
} else if (IS_GEN6(engine->dev)) { } else if (IS_GEN6(engine->i915)) {
mmio = RING_HWS_PGA_GEN6(engine->mmio_base); mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
} else { } else {
/* XXX: gen8 returns to sanity */ /* XXX: gen8 returns to sanity */
...@@ -971,18 +968,18 @@ static void i915_record_ring_state(struct drm_device *dev, ...@@ -971,18 +968,18 @@ static void i915_record_ring_state(struct drm_device *dev,
ering->hangcheck_score = engine->hangcheck.score; ering->hangcheck_score = engine->hangcheck.score;
ering->hangcheck_action = engine->hangcheck.action; ering->hangcheck_action = engine->hangcheck.action;
if (USES_PPGTT(dev)) { if (USES_PPGTT(dev_priv)) {
int i; int i;
ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine)); ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine));
if (IS_GEN6(dev)) if (IS_GEN6(dev_priv))
ering->vm_info.pp_dir_base = ering->vm_info.pp_dir_base =
I915_READ(RING_PP_DIR_BASE_READ(engine)); I915_READ(RING_PP_DIR_BASE_READ(engine));
else if (IS_GEN7(dev)) else if (IS_GEN7(dev_priv))
ering->vm_info.pp_dir_base = ering->vm_info.pp_dir_base =
I915_READ(RING_PP_DIR_BASE(engine)); I915_READ(RING_PP_DIR_BASE(engine));
else if (INTEL_INFO(dev)->gen >= 8) else if (INTEL_GEN(dev_priv) >= 8)
for (i = 0; i < 4; i++) { for (i = 0; i < 4; i++) {
ering->vm_info.pdp[i] = ering->vm_info.pdp[i] =
I915_READ(GEN8_RING_PDP_UDW(engine, i)); I915_READ(GEN8_RING_PDP_UDW(engine, i));
...@@ -998,7 +995,7 @@ static void i915_gem_record_active_context(struct intel_engine_cs *engine, ...@@ -998,7 +995,7 @@ static void i915_gem_record_active_context(struct intel_engine_cs *engine,
struct drm_i915_error_state *error, struct drm_i915_error_state *error,
struct drm_i915_error_ring *ering) struct drm_i915_error_ring *ering)
{ {
struct drm_i915_private *dev_priv = engine->dev->dev_private; struct drm_i915_private *dev_priv = engine->i915;
struct drm_i915_gem_object *obj; struct drm_i915_gem_object *obj;
/* Currently render ring is the only HW context user */ /* Currently render ring is the only HW context user */
...@@ -1016,10 +1013,9 @@ static void i915_gem_record_active_context(struct intel_engine_cs *engine, ...@@ -1016,10 +1013,9 @@ static void i915_gem_record_active_context(struct intel_engine_cs *engine,
} }
} }
static void i915_gem_record_rings(struct drm_device *dev, static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error) struct drm_i915_error_state *error)
{ {
struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt; struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct drm_i915_gem_request *request; struct drm_i915_gem_request *request;
int i, count; int i, count;
...@@ -1030,12 +1026,12 @@ static void i915_gem_record_rings(struct drm_device *dev, ...@@ -1030,12 +1026,12 @@ static void i915_gem_record_rings(struct drm_device *dev,
error->ring[i].pid = -1; error->ring[i].pid = -1;
if (engine->dev == NULL) if (!intel_engine_initialized(engine))
continue; continue;
error->ring[i].valid = true; error->ring[i].valid = true;
i915_record_ring_state(dev, error, engine, &error->ring[i]); i915_record_ring_state(dev_priv, error, engine, &error->ring[i]);
request = i915_gem_find_active_request(engine); request = i915_gem_find_active_request(engine);
if (request) { if (request) {
...@@ -1301,15 +1297,14 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv, ...@@ -1301,15 +1297,14 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
error->eir = I915_READ(EIR); error->eir = I915_READ(EIR);
error->pgtbl_er = I915_READ(PGTBL_ER); error->pgtbl_er = I915_READ(PGTBL_ER);
i915_get_extra_instdone(dev, error->extra_instdone); i915_get_extra_instdone(dev_priv, error->extra_instdone);
} }
static void i915_error_capture_msg(struct drm_device *dev, static void i915_error_capture_msg(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error, struct drm_i915_error_state *error,
u32 engine_mask, u32 engine_mask,
const char *error_msg) const char *error_msg)
{ {
struct drm_i915_private *dev_priv = dev->dev_private;
u32 ecode; u32 ecode;
int ring_id = -1, len; int ring_id = -1, len;
...@@ -1317,7 +1312,7 @@ static void i915_error_capture_msg(struct drm_device *dev, ...@@ -1317,7 +1312,7 @@ static void i915_error_capture_msg(struct drm_device *dev,
len = scnprintf(error->error_msg, sizeof(error->error_msg), len = scnprintf(error->error_msg, sizeof(error->error_msg),
"GPU HANG: ecode %d:%d:0x%08x", "GPU HANG: ecode %d:%d:0x%08x",
INTEL_INFO(dev)->gen, ring_id, ecode); INTEL_GEN(dev_priv), ring_id, ecode);
if (ring_id != -1 && error->ring[ring_id].pid != -1) if (ring_id != -1 && error->ring[ring_id].pid != -1)
len += scnprintf(error->error_msg + len, len += scnprintf(error->error_msg + len,
...@@ -1352,11 +1347,11 @@ static void i915_capture_gen_state(struct drm_i915_private *dev_priv, ...@@ -1352,11 +1347,11 @@ static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
* out a structure which becomes available in debugfs for user level tools * out a structure which becomes available in debugfs for user level tools
* to pick up. * to pick up.
*/ */
void i915_capture_error_state(struct drm_device *dev, u32 engine_mask, void i915_capture_error_state(struct drm_i915_private *dev_priv,
u32 engine_mask,
const char *error_msg) const char *error_msg)
{ {
static bool warned; static bool warned;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_error_state *error; struct drm_i915_error_state *error;
unsigned long flags; unsigned long flags;
...@@ -1372,15 +1367,15 @@ void i915_capture_error_state(struct drm_device *dev, u32 engine_mask, ...@@ -1372,15 +1367,15 @@ void i915_capture_error_state(struct drm_device *dev, u32 engine_mask,
i915_capture_gen_state(dev_priv, error); i915_capture_gen_state(dev_priv, error);
i915_capture_reg_state(dev_priv, error); i915_capture_reg_state(dev_priv, error);
i915_gem_capture_buffers(dev_priv, error); i915_gem_capture_buffers(dev_priv, error);
i915_gem_record_fences(dev, error); i915_gem_record_fences(dev_priv, error);
i915_gem_record_rings(dev, error); i915_gem_record_rings(dev_priv, error);
do_gettimeofday(&error->time); do_gettimeofday(&error->time);
error->overlay = intel_overlay_capture_error_state(dev); error->overlay = intel_overlay_capture_error_state(dev_priv);
error->display = intel_display_capture_error_state(dev); error->display = intel_display_capture_error_state(dev_priv);
i915_error_capture_msg(dev, error, engine_mask, error_msg); i915_error_capture_msg(dev_priv, error, engine_mask, error_msg);
DRM_INFO("%s\n", error->error_msg); DRM_INFO("%s\n", error->error_msg);
spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
...@@ -1400,7 +1395,7 @@ void i915_capture_error_state(struct drm_device *dev, u32 engine_mask, ...@@ -1400,7 +1395,7 @@ void i915_capture_error_state(struct drm_device *dev, u32 engine_mask,
DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n"); DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n"); DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n"); DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", dev->primary->index); DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", dev_priv->dev->primary->index);
warned = true; warned = true;
} }
} }
...@@ -1450,17 +1445,17 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type) ...@@ -1450,17 +1445,17 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
} }
/* NB: please notice the memset */ /* NB: please notice the memset */
void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone) void i915_get_extra_instdone(struct drm_i915_private *dev_priv,
uint32_t *instdone)
{ {
struct drm_i915_private *dev_priv = dev->dev_private;
memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG); memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
if (IS_GEN2(dev) || IS_GEN3(dev)) if (IS_GEN2(dev_priv) || IS_GEN3(dev_priv))
instdone[0] = I915_READ(GEN2_INSTDONE); instdone[0] = I915_READ(GEN2_INSTDONE);
else if (IS_GEN4(dev) || IS_GEN5(dev) || IS_GEN6(dev)) { else if (IS_GEN4(dev_priv) || IS_GEN5(dev_priv) || IS_GEN6(dev_priv)) {
instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE)); instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
instdone[1] = I915_READ(GEN4_INSTDONE1); instdone[1] = I915_READ(GEN4_INSTDONE1);
} else if (INTEL_INFO(dev)->gen >= 7) { } else if (INTEL_GEN(dev_priv) >= 7) {
instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE)); instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
instdone[1] = I915_READ(GEN7_SC_INSTDONE); instdone[1] = I915_READ(GEN7_SC_INSTDONE);
instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE); instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
......
...@@ -67,11 +67,11 @@ ...@@ -67,11 +67,11 @@
#define GUC_WOPCM_OFFSET_VALUE 0x80000 /* 512KB */ #define GUC_WOPCM_OFFSET_VALUE 0x80000 /* 512KB */
#define GUC_MAX_IDLE_COUNT _MMIO(0xC3E4) #define GUC_MAX_IDLE_COUNT _MMIO(0xC3E4)
/* Defines WOPCM space available to GuC firmware */
#define GUC_WOPCM_SIZE _MMIO(0xc050) #define GUC_WOPCM_SIZE _MMIO(0xc050)
#define GUC_WOPCM_SIZE_VALUE (0x80 << 12) /* 512KB */
/* GuC addresses below GUC_WOPCM_TOP don't map through the GTT */ /* GuC addresses below GUC_WOPCM_TOP don't map through the GTT */
#define GUC_WOPCM_TOP (GUC_WOPCM_SIZE_VALUE) #define GUC_WOPCM_TOP (0x80 << 12) /* 512KB */
#define BXT_GUC_WOPCM_RC6_RESERVED (0x10 << 12) /* 64KB */
#define GEN8_GT_PM_CONFIG _MMIO(0x138140) #define GEN8_GT_PM_CONFIG _MMIO(0x138140)
#define GEN9LP_GT_PM_CONFIG _MMIO(0x138140) #define GEN9LP_GT_PM_CONFIG _MMIO(0x138140)
......
...@@ -158,8 +158,7 @@ static int host2guc_sample_forcewake(struct intel_guc *guc, ...@@ -158,8 +158,7 @@ static int host2guc_sample_forcewake(struct intel_guc *guc,
data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE; data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE;
/* WaRsDisableCoarsePowerGating:skl,bxt */ /* WaRsDisableCoarsePowerGating:skl,bxt */
if (!intel_enable_rc6(dev) || if (!intel_enable_rc6() || NEEDS_WaRsDisableCoarsePowerGating(dev))
NEEDS_WaRsDisableCoarsePowerGating(dev))
data[1] = 0; data[1] = 0;
else else
/* bit 0 and 1 are for Render and Media domain separately */ /* bit 0 and 1 are for Render and Media domain separately */
...@@ -587,8 +586,8 @@ static struct drm_i915_gem_object *gem_allocate_guc_obj(struct drm_device *dev, ...@@ -587,8 +586,8 @@ static struct drm_i915_gem_object *gem_allocate_guc_obj(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj; struct drm_i915_gem_object *obj;
obj = i915_gem_alloc_object(dev, size); obj = i915_gem_object_create(dev, size);
if (!obj) if (IS_ERR(obj))
return NULL; return NULL;
if (i915_gem_object_get_pages(obj)) { if (i915_gem_object_get_pages(obj)) {
......
(Diff for this file is collapsed in the original view.)
...@@ -58,6 +58,7 @@ struct i915_params i915 __read_mostly = { ...@@ -58,6 +58,7 @@ struct i915_params i915 __read_mostly = {
.guc_log_level = -1, .guc_log_level = -1,
.enable_dp_mst = true, .enable_dp_mst = true,
.inject_load_failure = 0, .inject_load_failure = 0,
.enable_dpcd_backlight = false,
}; };
module_param_named(modeset, i915.modeset, int, 0400); module_param_named(modeset, i915.modeset, int, 0400);
...@@ -210,3 +211,6 @@ MODULE_PARM_DESC(enable_dp_mst, ...@@ -210,3 +211,6 @@ MODULE_PARM_DESC(enable_dp_mst,
module_param_named_unsafe(inject_load_failure, i915.inject_load_failure, uint, 0400); module_param_named_unsafe(inject_load_failure, i915.inject_load_failure, uint, 0400);
MODULE_PARM_DESC(inject_load_failure, MODULE_PARM_DESC(inject_load_failure,
"Force an error after a number of failure check points (0:disabled (default), N:force failure at the Nth failure check point)"); "Force an error after a number of failure check points (0:disabled (default), N:force failure at the Nth failure check point)");
module_param_named(enable_dpcd_backlight, i915.enable_dpcd_backlight, bool, 0600);
MODULE_PARM_DESC(enable_dpcd_backlight,
"Enable support for DPCD backlight control (default:false)");
...@@ -61,6 +61,7 @@ struct i915_params { ...@@ -61,6 +61,7 @@ struct i915_params {
bool verbose_state_checks; bool verbose_state_checks;
bool nuclear_pageflip; bool nuclear_pageflip;
bool enable_dp_mst; bool enable_dp_mst;
bool enable_dpcd_backlight;
}; };
extern struct i915_params i915 __read_mostly; extern struct i915_params i915 __read_mostly;
......
...@@ -2449,6 +2449,8 @@ enum skl_disp_power_wells { ...@@ -2449,6 +2449,8 @@ enum skl_disp_power_wells {
#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f #define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0 #define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
#define RAWCLK_FREQ_VLV _MMIO(VLV_DISPLAY_BASE + 0x6024)
#define _FPA0 0x6040 #define _FPA0 0x6040
#define _FPA1 0x6044 #define _FPA1 0x6044
#define _FPB0 0x6048 #define _FPB0 0x6048
...@@ -6031,6 +6033,7 @@ enum skl_disp_power_wells { ...@@ -6031,6 +6033,7 @@ enum skl_disp_power_wells {
#define CHICKEN_PAR1_1 _MMIO(0x42080) #define CHICKEN_PAR1_1 _MMIO(0x42080)
#define DPA_MASK_VBLANK_SRD (1 << 15) #define DPA_MASK_VBLANK_SRD (1 << 15)
#define FORCE_ARB_IDLE_PLANES (1 << 14) #define FORCE_ARB_IDLE_PLANES (1 << 14)
#define SKL_EDP_PSR_FIX_RDWRAP (1 << 3)
#define _CHICKEN_PIPESL_1_A 0x420b0 #define _CHICKEN_PIPESL_1_A 0x420b0
#define _CHICKEN_PIPESL_1_B 0x420b4 #define _CHICKEN_PIPESL_1_B 0x420b4
...@@ -6089,7 +6092,14 @@ enum skl_disp_power_wells { ...@@ -6089,7 +6092,14 @@ enum skl_disp_power_wells {
#define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000 #define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000
#define GEN8_L3SQCREG1 _MMIO(0xB100) #define GEN8_L3SQCREG1 _MMIO(0xB100)
#define BDW_WA_L3SQCREG1_DEFAULT 0x784000
/*
* Note that on CHV the following has an off-by-one error wrt. to BSpec.
* Using the formula in BSpec leads to a hang, while the formula here works
* fine and matches the formulas for all other platforms. A BSpec change
* request has been filed to clarify this.
*/
#define L3_GENERAL_PRIO_CREDITS(x) (((x) >> 1) << 19)
#define L3_HIGH_PRIO_CREDITS(x) (((x) >> 1) << 14)
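A quick arithmetic check of the new credit macros against the value they replace: the old BDW_WA_L3SQCREG1_DEFAULT of 0x784000 decomposes as L3_GENERAL_PRIO_CREDITS(30) | L3_HIGH_PRIO_CREDITS(2), since (30 >> 1) << 19 = 0x780000 and (2 >> 1) << 14 = 0x4000; in other words, each macro writes half of its argument into the corresponding register field.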
#define GEN7_L3CNTLREG1 _MMIO(0xB01C) #define GEN7_L3CNTLREG1 _MMIO(0xB01C)
#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C #define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C
...@@ -7557,14 +7567,15 @@ enum skl_disp_power_wells { ...@@ -7557,14 +7567,15 @@ enum skl_disp_power_wells {
#define CDCLK_FREQ_540 (1<<26) #define CDCLK_FREQ_540 (1<<26)
#define CDCLK_FREQ_337_308 (2<<26) #define CDCLK_FREQ_337_308 (2<<26)
#define CDCLK_FREQ_675_617 (3<<26) #define CDCLK_FREQ_675_617 (3<<26)
#define CDCLK_FREQ_DECIMAL_MASK (0x7ff)
#define BXT_CDCLK_CD2X_DIV_SEL_MASK (3<<22) #define BXT_CDCLK_CD2X_DIV_SEL_MASK (3<<22)
#define BXT_CDCLK_CD2X_DIV_SEL_1 (0<<22) #define BXT_CDCLK_CD2X_DIV_SEL_1 (0<<22)
#define BXT_CDCLK_CD2X_DIV_SEL_1_5 (1<<22) #define BXT_CDCLK_CD2X_DIV_SEL_1_5 (1<<22)
#define BXT_CDCLK_CD2X_DIV_SEL_2 (2<<22) #define BXT_CDCLK_CD2X_DIV_SEL_2 (2<<22)
#define BXT_CDCLK_CD2X_DIV_SEL_4 (3<<22) #define BXT_CDCLK_CD2X_DIV_SEL_4 (3<<22)
#define BXT_CDCLK_CD2X_PIPE(pipe) ((pipe)<<20)
#define BXT_CDCLK_CD2X_PIPE_NONE BXT_CDCLK_CD2X_PIPE(3)
#define BXT_CDCLK_SSA_PRECHARGE_ENABLE (1<<16) #define BXT_CDCLK_SSA_PRECHARGE_ENABLE (1<<16)
#define CDCLK_FREQ_DECIMAL_MASK (0x7ff)
/* LCPLL_CTL */ /* LCPLL_CTL */
#define LCPLL1_CTL _MMIO(0x46010) #define LCPLL1_CTL _MMIO(0x46010)
......
...@@ -43,7 +43,7 @@ static u32 calc_residency(struct drm_device *dev, ...@@ -43,7 +43,7 @@ static u32 calc_residency(struct drm_device *dev,
u64 units = 128ULL, div = 100000ULL; u64 units = 128ULL, div = 100000ULL;
u32 ret; u32 ret;
if (!intel_enable_rc6(dev)) if (!intel_enable_rc6())
return 0; return 0;
intel_runtime_pm_get(dev_priv); intel_runtime_pm_get(dev_priv);
...@@ -70,8 +70,7 @@ static u32 calc_residency(struct drm_device *dev, ...@@ -70,8 +70,7 @@ static u32 calc_residency(struct drm_device *dev,
 static ssize_t
 show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-	struct drm_minor *dminor = dev_to_drm_minor(kdev);
-
-	return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev));
+	return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6());
 }
static ssize_t static ssize_t
...@@ -397,7 +396,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, ...@@ -397,7 +396,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
/* We still need *_set_rps to process the new max_delay and /* We still need *_set_rps to process the new max_delay and
* update the interrupt limits and PMINTRMSK even though * update the interrupt limits and PMINTRMSK even though
* frequency request may be unchanged. */ * frequency request may be unchanged. */
intel_set_rps(dev, val); intel_set_rps(dev_priv, val);
mutex_unlock(&dev_priv->rps.hw_lock); mutex_unlock(&dev_priv->rps.hw_lock);
...@@ -461,7 +460,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev, ...@@ -461,7 +460,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
/* We still need *_set_rps to process the new min_delay and /* We still need *_set_rps to process the new min_delay and
* update the interrupt limits and PMINTRMSK even though * update the interrupt limits and PMINTRMSK even though
* frequency request may be unchanged. */ * frequency request may be unchanged. */
intel_set_rps(dev, val); intel_set_rps(dev_priv, val);
mutex_unlock(&dev_priv->rps.hw_lock); mutex_unlock(&dev_priv->rps.hw_lock);
......
...@@ -462,7 +462,7 @@ TRACE_EVENT(i915_gem_ring_sync_to, ...@@ -462,7 +462,7 @@ TRACE_EVENT(i915_gem_ring_sync_to,
), ),
TP_fast_assign( TP_fast_assign(
__entry->dev = from->dev->primary->index; __entry->dev = from->i915->dev->primary->index;
__entry->sync_from = from->id; __entry->sync_from = from->id;
__entry->sync_to = to_req->engine->id; __entry->sync_to = to_req->engine->id;
__entry->seqno = i915_gem_request_get_seqno(req); __entry->seqno = i915_gem_request_get_seqno(req);
...@@ -486,13 +486,11 @@ TRACE_EVENT(i915_gem_ring_dispatch, ...@@ -486,13 +486,11 @@ TRACE_EVENT(i915_gem_ring_dispatch,
), ),
TP_fast_assign( TP_fast_assign(
struct intel_engine_cs *engine = __entry->dev = req->i915->dev->primary->index;
i915_gem_request_get_engine(req); __entry->ring = req->engine->id;
__entry->dev = engine->dev->primary->index; __entry->seqno = req->seqno;
__entry->ring = engine->id;
__entry->seqno = i915_gem_request_get_seqno(req);
__entry->flags = flags; __entry->flags = flags;
i915_trace_irq_get(engine, req); i915_trace_irq_get(req->engine, req);
), ),
TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x", TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
...@@ -511,7 +509,7 @@ TRACE_EVENT(i915_gem_ring_flush, ...@@ -511,7 +509,7 @@ TRACE_EVENT(i915_gem_ring_flush,
), ),
TP_fast_assign( TP_fast_assign(
__entry->dev = req->engine->dev->primary->index; __entry->dev = req->i915->dev->primary->index;
__entry->ring = req->engine->id; __entry->ring = req->engine->id;
__entry->invalidate = invalidate; __entry->invalidate = invalidate;
__entry->flush = flush; __entry->flush = flush;
...@@ -533,11 +531,9 @@ DECLARE_EVENT_CLASS(i915_gem_request, ...@@ -533,11 +531,9 @@ DECLARE_EVENT_CLASS(i915_gem_request,
), ),
TP_fast_assign( TP_fast_assign(
struct intel_engine_cs *engine = __entry->dev = req->i915->dev->primary->index;
i915_gem_request_get_engine(req); __entry->ring = req->engine->id;
__entry->dev = engine->dev->primary->index; __entry->seqno = req->seqno;
__entry->ring = engine->id;
__entry->seqno = i915_gem_request_get_seqno(req);
), ),
TP_printk("dev=%u, ring=%u, seqno=%u", TP_printk("dev=%u, ring=%u, seqno=%u",
...@@ -560,7 +556,7 @@ TRACE_EVENT(i915_gem_request_notify, ...@@ -560,7 +556,7 @@ TRACE_EVENT(i915_gem_request_notify,
), ),
TP_fast_assign( TP_fast_assign(
__entry->dev = engine->dev->primary->index; __entry->dev = engine->i915->dev->primary->index;
__entry->ring = engine->id; __entry->ring = engine->id;
__entry->seqno = engine->get_seqno(engine); __entry->seqno = engine->get_seqno(engine);
), ),
...@@ -597,13 +593,11 @@ TRACE_EVENT(i915_gem_request_wait_begin, ...@@ -597,13 +593,11 @@ TRACE_EVENT(i915_gem_request_wait_begin,
* less desirable. * less desirable.
*/ */
TP_fast_assign( TP_fast_assign(
struct intel_engine_cs *engine = __entry->dev = req->i915->dev->primary->index;
i915_gem_request_get_engine(req); __entry->ring = req->engine->id;
__entry->dev = engine->dev->primary->index; __entry->seqno = req->seqno;
__entry->ring = engine->id;
__entry->seqno = i915_gem_request_get_seqno(req);
__entry->blocking = __entry->blocking =
mutex_is_locked(&engine->dev->struct_mutex); mutex_is_locked(&req->i915->dev->struct_mutex);
), ),
TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s", TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s",
...@@ -792,7 +786,7 @@ TRACE_EVENT(switch_mm, ...@@ -792,7 +786,7 @@ TRACE_EVENT(switch_mm,
__entry->ring = engine->id; __entry->ring = engine->id;
__entry->to = to; __entry->to = to;
__entry->vm = to->ppgtt? &to->ppgtt->base : NULL; __entry->vm = to->ppgtt? &to->ppgtt->base : NULL;
__entry->dev = engine->dev->primary->index; __entry->dev = engine->i915->dev->primary->index;
), ),
TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p", TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p",
......
...@@ -58,15 +58,14 @@ ...@@ -58,15 +58,14 @@
* This function is called at the initialization stage, to detect whether * This function is called at the initialization stage, to detect whether
* running on a vGPU. * running on a vGPU.
*/ */
void i915_check_vgpu(struct drm_device *dev) void i915_check_vgpu(struct drm_i915_private *dev_priv)
{ {
struct drm_i915_private *dev_priv = to_i915(dev);
uint64_t magic; uint64_t magic;
uint32_t version; uint32_t version;
BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE); BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
if (!IS_HASWELL(dev)) if (!IS_HASWELL(dev_priv))
return; return;
magic = __raw_i915_read64(dev_priv, vgtif_reg(magic)); magic = __raw_i915_read64(dev_priv, vgtif_reg(magic));
...@@ -136,7 +135,7 @@ static int vgt_balloon_space(struct drm_mm *mm, ...@@ -136,7 +135,7 @@ static int vgt_balloon_space(struct drm_mm *mm,
/** /**
* intel_vgt_balloon - balloon out reserved graphics address trunks * intel_vgt_balloon - balloon out reserved graphics address trunks
* @dev: drm device * @dev_priv: i915 device
* *
* This function is called at the initialization stage, to balloon out the * This function is called at the initialization stage, to balloon out the
* graphic address space allocated to other vGPUs, by marking these spaces as * graphic address space allocated to other vGPUs, by marking these spaces as
......
...@@ -110,7 +110,7 @@ struct vgt_if { ...@@ -110,7 +110,7 @@ struct vgt_if {
#define VGT_DRV_DISPLAY_NOT_READY 0 #define VGT_DRV_DISPLAY_NOT_READY 0
#define VGT_DRV_DISPLAY_READY 1 /* ready for display switch */ #define VGT_DRV_DISPLAY_READY 1 /* ready for display switch */
extern void i915_check_vgpu(struct drm_device *dev); extern void i915_check_vgpu(struct drm_i915_private *dev_priv);
extern int intel_vgt_balloon(struct drm_device *dev); extern int intel_vgt_balloon(struct drm_device *dev);
extern void intel_vgt_deballoon(void); extern void intel_vgt_deballoon(void);
......
...@@ -621,17 +621,11 @@ static void i915_audio_component_codec_wake_override(struct device *dev, ...@@ -621,17 +621,11 @@ static void i915_audio_component_codec_wake_override(struct device *dev,
static int i915_audio_component_get_cdclk_freq(struct device *dev) static int i915_audio_component_get_cdclk_freq(struct device *dev)
{ {
struct drm_i915_private *dev_priv = dev_to_i915(dev); struct drm_i915_private *dev_priv = dev_to_i915(dev);
int ret;
if (WARN_ON_ONCE(!HAS_DDI(dev_priv))) if (WARN_ON_ONCE(!HAS_DDI(dev_priv)))
return -ENODEV; return -ENODEV;
intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO); return dev_priv->cdclk_freq;
ret = dev_priv->display.get_display_clock_speed(dev_priv->dev);
intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
return ret;
} }
static int i915_audio_component_sync_audio_rate(struct device *dev, static int i915_audio_component_sync_audio_rate(struct device *dev,
......
...@@ -318,6 +318,15 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv, ...@@ -318,6 +318,15 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv,
return; return;
} }
dev_priv->vbt.backlight.type = INTEL_BACKLIGHT_DISPLAY_DDI;
if (bdb->version >= 191 &&
get_blocksize(backlight_data) >= sizeof(*backlight_data)) {
const struct bdb_lfp_backlight_control_method *method;
method = &backlight_data->backlight_control[panel_type];
dev_priv->vbt.backlight.type = method->type;
}
dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz; dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm; dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm;
dev_priv->vbt.backlight.min_brightness = entry->min_brightness; dev_priv->vbt.backlight.min_brightness = entry->min_brightness;
...@@ -763,6 +772,16 @@ parse_mipi_config(struct drm_i915_private *dev_priv, ...@@ -763,6 +772,16 @@ parse_mipi_config(struct drm_i915_private *dev_priv,
return; return;
} }
/*
* These fields are introduced from the VBT version 197 onwards,
* so making sure that these bits are set zero in the previous
* versions.
*/
if (dev_priv->vbt.dsi.config->dual_link && bdb->version < 197) {
dev_priv->vbt.dsi.config->dl_dcs_cabc_ports = 0;
dev_priv->vbt.dsi.config->dl_dcs_backlight_ports = 0;
}
/* We have mandatory mipi config blocks. Initialize as generic panel */ /* We have mandatory mipi config blocks. Initialize as generic panel */
dev_priv->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID; dev_priv->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID;
} }
......
...@@ -30,6 +30,14 @@ ...@@ -30,6 +30,14 @@
#ifndef _INTEL_BIOS_H_ #ifndef _INTEL_BIOS_H_
#define _INTEL_BIOS_H_ #define _INTEL_BIOS_H_
enum intel_backlight_type {
INTEL_BACKLIGHT_PMIC,
INTEL_BACKLIGHT_LPSS,
INTEL_BACKLIGHT_DISPLAY_DDI,
INTEL_BACKLIGHT_DSI_DCS,
INTEL_BACKLIGHT_PANEL_DRIVER_INTERFACE,
};
struct edp_power_seq { struct edp_power_seq {
u16 t1_t3; u16 t1_t3;
u16 t8; u16 t8;
...@@ -113,7 +121,13 @@ struct mipi_config { ...@@ -113,7 +121,13 @@ struct mipi_config {
u16 dual_link:2; u16 dual_link:2;
u16 lane_cnt:2; u16 lane_cnt:2;
u16 pixel_overlap:3; u16 pixel_overlap:3;
u16 rsvd3:9; u16 rgb_flip:1;
#define DL_DCS_PORT_A 0x00
#define DL_DCS_PORT_C 0x01
#define DL_DCS_PORT_A_AND_C 0x02
u16 dl_dcs_cabc_ports:2;
u16 dl_dcs_backlight_ports:2;
u16 rsvd3:4;
u16 rsvd4; u16 rsvd4;
......
...@@ -41,16 +41,22 @@ ...@@ -41,16 +41,22 @@
* be moved to FW_FAILED. * be moved to FW_FAILED.
*/ */
+#define I915_CSR_KBL "i915/kbl_dmc_ver1.bin"
+MODULE_FIRMWARE(I915_CSR_KBL);
+#define KBL_CSR_VERSION_REQUIRED	CSR_VERSION(1, 1)
+
 #define I915_CSR_SKL "i915/skl_dmc_ver1.bin"
+MODULE_FIRMWARE(I915_CSR_SKL);
+#define SKL_CSR_VERSION_REQUIRED	CSR_VERSION(1, 23)
+
 #define I915_CSR_BXT "i915/bxt_dmc_ver1.bin"
+MODULE_FIRMWARE(I915_CSR_BXT);
+#define BXT_CSR_VERSION_REQUIRED	CSR_VERSION(1, 7)
 
 #define FIRMWARE_URL "https://01.org/linuxgraphics/intel-linux-graphics-firmwares"
 
-MODULE_FIRMWARE(I915_CSR_SKL);
-MODULE_FIRMWARE(I915_CSR_BXT);
-
-#define SKL_CSR_VERSION_REQUIRED	CSR_VERSION(1, 23)
-#define BXT_CSR_VERSION_REQUIRED	CSR_VERSION(1, 7)
-
 #define CSR_MAX_FW_SIZE			0x2FFF
 #define CSR_DEFAULT_FW_OFFSET		0xFFFFFFFF
@@ -169,12 +175,10 @@ struct stepping_info {
 	char substepping;
 };
 
-/*
- * Kabylake derivated from Skylake H0, so SKL H0
- * is the right firmware for KBL A0 (revid 0).
- */
 static const struct stepping_info kbl_stepping_info[] = {
-	{'H', '0'}, {'I', '0'}
+	{'A', '0'}, {'B', '0'}, {'C', '0'},
+	{'D', '0'}, {'E', '0'}, {'F', '0'},
+	{'G', '0'}, {'H', '0'}, {'I', '0'},
 };
 
 static const struct stepping_info skl_stepping_info[] = {
@@ -298,7 +302,9 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
 	csr->version = css_header->version;
 
-	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+	if (IS_KABYLAKE(dev_priv)) {
+		required_min_version = KBL_CSR_VERSION_REQUIRED;
+	} else if (IS_SKYLAKE(dev_priv)) {
 		required_min_version = SKL_CSR_VERSION_REQUIRED;
 	} else if (IS_BROXTON(dev_priv)) {
 		required_min_version = BXT_CSR_VERSION_REQUIRED;
@@ -446,7 +452,9 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
 	if (!HAS_CSR(dev_priv))
 		return;
 
-	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+	if (IS_KABYLAKE(dev_priv))
+		csr->fw_path = I915_CSR_KBL;
+	else if (IS_SKYLAKE(dev_priv))
 		csr->fw_path = I915_CSR_SKL;
 	else if (IS_BROXTON(dev_priv))
 		csr->fw_path = I915_CSR_BXT;
......
...@@ -948,7 +948,7 @@ static int bxt_calc_pll_link(struct drm_i915_private *dev_priv, ...@@ -948,7 +948,7 @@ static int bxt_calc_pll_link(struct drm_i915_private *dev_priv,
{ {
struct intel_shared_dpll *pll; struct intel_shared_dpll *pll;
struct intel_dpll_hw_state *state; struct intel_dpll_hw_state *state;
intel_clock_t clock; struct dpll clock;
/* For DDI ports we always use a shared PLL. */ /* For DDI ports we always use a shared PLL. */
if (WARN_ON(dpll == DPLL_ID_PRIVATE)) if (WARN_ON(dpll == DPLL_ID_PRIVATE))
......
(Diff for this file is collapsed in the original view.)
/*
* Copyright © 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#include "intel_drv.h"
static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
{
uint8_t reg_val = 0;
if (drm_dp_dpcd_readb(&intel_dp->aux, DP_EDP_DISPLAY_CONTROL_REGISTER,
&reg_val) < 0) {
DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n",
DP_EDP_DISPLAY_CONTROL_REGISTER);
return;
}
if (enable)
reg_val |= DP_EDP_BACKLIGHT_ENABLE;
else
reg_val &= ~(DP_EDP_BACKLIGHT_ENABLE);
if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_EDP_DISPLAY_CONTROL_REGISTER,
reg_val) != 1) {
DRM_DEBUG_KMS("Failed to %s aux backlight\n",
enable ? "enable" : "disable");
}
}
/*
* Read the current backlight value from DPCD register(s) based
* on if 8-bit(MSB) or 16-bit(MSB and LSB) values are supported
*/
static uint32_t intel_dp_aux_get_backlight(struct intel_connector *connector)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
uint8_t read_val[2] = { 0x0 };
uint16_t level = 0;
if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB,
&read_val, sizeof(read_val)) < 0) {
DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n",
DP_EDP_BACKLIGHT_BRIGHTNESS_MSB);
return 0;
}
level = read_val[0];
if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT)
level = (read_val[0] << 8 | read_val[1]);
return level;
}
/*
* Sends the current backlight level over the aux channel, checking if its using
* 8-bit or 16 bit value (MSB and LSB)
*/
static void
intel_dp_aux_set_backlight(struct intel_connector *connector, u32 level)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
uint8_t vals[2] = { 0x0 };
vals[0] = level;
/* Write the MSB and/or LSB */
if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT) {
vals[0] = (level & 0xFF00) >> 8;
vals[1] = (level & 0xFF);
}
if (drm_dp_dpcd_write(&intel_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB,
vals, sizeof(vals)) < 0) {
DRM_DEBUG_KMS("Failed to write aux backlight level\n");
return;
}
}
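For example, with DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT set, a requested level of 0x1234 is sent as vals[0] = 0x12 (MSB) and vals[1] = 0x34 (LSB); without it, vals[0] simply carries the 8-bit level and vals[1] stays zero, even though both bytes are still written over AUX.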
static void intel_dp_aux_enable_backlight(struct intel_connector *connector)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
uint8_t dpcd_buf = 0;
set_aux_backlight_enable(intel_dp, true);
if ((drm_dp_dpcd_readb(&intel_dp->aux,
DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &dpcd_buf) == 1) &&
((dpcd_buf & DP_EDP_BACKLIGHT_CONTROL_MODE_MASK) ==
DP_EDP_BACKLIGHT_CONTROL_MODE_PRESET))
drm_dp_dpcd_writeb(&intel_dp->aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER,
(dpcd_buf | DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD));
}
static void intel_dp_aux_disable_backlight(struct intel_connector *connector)
{
set_aux_backlight_enable(enc_to_intel_dp(&connector->encoder->base), false);
}
static int intel_dp_aux_setup_backlight(struct intel_connector *connector,
enum pipe pipe)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
struct intel_panel *panel = &connector->panel;
intel_dp_aux_enable_backlight(connector);
if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT)
panel->backlight.max = 0xFFFF;
else
panel->backlight.max = 0xFF;
panel->backlight.min = 0;
panel->backlight.level = intel_dp_aux_get_backlight(connector);
panel->backlight.enabled = panel->backlight.level != 0;
return 0;
}
static bool
intel_dp_aux_display_control_capable(struct intel_connector *connector)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
/* Check the eDP Display control capabilities registers to determine if
* the panel can support backlight control over the aux channel
*/
if (intel_dp->edp_dpcd[1] & DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP &&
(intel_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_AUX_ENABLE_CAP) &&
!((intel_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_PIN_ENABLE_CAP) ||
(intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_PWM_PIN_CAP))) {
DRM_DEBUG_KMS("AUX Backlight Control Supported!\n");
return true;
}
return false;
}
int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector)
{
struct intel_panel *panel = &intel_connector->panel;
if (!i915.enable_dpcd_backlight)
return -ENODEV;
if (!intel_dp_aux_display_control_capable(intel_connector))
return -ENODEV;
panel->backlight.setup = intel_dp_aux_setup_backlight;
panel->backlight.enable = intel_dp_aux_enable_backlight;
panel->backlight.disable = intel_dp_aux_disable_backlight;
panel->backlight.set = intel_dp_aux_set_backlight;
panel->backlight.get = intel_dp_aux_get_backlight;
return 0;
}
(Diff for this file is collapsed in the original view.)
...@@ -1508,7 +1508,7 @@ bxt_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, ...@@ -1508,7 +1508,7 @@ bxt_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
int clock = crtc_state->port_clock; int clock = crtc_state->port_clock;
if (encoder->type == INTEL_OUTPUT_HDMI) { if (encoder->type == INTEL_OUTPUT_HDMI) {
intel_clock_t best_clock; struct dpll best_clock;
/* Calculate HDMI div */ /* Calculate HDMI div */
/* /*
......
(Diffs for the remaining files are collapsed in the original view.)