Commit e13af9a8 authored by Dave Airlie

Merge tag 'drm-intel-next-2013-07-12' of git://people.freedesktop.org/~danvet/drm-intel into drm-next

 Highlights:
- follow-up refactoring after the shared dpll rework that landed in 3.11
- oddball prep cleanups from Ben for ppgtt
- encoder->get_config state tracking infrastructure from Jesse
- used by the experimental fastboot support from Jesse (disabled by
  default)
- make the error state file official and add it to our sysfs interface
  (Mika)
- drm_mm prep changes from Ben, prepares to embed the drm_mm_node (which
  will be used by the vma rework later on)
- interrupt handling rework, follow-up cleanups to the VECS enabling, hpd
  storm handling and fifo underrun reporting.
- Big pile of smaller cleanups, code improvements and related stuff.

* tag 'drm-intel-next-2013-07-12' of git://people.freedesktop.org/~danvet/drm-intel: (72 commits)
  drm/i915: clear DPLL reg when disabling i9xx dplls
  drm/i915: Fix up cpt pixel multiplier enable sequence
  drm/i915: clean up vlv ->pre_pll_enable and pll enable sequence
  drm/i915: move error state to own compilation unit
  drm/i915: Don't attempt to read an unitialized stack value
  drm/i915: Use for_each_pipe() when possible
  drm/i915: don't enable PM_VEBOX_CS_ERROR_INTERRUPT
  drm/i915: unify ring irq refcounts (again)
  drm/i915: kill dev_priv->rps.lock
  drm/i915: queue work outside spinlock in hsw_pm_irq_handler
  drm/i915: streamline hsw_pm_irq_handler
  drm/i915: irq handlers don't need interrupt-safe spinlocks
  drm/i915: kill lpt pch transcoder->crtc mapping code for fifo underruns
  drm/i915: improve GEN7_ERR_INT clearing for fifo underrun reporting
  drm/i915: improve SERR_INT clearing for fifo underrun reporting
  drm/i915: extract ibx_display_interrupt_update
  drm/i915: remove unused members from drm_i915_private
  drm/i915: don't frob mm.suspended when not using ums
  drm/i915: Fix VLV DP RBR/HDMI/DAC PLL LPF coefficients
  drm/i915: WARN if the bios reserved range is bigger than stolen size
  ...

Conflicts:
	drivers/gpu/drm/i915/i915_gem.c
@@ -147,33 +147,27 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
 	}
 }
 
-struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
-					unsigned long start,
-					unsigned long size,
-					bool atomic)
+int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
 {
-	struct drm_mm_node *hole, *node;
-	unsigned long end = start + size;
+	struct drm_mm_node *hole;
+	unsigned long end = node->start + node->size;
 	unsigned long hole_start;
 	unsigned long hole_end;
 
+	BUG_ON(node == NULL);
+
+	/* Find the relevant hole to add our node to */
 	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
-		if (hole_start > start || hole_end < end)
+		if (hole_start > node->start || hole_end < end)
 			continue;
 
-		node = drm_mm_kmalloc(mm, atomic);
-		if (unlikely(node == NULL))
-			return NULL;
-
-		node->start = start;
-		node->size = size;
 		node->mm = mm;
 		node->allocated = 1;
 
 		INIT_LIST_HEAD(&node->hole_stack);
 		list_add(&node->node_list, &hole->node_list);
 
-		if (start == hole_start) {
+		if (node->start == hole_start) {
 			hole->hole_follows = 0;
 			list_del_init(&hole->hole_stack);
 		}
@@ -184,13 +178,14 @@ struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
 			node->hole_follows = 1;
 		}
 
-		return node;
+		return 0;
 	}
 
-	WARN(1, "no hole found for block 0x%lx + 0x%lx\n", start, size);
-	return NULL;
+	WARN(1, "no hole found for node 0x%lx + 0x%lx\n",
+	     node->start, node->size);
+	return -ENOSPC;
 }
-EXPORT_SYMBOL(drm_mm_create_block);
+EXPORT_SYMBOL(drm_mm_reserve_node);
 
 struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
 					     unsigned long size,
......
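
The hunk above replaces the allocating drm_mm_create_block() with drm_mm_reserve_node(), which links in a caller-provided node whose start and size are already filled in. A minimal usage sketch under those assumptions (the embedding struct and helper are hypothetical; only the drm_mm_* calls come from this series):

	#include <drm/drm_mm.h>

	/* Hypothetical object that embeds its range-manager node -- the
	 * pattern this series prepares i915's gtt_space for below. */
	struct my_object {
		struct drm_mm_node node;	/* embedded, no separate allocation */
	};

	static int my_object_reserve(struct drm_mm *mm, struct my_object *obj,
				     unsigned long start, unsigned long size)
	{
		/* The caller picks the placement; reserve only links the
		 * node into a hole covering [start, start + size). */
		obj->node.start = start;
		obj->node.size = size;

		return drm_mm_reserve_node(mm, &obj->node);	/* 0 or -ENOSPC */
	}

When the range is released, drm_mm_remove_node(&obj->node) unlinks it without freeing anything, as the i915_gem.c hunks later in this diff use in place of drm_mm_put_block().
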
@@ -5,6 +5,7 @@
 ccflags-y := -Iinclude/drm
 i915-y := i915_drv.o i915_dma.o i915_irq.o \
 	  i915_debugfs.o \
+	  i915_gpu_error.o \
 	  i915_suspend.o \
 	  i915_gem.o \
 	  i915_gem_context.o \
......
This diff is collapsed.
@@ -1323,10 +1323,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
 	/* Always safe in the mode setting case. */
 	/* FIXME: do pre/post-mode set stuff in core KMS code */
 	dev->vblank_disable_allowed = 1;
-	if (INTEL_INFO(dev)->num_pipes == 0) {
-		dev_priv->mm.suspended = 0;
+	if (INTEL_INFO(dev)->num_pipes == 0)
 		return 0;
-	}
 
 	ret = intel_fbdev_init(dev);
 	if (ret)
@@ -1352,9 +1350,6 @@ static int i915_load_modeset_init(struct drm_device *dev)
 
 	drm_kms_helper_poll_init(dev);
 
-	/* We're off and running w/KMS */
-	dev_priv->mm.suspended = 0;
-
 	return 0;
 
 cleanup_gem:
@@ -1558,8 +1553,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 		goto out_rmmap;
 	}
 
-	dev_priv->mm.gtt_mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
-						 aperture_size);
+	dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
+					      aperture_size);
 
 	/* The i915 workqueue is primarily used for batched retirement of
 	 * requests (and thus managing bo) once the task has been completed
@@ -1612,7 +1607,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
 	spin_lock_init(&dev_priv->irq_lock);
 	spin_lock_init(&dev_priv->gpu_error.lock);
-	spin_lock_init(&dev_priv->rps.lock);
 	spin_lock_init(&dev_priv->backlight.lock);
 	mutex_init(&dev_priv->dpio_lock);
@@ -1629,9 +1623,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 		goto out_gem_unload;
 	}
 
-	/* Start out suspended */
-	dev_priv->mm.suspended = 1;
-
 	if (HAS_POWER_WELL(dev))
 		i915_init_power_well(dev);
@@ -1641,6 +1632,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 			DRM_ERROR("failed to init modeset\n");
 			goto out_gem_unload;
 		}
+	} else {
+		/* Start out suspended in ums mode. */
+		dev_priv->ums.mm_suspended = 1;
 	}
 
 	i915_setup_sysfs(dev);
@@ -1667,7 +1661,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	intel_teardown_mchbar(dev);
 	destroy_workqueue(dev_priv->wq);
out_mtrrfree:
-	arch_phys_wc_del(dev_priv->mm.gtt_mtrr);
+	arch_phys_wc_del(dev_priv->gtt.mtrr);
 	io_mapping_free(dev_priv->gtt.mappable);
 	dev_priv->gtt.gtt_remove(dev);
out_rmmap:
@@ -1705,7 +1699,7 @@ int i915_driver_unload(struct drm_device *dev)
 	cancel_delayed_work_sync(&dev_priv->mm.retire_work);
 
 	io_mapping_free(dev_priv->gtt.mappable);
-	arch_phys_wc_del(dev_priv->mm.gtt_mtrr);
+	arch_phys_wc_del(dev_priv->gtt.mtrr);
 
 	acpi_video_unregister();
......
@@ -132,6 +132,11 @@ int i915_enable_ips __read_mostly = 1;
 module_param_named(enable_ips, i915_enable_ips, int, 0600);
 MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)");
 
+bool i915_fastboot __read_mostly = 0;
+module_param_named(fastboot, i915_fastboot, bool, 0600);
+MODULE_PARM_DESC(fastboot, "Try to skip unnecessary mode sets at boot time "
+		 "(default: false)");
+
 static struct drm_driver driver;
 extern int intel_agp_enabled;
@@ -551,7 +556,11 @@ static int i915_drm_freeze(struct drm_device *dev)
 
 	/* If KMS is active, we do the leavevt stuff here */
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		int error = i915_gem_idle(dev);
+		int error;
+
+		mutex_lock(&dev->struct_mutex);
+		error = i915_gem_idle(dev);
+		mutex_unlock(&dev->struct_mutex);
 		if (error) {
 			dev_err(&dev->pdev->dev,
 				"GEM idle failed, resume might fail\n");
@@ -656,7 +665,6 @@ static int __i915_drm_thaw(struct drm_device *dev)
 		intel_init_pch_refclk(dev);
 
 		mutex_lock(&dev->struct_mutex);
-		dev_priv->mm.suspended = 0;
 
 		error = i915_gem_init_hw(dev);
 		mutex_unlock(&dev->struct_mutex);
@@ -793,28 +801,29 @@ static int i965_reset_complete(struct drm_device *dev)
 static int i965_do_reset(struct drm_device *dev)
 {
 	int ret;
-	u8 gdrst;
 
 	/*
 	 * Set the domains we want to reset (GRDOM/bits 2 and 3) as
 	 * well as the reset bit (GR/bit 0).  Setting the GR bit
 	 * triggers the reset; when done, the hardware will clear it.
 	 */
-	pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
 	pci_write_config_byte(dev->pdev, I965_GDRST,
-			      gdrst | GRDOM_RENDER |
-			      GRDOM_RESET_ENABLE);
+			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
 	ret = wait_for(i965_reset_complete(dev), 500);
 	if (ret)
 		return ret;
 
 	/* We can't reset render&media without also resetting display ... */
-	pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
 	pci_write_config_byte(dev->pdev, I965_GDRST,
-			      gdrst | GRDOM_MEDIA |
-			      GRDOM_RESET_ENABLE);
+			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
 
-	return wait_for(i965_reset_complete(dev), 500);
+	ret = wait_for(i965_reset_complete(dev), 500);
+	if (ret)
+		return ret;
+
+	pci_write_config_byte(dev->pdev, I965_GDRST, 0);
+
+	return 0;
 }
 
 static int ironlake_do_reset(struct drm_device *dev)
@@ -955,11 +964,11 @@ int i915_reset(struct drm_device *dev)
 	 * switched away).
	 */
 	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
-			!dev_priv->mm.suspended) {
+			!dev_priv->ums.mm_suspended) {
 		struct intel_ring_buffer *ring;
 		int i;
 
-		dev_priv->mm.suspended = 0;
+		dev_priv->ums.mm_suspended = 0;
 
 		i915_gem_init_swizzling(dev);
......
@@ -144,6 +144,7 @@ enum intel_dpll_id {
 
 struct intel_dpll_hw_state {
 	uint32_t dpll;
+	uint32_t dpll_md;
 	uint32_t fp0;
 	uint32_t fp1;
 };
@@ -156,6 +157,8 @@ struct intel_shared_dpll {
 	/* should match the index in the dev_priv->shared_dplls array */
 	enum intel_dpll_id id;
 	struct intel_dpll_hw_state hw_state;
+	void (*mode_set)(struct drm_i915_private *dev_priv,
+			 struct intel_shared_dpll *pll);
 	void (*enable)(struct drm_i915_private *dev_priv,
 		       struct intel_shared_dpll *pll);
 	void (*disable)(struct drm_i915_private *dev_priv,
@@ -364,6 +367,7 @@ struct drm_i915_display_funcs {
 	 * fills out the pipe-config with the hw state. */
 	bool (*get_pipe_config)(struct intel_crtc *,
 				struct intel_crtc_config *);
+	void (*get_clock)(struct intel_crtc *, struct intel_crtc_config *);
 	int (*crtc_mode_set)(struct drm_crtc *crtc,
 			     int x, int y,
 			     struct drm_framebuffer *old_fb);
@@ -462,8 +466,12 @@ struct i915_gtt {
 	void __iomem *gsm;
 
 	bool do_idle_maps;
-	dma_addr_t scratch_page_dma;
-	struct page *scratch_page;
+	struct {
+		dma_addr_t addr;
+		struct page *page;
+	} scratch;
+
+	int mtrr;
 
 	/* global gtt ops */
 	int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
@@ -477,21 +485,17 @@ struct i915_gtt {
 			   struct sg_table *st,
 			   unsigned int pg_start,
 			   enum i915_cache_level cache_level);
-	gen6_gtt_pte_t (*pte_encode)(struct drm_device *dev,
-				     dma_addr_t addr,
+	gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
 				     enum i915_cache_level level);
 };
 #define gtt_total_entries(gtt) ((gtt).total >> PAGE_SHIFT)
 
-#define I915_PPGTT_PD_ENTRIES 512
-#define I915_PPGTT_PT_ENTRIES 1024
 struct i915_hw_ppgtt {
 	struct drm_device *dev;
 	unsigned num_pd_entries;
 	struct page **pt_pages;
 	uint32_t pd_offset;
 	dma_addr_t *pt_dma_addr;
-	dma_addr_t scratch_page_dma_addr;
 
 	/* pte functions, mirroring the interface of the global gtt. */
 	void (*clear_range)(struct i915_hw_ppgtt *ppgtt,
@@ -501,8 +505,7 @@ struct i915_hw_ppgtt {
 			   struct sg_table *st,
 			   unsigned int pg_start,
 			   enum i915_cache_level cache_level);
-	gen6_gtt_pte_t (*pte_encode)(struct drm_device *dev,
-				     dma_addr_t addr,
+	gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
 				     enum i915_cache_level level);
 	int (*enable)(struct drm_device *dev);
 	void (*cleanup)(struct i915_hw_ppgtt *ppgtt);
@@ -528,17 +531,36 @@ struct i915_hw_context {
 	struct i915_ctx_hang_stats hang_stats;
 };
 
-enum no_fbc_reason {
-	FBC_NO_OUTPUT, /* no outputs enabled to compress */
-	FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */
-	FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
-	FBC_MODE_TOO_LARGE, /* mode too large for compression */
-	FBC_BAD_PLANE, /* fbc not supported on plane */
-	FBC_NOT_TILED, /* buffer not tiled */
-	FBC_MULTIPLE_PIPES, /* more than one pipe active */
-	FBC_MODULE_PARAM,
+struct i915_fbc {
+	unsigned long size;
+	unsigned int fb_id;
+	enum plane plane;
+	int y;
+
+	struct drm_mm_node *compressed_fb;
+	struct drm_mm_node *compressed_llb;
+
+	struct intel_fbc_work {
+		struct delayed_work work;
+		struct drm_crtc *crtc;
+		struct drm_framebuffer *fb;
+		int interval;
+	} *fbc_work;
+
+	enum {
+		FBC_NO_OUTPUT, /* no outputs enabled to compress */
+		FBC_STOLEN_TOO_SMALL, /* not enough space for buffers */
+		FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
+		FBC_MODE_TOO_LARGE, /* mode too large for compression */
+		FBC_BAD_PLANE, /* fbc not supported on plane */
+		FBC_NOT_TILED, /* buffer not tiled */
+		FBC_MULTIPLE_PIPES, /* more than one pipe active */
+		FBC_MODULE_PARAM,
+		FBC_CHIP_DEFAULT, /* disabled by default on this chip */
+	} no_fbc_reason;
 };
 
 enum intel_pch {
 	PCH_NONE = 0,	/* No PCH present */
 	PCH_IBX,	/* Ibexpeak PCH */
@@ -721,12 +743,12 @@ struct i915_suspend_saved_registers {
 };
 
 struct intel_gen6_power_mgmt {
+	/* work and pm_iir are protected by dev_priv->irq_lock */
 	struct work_struct work;
-	struct delayed_work vlv_work;
 	u32 pm_iir;
-	/* lock - irqsave spinlock that protectects the work_struct and
-	 * pm_iir. */
-	spinlock_t lock;
+
+	/* On vlv we need to manually drop to Vmin with a delayed work. */
+	struct delayed_work vlv_work;
 
 	/* The below variables an all the rps hw state are protected by
 	 * dev->struct mutext. */
@@ -792,6 +814,18 @@ struct i915_dri1_state {
 	uint32_t counter;
 };
 
+struct i915_ums_state {
+	/**
+	 * Flag if the X Server, and thus DRM, is not currently in
+	 * control of the device.
+	 *
+	 * This is set between LeaveVT and EnterVT.  It needs to be
+	 * replaced with a semaphore.  It also needs to be
+	 * transitioned away from for kernel modesetting.
+	 */
+	int mm_suspended;
+};
+
 struct intel_l3_parity {
 	u32 *remap_info;
 	struct work_struct error_work;
@@ -815,8 +849,6 @@ struct i915_gem_mm {
 	/** Usable portion of the GTT for GEM */
 	unsigned long stolen_base; /* limited to low memory (32-bit) */
 
-	int gtt_mtrr;
-
 	/** PPGTT used for aliasing the PPGTT with the GTT */
 	struct i915_hw_ppgtt *aliasing_ppgtt;
@@ -864,16 +896,6 @@ struct i915_gem_mm {
 	 */
 	bool interruptible;
 
-	/**
-	 * Flag if the X Server, and thus DRM, is not currently in
-	 * control of the device.
-	 *
-	 * This is set between LeaveVT and EnterVT.  It needs to be
-	 * replaced with a semaphore.  It also needs to be
-	 * transitioned away from for kernel modesetting.
-	 */
-	int suspended;
-
 	/** Bit 6 swizzling required for X tiling */
 	uint32_t bit_6_swizzle_x;
 	/** Bit 6 swizzling required for Y tiling */
@@ -896,6 +918,11 @@ struct drm_i915_error_state_buf {
 	loff_t pos;
 };
 
+struct i915_error_state_file_priv {
+	struct drm_device *dev;
+	struct drm_i915_error_state *error;
+};
+
 struct i915_gpu_error {
 	/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
@@ -1058,12 +1085,7 @@ typedef struct drm_i915_private {
 
 	int num_plane;
 
-	unsigned long cfb_size;
-	unsigned int cfb_fb;
-	enum plane cfb_plane;
-	int cfb_y;
-	struct intel_fbc_work *fbc_work;
-
+	struct i915_fbc fbc;
 	struct intel_opregion opregion;
 	struct intel_vbt_data vbt;
@@ -1080,8 +1102,6 @@ typedef struct drm_i915_private {
 	} backlight;
 
 	/* LVDS info */
-	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
-	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
 	bool no_aux_handshake;
 
 	struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
@@ -1141,11 +1161,6 @@ typedef struct drm_i915_private {
 	/* Haswell power well */
 	struct i915_power_well power_well;
 
-	enum no_fbc_reason no_fbc_reason;
-
-	struct drm_mm_node *compressed_fb;
-	struct drm_mm_node *compressed_llb;
-
 	struct i915_gpu_error gpu_error;
 
 	struct drm_i915_gem_object *vlv_pctx;
@@ -1172,6 +1187,8 @@ typedef struct drm_i915_private {
 	/* Old dri1 support infrastructure, beware the dragons ya fools entering
 	 * here! */
 	struct i915_dri1_state dri1;
+	/* Old ums support infrastructure, same warning applies. */
+	struct i915_ums_state ums;
 } drm_i915_private_t;
 
 /* Iterate over initialised rings */
@@ -1186,7 +1203,7 @@ enum hdmi_force_audio {
 	HDMI_AUDIO_ON,			/* force turn on HDMI audio */
 };
 
-#define I915_GTT_RESERVED ((struct drm_mm_node *)0x1)
+#define I915_GTT_OFFSET_NONE ((u32)-1)
 
 struct drm_i915_gem_object_ops {
 	/* Interface between the GEM object and its backing storage.
@@ -1212,7 +1229,7 @@ struct drm_i915_gem_object {
 	const struct drm_i915_gem_object_ops *ops;
 
 	/** Current space allocated to this object in the GTT, if any. */
-	struct drm_mm_node *gtt_space;
+	struct drm_mm_node gtt_space;
 	/** Stolen memory for this object, instead of being backed by shmem. */
 	struct drm_mm_node *stolen;
 	struct list_head global_list;
@@ -1313,13 +1330,6 @@ struct drm_i915_gem_object {
 	unsigned long exec_handle;
 	struct drm_i915_gem_exec_object2 *exec_entry;
 
-	/**
-	 * Current offset of the object in GTT space.
-	 *
-	 * This is the same as gtt_space->start
-	 */
-	uint32_t gtt_offset;
-
 	struct intel_ring_buffer *ring;
 
 	/** Breadcrumb of last rendering to the buffer. */
@@ -1345,6 +1355,37 @@ struct drm_i915_gem_object {
 
#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
 
+/* Offset of the first PTE pointing to this object */
+static inline unsigned long
+i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o)
+{
+	return o->gtt_space.start;
+}
+
+/* Whether or not this object is currently mapped by the translation tables */
+static inline bool
+i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *o)
+{
+	return drm_mm_node_allocated(&o->gtt_space);
+}
+
+/* The size used in the translation tables may be larger than the actual size of
+ * the object on GEN2/GEN3 because of the way tiling is handled. See
+ * i915_gem_get_gtt_size() for more details.
+ */
+static inline unsigned long
+i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o)
+{
+	return o->gtt_space.size;
+}
+
+static inline void
+i915_gem_obj_ggtt_set_color(struct drm_i915_gem_object *o,
+			    enum i915_cache_level color)
+{
+	o->gtt_space.color = color;
+}
+
 /**
  * Request queue structure.
  *
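
With gtt_space embedded rather than pointed to, "is this object bound?" can no longer be a NULL check. A minimal sketch of the replacement idiom, using only the helpers defined in the hunk above (the debug print itself is illustrative):

	/* Sketch: query an object's global-GTT mapping via the new helpers
	 * instead of touching obj->gtt_space / obj->gtt_offset directly. */
	static void show_ggtt_mapping(struct drm_i915_gem_object *obj)
	{
		if (!i915_gem_obj_ggtt_bound(obj))	/* was: obj->gtt_space == NULL */
			return;

		DRM_DEBUG("bound at 0x%08lx, size %lu\n",
			  i915_gem_obj_ggtt_offset(obj),	/* was: obj->gtt_offset */
			  i915_gem_obj_ggtt_size(obj));		/* was: obj->gtt_space->size */
	}
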
@@ -1542,6 +1583,7 @@ extern int i915_enable_ppgtt __read_mostly;
 extern unsigned int i915_preliminary_hw_support __read_mostly;
 extern int i915_disable_power_well __read_mostly;
 extern int i915_enable_ips __read_mostly;
+extern bool i915_fastboot __read_mostly;
 
 extern int i915_suspend(struct drm_device *dev, pm_message_t state);
 extern int i915_resume(struct drm_device *dev);
@@ -1585,21 +1627,12 @@ extern void intel_hpd_init(struct drm_device *dev);
 extern void intel_gt_init(struct drm_device *dev);
 extern void intel_gt_reset(struct drm_device *dev);
 
-void i915_error_state_free(struct kref *error_ref);
-
 void
 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
 
 void
 i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
 
-#ifdef CONFIG_DEBUG_FS
-extern void i915_destroy_error_state(struct drm_device *dev);
-#else
-#define i915_destroy_error_state(x)
-#endif
-
 /* i915_gem.c */
 int i915_gem_init_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
@@ -1910,8 +1943,27 @@ void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
 /* i915_debugfs.c */
 int i915_debugfs_init(struct drm_minor *minor);
 void i915_debugfs_cleanup(struct drm_minor *minor);
+
+/* i915_gpu_error.c */
 __printf(2, 3)
 void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
+int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
+			    const struct i915_error_state_file_priv *error);
+int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
+			      size_t count, loff_t pos);
+static inline void i915_error_state_buf_release(
+	struct drm_i915_error_state_buf *eb)
+{
+	kfree(eb->buf);
+}
+void i915_capture_error_state(struct drm_device *dev);
+void i915_error_state_get(struct drm_device *dev,
+			  struct i915_error_state_file_priv *error_priv);
+void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
+void i915_destroy_error_state(struct drm_device *dev);
+
+void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
+const char *i915_cache_level_str(int type);
 
 /* i915_suspend.c */
 extern int i915_save_state(struct drm_device *dev);
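
Taken together, the declarations above form the capture/read API behind the now-official error state file. A rough sketch of how a reader strings them together (an assumed flow modelled on the sysfs/debugfs consumers of this interface; the bytes field name is an assumption, error handling trimmed):

	/* Sketch: render the last captured GPU error state into 'out'. */
	static ssize_t dump_error_state(struct drm_device *dev,
					char *out, size_t count)
	{
		struct i915_error_state_file_priv error_priv;
		struct drm_i915_error_state_buf error_str;
		ssize_t ret;

		error_priv.dev = dev;
		i915_error_state_get(dev, &error_priv);	/* grabs a reference */

		ret = i915_error_state_buf_init(&error_str, count, 0);
		if (ret)
			goto out_put;

		ret = i915_error_state_to_str(&error_str, &error_priv);
		if (ret == 0) {
			memcpy(out, error_str.buf, error_str.bytes);	/* field name assumed */
			ret = error_str.bytes;
		}

		i915_error_state_buf_release(&error_str);
	out_put:
		i915_error_state_put(&error_priv);	/* drops the reference */
		return ret;
	}
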
@@ -1991,7 +2043,6 @@ int i915_reg_read_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file);
 
 /* overlay */
-#ifdef CONFIG_DEBUG_FS
 extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
 extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
 					    struct intel_overlay_error_state *error);
@@ -2000,7 +2051,6 @@ extern struct intel_display_error_state *intel_display_capture_error_state(struc
 extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
 					    struct drm_device *dev,
 					    struct intel_display_error_state *error);
-#endif
 
 /* On SNB platform, before reading ring registers forcewake bit
  * must be set to prevent GT core from power down and stale values being
......
@@ -135,7 +135,7 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
 static inline bool
 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
 {
-	return obj->gtt_space && !obj->active;
+	return i915_gem_obj_ggtt_bound(obj) && !obj->active;
 }
 
 int
@@ -178,7 +178,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 	mutex_lock(&dev->struct_mutex);
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
 		if (obj->pin_count)
-			pinned += obj->gtt_space->size;
+			pinned += i915_gem_obj_ggtt_size(obj);
 	mutex_unlock(&dev->struct_mutex);
 
 	args->aper_size = dev_priv->gtt.total;
@@ -422,7 +422,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
 		 * anyway again before the next pread happens. */
 		if (obj->cache_level == I915_CACHE_NONE)
 			needs_clflush = 1;
-		if (obj->gtt_space) {
+		if (i915_gem_obj_ggtt_bound(obj)) {
 			ret = i915_gem_object_set_to_gtt_domain(obj, false);
 			if (ret)
 				return ret;
@@ -609,7 +609,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
 	user_data = to_user_ptr(args->data_ptr);
 	remain = args->size;
 
-	offset = obj->gtt_offset + args->offset;
+	offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
 
 	while (remain > 0) {
 		/* Operation in this page
@@ -739,7 +739,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
 		 * right away and we therefore have to clflush anyway. */
 		if (obj->cache_level == I915_CACHE_NONE)
 			needs_clflush_after = 1;
-		if (obj->gtt_space) {
+		if (i915_gem_obj_ggtt_bound(obj)) {
 			ret = i915_gem_object_set_to_gtt_domain(obj, true);
 			if (ret)
 				return ret;
@@ -1360,8 +1360,9 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 
 	obj->fault_mappable = true;
 
-	pfn = ((dev_priv->gtt.mappable_base + obj->gtt_offset) >> PAGE_SHIFT) +
-		page_offset;
+	pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
+	pfn >>= PAGE_SHIFT;
+	pfn += page_offset;
 
 	/* Finally, remap it using the new GTT offset */
 	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
@@ -1667,7 +1668,7 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
 	if (obj->pages == NULL)
 		return 0;
 
-	BUG_ON(obj->gtt_space);
+	BUG_ON(i915_gem_obj_ggtt_bound(obj));
 
 	if (obj->pages_pin_count)
 		return -EBUSY;
@@ -2085,7 +2086,7 @@ int __i915_add_request(struct intel_ring_buffer *ring,
 	trace_i915_gem_request_add(ring, request->seqno);
 	ring->outstanding_lazy_request = 0;
 
-	if (!dev_priv->mm.suspended) {
+	if (!dev_priv->ums.mm_suspended) {
 		if (i915_enable_hangcheck) {
 			mod_timer(&dev_priv->gpu_error.hangcheck_timer,
 				  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
@@ -2121,8 +2122,8 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
 
 static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj)
 {
-	if (acthd >= obj->gtt_offset &&
-	    acthd < obj->gtt_offset + obj->base.size)
+	if (acthd >= i915_gem_obj_ggtt_offset(obj) &&
+	    acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size)
 		return true;
 
 	return false;
@@ -2180,11 +2181,11 @@ static void i915_set_reset_status(struct intel_ring_buffer *ring,
 
 	if (ring->hangcheck.action != wait &&
 	    i915_request_guilty(request, acthd, &inside)) {
-		DRM_ERROR("%s hung %s bo (0x%x ctx %d) at 0x%x\n",
+		DRM_ERROR("%s hung %s bo (0x%lx ctx %d) at 0x%x\n",
 			  ring->name,
 			  inside ? "inside" : "flushing",
 			  request->batch_obj ?
-			  request->batch_obj->gtt_offset : 0,
+			  i915_gem_obj_ggtt_offset(request->batch_obj) : 0,
 			  request->ctx ? request->ctx->id : 0,
 			  acthd);
 
@@ -2390,7 +2391,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
 		idle &= list_empty(&ring->request_list);
 	}
 
-	if (!dev_priv->mm.suspended && !idle)
+	if (!dev_priv->ums.mm_suspended && !idle)
 		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
 				   round_jiffies_up_relative(HZ));
 	if (idle)
@@ -2585,7 +2586,7 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
 	int ret;
 
-	if (obj->gtt_space == NULL)
+	if (!i915_gem_obj_ggtt_bound(obj))
 		return 0;
 
 	if (obj->pin_count)
@@ -2624,9 +2625,7 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 	/* Avoid an unnecessary call to unbind on rebind. */
 	obj->map_and_fenceable = true;
 
-	drm_mm_put_block(obj->gtt_space);
-	obj->gtt_space = NULL;
-	obj->gtt_offset = 0;
+	drm_mm_remove_node(&obj->gtt_space);
 
 	return 0;
 }
@@ -2681,12 +2680,12 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
 	POSTING_READ(fence_reg);
 
 	if (obj) {
-		u32 size = obj->gtt_space->size;
+		u32 size = i915_gem_obj_ggtt_size(obj);
 		uint64_t val;
 
-		val = (uint64_t)((obj->gtt_offset + size - 4096) &
+		val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
 				 0xfffff000) << 32;
-		val |= obj->gtt_offset & 0xfffff000;
+		val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
 		val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
 		if (obj->tiling_mode == I915_TILING_Y)
 			val |= 1 << I965_FENCE_TILING_Y_SHIFT;
@@ -2710,15 +2709,15 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg,
 	u32 val;
 
 	if (obj) {
-		u32 size = obj->gtt_space->size;
+		u32 size = i915_gem_obj_ggtt_size(obj);
 		int pitch_val;
 		int tile_width;
 
-		WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
+		WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
 		     (size & -size) != size ||
-		     (obj->gtt_offset & (size - 1)),
-		     "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
-		     obj->gtt_offset, obj->map_and_fenceable, size);
+		     (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
+		     "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
+		     i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
 
 		if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
 			tile_width = 128;
@@ -2729,7 +2728,7 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg,
 		pitch_val = obj->stride / tile_width;
 		pitch_val = ffs(pitch_val) - 1;
 
-		val = obj->gtt_offset;
+		val = i915_gem_obj_ggtt_offset(obj);
 		if (obj->tiling_mode == I915_TILING_Y)
 			val |= 1 << I830_FENCE_TILING_Y_SHIFT;
 		val |= I915_FENCE_SIZE_BITS(size);
@@ -2754,19 +2753,19 @@ static void i830_write_fence_reg(struct drm_device *dev, int reg,
 	uint32_t val;
 
 	if (obj) {
-		u32 size = obj->gtt_space->size;
+		u32 size = i915_gem_obj_ggtt_size(obj);
 		uint32_t pitch_val;
 
-		WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
+		WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
 		     (size & -size) != size ||
-		     (obj->gtt_offset & (size - 1)),
-		     "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
-		     obj->gtt_offset, size);
+		     (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
+		     "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
+		     i915_gem_obj_ggtt_offset(obj), size);
 
 		pitch_val = obj->stride / 128;
 		pitch_val = ffs(pitch_val) - 1;
 
-		val = obj->gtt_offset;
+		val = i915_gem_obj_ggtt_offset(obj);
 		if (obj->tiling_mode == I915_TILING_Y)
 			val |= 1 << I830_FENCE_TILING_Y_SHIFT;
 		val |= I830_FENCE_SIZE_BITS(size);
@@ -2983,7 +2982,7 @@ static bool i915_gem_valid_gtt_space(struct drm_device *dev,
 	if (HAS_LLC(dev))
 		return true;
 
-	if (gtt_space == NULL)
+	if (!drm_mm_node_allocated(gtt_space))
 		return true;
 
 	if (list_empty(&gtt_space->node_list))
@@ -3016,8 +3015,8 @@ static void i915_gem_verify_gtt(struct drm_device *dev)
 
 		if (obj->cache_level != obj->gtt_space->color) {
 			printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
-			       obj->gtt_space->start,
-			       obj->gtt_space->start + obj->gtt_space->size,
+			       i915_gem_obj_ggtt_offset(obj),
+			       i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
 			       obj->cache_level,
 			       obj->gtt_space->color);
 			err++;
@@ -3028,8 +3027,8 @@ static void i915_gem_verify_gtt(struct drm_device *dev)
 					      obj->gtt_space,
 					      obj->cache_level)) {
 			printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
-			       obj->gtt_space->start,
-			       obj->gtt_space->start + obj->gtt_space->size,
+			       i915_gem_obj_ggtt_offset(obj),
+			       i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
 			       obj->cache_level);
 			err++;
 			continue;
@@ -3051,7 +3050,6 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 {
 	struct drm_device *dev = obj->base.dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_mm_node *node;
 	u32 size, fence_size, fence_alignment, unfenced_alignment;
 	bool mappable, fenceable;
 	size_t gtt_max = map_and_fenceable ?
@@ -3096,14 +3094,9 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 
 	i915_gem_object_pin_pages(obj);
 
-	node = kzalloc(sizeof(*node), GFP_KERNEL);
-	if (node == NULL) {
-		i915_gem_object_unpin_pages(obj);
-		return -ENOMEM;
-	}
-
search_free:
-	ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
+	ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space,
+						  &obj->gtt_space,
 						  size, alignment,
 						  obj->cache_level, 0, gtt_max);
 	if (ret) {
@@ -3115,34 +3108,31 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 			goto search_free;
 
 		i915_gem_object_unpin_pages(obj);
-		kfree(node);
 		return ret;
 	}
-	if (WARN_ON(!i915_gem_valid_gtt_space(dev, node, obj->cache_level))) {
+	if (WARN_ON(!i915_gem_valid_gtt_space(dev, &obj->gtt_space,
+					      obj->cache_level))) {
 		i915_gem_object_unpin_pages(obj);
-		drm_mm_put_block(node);
+		drm_mm_remove_node(&obj->gtt_space);
 		return -EINVAL;
 	}
 
 	ret = i915_gem_gtt_prepare_object(obj);
 	if (ret) {
 		i915_gem_object_unpin_pages(obj);
-		drm_mm_put_block(node);
+		drm_mm_remove_node(&obj->gtt_space);
 		return ret;
 	}
 
 	list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
 	list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
 
-	obj->gtt_space = node;
-	obj->gtt_offset = node->start;
-
 	fenceable =
-		node->size == fence_size &&
-		(node->start & (fence_alignment - 1)) == 0;
+		i915_gem_obj_ggtt_size(obj) == fence_size &&
+		(i915_gem_obj_ggtt_offset(obj) & (fence_alignment - 1)) == 0;
 
-	mappable =
-		obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end;
+	mappable = i915_gem_obj_ggtt_offset(obj) + obj->base.size <=
+		dev_priv->gtt.mappable_end;
 
 	obj->map_and_fenceable = mappable && fenceable;
@@ -3244,7 +3234,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 	int ret;
 
 	/* Not valid to be called on unbound objects. */
-	if (obj->gtt_space == NULL)
+	if (!i915_gem_obj_ggtt_bound(obj))
 		return -EINVAL;
 
 	if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
@@ -3303,13 +3293,13 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 		return -EBUSY;
 	}
 
-	if (!i915_gem_valid_gtt_space(dev, obj->gtt_space, cache_level)) {
+	if (!i915_gem_valid_gtt_space(dev, &obj->gtt_space, cache_level)) {
 		ret = i915_gem_object_unbind(obj);
 		if (ret)
 			return ret;
 	}
 
-	if (obj->gtt_space) {
+	if (i915_gem_obj_ggtt_bound(obj)) {
 		ret = i915_gem_object_finish_gpu(obj);
 		if (ret)
 			return ret;
@@ -3332,7 +3322,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 			i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
 					       obj, cache_level);
 
-		obj->gtt_space->color = cache_level;
+		i915_gem_obj_ggtt_set_color(obj, cache_level);
 	}
 
 	if (cache_level == I915_CACHE_NONE) {
@@ -3613,14 +3603,14 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
 	if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
 		return -EBUSY;
 
-	if (obj->gtt_space != NULL) {
-		if ((alignment && obj->gtt_offset & (alignment - 1)) ||
+	if (i915_gem_obj_ggtt_bound(obj)) {
+		if ((alignment && i915_gem_obj_ggtt_offset(obj) & (alignment - 1)) ||
 		    (map_and_fenceable && !obj->map_and_fenceable)) {
 			WARN(obj->pin_count,
 			     "bo is already pinned with incorrect alignment:"
-			     " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
+			     " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
 			     " obj->map_and_fenceable=%d\n",
-			     obj->gtt_offset, alignment,
+			     i915_gem_obj_ggtt_offset(obj), alignment,
 			     map_and_fenceable,
 			     obj->map_and_fenceable);
 			ret = i915_gem_object_unbind(obj);
@@ -3629,7 +3619,7 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
 		}
 	}
 
-	if (obj->gtt_space == NULL) {
+	if (!i915_gem_obj_ggtt_bound(obj)) {
 		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
 
 		ret = i915_gem_object_bind_to_gtt(obj, alignment,
@@ -3655,7 +3645,7 @@ void
 i915_gem_object_unpin(struct drm_i915_gem_object *obj)
 {
 	BUG_ON(obj->pin_count == 0);
-	BUG_ON(obj->gtt_space == NULL);
+	BUG_ON(!i915_gem_obj_ggtt_bound(obj));
 
 	if (--obj->pin_count == 0)
 		obj->pin_mappable = false;
@@ -3705,7 +3695,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
 	 * as the X server doesn't manage domains yet
	 */
 	i915_gem_object_flush_cpu_write_domain(obj);
-	args->offset = obj->gtt_offset;
+	args->offset = i915_gem_obj_ggtt_offset(obj);
out:
 	drm_gem_object_unreference(&obj->base);
unlock:
@@ -3974,9 +3964,7 @@ i915_gem_idle(struct drm_device *dev)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	int ret;
 
-	mutex_lock(&dev->struct_mutex);
-
-	if (dev_priv->mm.suspended) {
+	if (dev_priv->ums.mm_suspended) {
 		mutex_unlock(&dev->struct_mutex);
 		return 0;
 	}
@@ -3992,18 +3980,11 @@ i915_gem_idle(struct drm_device *dev)
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		i915_gem_evict_everything(dev);
 
-	/* Hack!  Don't let anybody do execbuf while we don't control the chip.
-	 * We need to replace this with a semaphore, or something.
-	 * And not confound mm.suspended!
-	 */
-	dev_priv->mm.suspended = 1;
 	del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
 
 	i915_kernel_lost_context(dev);
 	i915_gem_cleanup_ringbuffer(dev);
 
-	mutex_unlock(&dev->struct_mutex);
-
 	/* Cancel the retire work handler, which should be idle now. */
 	cancel_delayed_work_sync(&dev_priv->mm.retire_work);
 
@@ -4213,7 +4194,7 @@ int
 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
 		       struct drm_file *file_priv)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
@@ -4225,7 +4206,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
 	}
 
 	mutex_lock(&dev->struct_mutex);
-	dev_priv->mm.suspended = 0;
+	dev_priv->ums.mm_suspended = 0;
 
 	ret = i915_gem_init_hw(dev);
 	if (ret != 0) {
@@ -4245,7 +4226,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
cleanup_ringbuffer:
 	mutex_lock(&dev->struct_mutex);
 	i915_gem_cleanup_ringbuffer(dev);
-	dev_priv->mm.suspended = 1;
+	dev_priv->ums.mm_suspended = 1;
 	mutex_unlock(&dev->struct_mutex);
 
 	return ret;
@@ -4255,11 +4236,26 @@ int
 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
 		       struct drm_file *file_priv)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		return 0;
 
 	drm_irq_uninstall(dev);
-	return i915_gem_idle(dev);
+
+	mutex_lock(&dev->struct_mutex);
+	ret = i915_gem_idle(dev);
+
+	/* Hack!  Don't let anybody do execbuf while we don't control the chip.
+	 * We need to replace this with a semaphore, or something.
+	 * And not confound ums.mm_suspended!
	 */
+	if (ret != 0)
+		dev_priv->ums.mm_suspended = 1;
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
 }
 
 void
@@ -4270,9 +4266,11 @@ i915_gem_lastclose(struct drm_device *dev)
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		return;
 
+	mutex_lock(&dev->struct_mutex);
 	ret = i915_gem_idle(dev);
 	if (ret)
 		DRM_ERROR("failed to idle hardware: %d\n", ret);
+	mutex_unlock(&dev->struct_mutex);
 }
 
 static void
......
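
One recurring change in the file above: i915_gem_idle() no longer takes struct_mutex itself (nor flips the suspended flag), so every caller now brackets it the same way. The new convention, in brief:

	/* Callers of i915_gem_idle() now provide the locking themselves. */
	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_idle(dev);
	mutex_unlock(&dev->struct_mutex);
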
@@ -377,7 +377,7 @@ mi_set_context(struct intel_ring_buffer *ring,
 	intel_ring_emit(ring, MI_NOOP);
 	intel_ring_emit(ring, MI_SET_CONTEXT);
-	intel_ring_emit(ring, new_context->obj->gtt_offset |
+	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->obj) |
 			MI_MM_SPACE_GTT |
 			MI_SAVE_EXT_STATE_EN |
 			MI_RESTORE_EXT_STATE_EN |
......
@@ -38,7 +38,7 @@ mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
 		return false;
 
 	list_add(&obj->exec_list, unwind);
-	return drm_mm_scan_add_block(obj->gtt_space);
+	return drm_mm_scan_add_block(&obj->gtt_space);
 }
 
 int
@@ -107,7 +107,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
 				       struct drm_i915_gem_object,
 				       exec_list);
-		ret = drm_mm_scan_remove_block(obj->gtt_space);
+		ret = drm_mm_scan_remove_block(&obj->gtt_space);
 		BUG_ON(ret);
 
 		list_del_init(&obj->exec_list);
@@ -127,7 +127,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
 		obj = list_first_entry(&unwind_list,
 				       struct drm_i915_gem_object,
 				       exec_list);
-		if (drm_mm_scan_remove_block(obj->gtt_space)) {
+		if (drm_mm_scan_remove_block(&obj->gtt_space)) {
 			list_move(&obj->exec_list, &eviction_list);
 			drm_gem_object_reference(&obj->base);
 			continue;
......
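
These hunks lean on drm_mm's scan protocol, now with embedded nodes. A sketch of the pattern (the drm_mm_init_scan() signature is assumed from the 3.11-era API; list names come from the i915 code in this diff):

	LIST_HEAD(unwind);
	struct drm_i915_gem_object *obj, *next;
	bool found = false;

	drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment, cache_level);

	list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
		list_add(&obj->exec_list, &unwind);
		if (drm_mm_scan_add_block(&obj->gtt_space)) {	/* note the &: embedded node */
			found = true;	/* a large-enough hole exists */
			break;
		}
	}

	/* Every scanned block must be removed again, in reverse order of
	 * addition, before the manager can be used normally -- whether or
	 * not a suitable hole was found. */
	list_for_each_entry_safe(obj, next, &unwind, exec_list) {
		drm_mm_scan_remove_block(&obj->gtt_space);
		list_del_init(&obj->exec_list);
	}
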
@@ -188,7 +188,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 		return -ENOENT;

 	target_i915_obj = to_intel_bo(target_obj);
-	target_offset = target_i915_obj->gtt_offset;
+	target_offset = i915_gem_obj_ggtt_offset(target_i915_obj);

 	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
 	 * pipe_control writes because the gpu doesn't properly redirect them
@@ -280,7 +280,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 		return ret;

 	/* Map the page containing the relocation we're going to perform. */
-	reloc->offset += obj->gtt_offset;
+	reloc->offset += i915_gem_obj_ggtt_offset(obj);
 	reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
 					      reloc->offset & PAGE_MASK);
 	reloc_entry = (uint32_t __iomem *)
@@ -436,8 +436,8 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
 		obj->has_aliasing_ppgtt_mapping = 1;
 	}

-	if (entry->offset != obj->gtt_offset) {
-		entry->offset = obj->gtt_offset;
+	if (entry->offset != i915_gem_obj_ggtt_offset(obj)) {
+		entry->offset = i915_gem_obj_ggtt_offset(obj);
 		*need_reloc = true;
 	}

@@ -458,7 +458,7 @@ i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
 {
 	struct drm_i915_gem_exec_object2 *entry;

-	if (!obj->gtt_space)
+	if (!i915_gem_obj_ggtt_bound(obj))
 		return;

 	entry = obj->exec_entry;
@@ -530,7 +530,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 		struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
 		bool need_fence, need_mappable;

-		if (!obj->gtt_space)
+		if (!i915_gem_obj_ggtt_bound(obj))
 			continue;

 		need_fence =
@@ -539,7 +539,8 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 			obj->tiling_mode != I915_TILING_NONE;
 		need_mappable = need_fence || need_reloc_mappable(obj);

-		if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) ||
+		if ((entry->alignment &&
+		     i915_gem_obj_ggtt_offset(obj) & (entry->alignment - 1)) ||
 		    (need_mappable && !obj->map_and_fenceable))
 			ret = i915_gem_object_unbind(obj);
 		else
@@ -550,7 +551,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 		/* Bind fresh objects */
 		list_for_each_entry(obj, objects, exec_list) {
-			if (obj->gtt_space)
+			if (i915_gem_obj_ggtt_bound(obj))
 				continue;

 			ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs);
@@ -972,7 +973,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	if (ret)
 		goto pre_mutex_err;

-	if (dev_priv->mm.suspended) {
+	if (dev_priv->ums.mm_suspended) {
 		mutex_unlock(&dev->struct_mutex);
 		ret = -EBUSY;
 		goto pre_mutex_err;
@@ -1058,7 +1059,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		goto err;
 	}

-	exec_start = batch_obj->gtt_offset + args->batch_start_offset;
+	exec_start = i915_gem_obj_ggtt_offset(batch_obj) + args->batch_start_offset;
 	exec_len = args->batch_len;
 	if (cliprects) {
 		for (i = 0; i < args->num_cliprects; i++) {
...
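Most of the churn in this file is mechanical: direct reads of `obj->gtt_offset` and NULL-checks of the old `obj->gtt_space` pointer become accessor calls. A minimal sketch of what such accessors can look like once the node is embedded; the real definitions live in the i915 headers, so treat these as illustrative rather than authoritative:

```c
/* Illustrative only: accessor sketches assuming gtt_space is now an
 * embedded struct drm_mm_node instead of a pointer. */
static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
{
	return drm_mm_node_allocated(&obj->gtt_space);
}

static inline unsigned long i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *obj)
{
	return obj->gtt_space.start;
}

static inline unsigned long i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj)
{
	return obj->gtt_space.size;
}
```

Centralizing the lookup behind one name means the later VMA rework only has to change these helpers, not every call site again.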
@@ -28,6 +28,9 @@
 #include "i915_trace.h"
 #include "intel_drv.h"

+#define GEN6_PPGTT_PD_ENTRIES 512
+#define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t))
+
 /* PPGTT stuff */
 #define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))

@@ -42,8 +45,7 @@
 #define GEN6_PTE_CACHE_LLC_MLC		(3 << 1)
 #define GEN6_PTE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)

-static gen6_gtt_pte_t gen6_pte_encode(struct drm_device *dev,
-				      dma_addr_t addr,
+static gen6_gtt_pte_t gen6_pte_encode(dma_addr_t addr,
 				      enum i915_cache_level level)
 {
 	gen6_gtt_pte_t pte = GEN6_PTE_VALID;
@@ -69,8 +71,7 @@ static gen6_gtt_pte_t gen6_pte_encode(struct drm_device *dev,
 #define BYT_PTE_WRITEABLE		(1 << 1)
 #define BYT_PTE_SNOOPED_BY_CPU_CACHES	(1 << 2)

-static gen6_gtt_pte_t byt_pte_encode(struct drm_device *dev,
-				     dma_addr_t addr,
+static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
 				     enum i915_cache_level level)
 {
 	gen6_gtt_pte_t pte = GEN6_PTE_VALID;
@@ -87,8 +88,7 @@ static gen6_gtt_pte_t byt_pte_encode(struct drm_device *dev,
 	return pte;
 }

-static gen6_gtt_pte_t hsw_pte_encode(struct drm_device *dev,
-				     dma_addr_t addr,
+static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
 				     enum i915_cache_level level)
 {
 	gen6_gtt_pte_t pte = GEN6_PTE_VALID;
@@ -185,13 +185,13 @@ static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
 				   unsigned first_entry,
 				   unsigned num_entries)
 {
+	struct drm_i915_private *dev_priv = ppgtt->dev->dev_private;
 	gen6_gtt_pte_t *pt_vaddr, scratch_pte;
 	unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
 	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
 	unsigned last_pte, i;

-	scratch_pte = ppgtt->pte_encode(ppgtt->dev,
-					ppgtt->scratch_page_dma_addr,
+	scratch_pte = ppgtt->pte_encode(dev_priv->gtt.scratch.addr,
 					I915_CACHE_LLC);

 	while (num_entries) {
@@ -227,8 +227,7 @@ static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt,
 		dma_addr_t page_addr;

 		page_addr = sg_page_iter_dma_address(&sg_iter);
-		pt_vaddr[act_pte] = ppgtt->pte_encode(ppgtt->dev, page_addr,
-						      cache_level);
+		pt_vaddr[act_pte] = ppgtt->pte_encode(page_addr, cache_level);
 		if (++act_pte == I915_PPGTT_PT_ENTRIES) {
 			kunmap_atomic(pt_vaddr);
 			act_pt++;
@@ -278,7 +277,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 	} else {
 		ppgtt->pte_encode = gen6_pte_encode;
 	}
-	ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
+	ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES;
 	ppgtt->enable = gen6_ppgtt_enable;
 	ppgtt->clear_range = gen6_ppgtt_clear_range;
 	ppgtt->insert_entries = gen6_ppgtt_insert_entries;
@@ -348,7 +347,6 @@ static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
 		return -ENOMEM;

 	ppgtt->dev = dev;
-	ppgtt->scratch_page_dma_addr = dev_priv->gtt.scratch_page_dma;

 	if (INTEL_INFO(dev)->gen < 8)
 		ret = gen6_ppgtt_init(ppgtt);
@@ -380,7 +378,7 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
 			    enum i915_cache_level cache_level)
 {
 	ppgtt->insert_entries(ppgtt, obj->pages,
-			      obj->gtt_space->start >> PAGE_SHIFT,
+			      i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
 			      cache_level);
 }

@@ -388,7 +386,7 @@ void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
 			      struct drm_i915_gem_object *obj)
 {
 	ppgtt->clear_range(ppgtt,
-			   obj->gtt_space->start >> PAGE_SHIFT,
+			   i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
 			   obj->base.size >> PAGE_SHIFT);
 }

@@ -480,7 +478,7 @@ static void gen6_ggtt_insert_entries(struct drm_device *dev,
 	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
 		addr = sg_page_iter_dma_address(&sg_iter);
-		iowrite32(dev_priv->gtt.pte_encode(dev, addr, level),
+		iowrite32(dev_priv->gtt.pte_encode(addr, level),
 			  &gtt_entries[i]);
 		i++;
 	}
@@ -493,7 +491,7 @@ static void gen6_ggtt_insert_entries(struct drm_device *dev,
 	 */
 	if (i != 0)
 		WARN_ON(readl(&gtt_entries[i-1])
-			!= dev_priv->gtt.pte_encode(dev, addr, level));
+			!= dev_priv->gtt.pte_encode(addr, level));

 	/* This next bit makes the above posting read even more important. We
 	 * want to flush the TLBs only after we're certain all the PTE updates
@@ -518,8 +516,7 @@ static void gen6_ggtt_clear_range(struct drm_device *dev,
 		     first_entry, num_entries, max_entries))
 		num_entries = max_entries;

-	scratch_pte = dev_priv->gtt.pte_encode(dev,
-					       dev_priv->gtt.scratch_page_dma,
+	scratch_pte = dev_priv->gtt.pte_encode(dev_priv->gtt.scratch.addr,
 					       I915_CACHE_LLC);
 	for (i = 0; i < num_entries; i++)
 		iowrite32(scratch_pte, &gtt_base[i]);
@@ -554,7 +551,7 @@ void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
 	struct drm_i915_private *dev_priv = dev->dev_private;

 	dev_priv->gtt.gtt_insert_entries(dev, obj->pages,
-					 obj->gtt_space->start >> PAGE_SHIFT,
+					 i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
 					 cache_level);
 	obj->has_global_gtt_mapping = 1;
@@ -566,7 +563,7 @@ void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
 	struct drm_i915_private *dev_priv = dev->dev_private;

 	dev_priv->gtt.gtt_clear_range(obj->base.dev,
-				      obj->gtt_space->start >> PAGE_SHIFT,
+				      i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
 				      obj->base.size >> PAGE_SHIFT);
 	obj->has_global_gtt_mapping = 0;
@@ -632,14 +629,15 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
 	/* Mark any preallocated objects as occupied */
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-		DRM_DEBUG_KMS("reserving preallocated space: %x + %zx\n",
-			      obj->gtt_offset, obj->base.size);
-
-		BUG_ON(obj->gtt_space != I915_GTT_RESERVED);
-		obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space,
-						     obj->gtt_offset,
-						     obj->base.size,
-						     false);
+		int ret;
+		DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n",
+			      i915_gem_obj_ggtt_offset(obj), obj->base.size);
+
+		WARN_ON(i915_gem_obj_ggtt_bound(obj));
+		ret = drm_mm_reserve_node(&dev_priv->mm.gtt_space,
+					  &obj->gtt_space);
+		if (ret)
+			DRM_DEBUG_KMS("Reservation failed\n");
 		obj->has_global_gtt_mapping = 1;
 	}

@@ -688,7 +686,7 @@ void i915_gem_init_global_gtt(struct drm_device *dev)
 	if (INTEL_INFO(dev)->gen <= 7) {
 		/* PPGTT pdes are stolen from global gtt ptes, so shrink the
 		 * aperture accordingly when using aliasing ppgtt. */
-		gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
+		gtt_size -= GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE;
 	}

 	i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
@@ -699,7 +697,7 @@ void i915_gem_init_global_gtt(struct drm_device *dev)
 		DRM_ERROR("Aliased PPGTT setup failed %d\n", ret);
 		drm_mm_takedown(&dev_priv->mm.gtt_space);
-		gtt_size += I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
+		gtt_size += GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE;
 	}
 	i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
 }
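As a quick sanity check on the numbers above: each gen6 page table holds I915_PPGTT_PT_ENTRIES = 4096 / 4 = 1024 four-byte PTEs, so GEN6_PPGTT_PD_ENTRIES = 512 page tables map 512 × 1024 × 4 KiB = 2 GiB of PPGTT address space, while the page tables themselves occupy the 512 × 4 KiB = 2 MiB of global GTT entries that the `gtt_size -= GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE` adjustment carves out of the aperture.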
@@ -724,8 +722,8 @@ static int setup_scratch_page(struct drm_device *dev)
 #else
 	dma_addr = page_to_phys(page);
 #endif
-	dev_priv->gtt.scratch_page = page;
-	dev_priv->gtt.scratch_page_dma = dma_addr;
+	dev_priv->gtt.scratch.page = page;
+	dev_priv->gtt.scratch.addr = dma_addr;

 	return 0;
 }
@@ -733,11 +731,11 @@ static int setup_scratch_page(struct drm_device *dev)
 static void teardown_scratch_page(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	set_pages_wb(dev_priv->gtt.scratch_page, 1);
-	pci_unmap_page(dev->pdev, dev_priv->gtt.scratch_page_dma,
+	set_pages_wb(dev_priv->gtt.scratch.page, 1);
+	pci_unmap_page(dev->pdev, dev_priv->gtt.scratch.addr,
 		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-	put_page(dev_priv->gtt.scratch_page);
-	__free_page(dev_priv->gtt.scratch_page);
+	put_page(dev_priv->gtt.scratch.page);
+	__free_page(dev_priv->gtt.scratch.page);
 }

 static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
@@ -849,34 +847,28 @@ int i915_gem_gtt_init(struct drm_device *dev)
 	int ret;

 	if (INTEL_INFO(dev)->gen <= 5) {
-		dev_priv->gtt.gtt_probe = i915_gmch_probe;
-		dev_priv->gtt.gtt_remove = i915_gmch_remove;
+		gtt->gtt_probe = i915_gmch_probe;
+		gtt->gtt_remove = i915_gmch_remove;
 	} else {
-		dev_priv->gtt.gtt_probe = gen6_gmch_probe;
-		dev_priv->gtt.gtt_remove = gen6_gmch_remove;
-		if (IS_HASWELL(dev)) {
-			dev_priv->gtt.pte_encode = hsw_pte_encode;
-		} else if (IS_VALLEYVIEW(dev)) {
-			dev_priv->gtt.pte_encode = byt_pte_encode;
-		} else {
-			dev_priv->gtt.pte_encode = gen6_pte_encode;
-		}
+		gtt->gtt_probe = gen6_gmch_probe;
+		gtt->gtt_remove = gen6_gmch_remove;
+		if (IS_HASWELL(dev))
+			gtt->pte_encode = hsw_pte_encode;
+		else if (IS_VALLEYVIEW(dev))
+			gtt->pte_encode = byt_pte_encode;
+		else
+			gtt->pte_encode = gen6_pte_encode;
 	}

-	ret = dev_priv->gtt.gtt_probe(dev, &dev_priv->gtt.total,
-				      &dev_priv->gtt.stolen_size,
-				      &gtt->mappable_base,
-				      &gtt->mappable_end);
+	ret = gtt->gtt_probe(dev, &gtt->total, &gtt->stolen_size,
+			     &gtt->mappable_base, &gtt->mappable_end);
 	if (ret)
 		return ret;

 	/* GMADR is the PCI mmio aperture into the global GTT. */
-	DRM_INFO("Memory usable by graphics device = %zdM\n",
-		 dev_priv->gtt.total >> 20);
-	DRM_DEBUG_DRIVER("GMADR size = %ldM\n",
-			 dev_priv->gtt.mappable_end >> 20);
-	DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n",
-			 dev_priv->gtt.stolen_size >> 20);
+	DRM_INFO("Memory usable by graphics device = %zdM\n", gtt->total >> 20);
+	DRM_DEBUG_DRIVER("GMADR size = %ldM\n", gtt->mappable_end >> 20);
+	DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20);

 	return 0;
 }
@@ -46,6 +46,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct pci_dev *pdev = dev_priv->bridge_dev;
+	struct resource *r;
 	u32 base;

 	/* On the machines I have tested the Graphics Base of Stolen Memory
@@ -88,6 +89,22 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 #endif
 	}

+	if (base == 0)
+		return 0;
+
+	/* Verify that nothing else uses this physical address. Stolen
+	 * memory should be reserved by the BIOS and hidden from the
+	 * kernel. So if the region is already marked as busy, something
+	 * is seriously wrong.
+	 */
+	r = devm_request_mem_region(dev->dev, base, dev_priv->gtt.stolen_size,
+				    "Graphics Stolen Memory");
+	if (r == NULL) {
+		DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
+			  base, base + (uint32_t)dev_priv->gtt.stolen_size);
+		base = 0;
+	}
+
 	return base;
 }
@@ -120,7 +137,7 @@ static int i915_setup_compression(struct drm_device *dev, int size)
 		if (!compressed_llb)
 			goto err_fb;

-		dev_priv->compressed_llb = compressed_llb;
+		dev_priv->fbc.compressed_llb = compressed_llb;

 		I915_WRITE(FBC_CFB_BASE,
 			   dev_priv->mm.stolen_base + compressed_fb->start);
@@ -128,8 +145,8 @@ static int i915_setup_compression(struct drm_device *dev, int size)
 			   dev_priv->mm.stolen_base + compressed_llb->start);
 	}

-	dev_priv->compressed_fb = compressed_fb;
-	dev_priv->cfb_size = size;
+	dev_priv->fbc.compressed_fb = compressed_fb;
+	dev_priv->fbc.size = size;

 	DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
 		      size);
@@ -150,7 +167,7 @@ int i915_gem_stolen_setup_compression(struct drm_device *dev, int size)
 	if (!drm_mm_initialized(&dev_priv->mm.stolen))
 		return -ENODEV;

-	if (size < dev_priv->cfb_size)
+	if (size < dev_priv->fbc.size)
 		return 0;

 	/* Release any current block */
@@ -163,16 +180,16 @@ void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;

-	if (dev_priv->cfb_size == 0)
+	if (dev_priv->fbc.size == 0)
 		return;

-	if (dev_priv->compressed_fb)
-		drm_mm_put_block(dev_priv->compressed_fb);
+	if (dev_priv->fbc.compressed_fb)
+		drm_mm_put_block(dev_priv->fbc.compressed_fb);

-	if (dev_priv->compressed_llb)
-		drm_mm_put_block(dev_priv->compressed_llb);
+	if (dev_priv->fbc.compressed_llb)
+		drm_mm_put_block(dev_priv->fbc.compressed_llb);

-	dev_priv->cfb_size = 0;
+	dev_priv->fbc.size = 0;
 }

 void i915_gem_cleanup_stolen(struct drm_device *dev)
@@ -201,6 +218,9 @@ int i915_gem_init_stolen(struct drm_device *dev)
 	if (IS_VALLEYVIEW(dev))
 		bios_reserved = 1024*1024; /* top 1M on VLV/BYT */

+	if (WARN_ON(bios_reserved > dev_priv->gtt.stolen_size))
+		return 0;
+
 	/* Basic memrange allocator for stolen space */
 	drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size -
 		    bios_reserved);
@@ -333,6 +353,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
 	struct drm_mm_node *stolen;
+	int ret;

 	if (!drm_mm_initialized(&dev_priv->mm.stolen))
 		return NULL;
@@ -347,11 +368,16 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
 	if (WARN_ON(size == 0))
 		return NULL;

-	stolen = drm_mm_create_block(&dev_priv->mm.stolen,
-				     stolen_offset, size,
-				     false);
-	if (stolen == NULL) {
+	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
+	if (!stolen)
+		return NULL;
+
+	stolen->start = stolen_offset;
+	stolen->size = size;
+	ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
+	if (ret) {
 		DRM_DEBUG_KMS("failed to allocate stolen space\n");
+		kfree(stolen);
 		return NULL;
 	}
@@ -363,7 +389,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
 	}

 	/* Some objects just need physical mem from stolen space */
-	if (gtt_offset == -1)
+	if (gtt_offset == I915_GTT_OFFSET_NONE)
 		return obj;

 	/* To simplify the initialisation sequence between KMS and GTT,
@@ -371,25 +397,27 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
 	 * setting up the GTT space. The actual reservation will occur
 	 * later.
 	 */
+	obj->gtt_space.start = gtt_offset;
+	obj->gtt_space.size = size;
 	if (drm_mm_initialized(&dev_priv->mm.gtt_space)) {
-		obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space,
-						     gtt_offset, size,
-						     false);
-		if (obj->gtt_space == NULL) {
+		ret = drm_mm_reserve_node(&dev_priv->mm.gtt_space,
+					  &obj->gtt_space);
+		if (ret) {
 			DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
-			drm_gem_object_unreference(&obj->base);
-			return NULL;
+			goto unref_out;
 		}
-	} else
-		obj->gtt_space = I915_GTT_RESERVED;
+	}

-	obj->gtt_offset = gtt_offset;
 	obj->has_global_gtt_mapping = 1;

 	list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
 	list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);

 	return obj;
+
+unref_out:
+	drm_gem_object_unreference(&obj->base);
+	return NULL;
 }

 void
...
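Both stolen-space call sites above now share one pattern: rather than asking the allocator to create a block at a fixed offset, the caller owns the `drm_mm_node`, fills in `start` and `size`, and has `drm_mm_reserve_node()` claim exactly that range. A condensed sketch of the pattern, with `mm`, `offset` and `size` standing in for the caller's values:

```c
/* Sketch of the new fixed-range reservation pattern; error handling
 * condensed from the hunks above. */
struct drm_mm_node *node;
int ret;

node = kzalloc(sizeof(*node), GFP_KERNEL);
if (!node)
	return ERR_PTR(-ENOMEM);

node->start = offset;			/* the exact range we must have */
node->size = size;
ret = drm_mm_reserve_node(mm, node);	/* fails if the hole is taken */
if (ret) {
	kfree(node);
	return ERR_PTR(ret);
}
```

The payoff is that a node embedded in a larger object (like `obj->gtt_space`) needs no separate allocation at all; only standalone ranges such as the stolen block still get kzalloc'd.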
@@ -268,18 +268,18 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
 		return true;

 	if (INTEL_INFO(obj->base.dev)->gen == 3) {
-		if (obj->gtt_offset & ~I915_FENCE_START_MASK)
+		if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK)
 			return false;
 	} else {
-		if (obj->gtt_offset & ~I830_FENCE_START_MASK)
+		if (i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK)
 			return false;
 	}

 	size = i915_gem_get_gtt_size(obj->base.dev, obj->base.size, tiling_mode);
-	if (obj->gtt_space->size != size)
+	if (i915_gem_obj_ggtt_size(obj) != size)
 		return false;

-	if (obj->gtt_offset & (size - 1))
+	if (i915_gem_obj_ggtt_offset(obj) & (size - 1))
 		return false;

 	return true;
@@ -359,8 +359,8 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 		 */
 		obj->map_and_fenceable =
-			obj->gtt_space == NULL ||
-			(obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end &&
+			!i915_gem_obj_ggtt_bound(obj) ||
+			(i915_gem_obj_ggtt_offset(obj) + obj->base.size <= dev_priv->gtt.mappable_end &&
 			 i915_gem_object_fence_ok(obj, args->tiling_mode));

 		/* Rebind if we need a change of alignment */
@@ -369,7 +369,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 			i915_gem_get_gtt_alignment(dev, obj->base.size,
 						   args->tiling_mode,
 						   false);
-		if (obj->gtt_offset & (unfenced_alignment - 1))
+		if (i915_gem_obj_ggtt_offset(obj) & (unfenced_alignment - 1))
 			ret = i915_gem_object_unbind(obj);
 	}
...
(This diff has been collapsed.)
(This diff has been collapsed.)
@@ -363,6 +363,7 @@
 #define PUNIT_REG_GPU_LFM			0xd3
 #define PUNIT_REG_GPU_FREQ_REQ			0xd4
 #define PUNIT_REG_GPU_FREQ_STS			0xd8
+#define   GENFREQSTATUS				(1<<0)
 #define PUNIT_REG_MEDIA_TURBO_FREQ_REQ		0xdc

 #define PUNIT_FUSE_BUS2				0xf6 /* bits 47:40 */
@@ -680,6 +681,7 @@
 #define  ERR_INT_FIFO_UNDERRUN_C	(1<<6)
 #define  ERR_INT_FIFO_UNDERRUN_B	(1<<3)
 #define  ERR_INT_FIFO_UNDERRUN_A	(1<<0)
+#define  ERR_INT_FIFO_UNDERRUN(pipe)	(1<<(pipe*3))

 #define FPGA_DBG		0x42300
 #define  FPGA_DBG_RM_NOCLAIM	(1<<31)
@@ -1125,7 +1127,8 @@
 #define _DPLL_B	(dev_priv->info->display_mmio_offset + 0x6018)
 #define DPLL(pipe) _PIPE(pipe, _DPLL_A, _DPLL_B)
 #define   DPLL_VCO_ENABLE		(1 << 31)
-#define   DPLL_DVO_HIGH_SPEED		(1 << 30)
+#define   DPLL_SDVO_HIGH_SPEED		(1 << 30)
+#define   DPLL_DVO_2X_MODE		(1 << 30)
 #define   DPLL_EXT_BUFFER_ENABLE_VLV	(1 << 30)
 #define   DPLL_SYNCLOCK_ENABLE		(1 << 29)
 #define   DPLL_REFA_CLK_ENABLE_VLV	(1 << 29)
@@ -3880,6 +3883,7 @@
 #define  SERR_INT_TRANS_C_FIFO_UNDERRUN	(1<<6)
 #define  SERR_INT_TRANS_B_FIFO_UNDERRUN	(1<<3)
 #define  SERR_INT_TRANS_A_FIFO_UNDERRUN	(1<<0)
+#define  SERR_INT_TRANS_FIFO_UNDERRUN(pipe)	(1<<(pipe*3))

 /* digital port hotplug */
 #define PCH_PORT_HOTPLUG	0xc4030	/* SHOTPLUG_CTL */
...
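The two new parameterized macros fold the per-pipe and per-transcoder bits into one expression: pipe A maps to bit 0, pipe B to bit 3, pipe C to bit 6, matching the individual `_A`/`_B`/`_C` definitions they sit next to. A hedged sketch of how an interrupt handler can use them; the loop shape follows the driver's `for_each_pipe()`, while `report_underrun` is a stand-in for the real fifo-underrun reporting path:

```c
/* Sketch: decoding per-pipe FIFO underruns from ERR_INT with the new
 * macro; bits 0, 3 and 6 correspond to pipes A, B and C. */
u32 err_int = I915_READ(GEN7_ERR_INT);
enum pipe pipe;

for_each_pipe(pipe) {
	if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
		report_underrun(dev, pipe);	/* hypothetical helper */
}
```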
@@ -409,6 +409,71 @@ static const struct attribute *gen6_attrs[] = {
 	NULL,
 };

+static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
+				struct bin_attribute *attr, char *buf,
+				loff_t off, size_t count)
+{
+	struct device *kdev = container_of(kobj, struct device, kobj);
+	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_device *dev = minor->dev;
+	struct i915_error_state_file_priv error_priv;
+	struct drm_i915_error_state_buf error_str;
+	ssize_t ret_count = 0;
+	int ret;
+
+	memset(&error_priv, 0, sizeof(error_priv));
+
+	ret = i915_error_state_buf_init(&error_str, count, off);
+	if (ret)
+		return ret;
+
+	error_priv.dev = dev;
+	i915_error_state_get(dev, &error_priv);
+
+	ret = i915_error_state_to_str(&error_str, &error_priv);
+	if (ret)
+		goto out;
+
+	ret_count = count < error_str.bytes ? count : error_str.bytes;
+
+	memcpy(buf, error_str.buf, ret_count);
+out:
+	i915_error_state_put(&error_priv);
+	i915_error_state_buf_release(&error_str);
+
+	return ret ?: ret_count;
+}
+
+static ssize_t error_state_write(struct file *file, struct kobject *kobj,
+				 struct bin_attribute *attr, char *buf,
+				 loff_t off, size_t count)
+{
+	struct device *kdev = container_of(kobj, struct device, kobj);
+	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_device *dev = minor->dev;
+	int ret;
+
+	DRM_DEBUG_DRIVER("Resetting error state\n");
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	i915_destroy_error_state(dev);
+	mutex_unlock(&dev->struct_mutex);
+
+	return count;
+}
+
+static struct bin_attribute error_state_attr = {
+	.attr.name = "error",
+	.attr.mode = S_IRUSR | S_IWUSR,
+	.size = 0,
+	.read = error_state_read,
+	.write = error_state_write,
+};
+
 void i915_setup_sysfs(struct drm_device *dev)
 {
 	int ret;
@@ -432,10 +497,16 @@ void i915_setup_sysfs(struct drm_device *dev)
 		if (ret)
 			DRM_ERROR("gen6 sysfs setup failed\n");
 	}
+
+	ret = sysfs_create_bin_file(&dev->primary->kdev.kobj,
+				    &error_state_attr);
+	if (ret)
+		DRM_ERROR("error_state sysfs setup failed\n");
 }

 void i915_teardown_sysfs(struct drm_device *dev)
 {
+	sysfs_remove_bin_file(&dev->primary->kdev.kobj, &error_state_attr);
 	sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs);
 	device_remove_bin_file(&dev->primary->kdev, &dpf_attrs);
 #ifdef CONFIG_PM
...
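With this, the error state becomes an official interface next to the debugfs file: reading the node dumps the last captured GPU error state, and any write clears it. A hedged userspace sketch; the `card0` path assumes the first DRM device, and the node is mode 0600, so the caller typically needs root:

```c
/* Userspace sketch: dump, then clear, the i915 error state via the
 * new sysfs node added in the hunk above. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/sys/class/drm/card0/error", O_RDWR);

	if (fd < 0)
		return 1;

	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);	/* the textual error dump */

	if (write(fd, "1", 1) < 0)		/* any write resets the state */
		perror("clear");

	close(fd);
	return 0;
}
```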
@@ -46,8 +46,8 @@ TRACE_EVENT(i915_gem_object_bind,

 	    TP_fast_assign(
 			   __entry->obj = obj;
-			   __entry->offset = obj->gtt_space->start;
-			   __entry->size = obj->gtt_space->size;
+			   __entry->offset = i915_gem_obj_ggtt_offset(obj);
+			   __entry->size = i915_gem_obj_ggtt_size(obj);
 			   __entry->mappable = mappable;
 			   ),

@@ -68,8 +68,8 @@ TRACE_EVENT(i915_gem_object_unbind,

 	    TP_fast_assign(
 			   __entry->obj = obj;
-			   __entry->offset = obj->gtt_space->start;
-			   __entry->size = obj->gtt_space->size;
+			   __entry->offset = i915_gem_obj_ggtt_offset(obj);
+			   __entry->size = i915_gem_obj_ggtt_size(obj);
 			   ),

 	    TP_printk("obj=%p, offset=%08x size=%x",
...
@@ -1360,6 +1360,13 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
 	}

 	pipe_config->adjusted_mode.flags |= flags;
+
+	if (dp_to_dig_port(intel_dp)->port == PORT_A) {
+		if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
+			pipe_config->port_clock = 162000;
+		else
+			pipe_config->port_clock = 270000;
+	}
 }

 static void intel_disable_dp(struct intel_encoder *encoder)
...
@@ -549,13 +549,6 @@ struct intel_unpin_work {
 	bool enable_stall_check;
 };

-struct intel_fbc_work {
-	struct delayed_work work;
-	struct drm_crtc *crtc;
-	struct drm_framebuffer *fb;
-	int interval;
-};
-
 int intel_pch_rawclk(struct drm_device *dev);

 int intel_connector_update_modes(struct drm_connector *connector,
@@ -747,6 +740,22 @@ extern int intel_overlay_attrs(struct drm_device *dev, void *data,
 extern void intel_fb_output_poll_changed(struct drm_device *dev);
 extern void intel_fb_restore_mode(struct drm_device *dev);

+struct intel_shared_dpll *
+intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
+
+void assert_shared_dpll(struct drm_i915_private *dev_priv,
+			struct intel_shared_dpll *pll,
+			bool state);
+#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
+#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
+void assert_pll(struct drm_i915_private *dev_priv,
+		enum pipe pipe, bool state);
+#define assert_pll_enabled(d, p) assert_pll(d, p, true)
+#define assert_pll_disabled(d, p) assert_pll(d, p, false)
+void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
+		       enum pipe pipe, bool state);
+#define assert_fdi_rx_pll_enabled(d, p) assert_fdi_rx_pll(d, p, true)
+#define assert_fdi_rx_pll_disabled(d, p) assert_fdi_rx_pll(d, p, false)
 extern void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
 			bool state);
 #define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
@@ -780,7 +789,6 @@ extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
 extern void intel_init_pm(struct drm_device *dev);
 /* FBC */
 extern bool intel_fbc_enabled(struct drm_device *dev);
-extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
 extern void intel_update_fbc(struct drm_device *dev);
 /* IPS */
 extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
...
@@ -283,7 +283,6 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
 	int pipe = intel_crtc->pipe;
 	u32 dvo_val;
 	u32 dvo_reg = intel_dvo->dev.dvo_reg, dvo_srcdim_reg;
-	int dpll_reg = DPLL(pipe);

 	switch (dvo_reg) {
 	case DVOA:
@@ -314,8 +313,6 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
 	if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
 		dvo_val |= DVO_VSYNC_ACTIVE_HIGH;

-	I915_WRITE(dpll_reg, I915_READ(dpll_reg) | DPLL_DVO_HIGH_SPEED);
-
 	/*I915_WRITE(DVOB_SRCDIM,
 	  (adjusted_mode->hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) |
 	  (adjusted_mode->VDisplay << DVO_SRCDIM_VERTICAL_SHIFT));*/
...
@@ -139,11 +139,11 @@ static int intelfb_create(struct drm_fb_helper *helper,
 	info->apertures->ranges[0].base = dev->mode_config.fb_base;
 	info->apertures->ranges[0].size = dev_priv->gtt.mappable_end;

-	info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset;
+	info->fix.smem_start = dev->mode_config.fb_base + i915_gem_obj_ggtt_offset(obj);
 	info->fix.smem_len = size;

 	info->screen_base =
-		ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset,
+		ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
 			   size);
 	if (!info->screen_base) {
 		ret = -ENOSPC;
@@ -166,9 +166,9 @@ static int intelfb_create(struct drm_fb_helper *helper,

 	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */

-	DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
+	DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08lx, bo %p\n",
 		      fb->width, fb->height,
-		      obj->gtt_offset, obj);
+		      i915_gem_obj_ggtt_offset(obj), obj);

 	mutex_unlock(&dev->struct_mutex);
...
@@ -115,17 +115,25 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
  * This is an exception to the general rule that mode_set doesn't turn
  * things on.
  */
-static void intel_pre_pll_enable_lvds(struct intel_encoder *encoder)
+static void intel_pre_enable_lvds(struct intel_encoder *encoder)
 {
 	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
 	struct drm_display_mode *fixed_mode =
 		lvds_encoder->attached_connector->base.panel.fixed_mode;
-	int pipe = intel_crtc->pipe;
+	int pipe = crtc->pipe;
 	u32 temp;

+	if (HAS_PCH_SPLIT(dev)) {
+		assert_fdi_rx_pll_disabled(dev_priv, pipe);
+		assert_shared_dpll_disabled(dev_priv,
+					    intel_crtc_to_shared_dpll(crtc));
+	} else {
+		assert_pll_disabled(dev_priv, pipe);
+	}
+
 	temp = I915_READ(lvds_encoder->reg);
 	temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;

@@ -142,7 +150,7 @@ static void intel_pre_pll_enable_lvds(struct intel_encoder *encoder)

 	/* set the corresponsding LVDS_BORDER bit */
 	temp &= ~LVDS_BORDER_ENABLE;
-	temp |= intel_crtc->config.gmch_pfit.lvds_border_bits;
+	temp |= crtc->config.gmch_pfit.lvds_border_bits;
 	/* Set the B0-B3 data pairs corresponding to whether we're going to
 	 * set the DPLLs for dual-channel mode or not.
 	 */
@@ -162,8 +170,7 @@ static void intel_pre_pll_enable_lvds(struct intel_encoder *encoder)
 	if (INTEL_INFO(dev)->gen == 4) {
 		/* Bspec wording suggests that LVDS port dithering only exists
 		 * for 18bpp panels. */
-		if (intel_crtc->config.dither &&
-		    intel_crtc->config.pipe_bpp == 18)
+		if (crtc->config.dither && crtc->config.pipe_bpp == 18)
 			temp |= LVDS_ENABLE_DITHER;
 		else
 			temp &= ~LVDS_ENABLE_DITHER;
@@ -955,7 +962,7 @@ void intel_lvds_init(struct drm_device *dev)
 			 DRM_MODE_ENCODER_LVDS);

 	intel_encoder->enable = intel_enable_lvds;
-	intel_encoder->pre_pll_enable = intel_pre_pll_enable_lvds;
+	intel_encoder->pre_enable = intel_pre_enable_lvds;
 	intel_encoder->compute_config = intel_lvds_compute_config;
 	intel_encoder->disable = intel_disable_lvds;
 	intel_encoder->get_hw_state = intel_lvds_get_hw_state;
...
@@ -196,7 +196,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
 		regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_obj->handle->vaddr;
 	else
 		regs = io_mapping_map_wc(dev_priv->gtt.mappable,
-					 overlay->reg_bo->gtt_offset);
+					 i915_gem_obj_ggtt_offset(overlay->reg_bo));

 	return regs;
 }
@@ -740,7 +740,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 	swidth = params->src_w;
 	swidthsw = calc_swidthsw(overlay->dev, params->offset_Y, tmp_width);
 	sheight = params->src_h;
-	iowrite32(new_bo->gtt_offset + params->offset_Y, &regs->OBUF_0Y);
+	iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_Y, &regs->OBUF_0Y);
 	ostride = params->stride_Y;

 	if (params->format & I915_OVERLAY_YUV_PLANAR) {
@@ -754,8 +754,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 				      params->src_w/uv_hscale);
 		swidthsw |= max_t(u32, tmp_U, tmp_V) << 16;
 		sheight |= (params->src_h/uv_vscale) << 16;
-		iowrite32(new_bo->gtt_offset + params->offset_U, &regs->OBUF_0U);
-		iowrite32(new_bo->gtt_offset + params->offset_V, &regs->OBUF_0V);
+		iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_U, &regs->OBUF_0U);
+		iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_V, &regs->OBUF_0V);
 		ostride |= params->stride_UV << 16;
 	}

@@ -1355,7 +1355,7 @@ void intel_setup_overlay(struct drm_device *dev)
 			DRM_ERROR("failed to pin overlay register bo\n");
 			goto out_free_bo;
 		}
-		overlay->flip_addr = reg_bo->gtt_offset;
+		overlay->flip_addr = i915_gem_obj_ggtt_offset(reg_bo);

 		ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
 		if (ret) {
@@ -1412,9 +1412,6 @@ void intel_cleanup_overlay(struct drm_device *dev)
 	kfree(dev_priv->overlay);
 }

-#ifdef CONFIG_DEBUG_FS
-#include <linux/seq_file.h>
-
 struct intel_overlay_error_state {
 	struct overlay_registers regs;
 	unsigned long base;
@@ -1435,7 +1432,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
 			overlay->reg_bo->phys_obj->handle->vaddr;
 	else
 		regs = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
-						overlay->reg_bo->gtt_offset);
+						i915_gem_obj_ggtt_offset(overlay->reg_bo));

 	return regs;
 }
@@ -1468,7 +1465,7 @@ intel_overlay_capture_error_state(struct drm_device *dev)
 	if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
 		error->base = (__force long)overlay->reg_bo->phys_obj->handle->vaddr;
 	else
-		error->base = overlay->reg_bo->gtt_offset;
+		error->base = i915_gem_obj_ggtt_offset(overlay->reg_bo);

 	regs = intel_overlay_map_regs_atomic(overlay);
 	if (!regs)
@@ -1537,4 +1534,3 @@ intel_overlay_print_error_state(struct drm_i915_error_state_buf *m,
 	P(UVSCALEV);

 #undef P
 }
-#endif
(This diff has been collapsed.)
@@ -440,14 +440,14 @@ static int init_ring_common(struct intel_ring_buffer *ring)
 	 * registers with the above sequence (the readback of the HEAD registers
 	 * also enforces ordering), otherwise the hw might lose the new ring
 	 * register values. */
-	I915_WRITE_START(ring, obj->gtt_offset);
+	I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
 	I915_WRITE_CTL(ring,
 			((ring->size - PAGE_SIZE) & RING_NR_PAGES)
 			| RING_VALID);

 	/* If the head is still not zero, the ring is dead */
 	if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
-		     I915_READ_START(ring) == obj->gtt_offset &&
+		     I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) &&
 		     (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
 		DRM_ERROR("%s initialization failed "
 				"ctl %08x head %08x tail %08x start %08x\n",
@@ -505,7 +505,7 @@ init_pipe_control(struct intel_ring_buffer *ring)
 	if (ret)
 		goto err_unref;

-	pc->gtt_offset = obj->gtt_offset;
+	pc->gtt_offset = i915_gem_obj_ggtt_offset(obj);
 	pc->cpu_page = kmap(sg_page(obj->pages->sgl));
 	if (pc->cpu_page == NULL) {
 		ret = -ENOMEM;
@@ -836,7 +836,7 @@ gen5_ring_get_irq(struct intel_ring_buffer *ring)
 		return false;

 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (ring->irq_refcount.gt++ == 0) {
+	if (ring->irq_refcount++ == 0) {
 		dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
 		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
 		POSTING_READ(GTIMR);
@@ -854,7 +854,7 @@ gen5_ring_put_irq(struct intel_ring_buffer *ring)
 	unsigned long flags;

 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--ring->irq_refcount.gt == 0) {
+	if (--ring->irq_refcount == 0) {
 		dev_priv->gt_irq_mask |= ring->irq_enable_mask;
 		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
 		POSTING_READ(GTIMR);
@@ -873,7 +873,7 @@ i9xx_ring_get_irq(struct intel_ring_buffer *ring)
 		return false;

 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (ring->irq_refcount.gt++ == 0) {
+	if (ring->irq_refcount++ == 0) {
 		dev_priv->irq_mask &= ~ring->irq_enable_mask;
 		I915_WRITE(IMR, dev_priv->irq_mask);
 		POSTING_READ(IMR);
@@ -891,7 +891,7 @@ i9xx_ring_put_irq(struct intel_ring_buffer *ring)
 	unsigned long flags;

 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--ring->irq_refcount.gt == 0) {
+	if (--ring->irq_refcount == 0) {
 		dev_priv->irq_mask |= ring->irq_enable_mask;
 		I915_WRITE(IMR, dev_priv->irq_mask);
 		POSTING_READ(IMR);
@@ -910,7 +910,7 @@ i8xx_ring_get_irq(struct intel_ring_buffer *ring)
 		return false;

 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (ring->irq_refcount.gt++ == 0) {
+	if (ring->irq_refcount++ == 0) {
 		dev_priv->irq_mask &= ~ring->irq_enable_mask;
 		I915_WRITE16(IMR, dev_priv->irq_mask);
 		POSTING_READ16(IMR);
@@ -928,7 +928,7 @@ i8xx_ring_put_irq(struct intel_ring_buffer *ring)
 	unsigned long flags;

 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--ring->irq_refcount.gt == 0) {
+	if (--ring->irq_refcount == 0) {
 		dev_priv->irq_mask |= ring->irq_enable_mask;
 		I915_WRITE16(IMR, dev_priv->irq_mask);
 		POSTING_READ16(IMR);
@@ -1021,7 +1021,7 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
 	gen6_gt_force_wake_get(dev_priv);

 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (ring->irq_refcount.gt++ == 0) {
+	if (ring->irq_refcount++ == 0) {
 		if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
 			I915_WRITE_IMR(ring,
 				       ~(ring->irq_enable_mask |
@@ -1045,7 +1045,7 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
 	unsigned long flags;

 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--ring->irq_refcount.gt == 0) {
+	if (--ring->irq_refcount == 0) {
 		if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
 			I915_WRITE_IMR(ring,
 				       ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
@@ -1070,14 +1070,14 @@ hsw_vebox_get_irq(struct intel_ring_buffer *ring)
 	if (!dev->irq_enabled)
 		return false;

-	spin_lock_irqsave(&dev_priv->rps.lock, flags);
-	if (ring->irq_refcount.pm++ == 0) {
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
+	if (ring->irq_refcount++ == 0) {
 		u32 pm_imr = I915_READ(GEN6_PMIMR);
 		I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
 		I915_WRITE(GEN6_PMIMR, pm_imr & ~ring->irq_enable_mask);
 		POSTING_READ(GEN6_PMIMR);
 	}
-	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

 	return true;
 }
@@ -1092,14 +1092,14 @@ hsw_vebox_put_irq(struct intel_ring_buffer *ring)
 	if (!dev->irq_enabled)
 		return;

-	spin_lock_irqsave(&dev_priv->rps.lock, flags);
-	if (--ring->irq_refcount.pm == 0) {
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
+	if (--ring->irq_refcount == 0) {
 		u32 pm_imr = I915_READ(GEN6_PMIMR);
 		I915_WRITE_IMR(ring, ~0);
 		I915_WRITE(GEN6_PMIMR, pm_imr | ring->irq_enable_mask);
 		POSTING_READ(GEN6_PMIMR);
 	}
-	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
 static int
@@ -1144,7 +1144,7 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
 		intel_ring_advance(ring);
 	} else {
 		struct drm_i915_gem_object *obj = ring->private;
-		u32 cs_offset = obj->gtt_offset;
+		u32 cs_offset = i915_gem_obj_ggtt_offset(obj);

 		if (len > I830_BATCH_LIMIT)
 			return -ENOSPC;
@@ -1229,7 +1229,7 @@ static int init_status_page(struct intel_ring_buffer *ring)
 		goto err_unref;
 	}

-	ring->status_page.gfx_addr = obj->gtt_offset;
+	ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
 	ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
 	if (ring->status_page.page_addr == NULL) {
 		ret = -ENOMEM;
@@ -1316,7 +1316,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 		goto err_unpin;

 	ring->virtual_start =
-		ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset,
+		ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
 			   ring->size);
 	if (ring->virtual_start == NULL) {
 		DRM_ERROR("Failed to map ringbuffer.\n");
@@ -2008,8 +2008,7 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
 	ring->add_request = gen6_add_request;
 	ring->get_seqno = gen6_ring_get_seqno;
 	ring->set_seqno = ring_set_seqno;
-	ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT |
-		PM_VEBOX_CS_ERROR_INTERRUPT;
+	ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
 	ring->irq_get = hsw_vebox_get_irq;
 	ring->irq_put = hsw_vebox_put_irq;
 	ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
...
(This diff has been collapsed.)
(This diff has been collapsed.)
(This diff has been collapsed.)