diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 816cacddbe883a8fbc156cacf1a98e19a2aeeabb..7210642e29ad8880191ace04184e58a98bee2051 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1123,7 +1123,17 @@ static int i915_pm_freeze(struct device *dev)
 
 static int i915_pm_freeze_late(struct device *dev)
 {
-	return i915_pm_suspend_late(dev);
+	int ret;
+
+	ret = i915_pm_suspend_late(dev);
+	if (ret)
+		return ret;
+
+	ret = i915_gem_freeze_late(dev_to_i915(dev));
+	if (ret)
+		return ret;
+
+	return 0;
 }
 
 /* thaw: called after creating the hibernation image, but before turning off. */
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 1ba614193cc9088226de9c34197b4408764047a5..72f0b02a837205f6821ebb8fd946ec0a70a78612 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2976,6 +2976,8 @@ int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
 void i915_gem_load_init(struct drm_device *dev);
 void i915_gem_load_cleanup(struct drm_device *dev);
 void i915_gem_load_init_fences(struct drm_i915_private *dev_priv);
+int i915_gem_freeze_late(struct drm_i915_private *dev_priv);
+
 void *i915_gem_object_alloc(struct drm_device *dev);
 void i915_gem_object_free(struct drm_i915_gem_object *obj);
 void i915_gem_object_init(struct drm_i915_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index b1a33fe95ab3a84a6618ed7ba95eb52d140debed..710c2bef9ebfa791e4299f2a6fad533745e4a842 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -5054,6 +5054,34 @@ void i915_gem_load_cleanup(struct drm_device *dev)
 	kmem_cache_destroy(dev_priv->objects);
 }
 
+int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
+{
+	struct drm_i915_gem_object *obj;
+
+	/* Called just before we write the hibernation image.
+	 *
+	 * We need to update the domain tracking to reflect that the CPU
+	 * will be accessing all the pages to create and restore from the
+	 * hibernation, and so upon restoration those pages will be in the
+	 * CPU domain.
+	 *
+	 * To make sure the hibernation image contains the latest state,
+	 * we update that state just before writing out the image.
+	 */
+
+	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
+		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+	}
+
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+	}
+
+	return 0;
+}
+
 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
 {
 	struct drm_i915_file_private *file_priv = file->driver_priv;
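
Note (not part of the patch): for context, a minimal sketch of how the freeze_late callback modified above is reached. i915 registers its power-management callbacks in a struct dev_pm_ops (include/linux/pm.h), and the hibernation core invokes .freeze_late just before the image is written, which is why the GEM domain fixup is hooked in here rather than in the ordinary suspend path. The table contents below are an assumption based on the function names visible in the first hunk, abbreviated to the hibernation entries, not copied from the tree.

	/* Abbreviated sketch of the assumed dev_pm_ops wiring; other members elided. */
	static const struct dev_pm_ops i915_pm_ops = {
		/* ... suspend/resume and restore entries elided ... */
		.freeze      = i915_pm_freeze,       /* before the hibernation image is created */
		.freeze_late = i915_pm_freeze_late,  /* late stage; now also calls i915_gem_freeze_late() */
	};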