Commit e6994aee, authored by Chris Wilson, committed by Daniel Vetter

drm/i915: Export ability of changing cache levels to userspace

By selecting the cache level (essentially whether or not the CPU snoops
any updates to the bo, and on more recent machines whether it resides
inside the CPU's last-level-cache) a userspace driver is then able to
manage all of its memory within buffer objects, if it so desires. This
enables the userspace driver to accelerate uploads and, more importantly,
downloads from the GPU, and to be able to mix CPU and GPU rendering/activity
efficiently.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
[danvet: Added code comment about where we plan to stuff platform
specific cacheing control bits in the ioctl struct.]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Parent 42d6ab48
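As a usage illustration only (not part of this commit), a userspace driver could opt a buffer object into the LLC with the new SET_CACHEING ioctl roughly as sketched below. The DRM_IOCTL_I915_GEM_SET_CACHEING request number is assumed to be exported by the same uapi header that carries struct drm_i915_gem_cacheing, and fd/handle are assumed to be an open i915 device fd and an existing GEM handle.

/*
 * Hypothetical userspace sketch, not part of the patch: mark a buffer
 * object as LLC-cached so the CPU can read GPU-written data without
 * clflushing. Assumes the uapi header also defines
 * DRM_IOCTL_I915_GEM_SET_CACHEING for the new ioctl table entry.
 */
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int bo_set_llc_cached(int fd, uint32_t handle)
{
        struct drm_i915_gem_cacheing arg;

        memset(&arg, 0, sizeof(arg));
        arg.handle = handle;
        arg.cacheing = I915_CACHEING_CACHED;    /* snooped / LLC cached */

        if (ioctl(fd, DRM_IOCTL_I915_GEM_SET_CACHEING, &arg) != 0)
                return -errno;  /* e.g. an error on kernels without the ioctl */

        return 0;
}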
@@ -1835,6 +1835,8 @@ struct drm_ioctl_desc i915_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
 	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
 	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHEING, i915_gem_set_cacheing_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHEING, i915_gem_get_cacheing_ioctl, DRM_UNLOCKED),
 	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
 	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
 	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
......
@@ -848,9 +848,9 @@ enum hdmi_force_audio {
 };
 
 enum i915_cache_level {
-	I915_CACHE_NONE,
+	I915_CACHE_NONE = 0,
 	I915_CACHE_LLC,
-	I915_CACHE_LLC_MLC, /* gen6+ */
+	I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
 };
 
 struct drm_i915_gem_object {
@@ -1238,6 +1238,10 @@ int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
 			 struct drm_file *file_priv);
 int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
+int i915_gem_get_cacheing_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file);
+int i915_gem_set_cacheing_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file);
 int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
 			    struct drm_file *file_priv);
 int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
......
@@ -3015,6 +3015,68 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 	return 0;
 }
 
+int i915_gem_get_cacheing_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file)
+{
+	struct drm_i915_gem_cacheing *args = data;
+	struct drm_i915_gem_object *obj;
+	int ret;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
+
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+	if (&obj->base == NULL) {
+		ret = -ENOENT;
+		goto unlock;
+	}
+
+	args->cacheing = obj->cache_level != I915_CACHE_NONE;
+
+	drm_gem_object_unreference(&obj->base);
+unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
+int i915_gem_set_cacheing_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file)
+{
+	struct drm_i915_gem_cacheing *args = data;
+	struct drm_i915_gem_object *obj;
+	enum i915_cache_level level;
+	int ret;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
+
+	switch (args->cacheing) {
+	case I915_CACHEING_NONE:
+		level = I915_CACHE_NONE;
+		break;
+	case I915_CACHEING_CACHED:
+		level = I915_CACHE_LLC;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+	if (&obj->base == NULL) {
+		ret = -ENOENT;
+		goto unlock;
+	}
+
+	ret = i915_gem_object_set_cache_level(obj, level);
+
+	drm_gem_object_unreference(&obj->base);
+unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
 /*
  * Prepare buffer for display plane (scanout, cursors, etc).
  * Can be called from an uninterruptible phase (modesetting) and allows
......
@@ -716,10 +716,16 @@ struct drm_i915_gem_busy {
 #define I915_CACHEING_CACHED		1
 
 struct drm_i915_gem_cacheing {
-	/** Handle of the buffer to set/get the cacheing level of */
+	/**
+	 * Handle of the buffer to set/get the cacheing level of. */
 	__u32 handle;
 
-	/** Cacheing level to apply or return value */
+	/**
+	 * Cacheing level to apply or return value
+	 *
+	 * bits0-15 are for generic cacheing control (i.e. the above defined
+	 * values). bits16-31 are reserved for platform-specific variations
+	 * (e.g. l3$ caching on gen7). */
 	__u32 cacheing;
 };
......
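The comment added above splits the cacheing word: generic values occupy bits 0-15, while bits 16-31 are reserved for platform-specific variations. A purely hypothetical helper, only to illustrate that split (this patch defines no platform-specific bits, and these names are not part of the uapi):

/*
 * Illustration only; these names are hypothetical and not defined by
 * the patch. Generic cacheing control sits in bits 0-15, anything
 * platform specific (e.g. a future gen7 L3$ flag) in bits 16-31.
 */
#include <linux/types.h>

#define I915_CACHEING_GENERIC_MASK	0x0000ffffu
#define I915_CACHEING_PLATFORM_SHIFT	16

static inline __u32 i915_cacheing_pack(__u32 generic, __u32 platform)
{
        return (generic & I915_CACHEING_GENERIC_MASK) |
               (platform << I915_CACHEING_PLATFORM_SHIFT);
}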