Commit 8038d2a9 authored by Dave Airlie

Merge tag 'vmwgfx-next-4.19-2' of git://people.freedesktop.org/~thomash/linux into drm-next

A series of cleanups / reorganizations and modesetting changes that
mostly target atomic state validation.

[airlied: conflicts with SPDX stuff in amdgpu tree]
Signed-off-by: Dave Airlie <airlied@redhat.com>

Link: https://patchwork.freedesktop.org/patch/msgid/1a88485e-e509-b00e-8485-19194f074115@vmware.com
 # SPDX-License-Identifier: GPL-2.0
 vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
-	vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \
+	vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_ttm_buffer.o \
 	vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
 	vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
-	vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \
+	vmwgfx_fence.o vmwgfx_bo.o vmwgfx_scrn.o vmwgfx_context.o \
 	vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o \
 	vmwgfx_cmdbuf_res.o vmwgfx_cmdbuf.o vmwgfx_stdu.o \
 	vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o \
......
@@ -673,8 +673,34 @@ SVGASignedPoint;
  * SVGA_CAP_GBOBJECTS --
  *    Enable guest-backed objects and surfaces.
  *
- * SVGA_CAP_CMD_BUFFERS_3 --
- *    Enable support for command buffers in a mob.
+ * SVGA_CAP_DX --
+ *    Enable support for DX commands, and command buffers in a mob.
+ *
+ * SVGA_CAP_HP_CMD_QUEUE --
+ *    Enable support for the high priority command queue, and the
+ *    ScreenCopy command.
+ *
+ * SVGA_CAP_NO_BB_RESTRICTION --
+ *    Allow ScreenTargets to be defined without regard to the 32-bpp
+ *    bounding-box memory restrictions. ie:
+ *
+ *    The summed memory usage of all screens (assuming they were defined as
+ *    32-bpp) must always be less than the value of the
+ *    SVGA_REG_MAX_PRIMARY_MEM register.
+ *
+ *    If this cap is not present, the 32-bpp bounding box around all screens
+ *    must additionally be under the value of the SVGA_REG_MAX_PRIMARY_MEM
+ *    register.
+ *
+ *    If the cap is present, the bounding box restriction is lifted (and only
+ *    the screen-sum limit applies).
+ *
+ *    (Note that this is a slight lie... there is still a sanity limit on any
+ *    dimension of the topology to be less than SVGA_SCREEN_ROOT_LIMIT, even
+ *    when SVGA_CAP_NO_BB_RESTRICTION is present, but that should be
+ *    large enough to express any possible topology without holes between
+ *    monitors.)
+ *
  */
 #define SVGA_CAP_NONE               0x00000000
@@ -700,6 +726,7 @@ SVGASignedPoint;
 #define SVGA_CAP_GBOBJECTS          0x08000000
 #define SVGA_CAP_DX                 0x10000000
 #define SVGA_CAP_HP_CMD_QUEUE       0x20000000
+#define SVGA_CAP_NO_BB_RESTRICTION  0x40000000
 #define SVGA_CAP_CMD_RESERVED       0x80000000
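
To make the two limits in the SVGA_CAP_NO_BB_RESTRICTION comment concrete, here is a small, self-contained C sketch (illustration only: the struct layout and helper name are invented and not part of the SVGA headers, and the bounding box is assumed to start at the root origin) of how a guest might validate a requested topology against SVGA_REG_MAX_PRIMARY_MEM:

#include <stdbool.h>
#include <stdint.h>

struct screen_def {		/* hypothetical topology entry */
	uint32_t x, y, w, h;
};

/* Both checks assume 32 bpp, i.e. 4 bytes per pixel. */
static bool topology_fits(const struct screen_def *s, int n,
			  uint64_t max_primary_mem, bool no_bb_restriction)
{
	uint64_t sum = 0, right = 0, bottom = 0;
	int i;

	for (i = 0; i < n; i++) {
		sum += (uint64_t)s[i].w * s[i].h * 4;
		if (s[i].x + s[i].w > right)
			right = s[i].x + s[i].w;
		if (s[i].y + s[i].h > bottom)
			bottom = s[i].y + s[i].h;
	}

	if (sum > max_primary_mem)	/* screen-sum limit: always applies */
		return false;

	/* 32-bpp bounding-box limit: lifted by SVGA_CAP_NO_BB_RESTRICTION */
	if (!no_bb_restriction && right * bottom * 4 > max_primary_mem)
		return false;

	return true;
}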
......
This diff is collapsed.
@@ -38,7 +38,7 @@ struct vmw_user_context {
 	struct vmw_cmdbuf_res_manager *man;
 	struct vmw_resource *cotables[SVGA_COTABLE_DX10_MAX];
 	spinlock_t cotable_lock;
-	struct vmw_dma_buffer *dx_query_mob;
+	struct vmw_buffer_object *dx_query_mob;
 };
 
 static void vmw_user_context_free(struct vmw_resource *res);
@@ -424,7 +424,7 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
 	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
 					  &fence, NULL);
-	vmw_fence_single_bo(bo, fence);
+	vmw_bo_fence_single(bo, fence);
 	if (likely(fence != NULL))
 		vmw_fence_obj_unreference(&fence);
@@ -648,7 +648,7 @@ static int vmw_dx_context_unbind(struct vmw_resource *res,
 	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
 					  &fence, NULL);
-	vmw_fence_single_bo(bo, fence);
+	vmw_bo_fence_single(bo, fence);
 	if (likely(fence != NULL))
 		vmw_fence_obj_unreference(&fence);
@@ -900,7 +900,7 @@ vmw_context_binding_state(struct vmw_resource *ctx)
  * specified in the parameter. 0 otherwise.
  */
 int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
-			      struct vmw_dma_buffer *mob)
+			      struct vmw_buffer_object *mob)
 {
 	struct vmw_user_context *uctx =
 		container_of(ctx_res, struct vmw_user_context, res);
@@ -908,7 +908,7 @@ int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
 	if (mob == NULL) {
 		if (uctx->dx_query_mob) {
 			uctx->dx_query_mob->dx_query_ctx = NULL;
-			vmw_dmabuf_unreference(&uctx->dx_query_mob);
+			vmw_bo_unreference(&uctx->dx_query_mob);
 			uctx->dx_query_mob = NULL;
 		}
@@ -922,7 +922,7 @@ int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
 	mob->dx_query_ctx = ctx_res;
 	if (!uctx->dx_query_mob)
-		uctx->dx_query_mob = vmw_dmabuf_reference(mob);
+		uctx->dx_query_mob = vmw_bo_reference(mob);
 	return 0;
 }
@@ -932,7 +932,7 @@ int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
  *
  * @ctx_res: The context resource
  */
-struct vmw_dma_buffer *
+struct vmw_buffer_object *
 vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res)
 {
 	struct vmw_user_context *uctx =
......
@@ -324,7 +324,7 @@ static int vmw_cotable_unbind(struct vmw_resource *res,
 	vmw_dx_context_scrub_cotables(vcotbl->ctx, readback);
 	mutex_unlock(&dev_priv->binding_mutex);
 	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
-	vmw_fence_single_bo(bo, fence);
+	vmw_bo_fence_single(bo, fence);
 	if (likely(fence != NULL))
 		vmw_fence_obj_unreference(&fence);
@@ -367,7 +367,7 @@ static int vmw_cotable_readback(struct vmw_resource *res)
 	}
 	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
-	vmw_fence_single_bo(&res->backup->base, fence);
+	vmw_bo_fence_single(&res->backup->base, fence);
 	vmw_fence_obj_unreference(&fence);
 	return 0;
@@ -390,7 +390,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
 	struct ttm_operation_ctx ctx = { false, false };
 	struct vmw_private *dev_priv = res->dev_priv;
 	struct vmw_cotable *vcotbl = vmw_cotable(res);
-	struct vmw_dma_buffer *buf, *old_buf = res->backup;
+	struct vmw_buffer_object *buf, *old_buf = res->backup;
 	struct ttm_buffer_object *bo, *old_bo = &res->backup->base;
 	size_t old_size = res->backup_size;
 	size_t old_size_read_back = vcotbl->size_read_back;
@@ -415,8 +415,8 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
 	if (!buf)
 		return -ENOMEM;
-	ret = vmw_dmabuf_init(dev_priv, buf, new_size, &vmw_mob_ne_placement,
-			      true, vmw_dmabuf_bo_free);
+	ret = vmw_bo_init(dev_priv, buf, new_size, &vmw_mob_ne_placement,
+			  true, vmw_bo_bo_free);
 	if (ret) {
 		DRM_ERROR("Failed initializing new cotable MOB.\n");
 		return ret;
@@ -482,7 +482,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
 	/* Let go of the old mob. */
 	list_del(&res->mob_head);
 	list_add_tail(&res->mob_head, &buf->res_list);
-	vmw_dmabuf_unreference(&old_buf);
+	vmw_bo_unreference(&old_buf);
 	res->id = vcotbl->type;
 	return 0;
@@ -491,7 +491,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
 	ttm_bo_kunmap(&old_map);
out_wait:
 	ttm_bo_unreserve(bo);
-	vmw_dmabuf_unreference(&buf);
+	vmw_bo_unreference(&buf);
 	return ret;
 }
......
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
* Copyright 2011-2015 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include <drm/ttm/ttm_placement.h>
#include <drm/drmP.h>
#include "vmwgfx_drv.h"

/**
 * vmw_dmabuf_pin_in_placement - Validate a buffer to placement.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to move.
 * @placement:  The placement to pin it.
 * @interruptible:  Use interruptible wait.
 *
 * Returns
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_dmabuf_pin_in_placement(struct vmw_private *dev_priv,
				struct vmw_dma_buffer *buf,
				struct ttm_placement *placement,
				bool interruptible)
{
	struct ttm_operation_ctx ctx = {interruptible, false };
	struct ttm_buffer_object *bo = &buf->base;
	int ret;
	uint32_t new_flags;

	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
	if (unlikely(ret != 0))
		return ret;

	vmw_execbuf_release_pinned_bo(dev_priv);

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	if (buf->pin_count > 0)
		ret = ttm_bo_mem_compat(placement, &bo->mem,
					&new_flags) == true ? 0 : -EINVAL;
	else
		ret = ttm_bo_validate(bo, placement, &ctx);

	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);

err:
	ttm_write_unlock(&dev_priv->reservation_sem);
	return ret;
}

/**
 * vmw_dmabuf_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to move.
 * @interruptible:  Use interruptible wait.
 *
 * Returns
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
				  struct vmw_dma_buffer *buf,
				  bool interruptible)
{
	struct ttm_operation_ctx ctx = {interruptible, false };
	struct ttm_buffer_object *bo = &buf->base;
	int ret;
	uint32_t new_flags;

	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
	if (unlikely(ret != 0))
		return ret;

	vmw_execbuf_release_pinned_bo(dev_priv);

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	if (buf->pin_count > 0) {
		ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem,
					&new_flags) == true ? 0 : -EINVAL;
		goto out_unreserve;
	}

	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
	if (likely(ret == 0) || ret == -ERESTARTSYS)
		goto out_unreserve;

	ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);

out_unreserve:
	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);
err:
	ttm_write_unlock(&dev_priv->reservation_sem);
	return ret;
}

/**
 * vmw_dmabuf_pin_in_vram - Move a buffer to vram.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to move.
 * @interruptible:  Use interruptible wait.
 *
 * Returns
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
			   struct vmw_dma_buffer *buf,
			   bool interruptible)
{
	return vmw_dmabuf_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
					   interruptible);
}

/**
 * vmw_dmabuf_pin_in_start_of_vram - Move a buffer to start of vram.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to pin.
 * @interruptible:  Use interruptible wait.
 *
 * Returns
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *dev_priv,
				    struct vmw_dma_buffer *buf,
				    bool interruptible)
{
	struct ttm_operation_ctx ctx = {interruptible, false };
	struct ttm_buffer_object *bo = &buf->base;
	struct ttm_placement placement;
	struct ttm_place place;
	int ret = 0;
	uint32_t new_flags;

	place = vmw_vram_placement.placement[0];
	place.lpfn = bo->num_pages;
	placement.num_placement = 1;
	placement.placement = &place;
	placement.num_busy_placement = 1;
	placement.busy_placement = &place;

	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
	if (unlikely(ret != 0))
		return ret;

	vmw_execbuf_release_pinned_bo(dev_priv);
	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err_unlock;

	/*
	 * Is this buffer already in vram but not at the start of it?
	 * In that case, evict it first because TTM isn't good at handling
	 * that situation.
	 */
	if (bo->mem.mem_type == TTM_PL_VRAM &&
	    bo->mem.start < bo->num_pages &&
	    bo->mem.start > 0 &&
	    buf->pin_count == 0) {
		ctx.interruptible = false;
		(void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
	}

	if (buf->pin_count > 0)
		ret = ttm_bo_mem_compat(&placement, &bo->mem,
					&new_flags) == true ? 0 : -EINVAL;
	else
		ret = ttm_bo_validate(bo, &placement, &ctx);

	/* For some reason we didn't end up at the start of vram */
	WARN_ON(ret == 0 && bo->offset != 0);
	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);
err_unlock:
	ttm_write_unlock(&dev_priv->reservation_sem);

	return ret;
}

/**
 * vmw_dmabuf_unpin - Unpin the given buffer; does not move the buffer.
 *
 * This function takes the reservation_sem in read mode.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to unpin.
 * @interruptible:  Use interruptible wait.
 *
 * Returns
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_dmabuf_unpin(struct vmw_private *dev_priv,
		     struct vmw_dma_buffer *buf,
		     bool interruptible)
{
	struct ttm_buffer_object *bo = &buf->base;
	int ret;

	ret = ttm_read_lock(&dev_priv->reservation_sem, interruptible);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	vmw_bo_pin_reserved(buf, false);

	ttm_bo_unreserve(bo);

err:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}

/**
 * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
 * of a buffer.
 *
 * @bo:  Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
 * @ptr:  SVGAGuestPtr returning the result.
 */
void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
			  SVGAGuestPtr *ptr)
{
	if (bo->mem.mem_type == TTM_PL_VRAM) {
		ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
		ptr->offset = bo->offset;
	} else {
		ptr->gmrId = bo->mem.start;
		ptr->offset = 0;
	}
}
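
As a usage illustration: the guest pointer produced here is what gets embedded in SVGA commands that reference guest memory. The sketch below is modeled loosely on the driver's GMRFB setup path; the helper name is made up and the exact command layout should be treated as an assumption rather than a verbatim copy.

/* Sketch: define the GMRFB to scan out of a pinned 32-bpp buffer.
 * Assumes the bo is pinned, so its placement (and thus the guest
 * pointer) cannot change while the command is in flight. */
static int sketch_define_gmrfb(struct vmw_private *dev_priv,
			       struct vmw_dma_buffer *vbo,
			       uint32_t bytes_per_line)
{
	struct {
		uint32_t header;
		SVGAFifoCmdDefineGMRFB body;
	} *cmd;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (!cmd)
		return -ENOMEM;

	cmd->header = SVGA_CMD_DEFINE_GMRFB;
	cmd->body.format.bitsPerPixel = 32;
	cmd->body.format.colorDepth = 24;
	cmd->body.format.reserved = 0;
	cmd->body.bytesPerLine = bytes_per_line;
	vmw_bo_get_guest_ptr(&vbo->base, &cmd->body.ptr);
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}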

/**
 * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
 *
 * @vbo: The buffer object. Must be reserved.
 * @pin: Whether to pin or unpin.
 *
 */
void vmw_bo_pin_reserved(struct vmw_dma_buffer *vbo, bool pin)
{
	struct ttm_operation_ctx ctx = { false, true };
	struct ttm_place pl;
	struct ttm_placement placement;
	struct ttm_buffer_object *bo = &vbo->base;
	uint32_t old_mem_type = bo->mem.mem_type;
	int ret;

	lockdep_assert_held(&bo->resv->lock.base);

	if (pin) {
		if (vbo->pin_count++ > 0)
			return;
	} else {
		WARN_ON(vbo->pin_count <= 0);
		if (--vbo->pin_count > 0)
			return;
	}

	pl.fpfn = 0;
	pl.lpfn = 0;
	pl.flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB
		| TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
	if (pin)
		pl.flags |= TTM_PL_FLAG_NO_EVICT;

	memset(&placement, 0, sizeof(placement));
	placement.num_placement = 1;
	placement.placement = &pl;

	ret = ttm_bo_validate(bo, &placement, &ctx);

	BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
}
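
For context, a minimal sketch of the calling convention (the helper name is made up; the reserve/pin/unreserve bracket mirrors what the vmw_dmabuf_pin_in_* helpers above do around their validate calls):

/* Sketch: pin a buffer wherever it currently resides. The bo must be
 * reserved across the call, which ttm_bo_reserve() provides here. */
static int sketch_pin_in_place(struct vmw_dma_buffer *vbo, bool interruptible)
{
	int ret;

	ret = ttm_bo_reserve(&vbo->base, interruptible, false, NULL);
	if (unlikely(ret != 0))
		return ret;

	vmw_bo_pin_reserved(vbo, true);	/* bumps pin_count, sets NO_EVICT */
	ttm_bo_unreserve(&vbo->base);

	return 0;
}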

/*
 * vmw_dma_buffer_unmap - Tear down a cached buffer object map.
 *
 * @vbo: The buffer object whose map we are tearing down.
 *
 * This function tears down a cached map set up using
 * vmw_dma_buffer_map_and_cache().
 */
void vmw_dma_buffer_unmap(struct vmw_dma_buffer *vbo)
{
	if (vbo->map.bo == NULL)
		return;

	ttm_bo_kunmap(&vbo->map);
}

/*
 * vmw_dma_buffer_map_and_cache - Map a buffer object and cache the map
 *
 * @vbo: The buffer object to map
 * Return: A kernel virtual address or NULL if mapping failed.
 *
 * This function maps a buffer object into the kernel address space, or
 * returns the virtual kernel address of an already existing map. The virtual
 * address remains valid as long as the buffer object is pinned or reserved.
 * The cached map is torn down on either
 * 1) Buffer object move
 * 2) Buffer object swapout
 * 3) Buffer object destruction
 *
 */
void *vmw_dma_buffer_map_and_cache(struct vmw_dma_buffer *vbo)
{
	struct ttm_buffer_object *bo = &vbo->base;
	bool not_used;
	void *virtual;
	int ret;

	virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
	if (virtual)
		return virtual;

	ret = ttm_bo_kmap(bo, 0, bo->num_pages, &vbo->map);
	if (ret)
		DRM_ERROR("Buffer object map failed: %d.\n", ret);

	return ttm_kmap_obj_virtual(&vbo->map, &not_used);
}
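
A short usage sketch (hypothetical helper; the reserve bracket satisfies the "pinned or reserved" requirement stated above, and no explicit unmap is needed because the cached map is torn down on move, swapout, or destruction):

/* Sketch: write CPU data into a buffer object through the cached map. */
static int sketch_copy_to_bo(struct vmw_dma_buffer *vbo,
			     const void *src, size_t len)
{
	void *virtual;
	int ret;

	ret = ttm_bo_reserve(&vbo->base, false, false, NULL);
	if (unlikely(ret != 0))
		return ret;

	virtual = vmw_dma_buffer_map_and_cache(vbo);
	if (!virtual) {
		ttm_bo_unreserve(&vbo->base);
		return -ENOMEM;
	}

	memcpy(virtual, src, len);
	ttm_bo_unreserve(&vbo->base);

	return 0;
}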
@@ -153,9 +153,9 @@
 static const struct drm_ioctl_desc vmw_ioctls[] = {
 	VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
 		      DRM_AUTH | DRM_RENDER_ALLOW),
-	VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
+	VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_bo_alloc_ioctl,
 		      DRM_AUTH | DRM_RENDER_ALLOW),
-	VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
+	VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_bo_unref_ioctl,
 		      DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
 		      vmw_kms_cursor_bypass_ioctl,
@@ -219,7 +219,7 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
 		      vmw_gb_surface_reference_ioctl,
 		      DRM_AUTH | DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_SYNCCPU,
-		      vmw_user_dmabuf_synccpu_ioctl,
+		      vmw_user_bo_synccpu_ioctl,
 		      DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
 		      vmw_extended_context_define_ioctl,
@@ -321,7 +321,7 @@ static void vmw_print_capabilities(uint32_t capabilities)
 static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
 {
 	int ret;
-	struct vmw_dma_buffer *vbo;
+	struct vmw_buffer_object *vbo;
 	struct ttm_bo_kmap_obj map;
 	volatile SVGA3dQueryResult *result;
 	bool dummy;
@@ -335,9 +335,9 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
 	if (!vbo)
 		return -ENOMEM;
-	ret = vmw_dmabuf_init(dev_priv, vbo, PAGE_SIZE,
-			      &vmw_sys_ne_placement, false,
-			      &vmw_dmabuf_bo_free);
+	ret = vmw_bo_init(dev_priv, vbo, PAGE_SIZE,
+			  &vmw_sys_ne_placement, false,
+			  &vmw_bo_bo_free);
 	if (unlikely(ret != 0))
 		return ret;
@@ -358,7 +358,7 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Dummy query buffer map failed.\n");
-		vmw_dmabuf_unreference(&vbo);
+		vmw_bo_unreference(&vbo);
 	} else
 		dev_priv->dummy_query_bo = vbo;
@@ -460,7 +460,7 @@ static void vmw_release_device_early(struct vmw_private *dev_priv)
 	BUG_ON(dev_priv->pinned_bo != NULL);
-	vmw_dmabuf_unreference(&dev_priv->dummy_query_bo);
+	vmw_bo_unreference(&dev_priv->dummy_query_bo);
 	if (dev_priv->cman)
 		vmw_cmdbuf_remove_pool(dev_priv->cman);
@@ -644,6 +644,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	mutex_init(&dev_priv->cmdbuf_mutex);
 	mutex_init(&dev_priv->release_mutex);
 	mutex_init(&dev_priv->binding_mutex);
+	mutex_init(&dev_priv->requested_layout_mutex);
 	mutex_init(&dev_priv->global_kms_state_mutex);
 	rwlock_init(&dev_priv->resource_lock);
 	ttm_lock_init(&dev_priv->reservation_sem);
......
@@ -86,7 +86,7 @@ struct vmw_fpriv {
 	bool gb_aware;
 };
-struct vmw_dma_buffer {
+struct vmw_buffer_object {
 	struct ttm_buffer_object base;
 	struct list_head res_list;
 	s32 pin_count;
@@ -120,7 +120,7 @@ struct vmw_resource {
 	unsigned long backup_size;
 	bool res_dirty; /* Protected by backup buffer reserved */
 	bool backup_dirty; /* Protected by backup buffer reserved */
-	struct vmw_dma_buffer *backup;
+	struct vmw_buffer_object *backup;
 	unsigned long backup_offset;
 	unsigned long pin_count; /* Protected by resource reserved */
 	const struct vmw_res_func *func;
@@ -304,7 +304,7 @@ struct vmw_sw_context{
 	uint32_t cmd_bounce_size;
 	struct list_head resource_list;
 	struct list_head ctx_resource_list; /* For contexts and cotables */
-	struct vmw_dma_buffer *cur_query_bo;
+	struct vmw_buffer_object *cur_query_bo;
 	struct list_head res_relocations;
 	uint32_t *buf_start;
 	struct vmw_res_cache_entry res_cache[vmw_res_max];
@@ -315,7 +315,7 @@ struct vmw_sw_context{
 	bool staged_bindings_inuse;
 	struct list_head staged_cmd_res;
 	struct vmw_resource_val_node *dx_ctx_node;
-	struct vmw_dma_buffer *dx_query_mob;
+	struct vmw_buffer_object *dx_query_mob;
 	struct vmw_resource *dx_query_ctx;
 	struct vmw_cmdbuf_res_manager *man;
 };
@@ -411,6 +411,15 @@ struct vmw_private {
 	uint32_t num_displays;
+	/*
+	 * Currently requested_layout_mutex is used to protect the gui
+	 * positionig state in display unit. With that use case currently this
+	 * mutex is only taken during layout ioctl and atomic check_modeset.
+	 * Other display unit state can be protected with this mutex but that
+	 * needs careful consideration.
+	 */
+	struct mutex requested_layout_mutex;
+
 	/*
 	 * Framebuffer info.
 	 */
@@ -513,8 +522,8 @@ struct vmw_private {
 	 * are protected by the cmdbuf mutex.
 	 */
-	struct vmw_dma_buffer *dummy_query_bo;
-	struct vmw_dma_buffer *pinned_bo;
+	struct vmw_buffer_object *dummy_query_bo;
+	struct vmw_buffer_object *pinned_bo;
 	uint32_t query_cid;
 	uint32_t query_cid_valid;
 	bool dummy_query_bo_pinned;
@@ -623,43 +632,13 @@ extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
 				  struct ttm_object_file *tfile,
 				  uint32_t handle,
 				  struct vmw_surface **out_surf,
-				  struct vmw_dma_buffer **out_buf);
+				  struct vmw_buffer_object **out_buf);
 extern int vmw_user_resource_lookup_handle(
 	struct vmw_private *dev_priv,
 	struct ttm_object_file *tfile,
 	uint32_t handle,
 	const struct vmw_user_resource_conv *converter,
 	struct vmw_resource **p_res);
-extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo);
-extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
-			   struct vmw_dma_buffer *vmw_bo,
-			   size_t size, struct ttm_placement *placement,
-			   bool interuptable,
-			   void (*bo_free) (struct ttm_buffer_object *bo));
-extern int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
-					 struct ttm_object_file *tfile);
-extern int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
-				 struct ttm_object_file *tfile,
-				 uint32_t size,
-				 bool shareable,
-				 uint32_t *handle,
-				 struct vmw_dma_buffer **p_dma_buf,
-				 struct ttm_base_object **p_base);
-extern int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
-				     struct vmw_dma_buffer *dma_buf,
-				     uint32_t *handle);
-extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
-				  struct drm_file *file_priv);
-extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
-				  struct drm_file *file_priv);
-extern int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
-					 struct drm_file *file_priv);
-extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
-					 uint32_t cur_validate_node);
-extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
-extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
-				  uint32_t id, struct vmw_dma_buffer **out,
-				  struct ttm_base_object **base);
 extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
 				  struct drm_file *file_priv);
 extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
@@ -670,43 +649,70 @@ extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
 				  struct vmw_resource **out);
 extern void vmw_resource_unreserve(struct vmw_resource *res,
 				   bool switch_backup,
-				   struct vmw_dma_buffer *new_backup,
+				   struct vmw_buffer_object *new_backup,
 				   unsigned long new_backup_offset);
-extern void vmw_resource_move_notify(struct ttm_buffer_object *bo,
-				     struct ttm_mem_reg *mem);
 extern void vmw_query_move_notify(struct ttm_buffer_object *bo,
 				  struct ttm_mem_reg *mem);
-extern void vmw_resource_swap_notify(struct ttm_buffer_object *bo);
-extern int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob);
-extern void vmw_fence_single_bo(struct ttm_buffer_object *bo,
-				struct vmw_fence_obj *fence);
+extern int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob);
 extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
+extern void vmw_resource_unbind_list(struct vmw_buffer_object *vbo);
 /**
- * DMA buffer helper routines - vmwgfx_dmabuf.c
+ * Buffer object helper functions - vmwgfx_bo.c
  */
-extern int vmw_dmabuf_pin_in_placement(struct vmw_private *vmw_priv,
-				       struct vmw_dma_buffer *bo,
-				       struct ttm_placement *placement,
-				       bool interruptible);
-extern int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
-				  struct vmw_dma_buffer *buf,
-				  bool interruptible);
-extern int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
-					 struct vmw_dma_buffer *buf,
-					 bool interruptible);
-extern int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *vmw_priv,
-					   struct vmw_dma_buffer *bo,
-					   bool interruptible);
-extern int vmw_dmabuf_unpin(struct vmw_private *vmw_priv,
-			    struct vmw_dma_buffer *bo,
-			    bool interruptible);
+extern int vmw_bo_pin_in_placement(struct vmw_private *vmw_priv,
+				   struct vmw_buffer_object *bo,
+				   struct ttm_placement *placement,
+				   bool interruptible);
+extern int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
+			      struct vmw_buffer_object *buf,
+			      bool interruptible);
+extern int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
+				     struct vmw_buffer_object *buf,
+				     bool interruptible);
+extern int vmw_bo_pin_in_start_of_vram(struct vmw_private *vmw_priv,
+				       struct vmw_buffer_object *bo,
+				       bool interruptible);
+extern int vmw_bo_unpin(struct vmw_private *vmw_priv,
+			struct vmw_buffer_object *bo,
+			bool interruptible);
 extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
 				 SVGAGuestPtr *ptr);
-extern void vmw_bo_pin_reserved(struct vmw_dma_buffer *bo, bool pin);
-extern void *vmw_dma_buffer_map_and_cache(struct vmw_dma_buffer *vbo);
-extern void vmw_dma_buffer_unmap(struct vmw_dma_buffer *vbo);
+extern void vmw_bo_pin_reserved(struct vmw_buffer_object *bo, bool pin);
+extern void vmw_bo_bo_free(struct ttm_buffer_object *bo);
+extern int vmw_bo_init(struct vmw_private *dev_priv,
+		       struct vmw_buffer_object *vmw_bo,
+		       size_t size, struct ttm_placement *placement,
+		       bool interuptable,
+		       void (*bo_free)(struct ttm_buffer_object *bo));
+extern int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
+				     struct ttm_object_file *tfile);
+extern int vmw_user_bo_alloc(struct vmw_private *dev_priv,
+			     struct ttm_object_file *tfile,
+			     uint32_t size,
+			     bool shareable,
+			     uint32_t *handle,
+			     struct vmw_buffer_object **p_dma_buf,
+			     struct ttm_base_object **p_base);
+extern int vmw_user_bo_reference(struct ttm_object_file *tfile,
+				 struct vmw_buffer_object *dma_buf,
+				 uint32_t *handle);
+extern int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv);
+extern int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv);
+extern int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
+				     struct drm_file *file_priv);
+extern int vmw_user_bo_lookup(struct ttm_object_file *tfile,
+			      uint32_t id, struct vmw_buffer_object **out,
			      struct ttm_base_object **base);
+extern void vmw_bo_fence_single(struct ttm_buffer_object *bo,
+				struct vmw_fence_obj *fence);
+extern void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo);
+extern void vmw_bo_unmap(struct vmw_buffer_object *vbo);
+extern void vmw_bo_move_notify(struct ttm_buffer_object *bo,
+			       struct ttm_mem_reg *mem);
+extern void vmw_bo_swap_notify(struct ttm_buffer_object *bo);
 /**
  * Misc Ioctl functionality - vmwgfx_ioctl.c
@@ -758,7 +764,7 @@ extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
 extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
 /**
- * TTM buffer object driver - vmwgfx_buffer.c
+ * TTM buffer object driver - vmwgfx_ttm_buffer.c
 */
 extern const size_t vmw_tt_size;
@@ -1041,8 +1047,8 @@ vmw_context_binding_state(struct vmw_resource *ctx);
 extern void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
 					  bool readback);
 extern int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
-				     struct vmw_dma_buffer *mob);
-extern struct vmw_dma_buffer *
+				     struct vmw_buffer_object *mob);
+extern struct vmw_buffer_object *
 vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res);
@@ -1224,6 +1230,11 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
 		    u32 w, u32 h,
 		    struct vmw_diff_cpy *diff);
+/* Host messaging -vmwgfx_msg.c: */
+int vmw_host_get_guestinfo(const char *guest_info_param,
+			   char *buffer, size_t *length);
+int vmw_host_log(const char *log);
+
 /**
  * Inline helper functions
 */
@@ -1243,9 +1254,9 @@ static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
 	return srf;
 }
-static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf)
+static inline void vmw_bo_unreference(struct vmw_buffer_object **buf)
 {
-	struct vmw_dma_buffer *tmp_buf = *buf;
+	struct vmw_buffer_object *tmp_buf = *buf;
 	*buf = NULL;
 	if (tmp_buf != NULL) {
@@ -1255,7 +1266,8 @@ static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf)
 	}
 }
-static inline struct vmw_dma_buffer *vmw_dmabuf_reference(struct vmw_dma_buffer *buf)
+static inline struct vmw_buffer_object *
+vmw_bo_reference(struct vmw_buffer_object *buf)
 {
 	if (ttm_bo_reference(&buf->base))
 		return buf;
@@ -1302,10 +1314,4 @@ static inline void vmw_mmio_write(u32 value, u32 *addr)
 {
 	WRITE_ONCE(*addr, value);
 }
-/**
- * Add vmw_msg module function
- */
-extern int vmw_host_log(const char *log);
 #endif
@@ -92,7 +92,7 @@ struct vmw_resource_val_node {
 	struct list_head head;
 	struct drm_hash_item hash;
 	struct vmw_resource *res;
-	struct vmw_dma_buffer *new_backup;
+	struct vmw_buffer_object *new_backup;
 	struct vmw_ctx_binding_state *staged_bindings;
 	unsigned long new_backup_offset;
 	u32 first_usage : 1;
@@ -126,9 +126,9 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
 static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
 				 struct vmw_sw_context *sw_context,
 				 SVGAMobId *id,
-				 struct vmw_dma_buffer **vmw_bo_p);
+				 struct vmw_buffer_object **vmw_bo_p);
 static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
-				   struct vmw_dma_buffer *vbo,
+				   struct vmw_buffer_object *vbo,
 				   bool validate_as_mob,
 				   uint32_t *p_val_node);
 /**
@@ -185,7 +185,7 @@ static void vmw_resources_unreserve(struct vmw_sw_context *sw_context,
 		}
 		vmw_resource_unreserve(res, switch_backup, val->new_backup,
 				       val->new_backup_offset);
-		vmw_dmabuf_unreference(&val->new_backup);
+		vmw_bo_unreference(&val->new_backup);
 	}
 }
@@ -423,7 +423,7 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
 	}
 	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
-		struct vmw_dma_buffer *dx_query_mob;
+		struct vmw_buffer_object *dx_query_mob;
 		dx_query_mob = vmw_context_get_dx_query_mob(ctx);
 		if (dx_query_mob)
@@ -544,7 +544,7 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv,
 * submission is reached.
 */
 static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
-				   struct vmw_dma_buffer *vbo,
+				   struct vmw_buffer_object *vbo,
 				   bool validate_as_mob,
 				   uint32_t *p_val_node)
 {
@@ -616,7 +616,7 @@ static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
 			return ret;
 		if (res->backup) {
-			struct vmw_dma_buffer *vbo = res->backup;
+			struct vmw_buffer_object *vbo = res->backup;
 			ret = vmw_bo_to_validate_list
 				(sw_context, vbo,
@@ -628,7 +628,7 @@ static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
 	}
 	if (sw_context->dx_query_mob) {
-		struct vmw_dma_buffer *expected_dx_query_mob;
+		struct vmw_buffer_object *expected_dx_query_mob;
 		expected_dx_query_mob =
 			vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
@@ -657,7 +657,7 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)
 	list_for_each_entry(val, &sw_context->resource_list, head) {
 		struct vmw_resource *res = val->res;
-		struct vmw_dma_buffer *backup = res->backup;
+		struct vmw_buffer_object *backup = res->backup;
 		ret = vmw_resource_validate(res);
 		if (unlikely(ret != 0)) {
@@ -668,7 +668,7 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)
 		/* Check if the resource switched backup buffer */
 		if (backup && res->backup && (backup != res->backup)) {
-			struct vmw_dma_buffer *vbo = res->backup;
+			struct vmw_buffer_object *vbo = res->backup;
 			ret = vmw_bo_to_validate_list
 				(sw_context, vbo,
@@ -821,7 +821,7 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
 static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
 {
 	struct vmw_private *dev_priv = ctx_res->dev_priv;
-	struct vmw_dma_buffer *dx_query_mob;
+	struct vmw_buffer_object *dx_query_mob;
 	struct {
 		SVGA3dCmdHeader header;
 		SVGA3dCmdDXBindAllQuery body;
@@ -1152,7 +1152,7 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
 * command batch.
 */
 static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
-				       struct vmw_dma_buffer *new_query_bo,
+				       struct vmw_buffer_object *new_query_bo,
 				       struct vmw_sw_context *sw_context)
 {
 	struct vmw_res_cache_entry *ctx_entry =
@@ -1234,7 +1234,7 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
 	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
 		if (dev_priv->pinned_bo) {
 			vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
-			vmw_dmabuf_unreference(&dev_priv->pinned_bo);
+			vmw_bo_unreference(&dev_priv->pinned_bo);
 		}
 		if (!sw_context->needs_post_query_barrier) {
@@ -1256,7 +1256,7 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
 			dev_priv->query_cid = sw_context->last_query_ctx->id;
 			dev_priv->query_cid_valid = true;
 			dev_priv->pinned_bo =
-				vmw_dmabuf_reference(sw_context->cur_query_bo);
+				vmw_bo_reference(sw_context->cur_query_bo);
 		}
 	}
 }
@@ -1282,15 +1282,14 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
 static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
 				 struct vmw_sw_context *sw_context,
 				 SVGAMobId *id,
-				 struct vmw_dma_buffer **vmw_bo_p)
+				 struct vmw_buffer_object **vmw_bo_p)
 {
-	struct vmw_dma_buffer *vmw_bo = NULL;
+	struct vmw_buffer_object *vmw_bo = NULL;
 	uint32_t handle = *id;
 	struct vmw_relocation *reloc;
 	int ret;
-	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
-				     NULL);
+	ret = vmw_user_bo_lookup(sw_context->fp->tfile, handle, &vmw_bo, NULL);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Could not find or use MOB buffer.\n");
 		ret = -EINVAL;
@@ -1316,7 +1315,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
 	return 0;
out_no_reloc:
-	vmw_dmabuf_unreference(&vmw_bo);
+	vmw_bo_unreference(&vmw_bo);
 	*vmw_bo_p = NULL;
 	return ret;
 }
@@ -1343,15 +1342,14 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
 static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
 				   struct vmw_sw_context *sw_context,
 				   SVGAGuestPtr *ptr,
-				   struct vmw_dma_buffer **vmw_bo_p)
+				   struct vmw_buffer_object **vmw_bo_p)
 {
-	struct vmw_dma_buffer *vmw_bo = NULL;
+	struct vmw_buffer_object *vmw_bo = NULL;
 	uint32_t handle = ptr->gmrId;
 	struct vmw_relocation *reloc;
 	int ret;
-	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
-				     NULL);
+	ret = vmw_user_bo_lookup(sw_context->fp->tfile, handle, &vmw_bo, NULL);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Could not find or use GMR region.\n");
 		ret = -EINVAL;
@@ -1376,7 +1374,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
 	return 0;
out_no_reloc:
-	vmw_dmabuf_unreference(&vmw_bo);
+	vmw_bo_unreference(&vmw_bo);
 	*vmw_bo_p = NULL;
 	return ret;
 }
@@ -1447,7 +1445,7 @@ static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
 		SVGA3dCmdDXBindQuery q;
 	} *cmd;
-	struct vmw_dma_buffer *vmw_bo;
+	struct vmw_buffer_object *vmw_bo;
 	int ret;
@@ -1466,7 +1464,7 @@ static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
 	sw_context->dx_query_mob = vmw_bo;
 	sw_context->dx_query_ctx = sw_context->dx_ctx_node->res;
-	vmw_dmabuf_unreference(&vmw_bo);
+	vmw_bo_unreference(&vmw_bo);
 	return ret;
 }
@@ -1549,7 +1547,7 @@ static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
 				struct vmw_sw_context *sw_context,
 				SVGA3dCmdHeader *header)
 {
-	struct vmw_dma_buffer *vmw_bo;
+	struct vmw_buffer_object *vmw_bo;
 	struct vmw_query_cmd {
 		SVGA3dCmdHeader header;
 		SVGA3dCmdEndGBQuery q;
@@ -1569,7 +1567,7 @@ static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
 	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
-	vmw_dmabuf_unreference(&vmw_bo);
+	vmw_bo_unreference(&vmw_bo);
 	return ret;
 }
@@ -1584,7 +1582,7 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
 			     struct vmw_sw_context *sw_context,
 			     SVGA3dCmdHeader *header)
 {
-	struct vmw_dma_buffer *vmw_bo;
+	struct vmw_buffer_object *vmw_bo;
 	struct vmw_query_cmd {
 		SVGA3dCmdHeader header;
 		SVGA3dCmdEndQuery q;
@@ -1623,7 +1621,7 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
 	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
-	vmw_dmabuf_unreference(&vmw_bo);
+	vmw_bo_unreference(&vmw_bo);
 	return ret;
 }
@@ -1638,7 +1636,7 @@ static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
 				 struct vmw_sw_context *sw_context,
 				 SVGA3dCmdHeader *header)
 {
-	struct vmw_dma_buffer *vmw_bo;
+	struct vmw_buffer_object *vmw_bo;
 	struct vmw_query_cmd {
 		SVGA3dCmdHeader header;
 		SVGA3dCmdWaitForGBQuery q;
@@ -1656,7 +1654,7 @@ static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
 	if (unlikely(ret != 0))
 		return ret;
-	vmw_dmabuf_unreference(&vmw_bo);
+	vmw_bo_unreference(&vmw_bo);
 	return 0;
 }
@@ -1671,7 +1669,7 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
 			      struct vmw_sw_context *sw_context,
 			      SVGA3dCmdHeader *header)
 {
-	struct vmw_dma_buffer *vmw_bo;
+	struct vmw_buffer_object *vmw_bo;
 	struct vmw_query_cmd {
 		SVGA3dCmdHeader header;
 		SVGA3dCmdWaitForQuery q;
@@ -1708,7 +1706,7 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
 	if (unlikely(ret != 0))
 		return ret;
-	vmw_dmabuf_unreference(&vmw_bo);
+	vmw_bo_unreference(&vmw_bo);
 	return 0;
 }
@@ -1716,7 +1714,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
 		       struct vmw_sw_context *sw_context,
 		       SVGA3dCmdHeader *header)
 {
-	struct vmw_dma_buffer *vmw_bo = NULL;
+	struct vmw_buffer_object *vmw_bo = NULL;
 	struct vmw_surface *srf = NULL;
 	struct vmw_dma_cmd {
 		SVGA3dCmdHeader header;
@@ -1768,7 +1766,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
 			     header);
out_no_surface:
-	vmw_dmabuf_unreference(&vmw_bo);
+	vmw_bo_unreference(&vmw_bo);
 	return ret;
 }
@@ -1887,7 +1885,7 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
 				      struct vmw_sw_context *sw_context,
 				      void *buf)
 {
-	struct vmw_dma_buffer *vmw_bo;
+	struct vmw_buffer_object *vmw_bo;
 	int ret;
 	struct {
@@ -1901,7 +1899,7 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
 	if (unlikely(ret != 0))
 		return ret;
-	vmw_dmabuf_unreference(&vmw_bo);
+	vmw_bo_unreference(&vmw_bo);
 	return ret;
 }
@@ -1928,7 +1926,7 @@ static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
 				     uint32_t *buf_id,
 				     unsigned long backup_offset)
 {
-	struct vmw_dma_buffer *dma_buf;
+	struct vmw_buffer_object *dma_buf;
 	int ret;
 	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
@@ -1939,7 +1937,7 @@ static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
 	if (val_node->first_usage)
 		val_node->no_buffer_needed = true;
-	vmw_dmabuf_unreference(&val_node->new_backup);
+	vmw_bo_unreference(&val_node->new_backup);
 	val_node->new_backup = dma_buf;
 	val_node->new_backup_offset = backup_offset;
@@ -3701,8 +3699,8 @@ int vmw_validate_single_buffer(struct vmw_private *dev_priv,
 			       bool interruptible,
 			       bool validate_as_mob)
 {
-	struct vmw_dma_buffer *vbo = container_of(bo, struct vmw_dma_buffer,
-						  base);
+	struct vmw_buffer_object *vbo =
+		container_of(bo, struct vmw_buffer_object, base);
 	struct ttm_operation_ctx ctx = { interruptible, true };
 	int ret;
@@ -4423,7 +4421,7 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
 	ttm_bo_unref(&query_val.bo);
 	ttm_bo_unref(&pinned_val.bo);
-	vmw_dmabuf_unreference(&dev_priv->pinned_bo);
+	vmw_bo_unreference(&dev_priv->pinned_bo);
out_unlock:
 	return;
@@ -4432,7 +4430,7 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
out_no_reserve:
 	ttm_bo_unref(&query_val.bo);
 	ttm_bo_unref(&pinned_val.bo);
-	vmw_dmabuf_unreference(&dev_priv->pinned_bo);
+	vmw_bo_unreference(&dev_priv->pinned_bo);
 }
 /**
......
@@ -42,7 +42,7 @@ struct vmw_fb_par {
 	void *vmalloc;
 	struct mutex bo_mutex;
-	struct vmw_dma_buffer *vmw_bo;
+	struct vmw_buffer_object *vmw_bo;
 	unsigned bo_size;
 	struct drm_framebuffer *set_fb;
 	struct drm_display_mode *set_mode;
@@ -184,7 +184,7 @@ static void vmw_fb_dirty_flush(struct work_struct *work)
 	struct drm_clip_rect clip;
 	struct drm_framebuffer *cur_fb;
 	u8 *src_ptr, *dst_ptr;
-	struct vmw_dma_buffer *vbo = par->vmw_bo;
+	struct vmw_buffer_object *vbo = par->vmw_bo;
 	void *virtual;
 	if (!READ_ONCE(par->dirty.active))
@@ -197,7 +197,7 @@ static void vmw_fb_dirty_flush(struct work_struct *work)
 	(void) ttm_read_lock(&vmw_priv->reservation_sem, false);
 	(void) ttm_bo_reserve(&vbo->base, false, false, NULL);
-	virtual = vmw_dma_buffer_map_and_cache(vbo);
+	virtual = vmw_bo_map_and_cache(vbo);
 	if (!virtual)
 		goto out_unreserve;
@@ -391,9 +391,9 @@ static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
 */
 static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
-			    size_t size, struct vmw_dma_buffer **out)
+			    size_t size, struct vmw_buffer_object **out)
 {
-	struct vmw_dma_buffer *vmw_bo;
+	struct vmw_buffer_object *vmw_bo;
 	int ret;
 	(void) ttm_write_lock(&vmw_priv->reservation_sem, false);
@@ -404,10 +404,10 @@ static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
 		goto err_unlock;
 	}
-	ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size,
-			      &vmw_sys_placement,
-			      false,
-			      &vmw_dmabuf_bo_free);
+	ret = vmw_bo_init(vmw_priv, vmw_bo, size,
+			  &vmw_sys_placement,
+			  false,
+			  &vmw_bo_bo_free);
 	if (unlikely(ret != 0))
 		goto err_unlock; /* init frees the buffer on failure */
@@ -491,7 +491,7 @@ static int vmw_fb_kms_detach(struct vmw_fb_par *par,
 	}
 	if (par->vmw_bo && detach_bo && unref_bo)
-		vmw_dmabuf_unreference(&par->vmw_bo);
+		vmw_bo_unreference(&par->vmw_bo);
 	return 0;
 }
......
@@ -175,7 +175,6 @@ static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
 	struct vmw_private *dev_priv = fman->dev_priv;
 	struct vmwgfx_wait_cb cb;
 	long ret = timeout;
-	unsigned long irq_flags;
 	if (likely(vmw_fence_obj_signaled(fence)))
 		return timeout;
@@ -183,7 +182,7 @@ static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
 	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
 	vmw_seqno_waiter_add(dev_priv);
-	spin_lock_irqsave(f->lock, irq_flags);
+	spin_lock(f->lock);
 	if (intr && signal_pending(current)) {
 		ret = -ERESTARTSYS;
@@ -194,30 +193,45 @@ static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
 	cb.task = current;
 	list_add(&cb.base.node, &f->cb_list);
-	while (ret > 0) {
+	for (;;) {
 		__vmw_fences_update(fman);
-		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
-			break;
+		/*
+		 * We can use the barrier free __set_current_state() since
+		 * DMA_FENCE_FLAG_SIGNALED_BIT + wakeup is protected by the
+		 * fence spinlock.
+		 */
 		if (intr)
 			__set_current_state(TASK_INTERRUPTIBLE);
 		else
 			__set_current_state(TASK_UNINTERRUPTIBLE);
-		spin_unlock_irqrestore(f->lock, irq_flags);
-		ret = schedule_timeout(ret);
-		spin_lock_irqsave(f->lock, irq_flags);
-		if (ret > 0 && intr && signal_pending(current))
-			ret = -ERESTARTSYS;
-	}
+		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags)) {
+			if (ret == 0 && timeout > 0)
+				ret = 1;
+			break;
+		}
+		if (intr && signal_pending(current)) {
+			ret = -ERESTARTSYS;
+			break;
+		}
+		if (ret == 0)
+			break;
+		spin_unlock(f->lock);
+		ret = schedule_timeout(ret);
+		spin_lock(f->lock);
+	}
+	__set_current_state(TASK_RUNNING);
 	if (!list_empty(&cb.base.node))
 		list_del(&cb.base.node);
-	__set_current_state(TASK_RUNNING);
 out:
-	spin_unlock_irqrestore(f->lock, irq_flags);
+	spin_unlock(f->lock);
 	vmw_seqno_waiter_remove(dev_priv);
...
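A side note on the reworked wait above: it follows the canonical kernel sleep/wait idiom, where the task state is set before the wake condition is tested, so a wakeup that fires in between simply makes the subsequent schedule_timeout() return immediately instead of being lost. A minimal sketch of that idiom, with `condition` and `lock` as placeholders (this is not the driver code itself):

	spin_lock(lock);
	for (;;) {
		__set_current_state(TASK_INTERRUPTIBLE); /* publish intent to sleep */
		if (condition)				 /* test *after* setting state */
			break;
		if (timeout == 0)			 /* expired on an earlier pass */
			break;
		spin_unlock(lock);
		timeout = schedule_timeout(timeout);	 /* sleeps; returns 0 on expiry */
		spin_lock(lock);
	}
	__set_current_state(TASK_RUNNING);
	spin_unlock(lock);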
@@ -377,8 +377,8 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
 	}
 	vfb = vmw_framebuffer_to_vfb(fb);
-	if (!vfb->dmabuf) {
-		DRM_ERROR("Framebuffer not dmabuf backed.\n");
+	if (!vfb->bo) {
+		DRM_ERROR("Framebuffer not buffer backed.\n");
 		ret = -EINVAL;
 		goto out_no_ttm_lock;
 	}
...
(This diff is collapsed.)
@@ -90,7 +90,7 @@ struct vmw_kms_dirty {
 #define vmw_framebuffer_to_vfbs(x) \
 	container_of(x, struct vmw_framebuffer_surface, base.base)
 #define vmw_framebuffer_to_vfbd(x) \
-	container_of(x, struct vmw_framebuffer_dmabuf, base.base)
+	container_of(x, struct vmw_framebuffer_bo, base.base)
 /**
  * Base class for framebuffers
@@ -102,7 +102,7 @@ struct vmw_framebuffer {
 	struct drm_framebuffer base;
 	int (*pin)(struct vmw_framebuffer *fb);
 	int (*unpin)(struct vmw_framebuffer *fb);
-	bool dmabuf;
+	bool bo;
 	struct ttm_base_object *user_obj;
 	uint32_t user_handle;
 };
@@ -117,15 +117,15 @@ struct vmw_clip_rect {
 struct vmw_framebuffer_surface {
 	struct vmw_framebuffer base;
 	struct vmw_surface *surface;
-	struct vmw_dma_buffer *buffer;
+	struct vmw_buffer_object *buffer;
 	struct list_head head;
-	bool is_dmabuf_proxy;  /* true if this is proxy surface for DMA buf */
+	bool is_bo_proxy;  /* true if this is proxy surface for DMA buf */
 };
-struct vmw_framebuffer_dmabuf {
+struct vmw_framebuffer_bo {
 	struct vmw_framebuffer base;
-	struct vmw_dma_buffer *buffer;
+	struct vmw_buffer_object *buffer;
 };
@@ -161,18 +161,18 @@ struct vmw_crtc_state {
  *
  * @base DRM plane object
  * @surf Display surface for STDU
- * @dmabuf display dmabuf for SOU
+ * @bo display bo for SOU
 * @content_fb_type Used by STDU.
- * @dmabuf_size Size of the dmabuf, used by Screen Object Display Unit
+ * @bo_size Size of the bo, used by Screen Object Display Unit
 * @pinned pin count for STDU display surface
 */
struct vmw_plane_state {
	struct drm_plane_state base;
	struct vmw_surface *surf;
-	struct vmw_dma_buffer *dmabuf;
+	struct vmw_buffer_object *bo;
	int content_fb_type;
-	unsigned long dmabuf_size;
+	unsigned long bo_size;
	int pinned;
@@ -192,6 +192,24 @@ struct vmw_connector_state {
 	struct drm_connector_state base;
 	bool is_implicit;
+
+	/**
+	 * @gui_x:
+	 *
+	 * vmwgfx connector property representing the x position of this
+	 * display unit (connector is synonymous with display unit) in the
+	 * overall topology. This is what the device expects as xRoot when
+	 * creating a screen.
+	 */
+	int gui_x;
+
+	/**
+	 * @gui_y:
+	 *
+	 * vmwgfx connector property representing the y position of this
+	 * display unit (connector is synonymous with display unit) in the
+	 * overall topology. This is what the device expects as yRoot when
+	 * creating a screen.
+	 */
+	int gui_y;
 };
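As context for how these properties get consumed, here is a hedged sketch that mirrors the screen-object change further down in this diff (not a new driver API): an explicitly placed display unit takes its screen root from the connector state, while an implicitly placed one follows the CRTC offset.

	/* Sketch only: pick screen root coordinates for a display unit. */
	struct vmw_connector_state *vcs =
		vmw_connector_state_to_vcs(du->connector.state);
	int x = du->is_implicit ? crtc->x : vcs->gui_x;
	int y = du->is_implicit ? crtc->y : vcs->gui_y;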
 /**
@@ -209,7 +227,7 @@ struct vmw_display_unit {
 	struct drm_plane cursor;
 	struct vmw_surface *cursor_surface;
-	struct vmw_dma_buffer *cursor_dmabuf;
+	struct vmw_buffer_object *cursor_bo;
 	size_t cursor_age;
 	int cursor_x;
@@ -243,7 +261,7 @@ struct vmw_display_unit {
 struct vmw_validation_ctx {
 	struct vmw_resource *res;
-	struct vmw_dma_buffer *buf;
+	struct vmw_buffer_object *buf;
 };
 #define vmw_crtc_to_du(x) \
@@ -291,14 +309,14 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
 			 struct vmw_kms_dirty *dirty);
 int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
-				  struct vmw_dma_buffer *buf,
+				  struct vmw_buffer_object *buf,
 				  bool interruptible,
 				  bool validate_as_mob,
 				  bool for_cpu_blit);
-void vmw_kms_helper_buffer_revert(struct vmw_dma_buffer *buf);
+void vmw_kms_helper_buffer_revert(struct vmw_buffer_object *buf);
 void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
 				  struct drm_file *file_priv,
-				  struct vmw_dma_buffer *buf,
+				  struct vmw_buffer_object *buf,
 				  struct vmw_fence_obj **out_fence,
 				  struct drm_vmw_fence_rep __user *
 				  user_fence_rep);
@@ -316,7 +334,7 @@ int vmw_kms_readback(struct vmw_private *dev_priv,
 		     uint32_t num_clips);
 struct vmw_framebuffer *
 vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
-			struct vmw_dma_buffer *dmabuf,
+			struct vmw_buffer_object *bo,
 			struct vmw_surface *surface,
 			bool only_2d,
 			const struct drm_mode_fb_cmd2 *mode_cmd);
@@ -384,11 +402,11 @@ void vmw_du_connector_destroy_state(struct drm_connector *connector,
 */
 int vmw_kms_ldu_init_display(struct vmw_private *dev_priv);
 int vmw_kms_ldu_close_display(struct vmw_private *dev_priv);
-int vmw_kms_ldu_do_dmabuf_dirty(struct vmw_private *dev_priv,
+int vmw_kms_ldu_do_bo_dirty(struct vmw_private *dev_priv,
 			    struct vmw_framebuffer *framebuffer,
-			    unsigned flags, unsigned color,
+			    unsigned int flags, unsigned int color,
 			    struct drm_clip_rect *clips,
-			    unsigned num_clips, int increment);
+			    unsigned int num_clips, int increment);
 int vmw_kms_update_proxy(struct vmw_resource *res,
 			 const struct drm_clip_rect *clips,
 			 unsigned num_clips,
@@ -408,14 +426,14 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
 				 unsigned num_clips, int inc,
 				 struct vmw_fence_obj **out_fence,
 				 struct drm_crtc *crtc);
-int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
+int vmw_kms_sou_do_bo_dirty(struct vmw_private *dev_priv,
 			    struct vmw_framebuffer *framebuffer,
 			    struct drm_clip_rect *clips,
 			    struct drm_vmw_rect *vclips,
-			    unsigned num_clips, int increment,
+			    unsigned int num_clips, int increment,
 			    bool interruptible,
 			    struct vmw_fence_obj **out_fence,
 			    struct drm_crtc *crtc);
 int vmw_kms_sou_readback(struct vmw_private *dev_priv,
 			 struct drm_file *file_priv,
 			 struct vmw_framebuffer *vfb,
...
@@ -547,11 +547,11 @@ int vmw_kms_ldu_close_display(struct vmw_private *dev_priv)
 }
-int vmw_kms_ldu_do_dmabuf_dirty(struct vmw_private *dev_priv,
+int vmw_kms_ldu_do_bo_dirty(struct vmw_private *dev_priv,
 			    struct vmw_framebuffer *framebuffer,
-			    unsigned flags, unsigned color,
+			    unsigned int flags, unsigned int color,
 			    struct drm_clip_rect *clips,
-			    unsigned num_clips, int increment)
+			    unsigned int num_clips, int increment)
 {
 	size_t fifo_size;
 	int i;
...
@@ -225,7 +225,7 @@ static void vmw_takedown_otable_base(struct vmw_private *dev_priv,
 		ret = ttm_bo_reserve(bo, false, true, NULL);
 		BUG_ON(ret != 0);
-		vmw_fence_single_bo(bo, NULL);
+		vmw_bo_fence_single(bo, NULL);
 		ttm_bo_unreserve(bo);
 	}
@@ -362,7 +362,7 @@ static void vmw_otable_batch_takedown(struct vmw_private *dev_priv,
 	ret = ttm_bo_reserve(bo, false, true, NULL);
 	BUG_ON(ret != 0);
-	vmw_fence_single_bo(bo, NULL);
+	vmw_bo_fence_single(bo, NULL);
 	ttm_bo_unreserve(bo);
 	ttm_bo_unref(&batch->otable_bo);
@@ -620,7 +620,7 @@ void vmw_mob_unbind(struct vmw_private *dev_priv,
 		vmw_fifo_commit(dev_priv, sizeof(*cmd));
 	}
 	if (bo) {
-		vmw_fence_single_bo(bo, NULL);
+		vmw_bo_fence_single(bo, NULL);
 		ttm_bo_unreserve(bo);
 	}
 	vmw_fifo_resource_dec(dev_priv);
...
@@ -31,6 +31,7 @@
 #include <linux/frame.h>
 #include <asm/hypervisor.h>
 #include <drm/drmP.h>
+#include "vmwgfx_drv.h"
 #include "vmwgfx_msg.h"
@@ -234,7 +235,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
 	if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0 ||
 	    (HIGH_WORD(ecx) & MESSAGE_STATUS_HB) == 0) {
-		DRM_ERROR("Failed to get reply size\n");
+		DRM_ERROR("Failed to get reply size for host message.\n");
 		return -EINVAL;
 	}
@@ -245,7 +246,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
 	reply_len = ebx;
 	reply = kzalloc(reply_len + 1, GFP_KERNEL);
 	if (!reply) {
-		DRM_ERROR("Cannot allocate memory for reply\n");
+		DRM_ERROR("Cannot allocate memory for host message reply.\n");
 		return -ENOMEM;
 	}
@@ -338,7 +339,8 @@ int vmw_host_get_guestinfo(const char *guest_info_param,
 	msg = kasprintf(GFP_KERNEL, "info-get %s", guest_info_param);
 	if (!msg) {
-		DRM_ERROR("Cannot allocate memory to get %s", guest_info_param);
+		DRM_ERROR("Cannot allocate memory to get guest info \"%s\".",
+			  guest_info_param);
 		return -ENOMEM;
 	}
@@ -374,7 +376,7 @@ int vmw_host_get_guestinfo(const char *guest_info_param,
 out_open:
 	*length = 0;
 	kfree(msg);
-	DRM_ERROR("Failed to get %s", guest_info_param);
+	DRM_ERROR("Failed to get guest info \"%s\".", guest_info_param);
 	return -EINVAL;
 }
@@ -403,7 +405,7 @@ int vmw_host_log(const char *log)
 	msg = kasprintf(GFP_KERNEL, "log %s", log);
 	if (!msg) {
-		DRM_ERROR("Cannot allocate memory for log message\n");
+		DRM_ERROR("Cannot allocate memory for host log message.\n");
 		return -ENOMEM;
 	}
@@ -422,7 +424,7 @@ int vmw_host_log(const char *log)
 	vmw_close_channel(&channel);
 out_open:
 	kfree(msg);
-	DRM_ERROR("Failed to send log\n");
+	DRM_ERROR("Failed to send host log message.\n");
 	return -EINVAL;
 }
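For orientation, a hedged usage sketch (not part of this patch): vmw_host_log() takes a plain string and returns 0 on success, so a hypothetical call site elsewhere in the driver could look like this.

	/* Hypothetical call site; the message text is illustrative only. */
	if (vmw_host_log("vmwgfx: display topology changed") != 0)
		DRM_ERROR("Failed to send host log message.\n");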
@@ -38,7 +38,7 @@
 #define VMW_OVERLAY_CAP_MASK (SVGA_FIFO_CAP_VIDEO | SVGA_FIFO_CAP_ESCAPE)
 struct vmw_stream {
-	struct vmw_dma_buffer *buf;
+	struct vmw_buffer_object *buf;
 	bool claimed;
 	bool paused;
 	struct drm_vmw_control_stream_arg saved;
@@ -94,7 +94,7 @@ static inline void fill_flush(struct vmw_escape_video_flush *cmd,
 * -ERESTARTSYS if interrupted by a signal.
 */
 static int vmw_overlay_send_put(struct vmw_private *dev_priv,
-				struct vmw_dma_buffer *buf,
+				struct vmw_buffer_object *buf,
 				struct drm_vmw_control_stream_arg *arg,
 				bool interruptible)
 {
@@ -225,16 +225,16 @@ static int vmw_overlay_send_stop(struct vmw_private *dev_priv,
 * used with GMRs instead of being locked to vram.
 */
 static int vmw_overlay_move_buffer(struct vmw_private *dev_priv,
-				   struct vmw_dma_buffer *buf,
+				   struct vmw_buffer_object *buf,
 				   bool pin, bool inter)
 {
 	if (!pin)
-		return vmw_dmabuf_unpin(dev_priv, buf, inter);
+		return vmw_bo_unpin(dev_priv, buf, inter);
 	if (dev_priv->active_display_unit == vmw_du_legacy)
-		return vmw_dmabuf_pin_in_vram(dev_priv, buf, inter);
+		return vmw_bo_pin_in_vram(dev_priv, buf, inter);
-	return vmw_dmabuf_pin_in_vram_or_gmr(dev_priv, buf, inter);
+	return vmw_bo_pin_in_vram_or_gmr(dev_priv, buf, inter);
 }
 /**
@@ -278,7 +278,7 @@ static int vmw_overlay_stop(struct vmw_private *dev_priv,
 	}
 	if (!pause) {
-		vmw_dmabuf_unreference(&stream->buf);
+		vmw_bo_unreference(&stream->buf);
 		stream->paused = false;
 	} else {
 		stream->paused = true;
@@ -297,7 +297,7 @@ static int vmw_overlay_stop(struct vmw_private *dev_priv,
 * -ERESTARTSYS if interrupted.
 */
 static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
-				     struct vmw_dma_buffer *buf,
+				     struct vmw_buffer_object *buf,
 				     struct drm_vmw_control_stream_arg *arg,
 				     bool interruptible)
 {
@@ -347,7 +347,7 @@ static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
 	}
 	if (stream->buf != buf)
-		stream->buf = vmw_dmabuf_reference(buf);
+		stream->buf = vmw_bo_reference(buf);
 	stream->saved = *arg;
 	/* stream is no longer stopped/paused */
 	stream->paused = false;
@@ -466,7 +466,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data,
 	struct vmw_overlay *overlay = dev_priv->overlay_priv;
 	struct drm_vmw_control_stream_arg *arg =
 	    (struct drm_vmw_control_stream_arg *)data;
-	struct vmw_dma_buffer *buf;
+	struct vmw_buffer_object *buf;
 	struct vmw_resource *res;
 	int ret;
@@ -484,13 +484,13 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data,
 		goto out_unlock;
 	}
-	ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf, NULL);
+	ret = vmw_user_bo_lookup(tfile, arg->handle, &buf, NULL);
 	if (ret)
 		goto out_unlock;
 	ret = vmw_overlay_update_stream(dev_priv, buf, arg, true);
-	vmw_dmabuf_unreference(&buf);
+	vmw_bo_unreference(&buf);
 out_unlock:
 	mutex_unlock(&overlay->mutex);
...
@@ -66,7 +66,7 @@ struct vmw_kms_sou_readback_blit {
 	SVGAFifoCmdBlitScreenToGMRFB body;
 };
-struct vmw_kms_sou_dmabuf_blit {
+struct vmw_kms_sou_bo_blit {
 	uint32 header;
 	SVGAFifoCmdBlitGMRFBToScreen body;
 };
@@ -83,7 +83,7 @@ struct vmw_screen_object_unit {
 	struct vmw_display_unit base;
 	unsigned long buffer_size; /**< Size of allocated buffer */
-	struct vmw_dma_buffer *buffer; /**< Backing store buffer */
+	struct vmw_buffer_object *buffer; /**< Backing store buffer */
 	bool defined;
 };
@@ -109,7 +109,7 @@ static void vmw_sou_crtc_destroy(struct drm_crtc *crtc)
 */
 static int vmw_sou_fifo_create(struct vmw_private *dev_priv,
 			       struct vmw_screen_object_unit *sou,
-			       uint32_t x, uint32_t y,
+			       int x, int y,
 			       struct drm_display_mode *mode)
 {
 	size_t fifo_size;
@@ -139,13 +139,8 @@ static int vmw_sou_fifo_create(struct vmw_private *dev_priv,
 		(sou->base.unit == 0 ? SVGA_SCREEN_IS_PRIMARY : 0);
 	cmd->obj.size.width = mode->hdisplay;
 	cmd->obj.size.height = mode->vdisplay;
-	if (sou->base.is_implicit) {
-		cmd->obj.root.x = x;
-		cmd->obj.root.y = y;
-	} else {
-		cmd->obj.root.x = sou->base.gui_x;
-		cmd->obj.root.y = sou->base.gui_y;
-	}
+	cmd->obj.root.x = x;
+	cmd->obj.root.y = y;
 	sou->base.set_gui_x = cmd->obj.root.x;
 	sou->base.set_gui_y = cmd->obj.root.y;
@@ -222,12 +217,11 @@ static void vmw_sou_crtc_mode_set_nofb(struct drm_crtc *crtc)
 	struct vmw_plane_state *vps;
 	int ret;
-	sou = vmw_crtc_to_sou(crtc);
+	sou = vmw_crtc_to_sou(crtc);
 	dev_priv = vmw_priv(crtc->dev);
 	ps = crtc->primary->state;
 	fb = ps->fb;
 	vps = vmw_plane_state_to_vps(ps);
 	vfb = (fb) ? vmw_framebuffer_to_vfb(fb) : NULL;
@@ -240,11 +234,25 @@ static void vmw_sou_crtc_mode_set_nofb(struct drm_crtc *crtc)
 	}
 	if (vfb) {
-		sou->buffer = vps->dmabuf;
-		sou->buffer_size = vps->dmabuf_size;
+		struct drm_connector_state *conn_state;
+		struct vmw_connector_state *vmw_conn_state;
+		int x, y;
+
+		sou->buffer = vps->bo;
+		sou->buffer_size = vps->bo_size;
+
+		if (sou->base.is_implicit) {
+			x = crtc->x;
+			y = crtc->y;
+		} else {
+			conn_state = sou->base.connector.state;
+			vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
+
+			x = vmw_conn_state->gui_x;
+			y = vmw_conn_state->gui_y;
+		}
-		ret = vmw_sou_fifo_create(dev_priv, sou, crtc->x, crtc->y,
-					  &crtc->mode);
+		ret = vmw_sou_fifo_create(dev_priv, sou, x, y, &crtc->mode);
 		if (ret)
 			DRM_ERROR("Failed to define Screen Object %dx%d\n",
 				  crtc->x, crtc->y);
@@ -408,10 +416,10 @@ vmw_sou_primary_plane_cleanup_fb(struct drm_plane *plane,
 	struct drm_crtc *crtc = plane->state->crtc ?
 		plane->state->crtc : old_state->crtc;
-	if (vps->dmabuf)
-		vmw_dmabuf_unpin(vmw_priv(crtc->dev), vps->dmabuf, false);
-	vmw_dmabuf_unreference(&vps->dmabuf);
-	vps->dmabuf_size = 0;
+	if (vps->bo)
+		vmw_bo_unpin(vmw_priv(crtc->dev), vps->bo, false);
+	vmw_bo_unreference(&vps->bo);
+	vps->bo_size = 0;
 	vmw_du_plane_cleanup_fb(plane, old_state);
 }
@@ -440,8 +448,8 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
 	if (!new_fb) {
-		vmw_dmabuf_unreference(&vps->dmabuf);
-		vps->dmabuf_size = 0;
+		vmw_bo_unreference(&vps->bo);
+		vps->bo_size = 0;
 		return 0;
 	}
@@ -449,22 +457,22 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
 	size = new_state->crtc_w * new_state->crtc_h * 4;
 	dev_priv = vmw_priv(crtc->dev);
-	if (vps->dmabuf) {
-		if (vps->dmabuf_size == size) {
+	if (vps->bo) {
+		if (vps->bo_size == size) {
 			/*
 			 * Note that this might temporarily up the pin-count
 			 * to 2, until cleanup_fb() is called.
 			 */
-			return vmw_dmabuf_pin_in_vram(dev_priv, vps->dmabuf,
-						      true);
+			return vmw_bo_pin_in_vram(dev_priv, vps->bo,
+						  true);
 		}
-		vmw_dmabuf_unreference(&vps->dmabuf);
-		vps->dmabuf_size = 0;
+		vmw_bo_unreference(&vps->bo);
+		vps->bo_size = 0;
 	}
-	vps->dmabuf = kzalloc(sizeof(*vps->dmabuf), GFP_KERNEL);
-	if (!vps->dmabuf)
+	vps->bo = kzalloc(sizeof(*vps->bo), GFP_KERNEL);
+	if (!vps->bo)
 		return -ENOMEM;
 	vmw_svga_enable(dev_priv);
@@ -473,22 +481,22 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
 	 * resume the overlays, this is preferred to failing to alloc.
 	 */
 	vmw_overlay_pause_all(dev_priv);
-	ret = vmw_dmabuf_init(dev_priv, vps->dmabuf, size,
-			      &vmw_vram_ne_placement,
-			      false, &vmw_dmabuf_bo_free);
+	ret = vmw_bo_init(dev_priv, vps->bo, size,
+			  &vmw_vram_ne_placement,
+			  false, &vmw_bo_bo_free);
 	vmw_overlay_resume_all(dev_priv);
 	if (ret) {
-		vps->dmabuf = NULL; /* vmw_dmabuf_init frees on error */
+		vps->bo = NULL; /* vmw_bo_init frees on error */
 		return ret;
 	}
-	vps->dmabuf_size = size;
+	vps->bo_size = size;
 	/*
 	 * TTM already thinks the buffer is pinned, but make sure the
 	 * pin_count is upped.
 	 */
-	return vmw_dmabuf_pin_in_vram(dev_priv, vps->dmabuf, true);
+	return vmw_bo_pin_in_vram(dev_priv, vps->bo, true);
 }
@@ -512,10 +520,10 @@ vmw_sou_primary_plane_atomic_update(struct drm_plane *plane,
 		vclips.w = crtc->mode.hdisplay;
 		vclips.h = crtc->mode.vdisplay;
-		if (vfb->dmabuf)
-			ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, vfb, NULL,
-							  &vclips, 1, 1, true,
-							  &fence, crtc);
+		if (vfb->bo)
+			ret = vmw_kms_sou_do_bo_dirty(dev_priv, vfb, NULL,
+						      &vclips, 1, 1, true,
+						      &fence, crtc);
 		else
 			ret = vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL,
 							   &vclips, NULL, 0, 0,
@@ -775,11 +783,11 @@ int vmw_kms_sou_init_display(struct vmw_private *dev_priv)
 	return 0;
 }
-static int do_dmabuf_define_gmrfb(struct vmw_private *dev_priv,
-				  struct vmw_framebuffer *framebuffer)
+static int do_bo_define_gmrfb(struct vmw_private *dev_priv,
+			      struct vmw_framebuffer *framebuffer)
 {
-	struct vmw_dma_buffer *buf =
-		container_of(framebuffer, struct vmw_framebuffer_dmabuf,
+	struct vmw_buffer_object *buf =
+		container_of(framebuffer, struct vmw_framebuffer_bo,
 			     base)->buffer;
 	int depth = framebuffer->base.format->depth;
 	struct {
@@ -970,13 +978,13 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
 }
 /**
- * vmw_sou_dmabuf_fifo_commit - Callback to submit a set of readback clips.
+ * vmw_sou_bo_fifo_commit - Callback to submit a set of readback clips.
 *
 * @dirty: The closure structure.
 *
 * Commits a previously built command buffer of readback clips.
 */
-static void vmw_sou_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
+static void vmw_sou_bo_fifo_commit(struct vmw_kms_dirty *dirty)
 {
 	if (!dirty->num_hits) {
 		vmw_fifo_commit(dirty->dev_priv, 0);
@@ -984,20 +992,20 @@ static void vmw_sou_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
 		return;
 	}
 	vmw_fifo_commit(dirty->dev_priv,
-			sizeof(struct vmw_kms_sou_dmabuf_blit) *
+			sizeof(struct vmw_kms_sou_bo_blit) *
 			dirty->num_hits);
 }
 /**
- * vmw_sou_dmabuf_clip - Callback to encode a readback cliprect.
+ * vmw_sou_bo_clip - Callback to encode a readback cliprect.
 *
 * @dirty: The closure structure
 *
 * Encodes a BLIT_GMRFB_TO_SCREEN cliprect.
 */
-static void vmw_sou_dmabuf_clip(struct vmw_kms_dirty *dirty)
+static void vmw_sou_bo_clip(struct vmw_kms_dirty *dirty)
 {
-	struct vmw_kms_sou_dmabuf_blit *blit = dirty->cmd;
+	struct vmw_kms_sou_bo_blit *blit = dirty->cmd;
 	blit += dirty->num_hits;
 	blit->header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
@@ -1012,10 +1020,10 @@ static void vmw_sou_dmabuf_clip(struct vmw_kms_dirty *dirty)
 }
 /**
- * vmw_kms_do_dmabuf_dirty - Dirty part of a dma-buffer backed framebuffer
+ * vmw_kms_do_bo_dirty - Dirty part of a buffer-object backed framebuffer
 *
 * @dev_priv: Pointer to the device private structure.
- * @framebuffer: Pointer to the dma-buffer backed framebuffer.
+ * @framebuffer: Pointer to the buffer-object backed framebuffer.
 * @clips: Array of clip rects.
 * @vclips: Alternate array of clip rects. Either @clips or @vclips must
 * be NULL.
@@ -1025,12 +1033,12 @@ static void vmw_sou_dmabuf_clip(struct vmw_kms_dirty *dirty)
 * @out_fence: If non-NULL, will return a ref-counted pointer to a
 * struct vmw_fence_obj. The returned fence pointer may be NULL in which
 * case the device has already synchronized.
- * @crtc: If crtc is passed, perform dmabuf dirty on that crtc only.
+ * @crtc: If crtc is passed, perform bo dirty on that crtc only.
 *
 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
 * interrupted.
 */
-int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
+int vmw_kms_sou_do_bo_dirty(struct vmw_private *dev_priv,
 			    struct vmw_framebuffer *framebuffer,
 			    struct drm_clip_rect *clips,
 			    struct drm_vmw_rect *vclips,
@@ -1039,8 +1047,8 @@ int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
 			    struct vmw_fence_obj **out_fence,
 			    struct drm_crtc *crtc)
 {
-	struct vmw_dma_buffer *buf =
-		container_of(framebuffer, struct vmw_framebuffer_dmabuf,
+	struct vmw_buffer_object *buf =
+		container_of(framebuffer, struct vmw_framebuffer_bo,
 			     base)->buffer;
 	struct vmw_kms_dirty dirty;
 	int ret;
@@ -1050,14 +1058,14 @@ int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
 	if (ret)
 		return ret;
-	ret = do_dmabuf_define_gmrfb(dev_priv, framebuffer);
+	ret = do_bo_define_gmrfb(dev_priv, framebuffer);
 	if (unlikely(ret != 0))
 		goto out_revert;
 	dirty.crtc = crtc;
-	dirty.fifo_commit = vmw_sou_dmabuf_fifo_commit;
-	dirty.clip = vmw_sou_dmabuf_clip;
-	dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_dmabuf_blit) *
+	dirty.fifo_commit = vmw_sou_bo_fifo_commit;
+	dirty.clip = vmw_sou_bo_clip;
+	dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_bo_blit) *
 		num_clips;
 	ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
 				   0, 0, num_clips, increment, &dirty);
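As a reading aid, the closure protocol between the renamed pieces above can be sketched as follows. This is an assumed outline of what vmw_kms_helper_dirty() does with the callbacks, inferred from the kernel-doc in this file rather than quoted from the helper itself:

	/* Assumed outline, in pseudocode built from the callbacks above. */
	dirty->cmd = vmw_fifo_reserve(dev_priv, dirty->fifo_reserve_size);
	dirty->num_hits = 0;
	/* for each clip rect intersecting the target crtc: */
	dirty->clip(dirty);		/* encodes one blit, increments num_hits */
	dirty->fifo_commit(dirty);	/* submits the num_hits encoded commands */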
@@ -1116,12 +1124,12 @@ static void vmw_sou_readback_clip(struct vmw_kms_dirty *dirty)
 /**
 * vmw_kms_sou_readback - Perform a readback from the screen object system to
- * a dma-buffer backed framebuffer.
+ * a buffer-object backed framebuffer.
 *
 * @dev_priv: Pointer to the device private structure.
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * Must be set to NULL if @user_fence_rep is NULL.
- * @vfb: Pointer to the dma-buffer backed framebuffer.
+ * @vfb: Pointer to the buffer-object backed framebuffer.
 * @user_fence_rep: User-space provided structure for fence information.
 * Must be set to non-NULL if @file_priv is non-NULL.
 * @vclips: Array of clip rects.
@@ -1139,8 +1147,8 @@ int vmw_kms_sou_readback(struct vmw_private *dev_priv,
 			 uint32_t num_clips,
 			 struct drm_crtc *crtc)
 {
-	struct vmw_dma_buffer *buf =
-		container_of(vfb, struct vmw_framebuffer_dmabuf, base)->buffer;
+	struct vmw_buffer_object *buf =
+		container_of(vfb, struct vmw_framebuffer_bo, base)->buffer;
 	struct vmw_kms_dirty dirty;
 	int ret;
@@ -1149,7 +1157,7 @@ int vmw_kms_sou_readback(struct vmw_private *dev_priv,
 	if (ret)
 		return ret;
-	ret = do_dmabuf_define_gmrfb(dev_priv, vfb);
+	ret = do_bo_define_gmrfb(dev_priv, vfb);
 	if (unlikely(ret != 0))
 		goto out_revert;
...
@@ -798,7 +798,7 @@ static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 	struct ttm_object_file *tfile =
 		vmw_fpriv((struct drm_file *)filp->private_data)->tfile;
-	return vmw_user_dmabuf_verify_access(bo, tfile);
+	return vmw_user_bo_verify_access(bo, tfile);
 }
 static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
@@ -852,7 +852,7 @@ static void vmw_move_notify(struct ttm_buffer_object *bo,
 			    bool evict,
 			    struct ttm_mem_reg *mem)
 {
-	vmw_resource_move_notify(bo, mem);
+	vmw_bo_move_notify(bo, mem);
 	vmw_query_move_notify(bo, mem);
 }
@@ -864,7 +864,7 @@ static void vmw_move_notify(struct ttm_buffer_object *bo,
 */
 static void vmw_swap_notify(struct ttm_buffer_object *bo)
 {
-	vmw_resource_swap_notify(bo);
+	vmw_bo_swap_notify(bo);
 	(void) ttm_bo_wait(bo, false, false);
 }
...
(This diff is collapsed.)