Commit 46b35b33 authored by Christian König

dma-buf: wait for map to complete for static attachments

We have previously done that in the individual drivers but it is
more defensive to move that into the common code.

Dynamic attachments should wait for map operations to complete by themselves.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20220407085946.744568-12-christian.koenig@amd.com
Parent 0cc848a7
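
For illustration only (not part of the patch): the sketch below shows what a static, non-dynamic importer looks like from the API side. After this change the sg_table returned by dma_buf_map_attachment() is only handed back once the exporter's kernel fences (for example a pending move or clear of the backing storage) have signalled, so such an importer needs no extra synchronization of its own. The function name and the device pointer are hypothetical.

#include <linux/dma-buf.h>
#include <linux/dma-direction.h>
#include <linux/err.h>

/* Sketch of a static (non-dynamic) importer.  After this patch the common
 * DMA-buf code waits for DMA_RESV_USAGE_KERNEL fences inside the map call,
 * so nothing extra is needed here before touching the buffer. */
static struct sg_table *my_static_map(struct dma_buf *dmabuf,
				      struct device *dev)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;

	attach = dma_buf_attach(dmabuf, dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt))
		dma_buf_detach(dmabuf, attach);

	return sgt;
}
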
@@ -661,12 +661,24 @@ static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach,
 				       enum dma_data_direction direction)
 {
 	struct sg_table *sg_table;
+	signed long ret;
 
 	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
+	if (IS_ERR_OR_NULL(sg_table))
+		return sg_table;
 
-	if (!IS_ERR_OR_NULL(sg_table))
-		mangle_sg_table(sg_table);
+	if (!dma_buf_attachment_is_dynamic(attach)) {
+		ret = dma_resv_wait_timeout(attach->dmabuf->resv,
+					    DMA_RESV_USAGE_KERNEL, true,
+					    MAX_SCHEDULE_TIMEOUT);
+		if (ret < 0) {
+			attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
+							   direction);
+			return ERR_PTR(ret);
+		}
+	}
 
+	mangle_sg_table(sg_table);
 	return sg_table;
 }
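
By contrast, a dynamic importer is expected to do the equivalent wait itself, or turn the kernel fences into dependencies of its own command submission. A rough sketch under that assumption follows; my_move_notify(), my_importer_ops and my_dynamic_map() are hypothetical names, not kernel API.

#include <linux/dma-buf.h>
#include <linux/dma-direction.h>
#include <linux/dma-resv.h>
#include <linux/err.h>
#include <linux/sched.h>

/* Hypothetical move_notify callback; a real importer would invalidate its
 * mappings of the buffer here. */
static void my_move_notify(struct dma_buf_attachment *attach)
{
}

static const struct dma_buf_attach_ops my_importer_ops = {
	.move_notify = my_move_notify,
};

/* Sketch: a dynamic importer maps under the reservation lock and then waits
 * for the exporter's kernel fences itself, since __map_dma_buf() no longer
 * does that for dynamic attachments. */
static struct sg_table *my_dynamic_map(struct dma_buf *dmabuf,
				       struct device *dev)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	long ret;

	attach = dma_buf_dynamic_attach(dmabuf, dev, &my_importer_ops, NULL);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	dma_resv_lock(dmabuf->resv, NULL);
	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (!IS_ERR_OR_NULL(sgt)) {
		/* Block here, or instead make these fences dependencies of
		 * the importer's own command submission. */
		ret = dma_resv_wait_timeout(dmabuf->resv,
					    DMA_RESV_USAGE_KERNEL, true,
					    MAX_SCHEDULE_TIMEOUT);
		if (ret < 0) {
			dma_buf_unmap_attachment(attach, sgt,
						 DMA_BIDIRECTIONAL);
			sgt = ERR_PTR(ret);
		}
	}
	dma_resv_unlock(dmabuf->resv);

	if (IS_ERR_OR_NULL(sgt))
		dma_buf_detach(dmabuf, attach);
	return sgt;
}
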
@@ -102,21 +102,9 @@ static int amdgpu_dma_buf_pin(struct dma_buf_attachment *attach)
 {
 	struct drm_gem_object *obj = attach->dmabuf->priv;
 	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
-	int r;
 
 	/* pin buffer into GTT */
-	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
-	if (r)
-		return r;
-
-	if (bo->tbo.moving) {
-		r = dma_fence_wait(bo->tbo.moving, true);
-		if (r) {
-			amdgpu_bo_unpin(bo);
-			return r;
-		}
-	}
-	return 0;
+	return amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
 }
 
 /**
@@ -93,22 +93,7 @@ int nouveau_gem_prime_pin(struct drm_gem_object *obj)
 	if (ret)
 		return -EINVAL;
 
-	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
-	if (ret)
-		goto error;
-
-	if (nvbo->bo.moving)
-		ret = dma_fence_wait(nvbo->bo.moving, true);
-
-	ttm_bo_unreserve(&nvbo->bo);
-	if (ret)
-		goto error;
-
-	return ret;
-
-error:
-	nouveau_bo_unpin(nvbo);
-	return ret;
+	return 0;
 }
 
 void nouveau_gem_prime_unpin(struct drm_gem_object *obj)
@@ -77,19 +77,9 @@ int radeon_gem_prime_pin(struct drm_gem_object *obj)
 	/* pin buffer into GTT */
 	ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL);
-	if (unlikely(ret))
-		goto error;
-
-	if (bo->tbo.moving) {
-		ret = dma_fence_wait(bo->tbo.moving, false);
-		if (unlikely(ret)) {
-			radeon_bo_unpin(bo);
-			goto error;
-		}
-	}
-
-	bo->prime_shared_count++;
+	if (likely(ret == 0))
+		bo->prime_shared_count++;
 
-error:
 	radeon_bo_unreserve(bo);
 	return ret;
 }
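
The per-driver dma_fence_wait(bo->tbo.moving, ...) calls removed above become redundant because the fence for a pending move or clear is already published in the buffer's reservation object as a kernel-internal fence, which the new dma_resv_wait_timeout() in __map_dma_buf() picks up. Below is a hedged, exporter-side sketch of that publication, for illustration only; example_publish_move_fence() is a hypothetical helper, not part of this patch.

#include <linux/dma-fence.h>
#include <linux/dma-resv.h>

/* Sketch: whoever moves or clears the backing storage records the fence in
 * the dma_resv object with DMA_RESV_USAGE_KERNEL; the common wait added by
 * this patch then serializes static importers against it. */
static int example_publish_move_fence(struct dma_resv *resv,
				      struct dma_fence *move_fence)
{
	int ret;

	dma_resv_assert_held(resv);

	/* A fence slot must be reserved before a fence can be added. */
	ret = dma_resv_reserve_fences(resv, 1);
	if (ret)
		return ret;

	dma_resv_add_fence(resv, move_fence, DMA_RESV_USAGE_KERNEL);
	return 0;
}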