Commit f237750f authored by Christian König, committed by Dave Airlie

drm/radeon: remove r600 blit mutex v2

If we don't store local data in global variables,
it isn't necessary to lock anything.

v2: rebased on new SA interface
Signed-off-by: Christian König <deathsimple@vodafone.de>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Parent 68470ae7
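Below is a minimal, standalone C sketch of the pattern this change applies. It is not the driver code itself; all names here (blit_state, vb_handle, blit_prepare_copy, blit_copy, blit_done_copy) are hypothetical. The point it illustrates matches the commit: per-copy state that previously lived in the shared, mutex-guarded rdev->r600_blit struct is instead allocated per call and threaded through the helpers as a parameter, so concurrent copies no longer need a lock.

#include <stdio.h>
#include <stdlib.h>

struct vb_handle {                /* stands in for struct radeon_sa_bo */
	float *cpu_addr;
	unsigned long gpu_addr;
};

struct blit_state {               /* shared state: holds no per-copy data, so no mutex */
	unsigned ring_size_common;
};

static int blit_prepare_copy(struct blit_state *blit, unsigned pages,
			     struct vb_handle **vb)
{
	(void)blit;
	*vb = malloc(sizeof(**vb));               /* per-call allocation, like radeon_sa_bo_new() */
	if (!*vb)
		return -1;
	(*vb)->cpu_addr = calloc(pages * 12, sizeof(float));
	if (!(*vb)->cpu_addr) {
		free(*vb);
		return -1;
	}
	(*vb)->gpu_addr = 0x1000;                 /* fake GPU address for the sketch */
	return 0;
}

static void blit_copy(struct blit_state *blit, unsigned pages,
		      struct vb_handle *vb)
{
	(void)blit;
	/* write vertex data through the handle that was passed in, not through shared state */
	for (unsigned i = 0; i < pages * 12; i++)
		vb->cpu_addr[i] = (float)i;
}

static void blit_done_copy(struct blit_state *blit, struct vb_handle *vb)
{
	(void)blit;
	free(vb->cpu_addr);                       /* analogous to radeon_sa_bo_free() */
	free(vb);
}

int main(void)
{
	struct blit_state blit = { .ring_size_common = 40 };
	struct vb_handle *vb = NULL;

	/* mirrors the new r600_copy_blit() flow: no mutex_lock()/mutex_unlock() around the sequence */
	if (blit_prepare_copy(&blit, 4, &vb))
		return 1;
	blit_copy(&blit, 4, vb);
	blit_done_copy(&blit, vb);
	printf("copy finished without taking any lock\n");
	return 0;
}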
@@ -637,7 +637,6 @@ int evergreen_blit_init(struct radeon_device *rdev)
if (rdev->r600_blit.shader_obj)
goto done;
mutex_init(&rdev->r600_blit.mutex);
rdev->r600_blit.state_offset = 0;
if (rdev->family < CHIP_CAYMAN)
......
@@ -2363,20 +2363,15 @@ int r600_copy_blit(struct radeon_device *rdev,
unsigned num_gpu_pages,
struct radeon_fence *fence)
{
struct radeon_sa_bo *vb = NULL;
int r;
mutex_lock(&rdev->r600_blit.mutex);
rdev->r600_blit.vb_ib = NULL;
r = r600_blit_prepare_copy(rdev, num_gpu_pages);
r = r600_blit_prepare_copy(rdev, num_gpu_pages, &vb);
if (r) {
if (rdev->r600_blit.vb_ib)
radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
mutex_unlock(&rdev->r600_blit.mutex);
return r;
}
r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages);
r600_blit_done_copy(rdev, fence);
mutex_unlock(&rdev->r600_blit.mutex);
r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages, vb);
r600_blit_done_copy(rdev, fence, vb);
return 0;
}
......
@@ -513,7 +513,6 @@ int r600_blit_init(struct radeon_device *rdev)
rdev->r600_blit.primitives.set_default_state = set_default_state;
rdev->r600_blit.ring_size_common = 40; /* shaders + def state */
rdev->r600_blit.ring_size_common += 16; /* fence emit for VB IB */
rdev->r600_blit.ring_size_common += 5; /* done copy */
rdev->r600_blit.ring_size_common += 16; /* fence emit for done copy */
@@ -528,7 +527,6 @@ int r600_blit_init(struct radeon_device *rdev)
if (rdev->r600_blit.shader_obj)
goto done;
mutex_init(&rdev->r600_blit.mutex);
rdev->r600_blit.state_offset = 0;
if (rdev->family >= CHIP_RV770)
@@ -621,27 +619,6 @@ void r600_blit_fini(struct radeon_device *rdev)
radeon_bo_unref(&rdev->r600_blit.shader_obj);
}
static int r600_vb_ib_get(struct radeon_device *rdev, unsigned size)
{
int r;
r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX,
&rdev->r600_blit.vb_ib, size);
if (r) {
DRM_ERROR("failed to get IB for vertex buffer\n");
return r;
}
rdev->r600_blit.vb_total = size;
rdev->r600_blit.vb_used = 0;
return 0;
}
static void r600_vb_ib_put(struct radeon_device *rdev)
{
radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence);
radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
}
static unsigned r600_blit_create_rect(unsigned num_gpu_pages,
int *width, int *height, int max_dim)
{
@@ -688,7 +665,8 @@ static unsigned r600_blit_create_rect(unsigned num_gpu_pages,
}
int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages)
int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages,
struct radeon_sa_bo **vb)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
@@ -705,46 +683,54 @@ int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages)
}
/* 48 bytes for vertex per loop */
r = r600_vb_ib_get(rdev, (num_loops*48)+256);
if (r)
r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, vb,
(num_loops*48)+256, 256, true);
if (r) {
return r;
}
/* calculate number of loops correctly */
ring_size = num_loops * dwords_per_loop;
ring_size += rdev->r600_blit.ring_size_common;
r = radeon_ring_lock(rdev, ring, ring_size);
if (r)
if (r) {
radeon_sa_bo_free(rdev, vb, NULL);
return r;
}
rdev->r600_blit.primitives.set_default_state(rdev);
rdev->r600_blit.primitives.set_shaders(rdev);
return 0;
}
void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence)
void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence,
struct radeon_sa_bo *vb)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
if (rdev->r600_blit.vb_ib)
r600_vb_ib_put(rdev);
if (fence)
r = radeon_fence_emit(rdev, fence);
r = radeon_fence_emit(rdev, fence);
if (r) {
radeon_ring_unlock_undo(rdev, ring);
return;
}
radeon_ring_unlock_commit(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
radeon_ring_unlock_commit(rdev, ring);
radeon_sa_bo_free(rdev, &vb, fence);
}
void r600_kms_blit_copy(struct radeon_device *rdev,
u64 src_gpu_addr, u64 dst_gpu_addr,
unsigned num_gpu_pages)
unsigned num_gpu_pages,
struct radeon_sa_bo *vb)
{
u64 vb_gpu_addr;
u32 *vb;
u32 *vb_cpu_addr;
DRM_DEBUG("emitting copy %16llx %16llx %d %d\n",
src_gpu_addr, dst_gpu_addr,
num_gpu_pages, rdev->r600_blit.vb_used);
vb = (u32 *)(rdev->r600_blit.vb_ib->ptr + rdev->r600_blit.vb_used);
DRM_DEBUG("emitting copy %16llx %16llx %d\n",
src_gpu_addr, dst_gpu_addr, num_gpu_pages);
vb_cpu_addr = (u32 *)radeon_sa_bo_cpu_addr(vb);
vb_gpu_addr = radeon_sa_bo_gpu_addr(vb);
while (num_gpu_pages) {
int w, h;
@@ -756,39 +742,34 @@ void r600_kms_blit_copy(struct radeon_device *rdev,
size_in_bytes = pages_per_loop * RADEON_GPU_PAGE_SIZE;
DRM_DEBUG("rectangle w=%d h=%d\n", w, h);
if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) {
WARN_ON(1);
}
vb[0] = 0;
vb[1] = 0;
vb[2] = 0;
vb[3] = 0;
vb_cpu_addr[0] = 0;
vb_cpu_addr[1] = 0;
vb_cpu_addr[2] = 0;
vb_cpu_addr[3] = 0;
vb[4] = 0;
vb[5] = i2f(h);
vb[6] = 0;
vb[7] = i2f(h);
vb_cpu_addr[4] = 0;
vb_cpu_addr[5] = i2f(h);
vb_cpu_addr[6] = 0;
vb_cpu_addr[7] = i2f(h);
vb[8] = i2f(w);
vb[9] = i2f(h);
vb[10] = i2f(w);
vb[11] = i2f(h);
vb_cpu_addr[8] = i2f(w);
vb_cpu_addr[9] = i2f(h);
vb_cpu_addr[10] = i2f(w);
vb_cpu_addr[11] = i2f(h);
rdev->r600_blit.primitives.set_tex_resource(rdev, FMT_8_8_8_8,
w, h, w, src_gpu_addr, size_in_bytes);
rdev->r600_blit.primitives.set_render_target(rdev, COLOR_8_8_8_8,
w, h, dst_gpu_addr);
rdev->r600_blit.primitives.set_scissors(rdev, 0, 0, w, h);
vb_gpu_addr = rdev->r600_blit.vb_ib->gpu_addr + rdev->r600_blit.vb_used;
rdev->r600_blit.primitives.set_vtx_resource(rdev, vb_gpu_addr);
rdev->r600_blit.primitives.draw_auto(rdev);
rdev->r600_blit.primitives.cp_set_surface_sync(rdev,
PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA,
size_in_bytes, dst_gpu_addr);
vb += 12;
rdev->r600_blit.vb_used += 4*12;
vb_cpu_addr += 12;
vb_gpu_addr += 4*12;
src_gpu_addr += size_in_bytes;
dst_gpu_addr += size_in_bytes;
num_gpu_pages -= pages_per_loop;
......
@@ -743,7 +743,6 @@ struct r600_blit_cp_primitives {
};
struct r600_blit {
struct mutex mutex;
struct radeon_bo *shader_obj;
struct r600_blit_cp_primitives primitives;
int max_dim;
@@ -753,8 +752,6 @@ struct r600_blit {
u32 vs_offset, ps_offset;
u32 state_offset;
u32 state_len;
u32 vb_used, vb_total;
struct radeon_ib *vb_ib;
};
void r600_blit_suspend(struct radeon_device *rdev);
......
@@ -368,11 +368,14 @@ void r600_hdmi_init(struct drm_encoder *encoder);
int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
/* r600 blit */
int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages);
void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence);
int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages,
struct radeon_sa_bo **vb);
void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence,
struct radeon_sa_bo *vb);
void r600_kms_blit_copy(struct radeon_device *rdev,
u64 src_gpu_addr, u64 dst_gpu_addr,
unsigned num_gpu_pages);
unsigned num_gpu_pages,
struct radeon_sa_bo *vb);
int r600_mc_wait_for_idle(struct radeon_device *rdev);
/*
......