Commit 10dd74ea authored by James Zhu, committed by Alex Deucher

drm/amdgpu/vg20: Restruct uvd.inst to support multiple instances

Vega20 has dual UVD engines, so uvd needs multiple-instance support.
Restructure uvd.inst, using uvd.inst[0] in place of uvd.inst->.
Repurpose amdgpu_ring::me as the instance index and initialize it to 0.
There are no logic changes here.
Signed-off-by: James Zhu <James.Zhu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Parent 2bb795f5
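Before the diff, a note on the pattern the patch introduces: a single implicit instance (uvd.inst->...) becomes an array of per-instance state, and shared helpers select the instance through the ring's me field. Below is a minimal, self-contained C sketch of that indexing idea; the struct and field names are simplified stand-ins chosen for illustration, not the real amdgpu definitions.

#include <stdio.h>

#define MAX_UVD_INSTANCES 2

struct demo_uvd_inst {			/* stand-in for the per-instance UVD state */
	unsigned long gpu_addr;
	int ring_id;
};

struct demo_ring {			/* stand-in for struct amdgpu_ring */
	unsigned int me;		/* instance index; reset to 0 on ring teardown */
};

struct demo_uvd {
	struct demo_uvd_inst inst[MAX_UVD_INSTANCES];
	unsigned int num_uvd_inst;
};

int main(void)
{
	struct demo_uvd uvd = { .num_uvd_inst = 2 };
	struct demo_ring ring = { .me = 1 };	/* this ring belongs to instance 1 */

	/* Shared helpers no longer hard-code instance 0 (uvd.inst->...);
	 * they index with the ring's instance number instead. */
	struct demo_uvd_inst *inst = &uvd.inst[ring.me];

	inst->ring_id = 42;
	printf("instance %u, ring_id %d\n", ring.me, uvd.inst[1].ring_id);
	return 0;
}

The hunks that follow apply exactly this substitution throughout the fence, KMS, ring, and UVD helpers.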
@@ -376,14 +376,14 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
 	struct amdgpu_device *adev = ring->adev;
 	uint64_t index;
 
-	if (ring != &adev->uvd.inst->ring) {
+	if (ring != &adev->uvd.inst[ring->me].ring) {
 		ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
 		ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
 	} else {
 		/* put fence directly behind firmware */
 		index = ALIGN(adev->uvd.fw->size, 8);
-		ring->fence_drv.cpu_addr = adev->uvd.inst->cpu_addr + index;
-		ring->fence_drv.gpu_addr = adev->uvd.inst->gpu_addr + index;
+		ring->fence_drv.cpu_addr = adev->uvd.inst[ring->me].cpu_addr + index;
+		ring->fence_drv.gpu_addr = adev->uvd.inst[ring->me].gpu_addr + index;
 	}
 	amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));
 	amdgpu_irq_get(adev, irq_src, irq_type);
...
@@ -286,7 +286,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 	struct drm_crtc *crtc;
 	uint32_t ui32 = 0;
 	uint64_t ui64 = 0;
-	int i, found;
+	int i, j, found;
 	int ui32_size = sizeof(ui32);
 
 	if (!info->return_size || !info->return_pointer)
@@ -348,7 +348,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		break;
 	case AMDGPU_HW_IP_UVD:
 		type = AMD_IP_BLOCK_TYPE_UVD;
-		ring_mask = adev->uvd.inst->ring.ready ? 1 : 0;
+		for (i = 0; i < adev->uvd.num_uvd_inst; i++)
+			ring_mask |= ((adev->uvd.inst[i].ring.ready ? 1 : 0) << i);
 		ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
 		ib_size_alignment = 16;
 		break;
@@ -361,8 +362,11 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		break;
 	case AMDGPU_HW_IP_UVD_ENC:
 		type = AMD_IP_BLOCK_TYPE_UVD;
-		for (i = 0; i < adev->uvd.num_enc_rings; i++)
-			ring_mask |= ((adev->uvd.inst->ring_enc[i].ready ? 1 : 0) << i);
+		for (i = 0; i < adev->uvd.num_uvd_inst; i++)
+			for (j = 0; j < adev->uvd.num_enc_rings; j++)
+				ring_mask |=
+				((adev->uvd.inst[i].ring_enc[j].ready ? 1 : 0) <<
+				(j + i * adev->uvd.num_enc_rings));
 		ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
 		ib_size_alignment = 1;
 		break;
...
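The UVD_ENC hunk above packs the ready bits of every instance into one mask, with ring j of instance i landing at bit (j + i * num_enc_rings). A standalone C sketch of that bit layout follows; the instance and ring counts are assumptions picked for illustration, not values queried from hardware.

#include <stdbool.h>
#include <stdio.h>

#define NUM_UVD_INST  2		/* e.g. a dual-UVD part such as Vega20 */
#define NUM_ENC_RINGS 2

int main(void)
{
	bool ready[NUM_UVD_INST][NUM_ENC_RINGS] = {
		{ true, true },		/* instance 0: both enc rings ready */
		{ true, false },	/* instance 1: only enc ring 0 ready */
	};
	unsigned int ring_mask = 0;
	int i, j;

	/* Same bit placement as the loop in the hunk above. */
	for (i = 0; i < NUM_UVD_INST; i++)
		for (j = 0; j < NUM_ENC_RINGS; j++)
			ring_mask |= (ready[i][j] ? 1 : 0) << (j + i * NUM_ENC_RINGS);

	printf("ring_mask = 0x%x\n", ring_mask);	/* prints 0x7: bits 0, 1, 2 set */
	return 0;
}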
@@ -362,6 +362,7 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
 	dma_fence_put(ring->vmid_wait);
 	ring->vmid_wait = NULL;
+	ring->me = 0;
 
 	ring->adev->rings[ring->idx] = NULL;
 }
...
@@ -127,7 +127,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 	const char *fw_name;
 	const struct common_firmware_header *hdr;
 	unsigned version_major, version_minor, family_id;
-	int i, r;
+	int i, j, r;
 
 	INIT_DELAYED_WORK(&adev->uvd.inst->idle_work, amdgpu_uvd_idle_work_handler);
@@ -236,28 +236,30 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
 		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
-	r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
-				    AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.inst->vcpu_bo,
-				    &adev->uvd.inst->gpu_addr, &adev->uvd.inst->cpu_addr);
-	if (r) {
-		dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
-		return r;
-	}
-
-	ring = &adev->uvd.inst->ring;
-	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-	r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst->entity,
-				  rq, NULL);
-	if (r != 0) {
-		DRM_ERROR("Failed setting up UVD run queue.\n");
-		return r;
-	}
-
-	for (i = 0; i < adev->uvd.max_handles; ++i) {
-		atomic_set(&adev->uvd.inst->handles[i], 0);
-		adev->uvd.inst->filp[i] = NULL;
-	}
+	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
+		r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
+					    AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.inst[j].vcpu_bo,
+					    &adev->uvd.inst[j].gpu_addr, &adev->uvd.inst[j].cpu_addr);
+		if (r) {
+			dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
+			return r;
+		}
+
+		ring = &adev->uvd.inst[j].ring;
+		rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+		r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst[j].entity,
+					  rq, NULL);
+		if (r != 0) {
+			DRM_ERROR("Failed setting up UVD(%d) run queue.\n", j);
+			return r;
+		}
+
+		for (i = 0; i < adev->uvd.max_handles; ++i) {
+			atomic_set(&adev->uvd.inst[j].handles[i], 0);
+			adev->uvd.inst[j].filp[i] = NULL;
+		}
+	}
 
 	/* from uvd v5.0 HW addressing capacity increased to 64 bits */
 	if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0))
 		adev->uvd.address_64_bit = true;
@@ -284,20 +286,22 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 {
-	int i;
+	int i, j;
 
-	kfree(adev->uvd.inst->saved_bo);
+	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
+		kfree(adev->uvd.inst[j].saved_bo);
 
-	drm_sched_entity_fini(&adev->uvd.inst->ring.sched, &adev->uvd.inst->entity);
+		drm_sched_entity_fini(&adev->uvd.inst[j].ring.sched, &adev->uvd.inst[j].entity);
 
-	amdgpu_bo_free_kernel(&adev->uvd.inst->vcpu_bo,
-			      &adev->uvd.inst->gpu_addr,
-			      (void **)&adev->uvd.inst->cpu_addr);
+		amdgpu_bo_free_kernel(&adev->uvd.inst[j].vcpu_bo,
+				      &adev->uvd.inst[j].gpu_addr,
+				      (void **)&adev->uvd.inst[j].cpu_addr);
 
-	amdgpu_ring_fini(&adev->uvd.inst->ring);
+		amdgpu_ring_fini(&adev->uvd.inst[j].ring);
 
-	for (i = 0; i < AMDGPU_MAX_UVD_ENC_RINGS; ++i)
-		amdgpu_ring_fini(&adev->uvd.inst->ring_enc[i]);
+		for (i = 0; i < AMDGPU_MAX_UVD_ENC_RINGS; ++i)
+			amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
+	}
 
 	release_firmware(adev->uvd.fw);
 
 	return 0;
@@ -307,32 +311,33 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
 {
 	unsigned size;
 	void *ptr;
-	int i;
-
-	if (adev->uvd.inst->vcpu_bo == NULL)
-		return 0;
-
-	cancel_delayed_work_sync(&adev->uvd.inst->idle_work);
-
-	/* only valid for physical mode */
-	if (adev->asic_type < CHIP_POLARIS10) {
-		for (i = 0; i < adev->uvd.max_handles; ++i)
-			if (atomic_read(&adev->uvd.inst->handles[i]))
-				break;
-
-		if (i == adev->uvd.max_handles)
-			return 0;
-	}
-
-	size = amdgpu_bo_size(adev->uvd.inst->vcpu_bo);
-	ptr = adev->uvd.inst->cpu_addr;
-
-	adev->uvd.inst->saved_bo = kmalloc(size, GFP_KERNEL);
-	if (!adev->uvd.inst->saved_bo)
-		return -ENOMEM;
-
-	memcpy_fromio(adev->uvd.inst->saved_bo, ptr, size);
+	int i, j;
+
+	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
+		if (adev->uvd.inst[j].vcpu_bo == NULL)
+			continue;
+
+		cancel_delayed_work_sync(&adev->uvd.inst[j].idle_work);
+
+		/* only valid for physical mode */
+		if (adev->asic_type < CHIP_POLARIS10) {
+			for (i = 0; i < adev->uvd.max_handles; ++i)
+				if (atomic_read(&adev->uvd.inst[j].handles[i]))
+					break;
+
+			if (i == adev->uvd.max_handles)
+				continue;
+		}
+
+		size = amdgpu_bo_size(adev->uvd.inst[j].vcpu_bo);
+		ptr = adev->uvd.inst[j].cpu_addr;
+
+		adev->uvd.inst[j].saved_bo = kmalloc(size, GFP_KERNEL);
+		if (!adev->uvd.inst[j].saved_bo)
+			return -ENOMEM;
+
+		memcpy_fromio(adev->uvd.inst[j].saved_bo, ptr, size);
+	}
 	return 0;
 }
@@ -340,59 +345,65 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
 {
 	unsigned size;
 	void *ptr;
-
-	if (adev->uvd.inst->vcpu_bo == NULL)
-		return -EINVAL;
-
-	size = amdgpu_bo_size(adev->uvd.inst->vcpu_bo);
-	ptr = adev->uvd.inst->cpu_addr;
-
-	if (adev->uvd.inst->saved_bo != NULL) {
-		memcpy_toio(ptr, adev->uvd.inst->saved_bo, size);
-		kfree(adev->uvd.inst->saved_bo);
-		adev->uvd.inst->saved_bo = NULL;
-	} else {
-		const struct common_firmware_header *hdr;
-		unsigned offset;
-
-		hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
-		if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
-			offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
-			memcpy_toio(adev->uvd.inst->cpu_addr, adev->uvd.fw->data + offset,
-				    le32_to_cpu(hdr->ucode_size_bytes));
-			size -= le32_to_cpu(hdr->ucode_size_bytes);
-			ptr += le32_to_cpu(hdr->ucode_size_bytes);
-		}
-		memset_io(ptr, 0, size);
-		/* to restore uvd fence seq */
-		amdgpu_fence_driver_force_completion(&adev->uvd.inst->ring);
-	}
-
+	int i;
+
+	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+		if (adev->uvd.inst[i].vcpu_bo == NULL)
+			return -EINVAL;
+
+		size = amdgpu_bo_size(adev->uvd.inst[i].vcpu_bo);
+		ptr = adev->uvd.inst[i].cpu_addr;
+
+		if (adev->uvd.inst[i].saved_bo != NULL) {
+			memcpy_toio(ptr, adev->uvd.inst[i].saved_bo, size);
+			kfree(adev->uvd.inst[i].saved_bo);
+			adev->uvd.inst[i].saved_bo = NULL;
+		} else {
+			const struct common_firmware_header *hdr;
+			unsigned offset;
+
+			hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
+			if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
+				offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
+				memcpy_toio(adev->uvd.inst[i].cpu_addr, adev->uvd.fw->data + offset,
+					    le32_to_cpu(hdr->ucode_size_bytes));
+				size -= le32_to_cpu(hdr->ucode_size_bytes);
+				ptr += le32_to_cpu(hdr->ucode_size_bytes);
+			}
+			memset_io(ptr, 0, size);
+			/* to restore uvd fence seq */
+			amdgpu_fence_driver_force_completion(&adev->uvd.inst[i].ring);
+		}
+	}
 	return 0;
 }
 
 void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
 {
-	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
-	int i, r;
-
-	for (i = 0; i < adev->uvd.max_handles; ++i) {
-		uint32_t handle = atomic_read(&adev->uvd.inst->handles[i]);
-		if (handle != 0 && adev->uvd.inst->filp[i] == filp) {
-			struct dma_fence *fence;
-
-			r = amdgpu_uvd_get_destroy_msg(ring, handle,
-						       false, &fence);
-			if (r) {
-				DRM_ERROR("Error destroying UVD (%d)!\n", r);
-				continue;
-			}
-
-			dma_fence_wait(fence, false);
-			dma_fence_put(fence);
-
-			adev->uvd.inst->filp[i] = NULL;
-			atomic_set(&adev->uvd.inst->handles[i], 0);
-		}
-	}
+	struct amdgpu_ring *ring;
+	int i, j, r;
+
+	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
+		ring = &adev->uvd.inst[j].ring;
+
+		for (i = 0; i < adev->uvd.max_handles; ++i) {
+			uint32_t handle = atomic_read(&adev->uvd.inst[j].handles[i]);
+			if (handle != 0 && adev->uvd.inst[j].filp[i] == filp) {
+				struct dma_fence *fence;
+
+				r = amdgpu_uvd_get_destroy_msg(ring, handle,
+							       false, &fence);
+				if (r) {
+					DRM_ERROR("Error destroying UVD(%d) %d!\n", j, r);
+					continue;
+				}
+
+				dma_fence_wait(fence, false);
+				dma_fence_put(fence);
+
+				adev->uvd.inst[j].filp[i] = NULL;
+				atomic_set(&adev->uvd.inst[j].handles[i], 0);
+			}
+		}
+	}
 }
@@ -667,15 +678,16 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
 	void *ptr;
 	long r;
 	int i;
+	uint32_t ip_instance = ctx->parser->job->ring->me;
 
 	if (offset & 0x3F) {
-		DRM_ERROR("UVD messages must be 64 byte aligned!\n");
+		DRM_ERROR("UVD(%d) messages must be 64 byte aligned!\n", ip_instance);
 		return -EINVAL;
 	}
 
 	r = amdgpu_bo_kmap(bo, &ptr);
 	if (r) {
-		DRM_ERROR("Failed mapping the UVD message (%ld)!\n", r);
+		DRM_ERROR("Failed mapping the UVD(%d) message (%ld)!\n", ip_instance, r);
 		return r;
 	}
@@ -685,7 +697,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
 	handle = msg[2];
 
 	if (handle == 0) {
-		DRM_ERROR("Invalid UVD handle!\n");
+		DRM_ERROR("Invalid UVD(%d) handle!\n", ip_instance);
 		return -EINVAL;
 	}
@@ -696,18 +708,18 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
 
 		/* try to alloc a new handle */
 		for (i = 0; i < adev->uvd.max_handles; ++i) {
-			if (atomic_read(&adev->uvd.inst->handles[i]) == handle) {
-				DRM_ERROR("Handle 0x%x already in use!\n", handle);
+			if (atomic_read(&adev->uvd.inst[ip_instance].handles[i]) == handle) {
+				DRM_ERROR("(%d)Handle 0x%x already in use!\n", ip_instance, handle);
 				return -EINVAL;
 			}
 
-			if (!atomic_cmpxchg(&adev->uvd.inst->handles[i], 0, handle)) {
-				adev->uvd.inst->filp[i] = ctx->parser->filp;
+			if (!atomic_cmpxchg(&adev->uvd.inst[ip_instance].handles[i], 0, handle)) {
+				adev->uvd.inst[ip_instance].filp[i] = ctx->parser->filp;
 				return 0;
 			}
 		}
 
-		DRM_ERROR("No more free UVD handles!\n");
+		DRM_ERROR("No more free UVD(%d) handles!\n", ip_instance);
 		return -ENOSPC;
 
 	case 1:
@@ -719,27 +731,27 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
 
 		/* validate the handle */
 		for (i = 0; i < adev->uvd.max_handles; ++i) {
-			if (atomic_read(&adev->uvd.inst->handles[i]) == handle) {
-				if (adev->uvd.inst->filp[i] != ctx->parser->filp) {
-					DRM_ERROR("UVD handle collision detected!\n");
+			if (atomic_read(&adev->uvd.inst[ip_instance].handles[i]) == handle) {
+				if (adev->uvd.inst[ip_instance].filp[i] != ctx->parser->filp) {
+					DRM_ERROR("UVD(%d) handle collision detected!\n", ip_instance);
 					return -EINVAL;
 				}
 				return 0;
 			}
 		}
 
-		DRM_ERROR("Invalid UVD handle 0x%x!\n", handle);
+		DRM_ERROR("Invalid UVD(%d) handle 0x%x!\n", ip_instance, handle);
 		return -ENOENT;
 
 	case 2:
 		/* it's a destroy msg, free the handle */
 		for (i = 0; i < adev->uvd.max_handles; ++i)
-			atomic_cmpxchg(&adev->uvd.inst->handles[i], handle, 0);
+			atomic_cmpxchg(&adev->uvd.inst[ip_instance].handles[i], handle, 0);
 		amdgpu_bo_kunmap(bo);
 		return 0;
 
 	default:
-		DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
+		DRM_ERROR("Illegal UVD(%d) message type (%d)!\n", ip_instance, msg_type);
 		return -EINVAL;
 	}
 	BUG();
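The hunks above keep a separate handles[]/filp[] table per UVD instance and claim a free slot with a compare-and-swap from 0, so handle numbers on different instances never collide in the bookkeeping. The following self-contained user-space C sketch models that pattern with C11 atomics; the table sizes, the claim_handle() helper, and the owner type are illustrative assumptions, not amdgpu code.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_INST    2
#define MAX_HANDLES 4

static atomic_uint handles[NUM_INST][MAX_HANDLES];	/* 0 means "slot free" */
static const void *filp_owner[NUM_INST][MAX_HANDLES];	/* who created the handle */

/* Returns 0 on success, -1 if the handle already exists or the table is full. */
static int claim_handle(int inst, uint32_t handle, const void *filp)
{
	for (int i = 0; i < MAX_HANDLES; ++i) {
		if (atomic_load(&handles[inst][i]) == handle)
			return -1;				/* already in use */

		unsigned int expected = 0;
		if (atomic_compare_exchange_strong(&handles[inst][i],
						   &expected, handle)) {
			filp_owner[inst][i] = filp;		/* record the owner */
			return 0;
		}
	}
	return -1;						/* no free slot */
}

int main(void)
{
	int ctx = 1234;	/* stands in for the submitting file/context */

	printf("claim 0x10 on inst 0: %d\n", claim_handle(0, 0x10, &ctx));	/* 0  */
	printf("re-claim on inst 0:   %d\n", claim_handle(0, 0x10, &ctx));	/* -1 */
	printf("claim 0x10 on inst 1: %d\n", claim_handle(1, 0x10, &ctx));	/* 0: independent table */
	return 0;
}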
@@ -1043,7 +1055,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 	if (r)
 		goto err_free;
 
-	r = amdgpu_job_submit(job, ring, &adev->uvd.inst->entity,
+	r = amdgpu_job_submit(job, ring, &adev->uvd.inst[ring->me].entity,
 			      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
 	if (r)
 		goto err_free;
@@ -1189,27 +1201,28 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
 	struct dma_fence *fence;
 	long r;
+	uint32_t ip_instance = ring->me;
 
 	r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
 	if (r) {
-		DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
+		DRM_ERROR("amdgpu: (%d)failed to get create msg (%ld).\n", ip_instance, r);
 		goto error;
 	}
 
 	r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
 	if (r) {
-		DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
+		DRM_ERROR("amdgpu: (%d)failed to get destroy ib (%ld).\n", ip_instance, r);
 		goto error;
 	}
 
 	r = dma_fence_wait_timeout(fence, false, timeout);
 	if (r == 0) {
-		DRM_ERROR("amdgpu: IB test timed out.\n");
+		DRM_ERROR("amdgpu: (%d)IB test timed out.\n", ip_instance);
 		r = -ETIMEDOUT;
 	} else if (r < 0) {
-		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
+		DRM_ERROR("amdgpu: (%d)fence wait failed (%ld).\n", ip_instance, r);
 	} else {
-		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+		DRM_DEBUG("ib test on (%d)ring %d succeeded\n", ip_instance, ring->idx);
 		r = 0;
 	}
...