Commit 0d346a14 authored by Christian König, committed by Alex Deucher

drm/amdgpu: use entity instead of ring for CS

Further demangle ring from entity handling.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Parent 8290268f
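The pattern running through this change: command-submission code now tracks a drm_sched_entity instead of an amdgpu_ring, and only derives the ring from the entity's run queue where hardware-specific data is still needed. A minimal sketch of the new lookup flow, using only the helpers that appear in this diff (ctx and chunk_ib stand in for the parser's context and IB chunk; error handling and surrounding code trimmed):

	struct drm_sched_entity *entity;
	struct amdgpu_ring *ring;
	int r;

	/* Resolve the scheduler entity for the requested IP type, instance and ring index. */
	r = amdgpu_ctx_get_entity(ctx, chunk_ib->ip_type,
				  chunk_ib->ip_instance, chunk_ib->ring, &entity);
	if (r)
		return r;

	/* Where ring-specific data is still required, map the entity back to its ring. */
	ring = to_amdgpu_ring(entity->rq->sched);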
@@ -523,7 +523,7 @@ struct amdgpu_cs_parser {
 	/* scheduler job object */
 	struct amdgpu_job	*job;
-	struct amdgpu_ring	*ring;
+	struct drm_sched_entity	*entity;

 	/* buffer objects */
 	struct ww_acquire_ctx	ticket;
...
@@ -893,13 +893,13 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
 static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
				 struct amdgpu_cs_parser *p)
 {
+	struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 	struct amdgpu_vm *vm = &fpriv->vm;
-	struct amdgpu_ring *ring = p->ring;
 	int r;

 	/* Only for UVD/VCE VM emulation */
-	if (p->ring->funcs->parse_cs || p->ring->funcs->patch_cs_in_place) {
+	if (ring->funcs->parse_cs || ring->funcs->patch_cs_in_place) {
 		unsigned i, j;

 		for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) {
@@ -940,7 +940,7 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
 			offset = m->start * AMDGPU_GPU_PAGE_SIZE;
 			kptr += va_start - offset;

-			if (p->ring->funcs->parse_cs) {
+			if (ring->funcs->parse_cs) {
 				memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
 				amdgpu_bo_kunmap(aobj);
@@ -979,14 +979,15 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
 {
 	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
 	struct amdgpu_vm *vm = &fpriv->vm;
-	int i, j;
 	int r, ce_preempt = 0, de_preempt = 0;
+	struct amdgpu_ring *ring;
+	int i, j;

 	for (i = 0, j = 0; i < parser->nchunks && j < parser->job->num_ibs; i++) {
 		struct amdgpu_cs_chunk *chunk;
 		struct amdgpu_ib *ib;
 		struct drm_amdgpu_cs_chunk_ib *chunk_ib;
-		struct amdgpu_ring *ring;
+		struct drm_sched_entity *entity;

 		chunk = &parser->chunks[i];
 		ib = &parser->job->ibs[j];
@@ -1008,9 +1009,9 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
 			return -EINVAL;
 		}

-		r = amdgpu_ctx_get_ring(parser->ctx, chunk_ib->ip_type,
-					chunk_ib->ip_instance, chunk_ib->ring,
-					&ring);
+		r = amdgpu_ctx_get_entity(parser->ctx, chunk_ib->ip_type,
+					  chunk_ib->ip_instance, chunk_ib->ring,
+					  &entity);
 		if (r)
 			return r;
@@ -1018,14 +1019,14 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
 				parser->job->preamble_status |=
 					AMDGPU_PREAMBLE_IB_PRESENT;

-		if (parser->ring && parser->ring != ring)
+		if (parser->entity && parser->entity != entity)
 			return -EINVAL;

-		parser->ring = ring;
+		parser->entity = entity;

-		r = amdgpu_ib_get(adev, vm,
-				  ring->funcs->parse_cs ? chunk_ib->ib_bytes : 0,
-				  ib);
+		ring = to_amdgpu_ring(entity->rq->sched);
+		r = amdgpu_ib_get(adev, vm, ring->funcs->parse_cs ?
+				  chunk_ib->ib_bytes : 0, ib);
 		if (r) {
 			DRM_ERROR("Failed to get ib !\n");
 			return r;
@@ -1039,12 +1040,13 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
 	}

 	/* UVD & VCE fw doesn't support user fences */
+	ring = to_amdgpu_ring(parser->entity->rq->sched);
 	if (parser->job->uf_addr && (
-	    parser->ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
-	    parser->ring->funcs->type == AMDGPU_RING_TYPE_VCE))
+	    ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
+	    ring->funcs->type == AMDGPU_RING_TYPE_VCE))
 		return -EINVAL;

-	return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->ring->idx);
+	return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->entity);
 }

 static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
@@ -1060,23 +1062,23 @@ static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
 		sizeof(struct drm_amdgpu_cs_chunk_dep);

 	for (i = 0; i < num_deps; ++i) {
-		struct amdgpu_ring *ring;
 		struct amdgpu_ctx *ctx;
+		struct drm_sched_entity *entity;
 		struct dma_fence *fence;

 		ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id);
 		if (ctx == NULL)
 			return -EINVAL;

-		r = amdgpu_ctx_get_ring(ctx, deps[i].ip_type,
-					deps[i].ip_instance,
-					deps[i].ring, &ring);
+		r = amdgpu_ctx_get_entity(ctx, deps[i].ip_type,
+					  deps[i].ip_instance,
+					  deps[i].ring, &entity);
 		if (r) {
 			amdgpu_ctx_put(ctx);
 			return r;
 		}

-		fence = amdgpu_ctx_get_fence(ctx, ring,
-					     deps[i].handle);
+		fence = amdgpu_ctx_get_fence(ctx, entity,
+					     deps[i].handle);
 		if (IS_ERR(fence)) {
 			r = PTR_ERR(fence);
@@ -1195,9 +1197,9 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 			    union drm_amdgpu_cs *cs)
 {
 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
-	struct amdgpu_ring *ring = p->ring;
-	struct drm_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
+	struct drm_sched_entity *entity = p->entity;
 	enum drm_sched_priority priority;
+	struct amdgpu_ring *ring;
 	struct amdgpu_bo_list_entry *e;
 	struct amdgpu_job *job;
 	uint64_t seq;
@@ -1227,7 +1229,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 	job->owner = p->filp;
 	p->fence = dma_fence_get(&job->base.s_fence->finished);

-	r = amdgpu_ctx_add_fence(p->ctx, ring, p->fence, &seq);
+	r = amdgpu_ctx_add_fence(p->ctx, entity, p->fence, &seq);
 	if (r) {
 		dma_fence_put(p->fence);
 		dma_fence_put(&job->base.s_fence->finished);
@@ -1332,7 +1334,7 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
 {
 	union drm_amdgpu_wait_cs *wait = data;
 	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
-	struct amdgpu_ring *ring = NULL;
+	struct drm_sched_entity *entity;
 	struct amdgpu_ctx *ctx;
 	struct dma_fence *fence;
 	long r;
@@ -1341,14 +1343,14 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
 	if (ctx == NULL)
 		return -EINVAL;

-	r = amdgpu_ctx_get_ring(ctx, wait->in.ip_type, wait->in.ip_instance,
-				wait->in.ring, &ring);
+	r = amdgpu_ctx_get_entity(ctx, wait->in.ip_type, wait->in.ip_instance,
+				  wait->in.ring, &entity);
 	if (r) {
 		amdgpu_ctx_put(ctx);
 		return r;
 	}

-	fence = amdgpu_ctx_get_fence(ctx, ring, wait->in.handle);
+	fence = amdgpu_ctx_get_fence(ctx, entity, wait->in.handle);
 	if (IS_ERR(fence))
 		r = PTR_ERR(fence);
 	else if (fence) {
@@ -1380,7 +1382,7 @@ static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
 					     struct drm_file *filp,
 					     struct drm_amdgpu_fence *user)
 {
-	struct amdgpu_ring *ring;
+	struct drm_sched_entity *entity;
 	struct amdgpu_ctx *ctx;
 	struct dma_fence *fence;
 	int r;
@@ -1389,14 +1391,14 @@ static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
 	if (ctx == NULL)
 		return ERR_PTR(-EINVAL);

-	r = amdgpu_ctx_get_ring(ctx, user->ip_type, user->ip_instance,
-				user->ring, &ring);
+	r = amdgpu_ctx_get_entity(ctx, user->ip_type, user->ip_instance,
+				  user->ring, &entity);
 	if (r) {
 		amdgpu_ctx_put(ctx);
 		return ERR_PTR(r);
 	}

-	fence = amdgpu_ctx_get_fence(ctx, ring, user->seq_no);
+	fence = amdgpu_ctx_get_fence(ctx, entity, user->seq_no);
 	amdgpu_ctx_put(ctx);

 	return fence;
...
@@ -27,6 +27,9 @@
 #include "amdgpu.h"
 #include "amdgpu_sched.h"

+#define to_amdgpu_ctx_ring(e)	\
+	container_of((e), struct amdgpu_ctx_ring, entity)
+
 static int amdgpu_ctx_priority_permit(struct drm_file *filp,
				      enum drm_sched_priority priority)
 {
@@ -151,12 +154,12 @@ static void amdgpu_ctx_fini(struct kref *ref)
 	kfree(ctx);
 }

-int amdgpu_ctx_get_ring(struct amdgpu_ctx *ctx,
-			u32 hw_ip, u32 instance, u32 ring,
-			struct amdgpu_ring **out_ring)
+int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
+			  u32 ring, struct drm_sched_entity **entity)
 {
 	struct amdgpu_device *adev = ctx->adev;
 	unsigned num_rings = 0;
+	struct amdgpu_ring *out_ring;

 	/* Right now all IPs have only one instance - multiple rings. */
 	if (instance != 0) {
@@ -166,39 +169,39 @@ int amdgpu_ctx_get_ring(struct amdgpu_ctx *ctx,

 	switch (hw_ip) {
 	case AMDGPU_HW_IP_GFX:
-		*out_ring = &adev->gfx.gfx_ring[ring];
+		out_ring = &adev->gfx.gfx_ring[ring];
 		num_rings = adev->gfx.num_gfx_rings;
 		break;
 	case AMDGPU_HW_IP_COMPUTE:
-		*out_ring = &adev->gfx.compute_ring[ring];
+		out_ring = &adev->gfx.compute_ring[ring];
 		num_rings = adev->gfx.num_compute_rings;
 		break;
 	case AMDGPU_HW_IP_DMA:
-		*out_ring = &adev->sdma.instance[ring].ring;
+		out_ring = &adev->sdma.instance[ring].ring;
 		num_rings = adev->sdma.num_instances;
 		break;
 	case AMDGPU_HW_IP_UVD:
-		*out_ring = &adev->uvd.inst[0].ring;
+		out_ring = &adev->uvd.inst[0].ring;
 		num_rings = adev->uvd.num_uvd_inst;
 		break;
 	case AMDGPU_HW_IP_VCE:
-		*out_ring = &adev->vce.ring[ring];
+		out_ring = &adev->vce.ring[ring];
 		num_rings = adev->vce.num_rings;
 		break;
 	case AMDGPU_HW_IP_UVD_ENC:
-		*out_ring = &adev->uvd.inst[0].ring_enc[ring];
+		out_ring = &adev->uvd.inst[0].ring_enc[ring];
 		num_rings = adev->uvd.num_enc_rings;
 		break;
 	case AMDGPU_HW_IP_VCN_DEC:
-		*out_ring = &adev->vcn.ring_dec;
+		out_ring = &adev->vcn.ring_dec;
 		num_rings = 1;
 		break;
 	case AMDGPU_HW_IP_VCN_ENC:
-		*out_ring = &adev->vcn.ring_enc[ring];
+		out_ring = &adev->vcn.ring_enc[ring];
 		num_rings = adev->vcn.num_enc_rings;
 		break;
 	case AMDGPU_HW_IP_VCN_JPEG:
-		*out_ring = &adev->vcn.ring_jpeg;
+		out_ring = &adev->vcn.ring_jpeg;
 		num_rings = 1;
 		break;
 	default:
@@ -209,6 +212,7 @@ int amdgpu_ctx_get_ring(struct amdgpu_ctx *ctx,
 	if (ring > num_rings)
 		return -EINVAL;

+	*entity = &ctx->rings[out_ring->idx].entity;
 	return 0;
 }
@@ -414,13 +418,14 @@ int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
 	return 0;
 }

-int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
-			 struct dma_fence *fence, uint64_t* handler)
+int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
+			 struct drm_sched_entity *entity,
+			 struct dma_fence *fence, uint64_t* handle)
 {
-	struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx];
+	struct amdgpu_ctx_ring *cring = to_amdgpu_ctx_ring(entity);
 	uint64_t seq = cring->sequence;
-	unsigned idx = 0;
 	struct dma_fence *other = NULL;
+	unsigned idx = 0;

 	idx = seq & (amdgpu_sched_jobs - 1);
 	other = cring->fences[idx];
@@ -435,22 +440,23 @@ int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
 	spin_unlock(&ctx->ring_lock);
 	dma_fence_put(other);

-	if (handler)
-		*handler = seq;
+	if (handle)
+		*handle = seq;

 	return 0;
 }

 struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
-				       struct amdgpu_ring *ring, uint64_t seq)
+				       struct drm_sched_entity *entity,
+				       uint64_t seq)
 {
-	struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx];
+	struct amdgpu_ctx_ring *cring = to_amdgpu_ctx_ring(entity);
 	struct dma_fence *fence;

 	spin_lock(&ctx->ring_lock);

 	if (seq == ~0ull)
-		seq = ctx->rings[ring->idx].sequence - 1;
+		seq = cring->sequence - 1;

 	if (seq >= cring->sequence) {
 		spin_unlock(&ctx->ring_lock);
@@ -494,9 +500,10 @@ void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
 	}
 }

-int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id)
+int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
+			       struct drm_sched_entity *entity)
 {
-	struct amdgpu_ctx_ring *cring = &ctx->rings[ring_id];
+	struct amdgpu_ctx_ring *cring = to_amdgpu_ctx_ring(entity);
 	unsigned idx = cring->sequence & (amdgpu_sched_jobs - 1);
 	struct dma_fence *other = cring->fences[idx];
...
@@ -61,20 +61,22 @@ struct amdgpu_ctx_mgr {
 struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
 int amdgpu_ctx_put(struct amdgpu_ctx *ctx);

-int amdgpu_ctx_get_ring(struct amdgpu_ctx *ctx,
-			u32 hw_ip, u32 instance, u32 ring,
-			struct amdgpu_ring **out_ring);
-int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
+int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
+			  u32 ring, struct drm_sched_entity **entity);
+int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
+			 struct drm_sched_entity *entity,
 			 struct dma_fence *fence, uint64_t *seq);
 struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
-				       struct amdgpu_ring *ring, uint64_t seq);
+				       struct drm_sched_entity *entity,
+				       uint64_t seq);
 void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
				  enum drm_sched_priority priority);
 int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp);
-int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id);
+int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
+			       struct drm_sched_entity *entity);

 void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr);
 void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr);
...
@@ -150,10 +150,10 @@ TRACE_EVENT(amdgpu_cs,
 	    TP_fast_assign(
			   __entry->bo_list = p->bo_list;
-			   __entry->ring = p->ring->idx;
+			   __entry->ring = to_amdgpu_ring(p->entity->rq->sched)->idx;
			   __entry->dw = p->job->ibs[i].length_dw;
			   __entry->fences = amdgpu_fence_count_emitted(
-				   p->ring);
+				   to_amdgpu_ring(p->entity->rq->sched));
			   ),
 	    TP_printk("bo_list=%p, ring=%u, dw=%u, fences=%u",
		      __entry->bo_list, __entry->ring, __entry->dw,
...
@@ -1264,11 +1264,12 @@ static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
 static int uvd_v7_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
					   uint32_t ib_idx)
 {
+	struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
 	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
 	unsigned i;

 	/* No patching necessary for the first instance */
-	if (!p->ring->me)
+	if (!ring->me)
 		return 0;

 	for (i = 0; i < ib->length_dw; i += 2) {
...