Commit c4f46f22 authored by Christian König, committed by Alex Deucher

drm/amdgpu: rename vm_id to vmid

sed -i "s/vm_id/vmid/g" drivers/gpu/drm/amd/amdgpu/*.c
sed -i "s/vm_id/vmid/g" drivers/gpu/drm/amd/amdgpu/*.h
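One way to sanity-check the substitution afterwards (assuming a checkout of the same tree) is to grep for the old name; it should print nothing:

grep -rn "vm_id" drivers/gpu/drm/amd/amdgpu/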
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Parent 620f774f
@@ -351,7 +351,7 @@ struct amdgpu_gart_funcs {
 /* get the pde for a given mc addr */
 void (*get_vm_pde)(struct amdgpu_device *adev, int level,
 u64 *dst, u64 *flags);
-uint32_t (*get_invalidate_req)(unsigned int vm_id);
+uint32_t (*get_invalidate_req)(unsigned int vmid);
 };
 /* provided by the ih block */
@@ -1124,7 +1124,7 @@ struct amdgpu_job {
 void *owner;
 uint64_t fence_ctx; /* the fence_context this job uses */
 bool vm_needs_flush;
-unsigned vm_id;
+unsigned vmid;
 uint64_t vm_pd_addr;
 uint32_t gds_base, gds_size;
 uint32_t gws_base, gws_size;
@@ -1849,7 +1849,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
 #define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
 #define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
-#define amdgpu_ring_emit_ib(r, ib, vm_id, c) (r)->funcs->emit_ib((r), (ib), (vm_id), (c))
+#define amdgpu_ring_emit_ib(r, ib, vmid, c) (r)->funcs->emit_ib((r), (ib), (vmid), (c))
 #define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r))
 #define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
 #define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
...
@@ -149,7 +149,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 return -EINVAL;
 }
-if (vm && !job->vm_id) {
+if (vm && !job->vmid) {
 dev_err(adev->dev, "VM IB without ID\n");
 return -EINVAL;
 }
@@ -211,7 +211,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 !amdgpu_sriov_vf(adev)) /* for SRIOV preemption, Preamble CE ib must be inserted anyway */
 continue;
-amdgpu_ring_emit_ib(ring, ib, job ? job->vm_id : 0,
+amdgpu_ring_emit_ib(ring, ib, job ? job->vmid : 0,
 need_ctx_switch);
 need_ctx_switch = false;
 }
@@ -229,8 +229,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 r = amdgpu_fence_emit(ring, f);
 if (r) {
 dev_err(adev->dev, "failed to emit fence (%d)\n", r);
-if (job && job->vm_id)
-amdgpu_vmid_reset(adev, ring->funcs->vmhub, job->vm_id);
+if (job && job->vmid)
+amdgpu_vmid_reset(adev, ring->funcs->vmhub, job->vmid);
 amdgpu_ring_undo(ring);
 return r;
 }
...
@@ -150,7 +150,7 @@ static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm,
 dma_fence_put(id->last_flush);
 id->last_flush = NULL;
 }
-job->vm_id = id - id_mgr->ids;
+job->vmid = id - id_mgr->ids;
 trace_amdgpu_vm_grab_id(vm, ring, job);
 out:
 return r;
@@ -301,7 +301,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 no_flush_needed:
 list_move_tail(&id->list, &id_mgr->ids_lru);
-job->vm_id = id - id_mgr->ids;
+job->vmid = id - id_mgr->ids;
 trace_amdgpu_vm_grab_id(vm, ring, job);
 error:
@@ -360,7 +360,7 @@ void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
 * amdgpu_vmid_reset - reset VMID to zero
 *
 * @adev: amdgpu device structure
-* @vm_id: vmid number to use
+* @vmid: vmid number to use
 *
 * Reset saved GDW, GWS and OA to force switch on next flush.
 */
...
@@ -105,8 +105,8 @@ struct amdgpu_iv_entry {
 unsigned client_id;
 unsigned src_id;
 unsigned ring_id;
-unsigned vm_id;
-unsigned vm_id_src;
+unsigned vmid;
+unsigned vmid_src;
 uint64_t timestamp;
 unsigned timestamp_src;
 unsigned pas_id;
...
@@ -158,7 +158,7 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
 }
 }
-while (fence == NULL && vm && !job->vm_id) {
+while (fence == NULL && vm && !job->vmid) {
 struct amdgpu_ring *ring = job->ring;
 r = amdgpu_vmid_grab(vm, ring, &job->sync,
...
@@ -121,11 +121,11 @@ struct amdgpu_ring_funcs {
 /* command emit functions */
 void (*emit_ib)(struct amdgpu_ring *ring,
 struct amdgpu_ib *ib,
-unsigned vm_id, bool ctx_switch);
+unsigned vmid, bool ctx_switch);
 void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
 uint64_t seq, unsigned flags);
 void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
-void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vm_id,
+void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vmid,
 uint64_t pd_addr);
 void (*emit_hdp_flush)(struct amdgpu_ring *ring);
 void (*emit_hdp_invalidate)(struct amdgpu_ring *ring);
...
@@ -82,8 +82,8 @@ TRACE_EVENT(amdgpu_iv,
 __field(unsigned, client_id)
 __field(unsigned, src_id)
 __field(unsigned, ring_id)
-__field(unsigned, vm_id)
-__field(unsigned, vm_id_src)
+__field(unsigned, vmid)
+__field(unsigned, vmid_src)
 __field(uint64_t, timestamp)
 __field(unsigned, timestamp_src)
 __field(unsigned, pas_id)
@@ -93,8 +93,8 @@ TRACE_EVENT(amdgpu_iv,
 __entry->client_id = iv->client_id;
 __entry->src_id = iv->src_id;
 __entry->ring_id = iv->ring_id;
-__entry->vm_id = iv->vm_id;
-__entry->vm_id_src = iv->vm_id_src;
+__entry->vmid = iv->vmid;
+__entry->vmid_src = iv->vmid_src;
 __entry->timestamp = iv->timestamp;
 __entry->timestamp_src = iv->timestamp_src;
 __entry->pas_id = iv->pas_id;
@@ -103,9 +103,9 @@ TRACE_EVENT(amdgpu_iv,
 __entry->src_data[2] = iv->src_data[2];
 __entry->src_data[3] = iv->src_data[3];
 ),
-TP_printk("client_id:%u src_id:%u ring:%u vm_id:%u timestamp: %llu pas_id:%u src_data: %08x %08x %08x %08x\n",
+TP_printk("client_id:%u src_id:%u ring:%u vmid:%u timestamp: %llu pas_id:%u src_data: %08x %08x %08x %08x\n",
 __entry->client_id, __entry->src_id,
-__entry->ring_id, __entry->vm_id,
+__entry->ring_id, __entry->vmid,
 __entry->timestamp, __entry->pas_id,
 __entry->src_data[0], __entry->src_data[1],
 __entry->src_data[2], __entry->src_data[3])
@@ -219,7 +219,7 @@ TRACE_EVENT(amdgpu_vm_grab_id,
 TP_STRUCT__entry(
 __field(struct amdgpu_vm *, vm)
 __field(u32, ring)
-__field(u32, vm_id)
+__field(u32, vmid)
 __field(u32, vm_hub)
 __field(u64, pd_addr)
 __field(u32, needs_flush)
@@ -228,13 +228,13 @@ TRACE_EVENT(amdgpu_vm_grab_id,
 TP_fast_assign(
 __entry->vm = vm;
 __entry->ring = ring->idx;
-__entry->vm_id = job->vm_id;
+__entry->vmid = job->vmid;
 __entry->vm_hub = ring->funcs->vmhub,
 __entry->pd_addr = job->vm_pd_addr;
 __entry->needs_flush = job->vm_needs_flush;
 ),
 TP_printk("vm=%p, ring=%u, id=%u, hub=%u, pd_addr=%010Lx needs_flush=%u",
-__entry->vm, __entry->ring, __entry->vm_id,
+__entry->vm, __entry->ring, __entry->vmid,
 __entry->vm_hub, __entry->pd_addr, __entry->needs_flush)
 );
@@ -357,24 +357,24 @@ TRACE_EVENT(amdgpu_vm_copy_ptes,
 );
 TRACE_EVENT(amdgpu_vm_flush,
-TP_PROTO(struct amdgpu_ring *ring, unsigned vm_id,
+TP_PROTO(struct amdgpu_ring *ring, unsigned vmid,
 uint64_t pd_addr),
-TP_ARGS(ring, vm_id, pd_addr),
+TP_ARGS(ring, vmid, pd_addr),
 TP_STRUCT__entry(
 __field(u32, ring)
-__field(u32, vm_id)
+__field(u32, vmid)
 __field(u32, vm_hub)
 __field(u64, pd_addr)
 ),
 TP_fast_assign(
 __entry->ring = ring->idx;
-__entry->vm_id = vm_id;
+__entry->vmid = vmid;
 __entry->vm_hub = ring->funcs->vmhub;
 __entry->pd_addr = pd_addr;
 ),
 TP_printk("ring=%u, id=%u, hub=%u, pd_addr=%010Lx",
-__entry->ring, __entry->vm_id,
+__entry->ring, __entry->vmid,
 __entry->vm_hub,__entry->pd_addr)
 );
...
@@ -991,7 +991,7 @@ int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx)
 *
 */
 void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
-unsigned vm_id, bool ctx_switch)
+unsigned vmid, bool ctx_switch)
 {
 amdgpu_ring_write(ring, VCE_CMD_IB);
 amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
...
@@ -63,7 +63,7 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp);
 int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx);
 int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx);
 void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
-unsigned vm_id, bool ctx_switch);
+unsigned vmid, bool ctx_switch);
 void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
 unsigned flags);
 int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring);
...
@@ -446,9 +446,9 @@ bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
 bool gds_switch_needed;
 bool vm_flush_needed = job->vm_needs_flush || ring->has_compute_vm_bug;
-if (job->vm_id == 0)
+if (job->vmid == 0)
 return false;
-id = &id_mgr->ids[job->vm_id];
+id = &id_mgr->ids[job->vmid];
 gds_switch_needed = ring->funcs->emit_gds_switch && (
 id->gds_base != job->gds_base ||
 id->gds_size != job->gds_size ||
@@ -472,7 +472,7 @@ static bool amdgpu_vm_is_large_bar(struct amdgpu_device *adev)
 * amdgpu_vm_flush - hardware flush the vm
 *
 * @ring: ring to use for flush
-* @vm_id: vmid number to use
+* @vmid: vmid number to use
 * @pd_addr: address of the page directory
 *
 * Emit a VM flush when it is necessary.
@@ -482,7 +482,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
 struct amdgpu_device *adev = ring->adev;
 unsigned vmhub = ring->funcs->vmhub;
 struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
-struct amdgpu_vmid *id = &id_mgr->ids[job->vm_id];
+struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
 bool gds_switch_needed = ring->funcs->emit_gds_switch && (
 id->gds_base != job->gds_base ||
 id->gds_size != job->gds_size ||
@@ -511,8 +511,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
 if (ring->funcs->emit_vm_flush && vm_flush_needed) {
 struct dma_fence *fence;
-trace_amdgpu_vm_flush(ring, job->vm_id, job->vm_pd_addr);
-amdgpu_ring_emit_vm_flush(ring, job->vm_id, job->vm_pd_addr);
+trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
+amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
 r = amdgpu_fence_emit(ring, &fence);
 if (r)
@@ -532,7 +532,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
 id->gws_size = job->gws_size;
 id->oa_base = job->oa_base;
 id->oa_size = job->oa_size;
-amdgpu_ring_emit_gds_switch(ring, job->vm_id, job->gds_base,
+amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
 job->gds_size, job->gws_base,
 job->gws_size, job->oa_base,
 job->oa_size);
...
@@ -280,7 +280,7 @@ static void cik_ih_decode_iv(struct amdgpu_device *adev,
 entry->src_id = dw[0] & 0xff;
 entry->src_data[0] = dw[1] & 0xfffffff;
 entry->ring_id = dw[2] & 0xff;
-entry->vm_id = (dw[2] >> 8) & 0xff;
+entry->vmid = (dw[2] >> 8) & 0xff;
 entry->pas_id = (dw[2] >> 16) & 0xffff;
 /* wptr/rptr are in bytes! */
...
@@ -221,9 +221,9 @@ static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 */
 static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring,
 struct amdgpu_ib *ib,
-unsigned vm_id, bool ctx_switch)
+unsigned vmid, bool ctx_switch)
 {
-u32 extra_bits = vm_id & 0xf;
+u32 extra_bits = vmid & 0xf;
 /* IB packet must end on a 8 DW boundary */
 cik_sdma_ring_insert_nop(ring, (12 - (lower_32_bits(ring->wptr) & 7)) % 8);
@@ -880,23 +880,23 @@ static void cik_sdma_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
 * using sDMA (CIK).
 */
 static void cik_sdma_ring_emit_vm_flush(struct amdgpu_ring *ring,
-unsigned vm_id, uint64_t pd_addr)
+unsigned vmid, uint64_t pd_addr)
 {
 u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(0) |
 SDMA_POLL_REG_MEM_EXTRA_FUNC(0)); /* always */
 amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
-if (vm_id < 8) {
-amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
+if (vmid < 8) {
+amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid));
 } else {
-amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
+amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8));
 }
 amdgpu_ring_write(ring, pd_addr >> 12);
 /* flush TLB */
 amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
 amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
-amdgpu_ring_write(ring, 1 << vm_id);
+amdgpu_ring_write(ring, 1 << vmid);
 amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
 amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
...
@@ -259,7 +259,7 @@ static void cz_ih_decode_iv(struct amdgpu_device *adev,
 entry->src_id = dw[0] & 0xff;
 entry->src_data[0] = dw[1] & 0xfffffff;
 entry->ring_id = dw[2] & 0xff;
-entry->vm_id = (dw[2] >> 8) & 0xff;
+entry->vmid = (dw[2] >> 8) & 0xff;
 entry->pas_id = (dw[2] >> 16) & 0xffff;
 /* wptr/rptr are in bytes! */
...
@@ -1874,7 +1874,7 @@ static void gfx_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
 static void gfx_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
 struct amdgpu_ib *ib,
-unsigned vm_id, bool ctx_switch)
+unsigned vmid, bool ctx_switch)
 {
 u32 header, control = 0;
@@ -1889,7 +1889,7 @@ static void gfx_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
 else
 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
-control |= ib->length_dw | (vm_id << 24);
+control |= ib->length_dw | (vmid << 24);
 amdgpu_ring_write(ring, header);
 amdgpu_ring_write(ring,
@@ -2354,7 +2354,7 @@ static void gfx_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
 }
 static void gfx_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
-unsigned vm_id, uint64_t pd_addr)
+unsigned vmid, uint64_t pd_addr)
 {
 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
@@ -2362,10 +2362,10 @@ static void gfx_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
 WRITE_DATA_DST_SEL(0)));
-if (vm_id < 8) {
-amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id ));
+if (vmid < 8) {
+amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid ));
 } else {
-amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vm_id - 8)));
+amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vmid - 8)));
 }
 amdgpu_ring_write(ring, 0);
 amdgpu_ring_write(ring, pd_addr >> 12);
@@ -2376,7 +2376,7 @@ static void gfx_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 WRITE_DATA_DST_SEL(0)));
 amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
 amdgpu_ring_write(ring, 0);
-amdgpu_ring_write(ring, 1 << vm_id);
+amdgpu_ring_write(ring, 1 << vmid);
 /* wait for the invalidate to complete */
 amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
...
@@ -2252,7 +2252,7 @@ static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
 */
 static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
 struct amdgpu_ib *ib,
-unsigned vm_id, bool ctx_switch)
+unsigned vmid, bool ctx_switch)
 {
 u32 header, control = 0;
@@ -2267,7 +2267,7 @@ static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
 else
 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
-control |= ib->length_dw | (vm_id << 24);
+control |= ib->length_dw | (vmid << 24);
 amdgpu_ring_write(ring, header);
 amdgpu_ring_write(ring,
@@ -2281,9 +2281,9 @@ static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
 static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
 struct amdgpu_ib *ib,
-unsigned vm_id, bool ctx_switch)
+unsigned vmid, bool ctx_switch)
 {
-u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vm_id << 24);
+u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
 amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
 amdgpu_ring_write(ring,
@@ -3237,19 +3237,19 @@ static void gfx_v7_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
 * using the CP (CIK).
 */
 static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
-unsigned vm_id, uint64_t pd_addr)
+unsigned vmid, uint64_t pd_addr)
 {
 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
 WRITE_DATA_DST_SEL(0)));
-if (vm_id < 8) {
+if (vmid < 8) {
 amdgpu_ring_write(ring,
-(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
+(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid));
 } else {
 amdgpu_ring_write(ring,
-(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
+(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8));
 }
 amdgpu_ring_write(ring, 0);
 amdgpu_ring_write(ring, pd_addr >> 12);
@@ -3260,7 +3260,7 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 WRITE_DATA_DST_SEL(0)));
 amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
 amdgpu_ring_write(ring, 0);
-amdgpu_ring_write(ring, 1 << vm_id);
+amdgpu_ring_write(ring, 1 << vmid);
 /* wait for the invalidate to complete */
 amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
...
@@ -6245,7 +6245,7 @@ static void gfx_v8_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
 static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
 struct amdgpu_ib *ib,
-unsigned vm_id, bool ctx_switch)
+unsigned vmid, bool ctx_switch)
 {
 u32 header, control = 0;
@@ -6254,7 +6254,7 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
 else
 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
-control |= ib->length_dw | (vm_id << 24);
+control |= ib->length_dw | (vmid << 24);
 if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
 control |= INDIRECT_BUFFER_PRE_ENB(1);
@@ -6275,9 +6275,9 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
 static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
 struct amdgpu_ib *ib,
-unsigned vm_id, bool ctx_switch)
+unsigned vmid, bool ctx_switch)
 {
-u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vm_id << 24);
+u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
 amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
 amdgpu_ring_write(ring,
@@ -6328,7 +6328,7 @@ static void gfx_v8_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
 }
 static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
-unsigned vm_id, uint64_t pd_addr)
+unsigned vmid, uint64_t pd_addr)
 {
 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
@@ -6336,12 +6336,12 @@ static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
 WRITE_DATA_DST_SEL(0)) |
 WR_CONFIRM);
-if (vm_id < 8) {
+if (vmid < 8) {
 amdgpu_ring_write(ring,
-(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
+(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid));
 } else {
 amdgpu_ring_write(ring,
-(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
+(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8));
 }
 amdgpu_ring_write(ring, 0);
 amdgpu_ring_write(ring, pd_addr >> 12);
@@ -6353,7 +6353,7 @@ static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 WRITE_DATA_DST_SEL(0)));
 amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
 amdgpu_ring_write(ring, 0);
-amdgpu_ring_write(ring, 1 << vm_id);
+amdgpu_ring_write(ring, 1 << vmid);
 /* wait for the invalidate to complete */
 amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
...
@@ -3594,7 +3594,7 @@ static void gfx_v9_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
 static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
 struct amdgpu_ib *ib,
-unsigned vm_id, bool ctx_switch)
+unsigned vmid, bool ctx_switch)
 {
 u32 header, control = 0;
@@ -3603,7 +3603,7 @@ static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
 else
 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
-control |= ib->length_dw | (vm_id << 24);
+control |= ib->length_dw | (vmid << 24);
 if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
 control |= INDIRECT_BUFFER_PRE_ENB(1);
@@ -3625,9 +3625,9 @@ BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
 static void gfx_v9_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
 struct amdgpu_ib *ib,
-unsigned vm_id, bool ctx_switch)
+unsigned vmid, bool ctx_switch)
 {
-u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vm_id << 24);
+u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
 amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
 BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
@@ -3683,11 +3683,11 @@ static void gfx_v9_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
 }
 static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
-unsigned vm_id, uint64_t pd_addr)
+unsigned vmid, uint64_t pd_addr)
 {
 struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
-uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
+uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid);
 uint64_t flags = AMDGPU_PTE_VALID;
 unsigned eng = ring->vm_inv_eng;
@@ -3695,11 +3695,11 @@ static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 pd_addr |= flags;
 gfx_v9_0_write_data_to_reg(ring, usepfp, true,
-hub->ctx0_ptb_addr_lo32 + (2 * vm_id),
+hub->ctx0_ptb_addr_lo32 + (2 * vmid),
 lower_32_bits(pd_addr));
 gfx_v9_0_write_data_to_reg(ring, usepfp, true,
-hub->ctx0_ptb_addr_hi32 + (2 * vm_id),
+hub->ctx0_ptb_addr_hi32 + (2 * vmid),
 upper_32_bits(pd_addr));
 gfx_v9_0_write_data_to_reg(ring, usepfp, true,
@@ -3707,7 +3707,7 @@ static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 /* wait for the invalidate to complete */
 gfx_v9_0_wait_reg_mem(ring, 0, 0, 0, hub->vm_inv_eng0_ack +
-eng, 0, 1 << vm_id, 1 << vm_id, 0x20);
+eng, 0, 1 << vmid, 1 << vmid, 0x20);
 /* compute doesn't have PFP */
 if (usepfp) {
...
@@ -248,7 +248,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
 struct amdgpu_irq_src *source,
 struct amdgpu_iv_entry *entry)
 {
-struct amdgpu_vmhub *hub = &adev->vmhub[entry->vm_id_src];
+struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
 uint32_t status = 0;
 u64 addr;
@@ -262,9 +262,9 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
 if (printk_ratelimit()) {
 dev_err(adev->dev,
-"[%s] VMC page fault (src_id:%u ring:%u vm_id:%u pas_id:%u)\n",
-entry->vm_id_src ? "mmhub" : "gfxhub",
-entry->src_id, entry->ring_id, entry->vm_id,
+"[%s] VMC page fault (src_id:%u ring:%u vmid:%u pas_id:%u)\n",
+entry->vmid_src ? "mmhub" : "gfxhub",
+entry->src_id, entry->ring_id, entry->vmid,
 entry->pas_id);
 dev_err(adev->dev, " at page 0x%016llx from %d\n",
 addr, entry->client_id);
@@ -288,13 +288,13 @@ static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
 adev->mc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
 }
-static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vm_id)
+static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid)
 {
 u32 req = 0;
-/* invalidate using legacy mode on vm_id*/
+/* invalidate using legacy mode on vmid*/
 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
-PER_VMID_INVALIDATE_REQ, 1 << vm_id);
+PER_VMID_INVALIDATE_REQ, 1 << vmid);
 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0);
 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
 req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
...
@@ -259,7 +259,7 @@ static void iceland_ih_decode_iv(struct amdgpu_device *adev,
 entry->src_id = dw[0] & 0xff;
 entry->src_data[0] = dw[1] & 0xfffffff;
 entry->ring_id = dw[2] & 0xff;
-entry->vm_id = (dw[2] >> 8) & 0xff;
+entry->vmid = (dw[2] >> 8) & 0xff;
 entry->pas_id = (dw[2] >> 16) & 0xffff;
 /* wptr/rptr are in bytes! */
...
@@ -246,15 +246,13 @@ static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 */
 static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring,
 struct amdgpu_ib *ib,
-unsigned vm_id, bool ctx_switch)
+unsigned vmid, bool ctx_switch)
 {
-u32 vmid = vm_id & 0xf;
 /* IB packet must end on a 8 DW boundary */
 sdma_v2_4_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
-SDMA_PKT_INDIRECT_HEADER_VMID(vmid));
+SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
 /* base must be 32 byte aligned */
 amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
@@ -861,14 +859,14 @@ static void sdma_v2_4_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
 * using sDMA (VI).
 */
 static void sdma_v2_4_ring_emit_vm_flush(struct amdgpu_ring *ring,
-unsigned vm_id, uint64_t pd_addr)
+unsigned vmid, uint64_t pd_addr)
 {
 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
 SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
-if (vm_id < 8) {
-amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
+if (vmid < 8) {
+amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid));
 } else {
-amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
+amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8));
 }
 amdgpu_ring_write(ring, pd_addr >> 12);
@@ -876,7 +874,7 @@ static void sdma_v2_4_ring_emit_vm_flush(struct amdgpu_ring *ring,
 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
 SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
 amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
-amdgpu_ring_write(ring, 1 << vm_id);
+amdgpu_ring_write(ring, 1 << vmid);
 /* wait for flush */
 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
...
@@ -417,15 +417,13 @@ static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 */
 static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
 struct amdgpu_ib *ib,
-unsigned vm_id, bool ctx_switch)
+unsigned vmid, bool ctx_switch)
 {
-u32 vmid = vm_id & 0xf;
 /* IB packet must end on a 8 DW boundary */
 sdma_v3_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
-SDMA_PKT_INDIRECT_HEADER_VMID(vmid));
+SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
 /* base must be 32 byte aligned */
 amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
@@ -1127,14 +1125,14 @@ static void sdma_v3_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
 * using sDMA (VI).
 */
 static void sdma_v3_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
-unsigned vm_id, uint64_t pd_addr)
+unsigned vmid, uint64_t pd_addr)
 {
 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
 SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
-if (vm_id < 8) {
-amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
+if (vmid < 8) {
+amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid));
 } else {
-amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
+amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8));
 }
 amdgpu_ring_write(ring, pd_addr >> 12);
@@ -1142,7 +1140,7 @@ static void sdma_v3_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
 SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
 amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
-amdgpu_ring_write(ring, 1 << vm_id);
+amdgpu_ring_write(ring, 1 << vmid);
 /* wait for flush */
 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
...
@@ -330,15 +330,13 @@ static void sdma_v4_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 */
 static void sdma_v4_0_ring_emit_ib(struct amdgpu_ring *ring,
 struct amdgpu_ib *ib,
-unsigned vm_id, bool ctx_switch)
+unsigned vmid, bool ctx_switch)
 {
-u32 vmid = vm_id & 0xf;
 /* IB packet must end on a 8 DW boundary */
 sdma_v4_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
-SDMA_PKT_INDIRECT_HEADER_VMID(vmid));
+SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
 /* base must be 32 byte aligned */
 amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
@@ -1135,10 +1133,10 @@ static void sdma_v4_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
 * using sDMA (VEGA10).
 */
 static void sdma_v4_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
-unsigned vm_id, uint64_t pd_addr)
+unsigned vmid, uint64_t pd_addr)
 {
 struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
-uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
+uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid);
 uint64_t flags = AMDGPU_PTE_VALID;
 unsigned eng = ring->vm_inv_eng;
@@ -1147,12 +1145,12 @@ static void sdma_v4_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
 SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
-amdgpu_ring_write(ring, hub->ctx0_ptb_addr_lo32 + vm_id * 2);
+amdgpu_ring_write(ring, hub->ctx0_ptb_addr_lo32 + vmid * 2);
 amdgpu_ring_write(ring, lower_32_bits(pd_addr));
 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
 SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
-amdgpu_ring_write(ring, hub->ctx0_ptb_addr_hi32 + vm_id * 2);
+amdgpu_ring_write(ring, hub->ctx0_ptb_addr_hi32 + vmid * 2);
 amdgpu_ring_write(ring, upper_32_bits(pd_addr));
 /* flush TLB */
@@ -1167,8 +1165,8 @@ static void sdma_v4_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* equal */
 amdgpu_ring_write(ring, (hub->vm_inv_eng0_ack + eng) << 2);
 amdgpu_ring_write(ring, 0);
-amdgpu_ring_write(ring, 1 << vm_id); /* reference */
-amdgpu_ring_write(ring, 1 << vm_id); /* mask */
+amdgpu_ring_write(ring, 1 << vmid); /* reference */
+amdgpu_ring_write(ring, 1 << vmid); /* mask */
 amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
 SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10));
 }
...
@@ -61,14 +61,14 @@ static void si_dma_ring_set_wptr(struct amdgpu_ring *ring)
 static void si_dma_ring_emit_ib(struct amdgpu_ring *ring,
 struct amdgpu_ib *ib,
-unsigned vm_id, bool ctx_switch)
+unsigned vmid, bool ctx_switch)
 {
 /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
 * Pad as necessary with NOPs.
 */
 while ((lower_32_bits(ring->wptr) & 7) != 5)
 amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
-amdgpu_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, vm_id, 0));
+amdgpu_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, vmid, 0));
 amdgpu_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
 amdgpu_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
@@ -473,25 +473,25 @@ static void si_dma_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
 * using sDMA (VI).
 */
 static void si_dma_ring_emit_vm_flush(struct amdgpu_ring *ring,
-unsigned vm_id, uint64_t pd_addr)
+unsigned vmid, uint64_t pd_addr)
 {
 amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
-if (vm_id < 8)
-amdgpu_ring_write(ring, (0xf << 16) | (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
+if (vmid < 8)
+amdgpu_ring_write(ring, (0xf << 16) | (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid));
 else
-amdgpu_ring_write(ring, (0xf << 16) | (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vm_id - 8)));
+amdgpu_ring_write(ring, (0xf << 16) | (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vmid - 8)));
 amdgpu_ring_write(ring, pd_addr >> 12);
 /* bits 0-7 are the VM contexts0-7 */
 amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
 amdgpu_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST));
-amdgpu_ring_write(ring, 1 << vm_id);
+amdgpu_ring_write(ring, 1 << vmid);
 /* wait for invalidate to complete */
 amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0));
 amdgpu_ring_write(ring, VM_INVALIDATE_REQUEST);
 amdgpu_ring_write(ring, 0xff << 16); /* retry */
-amdgpu_ring_write(ring, 1 << vm_id); /* mask */
+amdgpu_ring_write(ring, 1 << vmid); /* mask */
 amdgpu_ring_write(ring, 0); /* value */
 amdgpu_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */
 }
...
@@ -146,7 +146,7 @@ static void si_ih_decode_iv(struct amdgpu_device *adev,
 entry->src_id = dw[0] & 0xff;
 entry->src_data[0] = dw[1] & 0xfffffff;
 entry->ring_id = dw[2] & 0xff;
-entry->vm_id = (dw[2] >> 8) & 0xff;
+entry->vmid = (dw[2] >> 8) & 0xff;
 adev->irq.ih.rptr += 16;
 }
...
@@ -270,7 +270,7 @@ static void tonga_ih_decode_iv(struct amdgpu_device *adev,
 entry->src_id = dw[0] & 0xff;
 entry->src_data[0] = dw[1] & 0xfffffff;
 entry->ring_id = dw[2] & 0xff;
-entry->vm_id = (dw[2] >> 8) & 0xff;
+entry->vmid = (dw[2] >> 8) & 0xff;
 entry->pas_id = (dw[2] >> 16) & 0xffff;
 /* wptr/rptr are in bytes! */
...
@@ -541,7 +541,7 @@ static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring)
 */
 static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
 struct amdgpu_ib *ib,
-unsigned vm_id, bool ctx_switch)
+unsigned vmid, bool ctx_switch)
 {
 amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_BASE, 0));
 amdgpu_ring_write(ring, ib->gpu_addr);
...
@@ -556,7 +556,7 @@ static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring)
 */
 static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
 struct amdgpu_ib *ib,
-unsigned vm_id, bool ctx_switch)
+unsigned vmid, bool ctx_switch)
 {
 amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
 amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
...
@@ -1028,10 +1028,10 @@ static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
 */
 static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
 struct amdgpu_ib *ib,
-unsigned vm_id, bool ctx_switch)
+unsigned vmid, bool ctx_switch)
 {
 amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0));
-amdgpu_ring_write(ring, vm_id);
+amdgpu_ring_write(ring, vmid);
 amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
 amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
@@ -1050,24 +1050,24 @@ static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
 * Write enc ring commands to execute the indirect buffer
 */
 static void uvd_v6_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
-struct amdgpu_ib *ib, unsigned int vm_id, bool ctx_switch)
+struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
 {
 amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
-amdgpu_ring_write(ring, vm_id);
+amdgpu_ring_write(ring, vmid);
 amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
 amdgpu_ring_write(ring, ib->length_dw);
 }
 static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
-unsigned vm_id, uint64_t pd_addr)
+unsigned vmid, uint64_t pd_addr)
 {
 uint32_t reg;
-if (vm_id < 8)
-reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id;
+if (vmid < 8)
+reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid;
 else
-reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8;
+reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8;
 amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
 amdgpu_ring_write(ring, reg << 2);
@@ -1079,7 +1079,7 @@ static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
 amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
 amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
-amdgpu_ring_write(ring, 1 << vm_id);
+amdgpu_ring_write(ring, 1 << vmid);
 amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
 amdgpu_ring_write(ring, 0x8);
@@ -1088,7 +1088,7 @@ static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
 amdgpu_ring_write(ring, 0);
 amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
-amdgpu_ring_write(ring, 1 << vm_id); /* mask */
+amdgpu_ring_write(ring, 1 << vmid); /* mask */
 amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
 amdgpu_ring_write(ring, 0xC);
 }
@@ -1127,14 +1127,14 @@ static void uvd_v6_0_enc_ring_insert_end(struct amdgpu_ring *ring)
 }
 static void uvd_v6_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
-unsigned int vm_id, uint64_t pd_addr)
+unsigned int vmid, uint64_t pd_addr)
 {
 amdgpu_ring_write(ring, HEVC_ENC_CMD_UPDATE_PTB);
-amdgpu_ring_write(ring, vm_id);
+amdgpu_ring_write(ring, vmid);
 amdgpu_ring_write(ring, pd_addr >> 12);
 amdgpu_ring_write(ring, HEVC_ENC_CMD_FLUSH_TLB);
-amdgpu_ring_write(ring, vm_id);
+amdgpu_ring_write(ring, vmid);
 }
 static bool uvd_v6_0_is_idle(void *handle)
...
@@ -1218,13 +1218,13 @@ static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
  */
 static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
                                   struct amdgpu_ib *ib,
-                                  unsigned vm_id, bool ctx_switch)
+                                  unsigned vmid, bool ctx_switch)
 {
         struct amdgpu_device *adev = ring->adev;
 
         amdgpu_ring_write(ring,
                 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0));
-        amdgpu_ring_write(ring, vm_id);
+        amdgpu_ring_write(ring, vmid);
         amdgpu_ring_write(ring,
                 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
@@ -1246,10 +1246,10 @@ static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
  * Write enc ring commands to execute the indirect buffer
  */
 static void uvd_v7_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
-        struct amdgpu_ib *ib, unsigned int vm_id, bool ctx_switch)
+        struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
 {
         amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
-        amdgpu_ring_write(ring, vm_id);
+        amdgpu_ring_write(ring, vmid);
         amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
         amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
         amdgpu_ring_write(ring, ib->length_dw);
@@ -1291,10 +1291,10 @@ static void uvd_v7_0_vm_reg_wait(struct amdgpu_ring *ring,
 }
 
 static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
-                                        unsigned vm_id, uint64_t pd_addr)
+                                        unsigned vmid, uint64_t pd_addr)
 {
         struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
-        uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
+        uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid);
         uint64_t flags = AMDGPU_PTE_VALID;
         unsigned eng = ring->vm_inv_eng;
         uint32_t data0, data1, mask;
@@ -1302,15 +1302,15 @@ static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
         amdgpu_gart_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
         pd_addr |= flags;
 
-        data0 = (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2;
+        data0 = (hub->ctx0_ptb_addr_hi32 + vmid * 2) << 2;
         data1 = upper_32_bits(pd_addr);
         uvd_v7_0_vm_reg_write(ring, data0, data1);
 
-        data0 = (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2;
+        data0 = (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2;
         data1 = lower_32_bits(pd_addr);
         uvd_v7_0_vm_reg_write(ring, data0, data1);
 
-        data0 = (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2;
+        data0 = (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2;
         data1 = lower_32_bits(pd_addr);
         mask = 0xffffffff;
         uvd_v7_0_vm_reg_wait(ring, data0, data1, mask);
@@ -1322,8 +1322,8 @@ static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
         /* wait for flush */
         data0 = (hub->vm_inv_eng0_ack + eng) << 2;
-        data1 = 1 << vm_id;
-        mask = 1 << vm_id;
+        data1 = 1 << vmid;
+        mask = 1 << vmid;
         uvd_v7_0_vm_reg_wait(ring, data0, data1, mask);
 }
@@ -1343,10 +1343,10 @@ static void uvd_v7_0_enc_ring_insert_end(struct amdgpu_ring *ring)
 }
 
 static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
-        unsigned int vm_id, uint64_t pd_addr)
+        unsigned int vmid, uint64_t pd_addr)
 {
         struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
-        uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
+        uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid);
         uint64_t flags = AMDGPU_PTE_VALID;
         unsigned eng = ring->vm_inv_eng;
@@ -1354,15 +1354,15 @@ static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
         pd_addr |= flags;
 
         amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
-        amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2);
+        amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_hi32 + vmid * 2) << 2);
         amdgpu_ring_write(ring, upper_32_bits(pd_addr));
 
         amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
-        amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2);
+        amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2);
         amdgpu_ring_write(ring, lower_32_bits(pd_addr));
 
         amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WAIT);
-        amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2);
+        amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2);
         amdgpu_ring_write(ring, 0xffffffff);
         amdgpu_ring_write(ring, lower_32_bits(pd_addr));
@@ -1374,8 +1374,8 @@ static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
         /* wait for flush */
         amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WAIT);
         amdgpu_ring_write(ring, (hub->vm_inv_eng0_ack + eng) << 2);
-        amdgpu_ring_write(ring, 1 << vm_id);
-        amdgpu_ring_write(ring, 1 << vm_id);
+        amdgpu_ring_write(ring, 1 << vmid);
+        amdgpu_ring_write(ring, 1 << vmid);
 }
 
 #if 0
...
@@ -834,24 +834,24 @@ static void vce_v3_0_get_clockgating_state(void *handle, u32 *flags)
 }
 
 static void vce_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
-        struct amdgpu_ib *ib, unsigned int vm_id, bool ctx_switch)
+        struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
 {
         amdgpu_ring_write(ring, VCE_CMD_IB_VM);
-        amdgpu_ring_write(ring, vm_id);
+        amdgpu_ring_write(ring, vmid);
         amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
         amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
         amdgpu_ring_write(ring, ib->length_dw);
 }
 
 static void vce_v3_0_emit_vm_flush(struct amdgpu_ring *ring,
-        unsigned int vm_id, uint64_t pd_addr)
+        unsigned int vmid, uint64_t pd_addr)
 {
         amdgpu_ring_write(ring, VCE_CMD_UPDATE_PTB);
-        amdgpu_ring_write(ring, vm_id);
+        amdgpu_ring_write(ring, vmid);
         amdgpu_ring_write(ring, pd_addr >> 12);
         amdgpu_ring_write(ring, VCE_CMD_FLUSH_TLB);
-        amdgpu_ring_write(ring, vm_id);
+        amdgpu_ring_write(ring, vmid);
         amdgpu_ring_write(ring, VCE_CMD_END);
 }
...
@@ -938,10 +938,10 @@ static int vce_v4_0_set_powergating_state(void *handle,
 #endif
 
 static void vce_v4_0_ring_emit_ib(struct amdgpu_ring *ring,
-        struct amdgpu_ib *ib, unsigned int vm_id, bool ctx_switch)
+        struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
 {
         amdgpu_ring_write(ring, VCE_CMD_IB_VM);
-        amdgpu_ring_write(ring, vm_id);
+        amdgpu_ring_write(ring, vmid);
         amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
         amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
         amdgpu_ring_write(ring, ib->length_dw);
@@ -965,10 +965,10 @@ static void vce_v4_0_ring_insert_end(struct amdgpu_ring *ring)
 }
 
 static void vce_v4_0_emit_vm_flush(struct amdgpu_ring *ring,
-        unsigned int vm_id, uint64_t pd_addr)
+        unsigned int vmid, uint64_t pd_addr)
 {
         struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
-        uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
+        uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid);
         uint64_t flags = AMDGPU_PTE_VALID;
         unsigned eng = ring->vm_inv_eng;
@@ -976,15 +976,15 @@ static void vce_v4_0_emit_vm_flush(struct amdgpu_ring *ring,
         pd_addr |= flags;
 
         amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
-        amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2);
+        amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_hi32 + vmid * 2) << 2);
         amdgpu_ring_write(ring, upper_32_bits(pd_addr));
 
         amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
-        amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2);
+        amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2);
         amdgpu_ring_write(ring, lower_32_bits(pd_addr));
 
         amdgpu_ring_write(ring, VCE_CMD_REG_WAIT);
-        amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2);
+        amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2);
         amdgpu_ring_write(ring, 0xffffffff);
         amdgpu_ring_write(ring, lower_32_bits(pd_addr));
@@ -996,8 +996,8 @@ static void vce_v4_0_emit_vm_flush(struct amdgpu_ring *ring,
         /* wait for flush */
         amdgpu_ring_write(ring, VCE_CMD_REG_WAIT);
         amdgpu_ring_write(ring, (hub->vm_inv_eng0_ack + eng) << 2);
-        amdgpu_ring_write(ring, 1 << vm_id);
-        amdgpu_ring_write(ring, 1 << vm_id);
+        amdgpu_ring_write(ring, 1 << vmid);
+        amdgpu_ring_write(ring, 1 << vmid);
 }
 
 static int vce_v4_0_set_interrupt_state(struct amdgpu_device *adev,
...
@@ -833,13 +833,13 @@ static void vcn_v1_0_dec_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
  */
 static void vcn_v1_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
                                       struct amdgpu_ib *ib,
-                                      unsigned vm_id, bool ctx_switch)
+                                      unsigned vmid, bool ctx_switch)
 {
         struct amdgpu_device *adev = ring->adev;
 
         amdgpu_ring_write(ring,
                 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0));
-        amdgpu_ring_write(ring, vm_id);
+        amdgpu_ring_write(ring, vmid);
         amdgpu_ring_write(ring,
                 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
@@ -888,10 +888,10 @@ static void vcn_v1_0_dec_vm_reg_wait(struct amdgpu_ring *ring,
 }
 
 static void vcn_v1_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
-                                            unsigned vm_id, uint64_t pd_addr)
+                                            unsigned vmid, uint64_t pd_addr)
 {
         struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
-        uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
+        uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid);
         uint64_t flags = AMDGPU_PTE_VALID;
         unsigned eng = ring->vm_inv_eng;
         uint32_t data0, data1, mask;
@@ -899,15 +899,15 @@ static void vcn_v1_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
         amdgpu_gart_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
         pd_addr |= flags;
 
-        data0 = (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2;
+        data0 = (hub->ctx0_ptb_addr_hi32 + vmid * 2) << 2;
         data1 = upper_32_bits(pd_addr);
         vcn_v1_0_dec_vm_reg_write(ring, data0, data1);
 
-        data0 = (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2;
+        data0 = (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2;
         data1 = lower_32_bits(pd_addr);
         vcn_v1_0_dec_vm_reg_write(ring, data0, data1);
 
-        data0 = (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2;
+        data0 = (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2;
         data1 = lower_32_bits(pd_addr);
         mask = 0xffffffff;
         vcn_v1_0_dec_vm_reg_wait(ring, data0, data1, mask);
@@ -919,8 +919,8 @@ static void vcn_v1_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
         /* wait for flush */
         data0 = (hub->vm_inv_eng0_ack + eng) << 2;
-        data1 = 1 << vm_id;
-        mask = 1 << vm_id;
+        data1 = 1 << vmid;
+        mask = 1 << vmid;
         vcn_v1_0_dec_vm_reg_wait(ring, data0, data1, mask);
 }
@@ -1011,20 +1011,20 @@ static void vcn_v1_0_enc_ring_insert_end(struct amdgpu_ring *ring)
  * Write enc ring commands to execute the indirect buffer
  */
 static void vcn_v1_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
-        struct amdgpu_ib *ib, unsigned int vm_id, bool ctx_switch)
+        struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
 {
         amdgpu_ring_write(ring, VCN_ENC_CMD_IB);
-        amdgpu_ring_write(ring, vm_id);
+        amdgpu_ring_write(ring, vmid);
         amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
         amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
         amdgpu_ring_write(ring, ib->length_dw);
 }
 
 static void vcn_v1_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
-        unsigned int vm_id, uint64_t pd_addr)
+        unsigned int vmid, uint64_t pd_addr)
 {
         struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
-        uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
+        uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid);
         uint64_t flags = AMDGPU_PTE_VALID;
         unsigned eng = ring->vm_inv_eng;
@@ -1033,17 +1033,17 @@ static void vcn_v1_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
         amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
         amdgpu_ring_write(ring,
-                (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2);
+                (hub->ctx0_ptb_addr_hi32 + vmid * 2) << 2);
         amdgpu_ring_write(ring, upper_32_bits(pd_addr));
 
         amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
         amdgpu_ring_write(ring,
-                (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2);
+                (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2);
         amdgpu_ring_write(ring, lower_32_bits(pd_addr));
 
         amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT);
         amdgpu_ring_write(ring,
-                (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2);
+                (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2);
         amdgpu_ring_write(ring, 0xffffffff);
         amdgpu_ring_write(ring, lower_32_bits(pd_addr));
@@ -1055,8 +1055,8 @@ static void vcn_v1_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
         /* wait for flush */
         amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT);
         amdgpu_ring_write(ring, (hub->vm_inv_eng0_ack + eng) << 2);
-        amdgpu_ring_write(ring, 1 << vm_id);
-        amdgpu_ring_write(ring, 1 << vm_id);
+        amdgpu_ring_write(ring, 1 << vmid);
+        amdgpu_ring_write(ring, 1 << vmid);
 }
 
 static int vcn_v1_0_set_interrupt_state(struct amdgpu_device *adev,
...
@@ -327,8 +327,8 @@ static void vega10_ih_decode_iv(struct amdgpu_device *adev,
         entry->client_id = dw[0] & 0xff;
         entry->src_id = (dw[0] >> 8) & 0xff;
         entry->ring_id = (dw[0] >> 16) & 0xff;
-        entry->vm_id = (dw[0] >> 24) & 0xf;
-        entry->vm_id_src = (dw[0] >> 31);
+        entry->vmid = (dw[0] >> 24) & 0xf;
+        entry->vmid_src = (dw[0] >> 31);
         entry->timestamp = dw[1] | ((u64)(dw[2] & 0xffff) << 32);
         entry->timestamp_src = dw[2] >> 31;
         entry->pas_id = dw[3] & 0xffff;