Commit 1cf0abb6 authored by Alex Deucher

drm/amdgpu/sdma: simplify sdma instance setup

Set the me instance in early init and use that rather than
calculating the instance based on the ring pointer.
Reviewed-by: James Zhu <James.Zhu@amd.com>
Reviewed-by: Leo Liu <leo.liu@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Parent ed54d954
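The pattern applied throughout the diff below: record each ring's SDMA instance index once at init time (ring->me), then read that field in the hot paths instead of re-deriving the instance by comparing ring pointers, which also drops the hard-coded 0-or-1 assumption. A minimal, self-contained C sketch of the idea (the struct and function names here are simplified stand-ins for illustration, not the real amdgpu definitions):

#include <stdio.h>

/* Simplified stand-ins for the amdgpu structures (illustrative only). */
struct ring {
	unsigned int me;	/* instance index, stamped once at init */
};

struct sdma_instance {
	struct ring ring;
};

struct device {
	unsigned int num_instances;
	struct sdma_instance instance[2];
};

/* Old approach: derive the instance from the ring pointer on every call. */
static unsigned int instance_from_pointer(struct device *adev, struct ring *ring)
{
	return (ring == &adev->instance[0].ring) ? 0 : 1;
}

/* New approach: stamp ring->me once during early init ... */
static void set_ring_funcs(struct device *adev)
{
	unsigned int i;

	for (i = 0; i < adev->num_instances; i++)
		adev->instance[i].ring.me = i;
}

int main(void)
{
	struct device adev = { .num_instances = 2 };

	set_ring_funcs(&adev);
	/* ... so the hot paths simply read ring->me. */
	printf("instance 1: old=%u new=%u\n",
	       instance_from_pointer(&adev, &adev.instance[1].ring),
	       adev.instance[1].ring.me);
	return 0;
}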
@@ -177,9 +177,8 @@ static uint64_t cik_sdma_ring_get_rptr(struct amdgpu_ring *ring)
 static uint64_t cik_sdma_ring_get_wptr(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
-	u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;

-	return (RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) & 0x3fffc) >> 2;
+	return (RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me]) & 0x3fffc) >> 2;
 }

 /**
@@ -192,9 +191,8 @@ static uint64_t cik_sdma_ring_get_wptr(struct amdgpu_ring *ring)
 static void cik_sdma_ring_set_wptr(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
-	u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;

-	WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me],
+	WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me],
 	       (lower_32_bits(ring->wptr) << 2) & 0x3fffc);
 }

@@ -248,7 +246,7 @@ static void cik_sdma_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 			  SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
 	u32 ref_and_mask;

-	if (ring == &ring->adev->sdma.instance[0].ring)
+	if (ring->me == 0)
 		ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA0_MASK;
 	else
 		ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA1_MASK;
@@ -1290,8 +1288,10 @@ static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev)
 {
 	int i;

-	for (i = 0; i < adev->sdma.num_instances; i++)
+	for (i = 0; i < adev->sdma.num_instances; i++) {
 		adev->sdma.instance[i].ring.funcs = &cik_sdma_ring_funcs;
+		adev->sdma.instance[i].ring.me = i;
+	}
 }

 static const struct amdgpu_irq_src_funcs cik_sdma_trap_irq_funcs = {
@@ -202,8 +202,7 @@ static uint64_t sdma_v2_4_ring_get_rptr(struct amdgpu_ring *ring)
 static uint64_t sdma_v2_4_ring_get_wptr(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
-	int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
-	u32 wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) >> 2;
+	u32 wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me]) >> 2;

 	return wptr;
 }
@@ -218,9 +217,8 @@ static uint64_t sdma_v2_4_ring_get_wptr(struct amdgpu_ring *ring)
 static void sdma_v2_4_ring_set_wptr(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
-	int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;

-	WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], lower_32_bits(ring->wptr) << 2);
+	WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me], lower_32_bits(ring->wptr) << 2);
 }

 static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
@@ -273,7 +271,7 @@ static void sdma_v2_4_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 {
 	u32 ref_and_mask = 0;

-	if (ring == &ring->adev->sdma.instance[0].ring)
+	if (ring->me == 0)
 		ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1);
 	else
 		ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1);
@@ -1213,8 +1211,10 @@ static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev)
 {
 	int i;

-	for (i = 0; i < adev->sdma.num_instances; i++)
+	for (i = 0; i < adev->sdma.num_instances; i++) {
 		adev->sdma.instance[i].ring.funcs = &sdma_v2_4_ring_funcs;
+		adev->sdma.instance[i].ring.me = i;
+	}
 }

 static const struct amdgpu_irq_src_funcs sdma_v2_4_trap_irq_funcs = {
@@ -365,9 +365,7 @@ static uint64_t sdma_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
 		/* XXX check if swapping is necessary on BE */
 		wptr = ring->adev->wb.wb[ring->wptr_offs] >> 2;
 	} else {
-		int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
-
-		wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) >> 2;
+		wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me]) >> 2;
 	}

 	return wptr;
@@ -394,9 +392,7 @@ static void sdma_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
 		WRITE_ONCE(*wb, (lower_32_bits(ring->wptr) << 2));
 	} else {
-		int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
-
-		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], lower_32_bits(ring->wptr) << 2);
+		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me], lower_32_bits(ring->wptr) << 2);
 	}
 }

@@ -450,7 +446,7 @@ static void sdma_v3_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 {
 	u32 ref_and_mask = 0;

-	if (ring == &ring->adev->sdma.instance[0].ring)
+	if (ring->me == 0)
 		ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1);
 	else
 		ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1);
@@ -1655,8 +1651,10 @@ static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev)
 {
 	int i;

-	for (i = 0; i < adev->sdma.num_instances; i++)
+	for (i = 0; i < adev->sdma.num_instances; i++) {
 		adev->sdma.instance[i].ring.funcs = &sdma_v3_0_ring_funcs;
+		adev->sdma.instance[i].ring.me = i;
+	}
 }

 static const struct amdgpu_irq_src_funcs sdma_v3_0_trap_irq_funcs = {
@@ -296,13 +296,12 @@ static uint64_t sdma_v4_0_ring_get_wptr(struct amdgpu_ring *ring)
 		DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr);
 	} else {
 		u32 lowbit, highbit;
-		int me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;

-		lowbit = RREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR)) >> 2;
-		highbit = RREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR_HI)) >> 2;
+		lowbit = RREG32(sdma_v4_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR)) >> 2;
+		highbit = RREG32(sdma_v4_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI)) >> 2;

 		DRM_DEBUG("wptr [%i]high== 0x%08x low==0x%08x\n",
-				me, highbit, lowbit);
+				ring->me, highbit, lowbit);
 		wptr = highbit;
 		wptr = wptr << 32;
 		wptr |= lowbit;
@@ -339,17 +338,15 @@ static void sdma_v4_0_ring_set_wptr(struct amdgpu_ring *ring)
 				ring->doorbell_index, ring->wptr << 2);
 		WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
 	} else {
-		int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
-
 		DRM_DEBUG("Not using doorbell -- "
 				"mmSDMA%i_GFX_RB_WPTR == 0x%08x "
 				"mmSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
-				me,
+				ring->me,
 				lower_32_bits(ring->wptr << 2),
-				me,
+				ring->me,
 				upper_32_bits(ring->wptr << 2));
-		WREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr << 2));
-		WREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr << 2));
+		WREG32(sdma_v4_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr << 2));
+		WREG32(sdma_v4_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr << 2));
 	}
 }

@@ -430,7 +427,7 @@ static void sdma_v4_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 	u32 ref_and_mask = 0;
 	const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;

-	if (ring == &ring->adev->sdma.instance[0].ring)
+	if (ring->me == 0)
 		ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0;
 	else
 		ref_and_mask = nbio_hf_reg->ref_and_mask_sdma1;
@@ -1651,8 +1648,10 @@ static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev)
 {
 	int i;

-	for (i = 0; i < adev->sdma.num_instances; i++)
+	for (i = 0; i < adev->sdma.num_instances; i++) {
 		adev->sdma.instance[i].ring.funcs = &sdma_v4_0_ring_funcs;
+		adev->sdma.instance[i].ring.me = i;
+	}
 }

 static const struct amdgpu_irq_src_funcs sdma_v4_0_trap_irq_funcs = {