Commit 7ccf5aa8 authored by Alex Deucher

drm/amdgpu/ih: store the full context id

The contextID field (formerly known as src_data) of the IH
vector stores client-specific information about an interrupt.
It was expanded from 32 bits to 128 bits on newer ASICs.  Expand
the src_data field to handle this.
Reviewed-by: Harry Wentland <harry.wentland@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Parent d766e6a3
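For orientation before the diff: src_data grows from a single dword into a four-dword array, so existing handlers index src_data[0] for the legacy 28-bit payload while newer ASICs can fill all four dwords. Below is a minimal, hypothetical sketch (not part of this patch) of how a consumer could walk the full 128-bit context ID; the helper name amdgpu_ih_dump_src_data is invented for illustration, and only struct amdgpu_iv_entry and AMDGPU_IH_SRC_DATA_MAX_SIZE_DW come from the change itself.

/* Hypothetical helper, not part of this patch: dump every dword of the
 * context ID carried in an IV entry.  The legacy decode paths below only
 * fill src_data[0]; newer ASICs can populate all
 * AMDGPU_IH_SRC_DATA_MAX_SIZE_DW dwords.
 */
static void amdgpu_ih_dump_src_data(struct amdgpu_device *adev,
                                    const struct amdgpu_iv_entry *entry)
{
        int i;

        for (i = 0; i < AMDGPU_IH_SRC_DATA_MAX_SIZE_DW; i++)
                dev_dbg(adev->dev, "src_data[%d] = 0x%08x\n",
                        i, entry->src_data[i]);
}

Keeping the field as an array of dwords rather than a wider integer type lets the legacy decode paths keep writing just the first element, which is exactly what the hunks below do.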
@@ -50,10 +50,12 @@ struct amdgpu_ih_ring {
         dma_addr_t      rb_dma_addr; /* only used when use_bus_addr = true */
 };
 
+#define AMDGPU_IH_SRC_DATA_MAX_SIZE_DW 4
+
 struct amdgpu_iv_entry {
         unsigned client_id;
         unsigned src_id;
-        unsigned src_data;
+        unsigned src_data[AMDGPU_IH_SRC_DATA_MAX_SIZE_DW];
         unsigned ring_id;
         unsigned vm_id;
         unsigned vm_id_src;
......
@@ -250,7 +250,7 @@ static void cik_ih_decode_iv(struct amdgpu_device *adev,
 
         entry->client_id = AMDGPU_IH_CLIENTID_LEGACY;
         entry->src_id = dw[0] & 0xff;
-        entry->src_data = dw[1] & 0xfffffff;
+        entry->src_data[0] = dw[1] & 0xfffffff;
         entry->ring_id = dw[2] & 0xff;
         entry->vm_id = (dw[2] >> 8) & 0xff;
         entry->pas_id = (dw[2] >> 16) & 0xffff;
......
@@ -229,7 +229,7 @@ static void cz_ih_decode_iv(struct amdgpu_device *adev,
 
         entry->client_id = AMDGPU_IH_CLIENTID_LEGACY;
         entry->src_id = dw[0] & 0xff;
-        entry->src_data = dw[1] & 0xfffffff;
+        entry->src_data[0] = dw[1] & 0xfffffff;
         entry->ring_id = dw[2] & 0xff;
         entry->vm_id = (dw[2] >> 8) & 0xff;
         entry->pas_id = (dw[2] >> 16) & 0xffff;
......
@@ -3398,7 +3398,7 @@ static int dce_v10_0_crtc_irq(struct amdgpu_device *adev,
         uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
         unsigned irq_type = amdgpu_crtc_idx_to_irq_type(adev, crtc);
 
-        switch (entry->src_data) {
+        switch (entry->src_data[0]) {
         case 0: /* vblank */
                 if (disp_int & interrupt_status_offsets[crtc].vblank)
                         dce_v10_0_crtc_vblank_int_ack(adev, crtc);
@@ -3421,7 +3421,7 @@ static int dce_v10_0_crtc_irq(struct amdgpu_device *adev,
                 break;
         default:
-                DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
+                DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
                 break;
         }
@@ -3435,12 +3435,12 @@ static int dce_v10_0_hpd_irq(struct amdgpu_device *adev,
         uint32_t disp_int, mask;
         unsigned hpd;
 
-        if (entry->src_data >= adev->mode_info.num_hpd) {
-                DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
+        if (entry->src_data[0] >= adev->mode_info.num_hpd) {
+                DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
                 return 0;
         }
 
-        hpd = entry->src_data;
+        hpd = entry->src_data[0];
         disp_int = RREG32(interrupt_status_offsets[hpd].reg);
         mask = interrupt_status_offsets[hpd].hpd;
......
@@ -3462,7 +3462,7 @@ static int dce_v11_0_crtc_irq(struct amdgpu_device *adev,
         uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
         unsigned irq_type = amdgpu_crtc_idx_to_irq_type(adev, crtc);
 
-        switch (entry->src_data) {
+        switch (entry->src_data[0]) {
         case 0: /* vblank */
                 if (disp_int & interrupt_status_offsets[crtc].vblank)
                         dce_v11_0_crtc_vblank_int_ack(adev, crtc);
@@ -3485,7 +3485,7 @@ static int dce_v11_0_crtc_irq(struct amdgpu_device *adev,
                 break;
         default:
-                DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
+                DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
                 break;
         }
@@ -3499,12 +3499,12 @@ static int dce_v11_0_hpd_irq(struct amdgpu_device *adev,
         uint32_t disp_int, mask;
         unsigned hpd;
 
-        if (entry->src_data >= adev->mode_info.num_hpd) {
-                DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
+        if (entry->src_data[0] >= adev->mode_info.num_hpd) {
+                DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
                 return 0;
         }
 
-        hpd = entry->src_data;
+        hpd = entry->src_data[0];
         disp_int = RREG32(interrupt_status_offsets[hpd].reg);
         mask = interrupt_status_offsets[hpd].hpd;
......
@@ -2592,7 +2592,7 @@ static int dce_v6_0_crtc_irq(struct amdgpu_device *adev,
         uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
         unsigned irq_type = amdgpu_crtc_idx_to_irq_type(adev, crtc);
 
-        switch (entry->src_data) {
+        switch (entry->src_data[0]) {
         case 0: /* vblank */
                 if (disp_int & interrupt_status_offsets[crtc].vblank)
                         WREG32(mmVBLANK_STATUS + crtc_offsets[crtc], VBLANK_ACK);
@@ -2613,7 +2613,7 @@ static int dce_v6_0_crtc_irq(struct amdgpu_device *adev,
                 DRM_DEBUG("IH: D%d vline\n", crtc + 1);
                 break;
         default:
-                DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
+                DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
                 break;
         }
@@ -2703,12 +2703,12 @@ static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
         uint32_t disp_int, mask, tmp;
         unsigned hpd;
 
-        if (entry->src_data >= adev->mode_info.num_hpd) {
-                DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
+        if (entry->src_data[0] >= adev->mode_info.num_hpd) {
+                DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
                 return 0;
         }
 
-        hpd = entry->src_data;
+        hpd = entry->src_data[0];
         disp_int = RREG32(interrupt_status_offsets[hpd].reg);
         mask = interrupt_status_offsets[hpd].hpd;
......
@@ -3159,7 +3159,7 @@ static int dce_v8_0_crtc_irq(struct amdgpu_device *adev,
         uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
         unsigned irq_type = amdgpu_crtc_idx_to_irq_type(adev, crtc);
 
-        switch (entry->src_data) {
+        switch (entry->src_data[0]) {
         case 0: /* vblank */
                 if (disp_int & interrupt_status_offsets[crtc].vblank)
                         WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], LB_VBLANK_STATUS__VBLANK_ACK_MASK);
@@ -3180,7 +3180,7 @@ static int dce_v8_0_crtc_irq(struct amdgpu_device *adev,
                 DRM_DEBUG("IH: D%d vline\n", crtc + 1);
                 break;
         default:
-                DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
+                DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
                 break;
         }
@@ -3270,12 +3270,12 @@ static int dce_v8_0_hpd_irq(struct amdgpu_device *adev,
         uint32_t disp_int, mask, tmp;
         unsigned hpd;
 
-        if (entry->src_data >= adev->mode_info.num_hpd) {
-                DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
+        if (entry->src_data[0] >= adev->mode_info.num_hpd) {
+                DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
                 return 0;
         }
 
-        hpd = entry->src_data;
+        hpd = entry->src_data[0];
         disp_int = RREG32(interrupt_status_offsets[hpd].reg);
         mask = interrupt_status_offsets[hpd].hpd;
......
@@ -1093,7 +1093,7 @@ static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev,
 
         if (printk_ratelimit()) {
                 dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
-                        entry->src_id, entry->src_data);
+                        entry->src_id, entry->src_data[0]);
                 dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
                         addr);
                 dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
......
@@ -1264,7 +1264,7 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
 
         if (printk_ratelimit()) {
                 dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
-                        entry->src_id, entry->src_data);
+                        entry->src_id, entry->src_data[0]);
                 dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
                         addr);
                 dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
......
@@ -1301,7 +1301,7 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
 
         if (amdgpu_sriov_vf(adev)) {
                 dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
-                        entry->src_id, entry->src_data);
+                        entry->src_id, entry->src_data[0]);
                 dev_err(adev->dev, " Can't decode VM fault info here on SRIOV VF\n");
                 return 0;
         }
@@ -1320,7 +1320,7 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
 
         if (printk_ratelimit()) {
                 dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
-                        entry->src_id, entry->src_data);
+                        entry->src_id, entry->src_data[0]);
                 dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
                         addr);
                 dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
......
@@ -229,7 +229,7 @@ static void iceland_ih_decode_iv(struct amdgpu_device *adev,
 
         entry->client_id = AMDGPU_IH_CLIENTID_LEGACY;
         entry->src_id = dw[0] & 0xff;
-        entry->src_data = dw[1] & 0xfffffff;
+        entry->src_data[0] = dw[1] & 0xfffffff;
         entry->ring_id = dw[2] & 0xff;
         entry->vm_id = (dw[2] >> 8) & 0xff;
         entry->pas_id = (dw[2] >> 16) & 0xffff;
......
@@ -131,7 +131,7 @@ static void si_ih_decode_iv(struct amdgpu_device *adev,
 
         entry->client_id = AMDGPU_IH_CLIENTID_LEGACY;
         entry->src_id = dw[0] & 0xff;
-        entry->src_data = dw[1] & 0xfffffff;
+        entry->src_data[0] = dw[1] & 0xfffffff;
         entry->ring_id = dw[2] & 0xff;
         entry->vm_id = (dw[2] >> 8) & 0xff;
......
@@ -240,7 +240,7 @@ static void tonga_ih_decode_iv(struct amdgpu_device *adev,
 
         entry->client_id = AMDGPU_IH_CLIENTID_LEGACY;
         entry->src_id = dw[0] & 0xff;
-        entry->src_data = dw[1] & 0xfffffff;
+        entry->src_data[0] = dw[1] & 0xfffffff;
         entry->ring_id = dw[2] & 0xff;
         entry->vm_id = (dw[2] >> 8) & 0xff;
         entry->pas_id = (dw[2] >> 16) & 0xffff;
......
@@ -560,14 +560,14 @@ static int vce_v2_0_process_interrupt(struct amdgpu_device *adev,
                                       struct amdgpu_iv_entry *entry)
 {
         DRM_DEBUG("IH: VCE\n");
 
-        switch (entry->src_data) {
+        switch (entry->src_data[0]) {
         case 0:
         case 1:
-                amdgpu_fence_process(&adev->vce.ring[entry->src_data]);
+                amdgpu_fence_process(&adev->vce.ring[entry->src_data[0]]);
                 break;
         default:
                 DRM_ERROR("Unhandled interrupt: %d %d\n",
-                          entry->src_id, entry->src_data);
+                          entry->src_id, entry->src_data[0]);
                 break;
         }
......
@@ -695,15 +695,15 @@ static int vce_v3_0_process_interrupt(struct amdgpu_device *adev,
         WREG32_FIELD(VCE_SYS_INT_STATUS, VCE_SYS_INT_TRAP_INTERRUPT_INT, 1);
 
-        switch (entry->src_data) {
+        switch (entry->src_data[0]) {
         case 0:
         case 1:
         case 2:
-                amdgpu_fence_process(&adev->vce.ring[entry->src_data]);
+                amdgpu_fence_process(&adev->vce.ring[entry->src_data[0]]);
                 break;
         default:
                 DRM_ERROR("Unhandled interrupt: %d %d\n",
-                          entry->src_id, entry->src_data);
+                          entry->src_id, entry->src_data[0]);
                 break;
         }
......