Commit 069211e5 authored by Christian König, committed by Dave Airlie

drm/radeon: move lockup detection code into radeon_ring.c

It isn't chipset specific, so it makes no sense to keep it inside r100.c.
Signed-off-by: Christian König <deathsimple@vodafone.de>
Reviewed-by: Jerome Glisse <jglisse@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Parent 6c6f4783
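For context, the helpers this patch introduces implement a simple progress-tracking scheme: remember the last CP read pointer that was seen together with a timestamp, treat any rptr movement as progress, and only report a lockup once the rptr has been stuck for longer than a threshold (10 seconds in this code). A minimal, self-contained userspace-style sketch of that scheme follows; the names (ring_tracker, tracker_update, tracker_test_lockup) are made up for illustration, and the real code additionally resets the tracking on a jiffies wraparound.

/* Illustrative userspace sketch of the lockup-tracking scheme; the kernel
 * code uses jiffies and the new radeon_ring fields last_rptr/last_activity. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct ring_tracker {
	uint32_t last_rptr;      /* CP read pointer seen at the last update */
	time_t   last_activity;  /* timestamp of the last observed progress */
};

/* Counterpart of radeon_ring_lockup_update(): record current progress. */
static void tracker_update(struct ring_tracker *t, uint32_t rptr)
{
	t->last_rptr = rptr;
	t->last_activity = time(NULL);
}

/* Counterpart of radeon_ring_test_lockup(): report a lockup only if the
 * read pointer has not moved for more than 10 seconds. */
static bool tracker_test_lockup(struct ring_tracker *t, uint32_t rptr)
{
	if (rptr != t->last_rptr) {
		/* CP made progress: refresh the tracking info, no lockup. */
		tracker_update(t, rptr);
		return false;
	}
	if (difftime(time(NULL), t->last_activity) >= 10.0) {
		fprintf(stderr, "lockup: rptr stuck at %u\n", (unsigned)rptr);
		return true;
	}
	/* Stuck, but not for long enough yet: give the GPU a chance. */
	return false;
}

int main(void)
{
	struct ring_tracker t;

	tracker_update(&t, 0);                                    /* initial state */
	printf("stuck rptr:  %d\n", tracker_test_lockup(&t, 0));  /* 0: too early  */
	printf("moving rptr: %d\n", tracker_test_lockup(&t, 4));  /* 0: progress   */
	return 0;
}

Callers of the real helpers are also expected to write to the ring to force CP activity before testing, so an idle CP is not mistaken for a stuck one; each per-ASIC gpu_is_lockup() hunk below follows that pattern.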
@@ -2424,7 +2424,6 @@ bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *rin
u32 srbm_status;
u32 grbm_status;
u32 grbm_status_se0, grbm_status_se1;
struct r100_gpu_lockup *lockup = &rdev->config.evergreen.lockup;
int r;
srbm_status = RREG32(SRBM_STATUS);
@@ -2432,7 +2431,7 @@ bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *rin
grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
if (!(grbm_status & GUI_ACTIVE)) {
r100_gpu_lockup_update(lockup, ring);
radeon_ring_lockup_update(ring);
return false;
}
/* force CP activities */
@@ -2444,7 +2443,7 @@ bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *rin
radeon_ring_unlock_commit(rdev, ring);
}
ring->rptr = RREG32(CP_RB_RPTR);
return r100_gpu_cp_is_lockup(rdev, lockup, ring);
return radeon_ring_test_lockup(rdev, ring);
}
static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
@@ -1397,7 +1397,6 @@ bool cayman_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
u32 srbm_status;
u32 grbm_status;
u32 grbm_status_se0, grbm_status_se1;
struct r100_gpu_lockup *lockup = &rdev->config.cayman.lockup;
int r;
srbm_status = RREG32(SRBM_STATUS);
@@ -1405,7 +1404,7 @@ bool cayman_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
if (!(grbm_status & GUI_ACTIVE)) {
r100_gpu_lockup_update(lockup, ring);
radeon_ring_lockup_update(ring);
return false;
}
/* force CP activities */
@@ -1418,7 +1417,7 @@ bool cayman_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
}
/* XXX deal with CP0,1,2 */
ring->rptr = RREG32(ring->rptr_reg);
return r100_gpu_cp_is_lockup(rdev, lockup, ring);
return radeon_ring_test_lockup(rdev, ring);
}
static int cayman_gpu_soft_reset(struct radeon_device *rdev)
@@ -2159,59 +2159,6 @@ int r100_mc_wait_for_idle(struct radeon_device *rdev)
return -1;
}
void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_ring *ring)
{
lockup->last_cp_rptr = ring->rptr;
lockup->last_jiffies = jiffies;
}
/**
* r100_gpu_cp_is_lockup() - check if CP is lockup by recording information
* @rdev: radeon device structure
* @lockup: r100_gpu_lockup structure holding CP lockup tracking informations
* @cp: radeon_cp structure holding CP information
*
* We don't need to initialize the lockup tracking information as we will either
* have CP rptr to a different value of jiffies wrap around which will force
* initialization of the lockup tracking informations.
*
* A possible false positivie is if we get call after while and last_cp_rptr ==
* the current CP rptr, even if it's unlikely it might happen. To avoid this
* if the elapsed time since last call is bigger than 2 second than we return
* false and update the tracking information. Due to this the caller must call
* r100_gpu_cp_is_lockup several time in less than 2sec for lockup to be reported
* the fencing code should be cautious about that.
*
* Caller should write to the ring to force CP to do something so we don't get
* false positive when CP is just gived nothing to do.
*
**/
bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_ring *ring)
{
unsigned long cjiffies, elapsed;
cjiffies = jiffies;
if (!time_after(cjiffies, lockup->last_jiffies)) {
/* likely a wrap around */
lockup->last_cp_rptr = ring->rptr;
lockup->last_jiffies = jiffies;
return false;
}
if (ring->rptr != lockup->last_cp_rptr) {
/* CP is still working no lockup */
lockup->last_cp_rptr = ring->rptr;
lockup->last_jiffies = jiffies;
return false;
}
elapsed = jiffies_to_msecs(cjiffies - lockup->last_jiffies);
if (elapsed >= 10000) {
dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed);
return true;
}
/* give a chance to the GPU ... */
return false;
}
bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
u32 rbbm_status;
@@ -2219,7 +2166,7 @@ bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
rbbm_status = RREG32(R_000E40_RBBM_STATUS);
if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
r100_gpu_lockup_update(&rdev->config.r100.lockup, ring);
radeon_ring_lockup_update(ring);
return false;
}
/* force CP activities */
@@ -2231,7 +2178,7 @@ bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
radeon_ring_unlock_commit(rdev, ring);
}
ring->rptr = RREG32(ring->rptr_reg);
return r100_gpu_cp_is_lockup(rdev, &rdev->config.r100.lockup, ring);
return radeon_ring_test_lockup(rdev, ring);
}
void r100_bm_disable(struct radeon_device *rdev)
@@ -384,7 +384,7 @@ bool r300_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
rbbm_status = RREG32(R_000E40_RBBM_STATUS);
if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
r100_gpu_lockup_update(&rdev->config.r300.lockup, ring);
radeon_ring_lockup_update(ring);
return false;
}
/* force CP activities */
@@ -396,7 +396,7 @@ bool r300_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
radeon_ring_unlock_commit(rdev, ring);
}
ring->rptr = RREG32(RADEON_CP_RB_RPTR);
return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, ring);
return radeon_ring_test_lockup(rdev, ring);
}
int r300_asic_reset(struct radeon_device *rdev)
@@ -1350,19 +1350,13 @@ bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
u32 srbm_status;
u32 grbm_status;
u32 grbm_status2;
struct r100_gpu_lockup *lockup;
int r;
if (rdev->family >= CHIP_RV770)
lockup = &rdev->config.rv770.lockup;
else
lockup = &rdev->config.r600.lockup;
srbm_status = RREG32(R_000E50_SRBM_STATUS);
grbm_status = RREG32(R_008010_GRBM_STATUS);
grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
if (!G_008010_GUI_ACTIVE(grbm_status)) {
r100_gpu_lockup_update(lockup, ring);
radeon_ring_lockup_update(ring);
return false;
}
/* force CP activities */
@@ -1374,7 +1368,7 @@ bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
radeon_ring_unlock_commit(rdev, ring);
}
ring->rptr = RREG32(ring->rptr_reg);
return r100_gpu_cp_is_lockup(rdev, lockup, ring);
return radeon_ring_test_lockup(rdev, ring);
}
int r600_asic_reset(struct radeon_device *rdev)
@@ -670,6 +670,8 @@ struct radeon_ring {
unsigned ring_size;
unsigned ring_free_dw;
int count_dw;
unsigned long last_activity;
unsigned last_rptr;
uint64_t gpu_addr;
uint32_t align_mask;
uint32_t ptr_mask;
@@ -814,6 +816,8 @@ void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp);
void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp);
void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *cp);
int radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
void radeon_ring_lockup_update(struct radeon_ring *ring);
bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size,
unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop);
@@ -1272,16 +1276,10 @@ struct radeon_asic {
/*
* Asic structures
*/
struct r100_gpu_lockup {
unsigned long last_jiffies;
u32 last_cp_rptr;
};
struct r100_asic {
const unsigned *reg_safe_bm;
unsigned reg_safe_bm_size;
u32 hdp_cntl;
struct r100_gpu_lockup lockup;
};
struct r300_asic {
@@ -1289,7 +1287,6 @@ struct r300_asic {
unsigned reg_safe_bm_size;
u32 resync_scratch;
u32 hdp_cntl;
struct r100_gpu_lockup lockup;
};
struct r600_asic {
@@ -1311,7 +1308,6 @@ struct r600_asic {
unsigned tiling_group_size;
unsigned tile_config;
unsigned backend_map;
struct r100_gpu_lockup lockup;
};
struct rv770_asic {
@@ -1337,7 +1333,6 @@ struct rv770_asic {
unsigned tiling_group_size;
unsigned tile_config;
unsigned backend_map;
struct r100_gpu_lockup lockup;
};
struct evergreen_asic {
@@ -1364,7 +1359,6 @@ struct evergreen_asic {
unsigned tiling_group_size;
unsigned tile_config;
unsigned backend_map;
struct r100_gpu_lockup lockup;
};
struct cayman_asic {
@@ -1403,7 +1397,6 @@ struct cayman_asic {
unsigned multi_gpu_tile_size;
unsigned tile_config;
struct r100_gpu_lockup lockup;
};
struct si_asic {
@@ -1434,7 +1427,6 @@ struct si_asic {
unsigned multi_gpu_tile_size;
unsigned tile_config;
struct r100_gpu_lockup lockup;
};
union radeon_asic_config {
@@ -103,11 +103,6 @@ int r100_pci_gart_enable(struct radeon_device *rdev);
void r100_pci_gart_disable(struct radeon_device *rdev);
int r100_debugfs_mc_info_init(struct radeon_device *rdev);
int r100_gui_wait_for_idle(struct radeon_device *rdev);
void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup,
struct radeon_ring *cp);
bool r100_gpu_cp_is_lockup(struct radeon_device *rdev,
struct r100_gpu_lockup *lockup,
struct radeon_ring *cp);
void r100_ib_fini(struct radeon_device *rdev);
int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
void r100_irq_disable(struct radeon_device *rdev);
@@ -396,6 +396,59 @@ void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *rin
mutex_unlock(&ring->mutex);
}
void radeon_ring_lockup_update(struct radeon_ring *ring)
{
ring->last_rptr = ring->rptr;
ring->last_activity = jiffies;
}
/**
* radeon_ring_test_lockup() - check whether the ring is locked up by tracking CP progress
* @rdev: radeon device structure
* @ring: radeon_ring structure holding ring information
*
* We don't need to initialize the lockup tracking information explicitly: either the
* CP rptr will differ from the recorded value, or a jiffies wraparound will be
* detected, and both cases refresh the tracking information.
*
* A possible false positive is when this gets called after a long pause and last_rptr
* still equals the current CP rptr; it's unlikely, but it can happen. Note that a
* lockup is only reported once the rptr has stayed unchanged for 10 seconds or more,
* so the caller must call radeon_ring_test_lockup() repeatedly within that window for
* a lockup to be reported; the fencing code should be cautious about that.
*
* The caller should also write to the ring to force the CP to do something, so we
* don't get a false positive when the CP simply has nothing to do.
*
**/
bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
unsigned long cjiffies, elapsed;
uint32_t rptr;
cjiffies = jiffies;
if (!time_after(cjiffies, ring->last_activity)) {
/* likely a wrap around */
radeon_ring_lockup_update(ring);
return false;
}
rptr = RREG32(ring->rptr_reg);
ring->rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
if (ring->rptr != ring->last_rptr) {
/* CP is still working no lockup */
radeon_ring_lockup_update(ring);
return false;
}
elapsed = jiffies_to_msecs(cjiffies - ring->last_activity);
if (elapsed >= 10000) {
dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed);
return true;
}
/* give a chance to the GPU ... */
return false;
}
int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size,
unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop)
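A note on the wraparound guard at the top of radeon_ring_test_lockup() above: jiffies is an unsigned counter that eventually wraps, so absolute comparisons are unsafe, and the kernel's time_after() macro from <linux/jiffies.h> compensates with wraparound-safe signed subtraction, roughly (simplified, without its typecheck() helpers):

/* time_after(a, b): true when a is after b, even across a wraparound. */
#define time_after(a, b)  ((long)((b) - (a)) < 0)

When !time_after(cjiffies, ring->last_activity) holds, either no time has passed or jiffies has wrapped since last_activity was recorded; the elapsed-time arithmetic would be meaningless in the wrap case, so the function simply refreshes the tracking information and returns false.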
@@ -2217,7 +2217,6 @@ bool si_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
u32 srbm_status;
u32 grbm_status, grbm_status2;
u32 grbm_status_se0, grbm_status_se1;
struct r100_gpu_lockup *lockup = &rdev->config.si.lockup;
int r;
srbm_status = RREG32(SRBM_STATUS);
@@ -2226,7 +2225,7 @@ bool si_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
if (!(grbm_status & GUI_ACTIVE)) {
r100_gpu_lockup_update(lockup, ring);
radeon_ring_lockup_update(ring);
return false;
}
/* force CP activities */
@@ -2239,7 +2238,7 @@ bool si_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
}
/* XXX deal with CP0,1,2 */
ring->rptr = RREG32(ring->rptr_reg);
return r100_gpu_cp_is_lockup(rdev, lockup, ring);
return radeon_ring_test_lockup(rdev, ring);
}
static int si_gpu_soft_reset(struct radeon_device *rdev)