Commit 01d468d9 authored by yipechai, committed by Alex Deucher

drm/amdgpu: Modify .ras_fini function pointer parameter

Modify the .ras_fini function pointer's parameters so that
we can remove redundant intermediate calls in some
RAS blocks.
Signed-off-by: yipechai <YiPeng.Chai@amd.com>
Reviewed-by: Tao Zhou <tao.zhou1@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Parent b5175966
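The new signature is easiest to see outside the diff. Below is a minimal, self-contained sketch that is not part of the patch: ras_common_if is reduced to a name field, amdgpu_ras_block_object to the two callbacks touched here, and generic_ras_fini() is a hypothetical helper that only illustrates why passing the block's ras_common_if lets one generic teardown routine stand in for per-block wrapper functions.

#include <stdio.h>

struct amdgpu_device;                        /* opaque in this sketch */
struct ras_common_if { const char *name; };  /* reduced to a name field */

struct amdgpu_ras_block_object {
	int  (*ras_late_init)(struct amdgpu_device *adev, struct ras_common_if *ras_block);
	/* before this patch: void (*ras_fini)(struct amdgpu_device *adev); */
	void (*ras_fini)(struct amdgpu_device *adev, struct ras_common_if *ras_block);
};

/* Hypothetical generic teardown: because the block's ras_common_if is passed
 * in, one function can serve HDP, MMHUB, NBIO, ... without a per-block
 * wrapper that hard-codes adev-><block>.ras_if. */
static void generic_ras_fini(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
	(void)adev;
	printf("tearing down RAS block: %s\n", ras_block ? ras_block->name : "(none)");
}

int main(void)
{
	struct ras_common_if hdp_if = { .name = "hdp" };
	struct amdgpu_ras_block_object hdp = { .ras_fini = generic_ras_fini };

	hdp.ras_fini(NULL, &hdp_if);  /* a block passing its own interface */
	hdp.ras_fini(NULL, NULL);     /* call sites in this patch still pass NULL */
	return 0;
}

Note that the call sites converted below (amdgpu_gmc_ras_fini, gfx_v9_0_sw_fini, sdma_v4_0_sw_fini, soc15_common_sw_fini) still pass NULL for the new parameter and the fini implementations keep using adev-><block>.ras_if, so behavior is unchanged here; per the commit message, the extra parameter is groundwork for removing those intermediate calls.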
@@ -644,7 +644,7 @@ int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *r
 	return r;
 }
 
-void amdgpu_gfx_ras_fini(struct amdgpu_device *adev)
+void amdgpu_gfx_ras_fini(struct amdgpu_device *adev, struct ras_common_if *ras_block)
 {
 	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX) &&
 	    adev->gfx.ras_if)
...
@@ -387,7 +387,7 @@ bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev, int me,
 void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable);
 int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value);
 int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block);
-void amdgpu_gfx_ras_fini(struct amdgpu_device *adev);
+void amdgpu_gfx_ras_fini(struct amdgpu_device *adev, struct ras_common_if *ras_block);
 int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
 		void *err_data,
 		struct amdgpu_iv_entry *entry);
...
@@ -455,16 +455,16 @@ int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev)
 void amdgpu_gmc_ras_fini(struct amdgpu_device *adev)
 {
 	if (adev->umc.ras && adev->umc.ras->ras_block.ras_fini)
-		adev->umc.ras->ras_block.ras_fini(adev);
+		adev->umc.ras->ras_block.ras_fini(adev, NULL);
 
 	if (adev->mmhub.ras && adev->mmhub.ras->ras_block.ras_fini)
-		adev->mmhub.ras->ras_block.ras_fini(adev);
+		adev->mmhub.ras->ras_block.ras_fini(adev, NULL);
 
 	if (adev->gmc.xgmi.ras && adev->gmc.xgmi.ras->ras_block.ras_fini)
-		adev->gmc.xgmi.ras->ras_block.ras_fini(adev);
+		adev->gmc.xgmi.ras->ras_block.ras_fini(adev, NULL);
 
 	if (adev->hdp.ras && adev->hdp.ras->ras_block.ras_fini)
-		adev->hdp.ras->ras_block.ras_fini(adev);
+		adev->hdp.ras->ras_block.ras_fini(adev, NULL);
 }
 
 /*
...
@@ -24,7 +24,7 @@
 #include "amdgpu.h"
 #include "amdgpu_ras.h"
 
-void amdgpu_hdp_ras_fini(struct amdgpu_device *adev)
+void amdgpu_hdp_ras_fini(struct amdgpu_device *adev, struct ras_common_if *ras_block)
 {
 	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__HDP) &&
 	    adev->hdp.ras_if)
...
@@ -44,5 +44,5 @@ struct amdgpu_hdp {
 };
 
 int amdgpu_hdp_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block);
-void amdgpu_hdp_ras_fini(struct amdgpu_device *adev);
+void amdgpu_hdp_ras_fini(struct amdgpu_device *adev, struct ras_common_if *ras_block);
 #endif /* __AMDGPU_HDP_H__ */
@@ -24,7 +24,7 @@
 #include "amdgpu.h"
 #include "amdgpu_ras.h"
 
-void amdgpu_mmhub_ras_fini(struct amdgpu_device *adev)
+void amdgpu_mmhub_ras_fini(struct amdgpu_device *adev, struct ras_common_if *ras_block)
 {
 	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB) &&
 	    adev->mmhub.ras_if)
...
@@ -47,6 +47,6 @@ struct amdgpu_mmhub {
 	struct amdgpu_mmhub_ras *ras;
 };
 
-void amdgpu_mmhub_ras_fini(struct amdgpu_device *adev);
+void amdgpu_mmhub_ras_fini(struct amdgpu_device *adev, struct ras_common_if *ras_block);
 
 #endif
@@ -44,7 +44,7 @@ int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *
 	return r;
 }
 
-void amdgpu_nbio_ras_fini(struct amdgpu_device *adev)
+void amdgpu_nbio_ras_fini(struct amdgpu_device *adev, struct ras_common_if *ras_block)
 {
 	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__PCIE_BIF) &&
 	    adev->nbio.ras_if)
...
@@ -105,5 +105,5 @@ struct amdgpu_nbio {
 };
 
 int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block);
-void amdgpu_nbio_ras_fini(struct amdgpu_device *adev);
+void amdgpu_nbio_ras_fini(struct amdgpu_device *adev, struct ras_common_if *ras_block);
 #endif
@@ -491,7 +491,7 @@ struct amdgpu_ras_block_object {
 	int (*ras_block_match)(struct amdgpu_ras_block_object *block_obj,
 				enum amdgpu_ras_block block, uint32_t sub_block_index);
 	int (*ras_late_init)(struct amdgpu_device *adev, struct ras_common_if *ras_block);
-	void (*ras_fini)(struct amdgpu_device *adev);
+	void (*ras_fini)(struct amdgpu_device *adev, struct ras_common_if *ras_block);
 	ras_ih_cb ras_cb;
 	const struct amdgpu_ras_block_hw_ops *hw_ops;
 };
...
@@ -111,7 +111,7 @@ int amdgpu_sdma_ras_late_init(struct amdgpu_device *adev,
 	return r;
 }
 
-void amdgpu_sdma_ras_fini(struct amdgpu_device *adev)
+void amdgpu_sdma_ras_fini(struct amdgpu_device *adev, struct ras_common_if *ras_block)
 {
 	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA) &&
 	    adev->sdma.ras_if)
...
@@ -118,7 +118,7 @@ int amdgpu_sdma_get_index_from_ring(struct amdgpu_ring *ring, uint32_t *index);
 uint64_t amdgpu_sdma_get_csa_mc_addr(struct amdgpu_ring *ring, unsigned vmid);
 int amdgpu_sdma_ras_late_init(struct amdgpu_device *adev,
 		struct ras_common_if *ras_block);
-void amdgpu_sdma_ras_fini(struct amdgpu_device *adev);
+void amdgpu_sdma_ras_fini(struct amdgpu_device *adev, struct ras_common_if *ras_block);
 int amdgpu_sdma_process_ras_data_cb(struct amdgpu_device *adev,
 		void *err_data,
 		struct amdgpu_iv_entry *entry);
...
@@ -162,7 +162,7 @@ int amdgpu_umc_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *r
 	return r;
 }
 
-void amdgpu_umc_ras_fini(struct amdgpu_device *adev)
+void amdgpu_umc_ras_fini(struct amdgpu_device *adev, struct ras_common_if *ras_block)
 {
 	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC) &&
 	    adev->umc.ras_if)
...
@@ -73,7 +73,7 @@ struct amdgpu_umc {
 };
 
 int amdgpu_umc_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block);
-void amdgpu_umc_ras_fini(struct amdgpu_device *adev);
+void amdgpu_umc_ras_fini(struct amdgpu_device *adev, struct ras_common_if *ras_block);
 int amdgpu_umc_poison_handler(struct amdgpu_device *adev,
 		void *ras_error_status,
 		bool reset);
...
@@ -768,7 +768,7 @@ static int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev, struct ras_comm
 	return amdgpu_ras_block_late_init(adev, ras_block);
 }
 
-static void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev)
+static void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev, struct ras_common_if *ras_block)
 {
 	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL) &&
 	    adev->gmc.xgmi.ras_if)
...
@@ -2433,7 +2433,7 @@ static int gfx_v9_0_sw_fini(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	if (adev->gfx.ras && adev->gfx.ras->ras_block.ras_fini)
-		adev->gfx.ras->ras_block.ras_fini(adev);
+		adev->gfx.ras->ras_block.ras_fini(adev, NULL);
 
 	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
 		amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
...
@@ -37,7 +37,7 @@ static void mca_v3_0_mp0_query_ras_error_count(struct amdgpu_device *adev,
 				      ras_error_status);
 }
 
-static void mca_v3_0_mp0_ras_fini(struct amdgpu_device *adev)
+static void mca_v3_0_mp0_ras_fini(struct amdgpu_device *adev, struct ras_common_if *ras_block)
 {
 	amdgpu_mca_ras_fini(adev, &adev->mca.mp0);
 }
@@ -83,7 +83,7 @@ static void mca_v3_0_mp1_query_ras_error_count(struct amdgpu_device *adev,
 				      ras_error_status);
 }
 
-static void mca_v3_0_mp1_ras_fini(struct amdgpu_device *adev)
+static void mca_v3_0_mp1_ras_fini(struct amdgpu_device *adev, struct ras_common_if *ras_block)
 {
 	amdgpu_mca_ras_fini(adev, &adev->mca.mp1);
 }
@@ -115,7 +115,7 @@ static void mca_v3_0_mpio_query_ras_error_count(struct amdgpu_device *adev,
 				      ras_error_status);
 }
 
-static void mca_v3_0_mpio_ras_fini(struct amdgpu_device *adev)
+static void mca_v3_0_mpio_ras_fini(struct amdgpu_device *adev, struct ras_common_if *ras_block)
 {
 	amdgpu_mca_ras_fini(adev, &adev->mca.mpio);
 }
...
@@ -1997,7 +1997,7 @@ static int sdma_v4_0_sw_fini(void *handle)
 
 	if (adev->sdma.ras && adev->sdma.ras->ras_block.hw_ops &&
 	    adev->sdma.ras->ras_block.ras_fini)
-		adev->sdma.ras->ras_block.ras_fini(adev);
+		adev->sdma.ras->ras_block.ras_fini(adev, NULL);
 
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		amdgpu_ring_fini(&adev->sdma.instance[i].ring);
...
@@ -1215,7 +1215,7 @@ static int soc15_common_sw_fini(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	if (adev->nbio.ras && adev->nbio.ras->ras_block.ras_fini)
-		adev->nbio.ras->ras_block.ras_fini(adev);
+		adev->nbio.ras->ras_block.ras_fini(adev, NULL);
 
 	if (adev->df.funcs &&
 	    adev->df.funcs->sw_fini)
...