Commit fd06518d authored by Rex Zhu, committed by Alex Deucher

drm/amd/pp: Fix memory leak in error path in smumgr

Free the backend structure if we fail to allocate device memory.
Reviewed-by: Evan Quan <evan.quan@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Parent 116af450
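
The fix converts each mid-function return into a jump into a small chain of cleanup labels, so everything allocated before the failure (the device buffers and, last of all, the backend structure itself) is released before returning. Below is a minimal, self-contained sketch of that unwind pattern; struct backend, backend_init and the malloc'd buffers are illustrative stand-ins, not the actual amdgpu/powerplay API.

/*
 * Sketch of the goto-based unwind pattern used by this fix.
 * All names here are hypothetical stand-ins for the smumgr backend
 * and the amdgpu_bo_create_kernel()/amdgpu_bo_free_kernel() calls.
 */
#include <stdio.h>
#include <stdlib.h>

struct backend {
	void *toc_buffer;	/* stands in for the TOC buffer object */
	void *smu_buffer;	/* stands in for the SMU buffer object */
};

static int backend_init(struct backend **out)
{
	struct backend *be = calloc(1, sizeof(*be));

	if (!be)
		return -1;

	be->toc_buffer = malloc(4096);
	if (!be->toc_buffer)
		goto err2;

	be->smu_buffer = malloc(4096);
	if (!be->smu_buffer)
		goto err1;

	*out = be;
	return 0;

	/*
	 * Unwind in reverse order of allocation; falling through the
	 * labels frees exactly what was set up before the failure,
	 * ending with the backend structure itself.
	 */
err1:
	free(be->toc_buffer);
err2:
	free(be);
	return -1;
}

int main(void)
{
	struct backend *be = NULL;

	if (backend_init(&be)) {
		printf("init failed, nothing leaked\n");
		return 1;
	}

	free(be->smu_buffer);
	free(be->toc_buffer);
	free(be);
	return 0;
}

In cz_smu_init and rv_smu_init the patch adds one label per buffer in exactly this order; in the smu7-based backends the only thing to undo is the allocation of the per-ASIC private structure (see the second sketch after the last hunk below).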
@@ -764,7 +764,7 @@ static int cz_smu_init(struct pp_hwmgr *hwmgr)
 				&cz_smu->toc_buffer.mc_addr,
 				&cz_smu->toc_buffer.kaddr);
 	if (ret)
-		return -EINVAL;
+		goto err2;
 
 	ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
 				cz_smu->smu_buffer.data_size,
@@ -773,19 +773,15 @@ static int cz_smu_init(struct pp_hwmgr *hwmgr)
 				&cz_smu->smu_buffer.handle,
 				&cz_smu->smu_buffer.mc_addr,
 				&cz_smu->smu_buffer.kaddr);
-	if (ret) {
-		amdgpu_bo_free_kernel(&cz_smu->toc_buffer.handle,
-					&cz_smu->toc_buffer.mc_addr,
-					&cz_smu->toc_buffer.kaddr);
-		return -EINVAL;
-	}
+	if (ret)
+		goto err1;
 
 	if (0 != cz_smu_populate_single_scratch_entry(hwmgr,
 				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
 				UCODE_ID_RLC_SCRATCH_SIZE_BYTE,
 				&cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
 		pr_err("Error when Populate Firmware Entry.\n");
-		return -1;
+		goto err0;
 	}
 
 	if (0 != cz_smu_populate_single_scratch_entry(hwmgr,
@@ -793,14 +789,14 @@ static int cz_smu_init(struct pp_hwmgr *hwmgr)
 				UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE,
 				&cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
 		pr_err("Error when Populate Firmware Entry.\n");
-		return -1;
+		goto err0;
 	}
 
 	if (0 != cz_smu_populate_single_scratch_entry(hwmgr,
 				CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
 				UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE,
 				&cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
 		pr_err("Error when Populate Firmware Entry.\n");
-		return -1;
+		goto err0;
 	}
 
 	if (0 != cz_smu_populate_single_scratch_entry(hwmgr,
@@ -808,7 +804,7 @@ static int cz_smu_init(struct pp_hwmgr *hwmgr)
 				sizeof(struct SMU8_MultimediaPowerLogData),
 				&cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
 		pr_err("Error when Populate Firmware Entry.\n");
-		return -1;
+		goto err0;
 	}
 
 	if (0 != cz_smu_populate_single_scratch_entry(hwmgr,
@@ -816,10 +812,22 @@ static int cz_smu_init(struct pp_hwmgr *hwmgr)
 				sizeof(struct SMU8_Fusion_ClkTable),
 				&cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
 		pr_err("Error when Populate Firmware Entry.\n");
-		return -1;
+		goto err0;
 	}
 
 	return 0;
+
+err0:
+	amdgpu_bo_free_kernel(&cz_smu->smu_buffer.handle,
+				&cz_smu->smu_buffer.mc_addr,
+				&cz_smu->smu_buffer.kaddr);
+err1:
+	amdgpu_bo_free_kernel(&cz_smu->toc_buffer.handle,
+				&cz_smu->toc_buffer.mc_addr,
+				&cz_smu->toc_buffer.kaddr);
+err2:
+	kfree(cz_smu);
+	return -EINVAL;
 }
 
 static int cz_smu_fini(struct pp_hwmgr *hwmgr)
...
@@ -354,8 +354,10 @@ static int fiji_smu_init(struct pp_hwmgr *hwmgr)
 	hwmgr->smu_backend = fiji_priv;
 
-	if (smu7_init(hwmgr))
+	if (smu7_init(hwmgr)) {
+		kfree(fiji_priv);
 		return -EINVAL;
+	}
 
 	return 0;
 }
...
@@ -271,8 +271,10 @@ static int iceland_smu_init(struct pp_hwmgr *hwmgr)
 	hwmgr->smu_backend = iceland_priv;
 
-	if (smu7_init(hwmgr))
+	if (smu7_init(hwmgr)) {
+		kfree(iceland_priv);
 		return -EINVAL;
+	}
 
 	return 0;
 }
...
@@ -349,8 +349,10 @@ static int polaris10_smu_init(struct pp_hwmgr *hwmgr)
 	hwmgr->smu_backend = smu_data;
 
-	if (smu7_init(hwmgr))
+	if (smu7_init(hwmgr)) {
+		kfree(smu_data);
 		return -EINVAL;
+	}
 
 	return 0;
 }
...
...@@ -347,7 +347,7 @@ static int rv_smu_init(struct pp_hwmgr *hwmgr) ...@@ -347,7 +347,7 @@ static int rv_smu_init(struct pp_hwmgr *hwmgr)
&priv->smu_tables.entry[WMTABLE].table); &priv->smu_tables.entry[WMTABLE].table);
if (r) if (r)
return -EINVAL; goto err0;
priv->smu_tables.entry[WMTABLE].version = 0x01; priv->smu_tables.entry[WMTABLE].version = 0x01;
priv->smu_tables.entry[WMTABLE].size = sizeof(Watermarks_t); priv->smu_tables.entry[WMTABLE].size = sizeof(Watermarks_t);
...@@ -363,18 +363,22 @@ static int rv_smu_init(struct pp_hwmgr *hwmgr) ...@@ -363,18 +363,22 @@ static int rv_smu_init(struct pp_hwmgr *hwmgr)
&priv->smu_tables.entry[CLOCKTABLE].mc_addr, &priv->smu_tables.entry[CLOCKTABLE].mc_addr,
&priv->smu_tables.entry[CLOCKTABLE].table); &priv->smu_tables.entry[CLOCKTABLE].table);
if (r) { if (r)
amdgpu_bo_free_kernel(&priv->smu_tables.entry[WMTABLE].handle, goto err1;
&priv->smu_tables.entry[WMTABLE].mc_addr,
&priv->smu_tables.entry[WMTABLE].table);
return -EINVAL;
}
priv->smu_tables.entry[CLOCKTABLE].version = 0x01; priv->smu_tables.entry[CLOCKTABLE].version = 0x01;
priv->smu_tables.entry[CLOCKTABLE].size = sizeof(DpmClocks_t); priv->smu_tables.entry[CLOCKTABLE].size = sizeof(DpmClocks_t);
priv->smu_tables.entry[CLOCKTABLE].table_id = TABLE_DPMCLOCKS; priv->smu_tables.entry[CLOCKTABLE].table_id = TABLE_DPMCLOCKS;
return 0; return 0;
err1:
amdgpu_bo_free_kernel(&priv->smu_tables.entry[WMTABLE].handle,
&priv->smu_tables.entry[WMTABLE].mc_addr,
&priv->smu_tables.entry[WMTABLE].table);
err0:
kfree(priv);
return -EINVAL;
} }
const struct pp_smumgr_func rv_smu_funcs = { const struct pp_smumgr_func rv_smu_funcs = {
......
@@ -229,8 +229,10 @@ static int tonga_smu_init(struct pp_hwmgr *hwmgr)
 	hwmgr->smu_backend = tonga_priv;
 
-	if (smu7_init(hwmgr))
+	if (smu7_init(hwmgr)) {
+		kfree(tonga_priv);
 		return -EINVAL;
+	}
 
 	return 0;
 }
...
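
The fiji, iceland, polaris10 and tonga hunks above all share a simpler shape: only the per-ASIC private structure has been allocated by the time the common smu7_init() call can fail, so a single kfree() is enough. Below is a minimal sketch of that shape; shared_init() and asic_smu_init() are hypothetical stand-ins for smu7_init() and the per-ASIC init functions, not the real smumgr entry points.

/*
 * Sketch of the single-allocation error path used by the smu7-based
 * backends. All names are illustrative.
 */
#include <stdlib.h>

struct asic_priv {
	int state;	/* per-ASIC private data would live here */
};

static int shared_init(void)
{
	return -1;	/* pretend the common init step failed */
}

static int asic_smu_init(struct asic_priv **backend)
{
	struct asic_priv *priv = calloc(1, sizeof(*priv));

	if (!priv)
		return -1;

	*backend = priv;	/* published early, like hwmgr->smu_backend */

	if (shared_init()) {
		/* The fix: free the just-allocated backend instead of
		 * leaking it when the common init step fails. */
		free(priv);
		return -1;
	}

	return 0;
}

int main(void)
{
	struct asic_priv *backend = NULL;

	return asic_smu_init(&backend) ? 1 : 0;
}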