Commit 67a359d8 authored by David Yat Sin, committed by Alex Deucher

drm/amdkfd: CRIU remove sync and TLB flush on restore

When the process is being restored, its queues are not mapped yet, so
there is no VMID assigned to this process and no TLBs to flush.
Signed-off-by: David Yat Sin <david.yatsin@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Parent 2e9fda82
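A condensed before/after sketch of the call-site change (illustration only, context elided; amdgpu_amdkfd_gpuvm_map_memory_to_gpu accepts an optional bool *table_freed out-parameter at this point in the tree, so passing NULL simply ignores it):

	/* Before: restore tracked whether page tables were freed so it could
	 * issue a legacy TLB flush once all mappings were in place. */
	bool table_freed = false;

	ret = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(peer->adev,
			(struct kgd_mem *)mem, peer_pdd->drm_priv, &table_freed);
	if (!ret && table_freed)
		flush_tlbs = true;

	/* After: during restore the queues are not mapped, the process has no
	 * VMID, and there is nothing to flush, so the out-parameter is dropped. */
	ret = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(peer->adev,
			(struct kgd_mem *)mem, peer_pdd->drm_priv, NULL);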
@@ -2102,7 +2102,6 @@ static int criu_restore_bos(struct kfd_process *p,
 	struct kfd_criu_bo_bucket *bo_buckets = NULL;
 	struct kfd_criu_bo_priv_data *bo_privs = NULL;
 	const bool criu_resume = true;
-	bool flush_tlbs = false;
 	int ret = 0, j = 0;
 	uint32_t i = 0;

@@ -2248,7 +2247,6 @@ static int criu_restore_bos(struct kfd_process *p,
 	for (j = 0; j < p->n_pdds; j++) {
 		struct kfd_dev *peer;
 		struct kfd_process_device *peer_pdd;
-		bool table_freed = false;

 		if (!bo_priv->mapped_gpuids[j])
 			break;
@@ -2268,20 +2266,11 @@ static int criu_restore_bos(struct kfd_process *p,
 			pr_debug("map mem in restore ioctl -> 0x%llx\n",
				 ((struct kgd_mem *)mem)->va);
 			ret = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(peer->adev,
-				(struct kgd_mem *)mem, peer_pdd->drm_priv, &table_freed);
+				(struct kgd_mem *)mem, peer_pdd->drm_priv, NULL);
 			if (ret) {
 				pr_err("Failed to map to gpu %d/%d\n", j, p->n_pdds);
 				goto exit;
 			}
-			if (table_freed)
-				flush_tlbs = true;
 		}

-		ret = amdgpu_amdkfd_gpuvm_sync_memory(dev->adev,
-						      (struct kgd_mem *) mem, true);
-		if (ret) {
-			pr_debug("Sync memory failed, wait interrupted by user signal\n");
-			goto exit;
-		}
-
 		pr_debug("map memory was successful for the BO\n");
@@ -2296,23 +2285,6 @@ static int criu_restore_bos(struct kfd_process *p,
 		}
 	} /* done */

-	if (flush_tlbs) {
-		/* Flush TLBs after waiting for the page table updates to complete */
-		for (j = 0; j < p->n_pdds; j++) {
-			struct kfd_dev *peer;
-			struct kfd_process_device *pdd = p->pdds[j];
-			struct kfd_process_device *peer_pdd;
-
-			peer = kfd_device_by_id(pdd->dev->id);
-			if (WARN_ON_ONCE(!peer))
-				continue;
-			peer_pdd = kfd_get_process_device_data(peer, p);
-			if (WARN_ON_ONCE(!peer_pdd))
-				continue;
-			kfd_flush_tlb(peer_pdd, TLB_FLUSH_LEGACY);
-		}
-	}
-
 	/* Copy only the buckets back so user can read bo_buckets[N].restored_offset */
 	ret = copy_to_user((void __user *)args->bos,
			   bo_buckets,
......
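Why skipping the flush is safe can be sketched with a hypothetical guard (criu_flush_tlbs_sketch is not a real kernel helper, and using pdd->qpd.vmid as the "VMID assigned" test is an assumption made only to illustrate the commit message's reasoning):

	/* Hypothetical, for illustration only: a TLB flush is only meaningful
	 * once the process owns a VMID, which happens when its queues are
	 * mapped. During CRIU restore the queues are not mapped yet, so this
	 * loop would never reach kfd_flush_tlb(). */
	static void criu_flush_tlbs_sketch(struct kfd_process *p)
	{
		int j;

		for (j = 0; j < p->n_pdds; j++) {
			struct kfd_process_device *pdd = p->pdds[j];

			if (!pdd->qpd.vmid)	/* no VMID until queues are mapped */
				continue;
			kfd_flush_tlb(pdd, TLB_FLUSH_LEGACY);
		}
	}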