Unverified commit b5a1a80c, authored by H Houjiang Chen, committed by GitHub

Only gradient acc is scheduled in parallel. (#5926)

Parent: ce3545ba
@@ -46,6 +46,7 @@ Maybe<void> CopyOrAccGrad(AutogradMeta* autograd_meta, bool autograd_mode) {
if (new_grad) { current_grad = new_grad; }
}
if (autograd_meta->acc_grad()) {
+    DevVmDepObjectConsumeModeGuard guard(DevVmDepObjectConsumeMode::NONE);
const auto& output =
JUST(functional::Add(autograd_meta->acc_grad(), current_grad, /*inplace=*/true));
JUST(autograd_meta->set_acc_grad(output));
@@ -61,14 +62,12 @@ Maybe<void> AutogradEngine::RunBackwardAndSaveGrads4LeafTensor(const TensorTuple
const TensorTuple& out_grads,
bool retain_graph,
bool create_graph) {
-  DevVmDepObjectConsumeModeGuard guard(DevVmDepObjectConsumeMode::NONE);
return RunBackwardAndSaveGrads4LeafTensorIf(outputs, out_grads, retain_graph, create_graph);
}
Maybe<TensorTuple> AutogradEngine::RunBackwardAndReturnInputsTensorGrad(
const TensorTuple& outputs, const TensorTuple& inputs, const TensorTuple& out_grads,
bool retain_graph, bool create_graph) {
-  DevVmDepObjectConsumeModeGuard guard(DevVmDepObjectConsumeMode::NONE);
return RunBackwardAndReturnInputsTensorGradIf(outputs, inputs, out_grads, retain_graph,
create_graph);
}
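The net effect of this diff is to narrow the scope of DevVmDepObjectConsumeModeGuard(DevVmDepObjectConsumeMode::NONE): the guard is removed from RunBackwardAndSaveGrads4LeafTensor and RunBackwardAndReturnInputsTensorGrad, where it wrapped the entire backward pass, and is instead placed inside CopyOrAccGrad, so only the gradient-accumulation Add is scheduled under the relaxed consume mode. Below is a minimal, self-contained sketch of the scoped RAII mode-guard pattern this relies on; the names ConsumeMode, ConsumeModeGuard, and CurrentConsumeMode are illustrative assumptions for this sketch, not OneFlow's actual implementation.

// Sketch only: a thread-local mode plus an RAII guard that sets it for one scope
// and restores the previous value on exit. All names here are hypothetical.
#include <cassert>

enum class ConsumeMode { kFused, kNone };

// Thread-local current mode; the guard saves it on entry and restores it on exit,
// so the relaxed mode applies only inside the guarded scope.
thread_local ConsumeMode g_consume_mode = ConsumeMode::kFused;

class ConsumeModeGuard {
 public:
  explicit ConsumeModeGuard(ConsumeMode mode) : saved_(g_consume_mode) { g_consume_mode = mode; }
  ~ConsumeModeGuard() { g_consume_mode = saved_; }
  ConsumeModeGuard(const ConsumeModeGuard&) = delete;
  ConsumeModeGuard& operator=(const ConsumeModeGuard&) = delete;

 private:
  ConsumeMode saved_;  // mode to restore when the guard leaves scope
};

ConsumeMode CurrentConsumeMode() { return g_consume_mode; }

int main() {
  assert(CurrentConsumeMode() == ConsumeMode::kFused);
  {
    // Inside this scope (e.g. while accumulating a leaf tensor's gradient),
    // work may be scheduled in parallel under the relaxed mode.
    ConsumeModeGuard guard(ConsumeMode::kNone);
    assert(CurrentConsumeMode() == ConsumeMode::kNone);
  }
  // The previous mode is restored once the guard goes out of scope.
  assert(CurrentConsumeMode() == ConsumeMode::kFused);
  return 0;
}

Because the guard restores the previous mode in its destructor, moving it from the outer backward-pass functions into CopyOrAccGrad is enough to confine the parallel scheduling to the accumulation step alone, with no other call sites needing to change.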