Unverified commit 80310541, authored by Jiabin Yang, committed by GitHub

【Prim】optimize log (#50160)

* optimize log

* fix type error

* fix type error2
Parent: cc8a7858
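What the change does: the per-op "Runing ... composite func" trace messages are demoted from VLOG(3) to VLOG(6), so they disappear from moderately verbose logs and only show up once the verbosity threshold is raised to 6 or higher. A minimal sketch of that gating with plain glog (not Paddle code; the flag settings below are illustrative):

#include <glog/logging.h>

int main(int argc, char* argv[]) {
  google::InitGoogleLogging(argv[0]);
  FLAGS_logtostderr = true;  // print to stderr instead of log files
  FLAGS_v = 3;               // same effect as exporting GLOG_v=3 before running

  VLOG(3) << "Runing add_grad composite func";  // emitted at verbosity 3
  VLOG(6) << "Runing add_grad composite func";  // suppressed until FLAGS_v >= 6
  return 0;
}

In a Paddle build the verbosity threshold is normally set through the GLOG_v environment variable, so after this commit the per-op composite traces require GLOG_v=6 or higher instead of GLOG_v=3.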
@@ -67,7 +67,7 @@ class ElementwiseAddCompositeGradOpMaker
     auto dy_ptr = this->GetOutputPtr(&dy);
     std::string dy_name = this->GetOutputName(dy);
     int axis = static_cast<int>(this->Attr<int>("axis"));
-    VLOG(3) << "Runing add_grad composite func";
+    VLOG(6) << "Runing add_grad composite func";
     prim::add_grad<prim::DescTensor>(x, y, out_grad, axis, dx_ptr, dy_ptr);
     this->RecoverOutputName(dx, dx_name);
     this->RecoverOutputName(dy, dy_name);
......
@@ -84,7 +84,7 @@ class ElementwiseDivCompositeGradOpMaker
     auto dy_ptr = this->GetOutputPtr(&dy);
     std::string dy_name = this->GetOutputName(dy);
     int axis = static_cast<int>(this->Attr<int>("axis"));
-    VLOG(3) << "Runing div_grad composite func";
+    VLOG(6) << "Runing div_grad composite func";
     prim::divide_grad<prim::DescTensor>(
         x, y, out, out_grad, axis, dx_ptr, dy_ptr);
     this->RecoverOutputName(dx, dx_name);
......
@@ -88,7 +88,7 @@ class ElementwiseMulCompositeGradOpMaker
         static_cast<int>(this->Attr<int>("axis")),
         x_grad_p,
         y_grad_p);
-    VLOG(3) << "Runing mul_grad composite func";
+    VLOG(6) << "Runing mul_grad composite func";
     this->RecoverOutputName(x_grad, x_grad_name);
     this->RecoverOutputName(y_grad, y_grad_name);
   }
......
@@ -70,7 +70,7 @@ class ElementwiseSubCompositeGradOpMaker
     auto dy_ptr = this->GetOutputPtr(&dy);
     std::string dy_name = this->GetOutputName(dy);
     int axis = static_cast<int>(this->Attr<int>("axis"));
-    VLOG(3) << "Runing sub_grad composite func";
+    VLOG(6) << "Runing sub_grad composite func";
     prim::subtract_grad<prim::DescTensor>(x, y, out_grad, axis, dx_ptr, dy_ptr);
     this->RecoverOutputName(dx, dx_name);
     this->RecoverOutputName(dy, dy_name);
......
@@ -206,7 +206,7 @@ class ExpandV2CompositeGradOpMaker : public prim::CompositeGradOpMakerBase {
     auto shape = this->Attr<std::vector<int>>("shape");
     prim::expand_grad<prim::DescTensor>(
         x, out_grad, paddle::experimental::IntArray(shape), x_grad_p);
-    VLOG(3) << "Runing expand_v2 composite func";
+    VLOG(6) << "Runing expand_v2 composite func";
     this->RecoverOutputName(x_grad, x_grad_name);
   }
 };
......
@@ -665,7 +665,7 @@ class {{op_name | to_composite_grad_opmaker_name}} : public prim::CompositeGradO
 {%- endmacro %}
 {% macro call_composite_backward_api(composite_func_info) %}
-  VLOG(3) << "Runing {{composite_func_info["func_name"]}} composite func";
+  VLOG(6) << "Runing {{composite_func_info["func_name"]}} composite func";
   prim::{{composite_func_info["func_name"]}}<prim::DescTensor>({{composite_func_info["func_args"]}});
 {%- endmacro %}
......
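The Jinja macro above stamps the same VLOG(6) call into every composite grad op maker generated from the op YAML. As a purely illustrative expansion (the op name and argument list here are hypothetical, not taken from the commit), call_composite_backward_api would emit roughly:

  VLOG(6) << "Runing tanh_grad composite func";
  prim::tanh_grad<prim::DescTensor>(out, out_grad, x_grad);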
@@ -84,7 +84,7 @@ class ReduceSumCompositeGradOpMaker : public prim::CompositeGradOpMakerBase {
     // get output orginal name
     std::string x_grad_name = this->GetOutputName(x_grad_t);
-    VLOG(3) << "Runing sum_grad composite func";
+    VLOG(6) << "Runing sum_grad composite func";
     // call composite backward func
     prim::sum_grad<prim::DescTensor>(
         x, out_grad, axis, keep_dim, reduce_all, x_grad);
......
@@ -57,6 +57,8 @@ class CompositeGradOpMakerBase {
         acting_program_(framework::ProgramDesc()),
         grad_block_(grad_block) {
     // TODO(jiabin): This should always execute by one thread...
+    VLOG(6) << "Constructing Composite Grad func for " << fwd_op_.Type()
+            << "_grad ";
     StaticCompositeContext::Instance().SetBlock(
         acting_program_.MutableBlock(0));
   }
@@ -64,6 +66,7 @@ class CompositeGradOpMakerBase {
   virtual ~CompositeGradOpMakerBase() = default;

   virtual std::vector<std::unique_ptr<framework::OpDesc>> operator()() {
+    VLOG(3) << "Runing Composite Grad func for " << fwd_op_.Type() << "_grad ";
     this->Apply();
     std::vector<std::unique_ptr<framework::OpDesc>> ops;
     // TODO(jiabin): Support multiple blocks later
......
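Net effect of the two header hunks plus the per-op changes: the composite grad path now logs at two levels. The coarse per-invocation marker in operator() stays at VLOG(3), while the constructor trace and the per-op composite-func traces sit at VLOG(6). For a forward op whose type is, say, elementwise_add, the visible message text would be roughly as follows (text copied verbatim from the diff, including the "Runing" spelling; glog prefixes omitted):

  GLOG_v=3 and above:
    Runing Composite Grad func for elementwise_add_grad

  GLOG_v=6 and above, additionally:
    Constructing Composite Grad func for elementwise_add_grad
    Runing add_grad composite func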