Unverified commit 278debab, authored by liuwei1031, committed by GitHub

fix comments of 16410, test=develop (#16499)

* fix comments of 16410, test=develop

* modify inplace_op_inference_test according to pass interface change, test=develop

Parent: 4c1ec41d
@@ -195,8 +195,7 @@ cc_library(prune SRCS prune.cc DEPS framework_proto)
 cc_test(prune_test SRCS prune_test.cc DEPS op_info prune recurrent_op device_context)
 cc_test(var_type_inference_test SRCS var_type_inference_test.cc DEPS op_registry
         proto_desc)
-cc_test(inplace_op_inference_test SRCS inplace_op_inference_test.cc DEPS op_registry proto_desc op_info memory_optimize_helper)
+cc_test(inplace_op_inference_test SRCS inplace_op_inference_test.cc DEPS inplace_op_pass op_registry proto_desc op_info memory_optimize_helper pass_builder)
 cc_library(selected_rows SRCS selected_rows.cc DEPS tensor)
 cc_test(selected_rows_test SRCS selected_rows_test.cc DEPS selected_rows)
...
@@ -156,7 +156,6 @@ void InplacePass::ApplyImpl(ir::Graph* graph) const {
       continue;
     TryInplaceOpInputOutput(op, graph);
   }
-  // graph->ResolveHazard(var_nodes_);
 }
@@ -168,7 +167,7 @@ void InplacePass::InplaceModifyDesc(const std::string& var,
     auto* op_desc = op->Op();
     op_desc->RenameInput(var, cache_var);
     op_desc->RenameOutput(var, cache_var);
-    if (op_desc->Block()->HasVar(var)) op_desc->Block()->RemoveVar(var);
     op_desc->Flush();
   }
 }
@@ -265,8 +264,6 @@ void InplacePass::WithdrawModify(const NodeSwapQueue& nodes,
 void InplacePass::TryInplaceOpInputOutput(ir::Node* op,
                                           ir::Graph* graph) const {
   VLOG(4) << "Try to inplace op " << op->Name();
-  // PADDLE_ENFORCE(op->Op() != nullptr && op->Op()->Block() != nullptr,
-  //                "op_desc is nullptr");
   // some pre-requirments need to meet if the op want to inplaced.
   PADDLE_ENFORCE(op->Op() != nullptr, "op_desc is nullptr");
@@ -446,6 +443,7 @@ bool GraphView::CheckDeps(ir::Node* var, ir::Node* current_op) const {
 // check if op2 depends on op1's output
 bool GraphView::CheckOpDeps(ir::Node* op1, ir::Node* op2) const {
+  if (VLOG_IS_ON(4)) {
     auto print_op = [&](ir::Node* op, const char* name) {
       std::ostringstream os;
       os << " " << name << " : " << op->Name() << " ";
@@ -458,7 +456,7 @@ bool GraphView::CheckOpDeps(ir::Node* op1, ir::Node* op2) const {
     };
     print_op(op1, "OP1");
     print_op(op2, "OP2");
+  }
   if (op1 == op2) return true;
   if (op_level_.at(op1) >= op_level_.at(op2)) return false;
...
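Note on the CheckOpDeps change above: the debug printing is now wrapped in VLOG_IS_ON(4), so the two operator summaries are only formatted when verbose logging is enabled. A minimal standalone sketch of that glog pattern (the LogOpPair helper is illustrative, not part of this commit):

#include <glog/logging.h>

#include <sstream>
#include <string>

// The ostringstream work sits behind VLOG_IS_ON, so a process running
// without -v=4 (or GLOG_v=4) never pays the string-formatting cost.
void LogOpPair(const std::string& op1_name, const std::string& op2_name) {
  if (VLOG_IS_ON(4)) {
    std::ostringstream os;
    os << "  OP1 : " << op1_name << "\n  OP2 : " << op2_name;
    VLOG(4) << os.str();
  }
}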
@@ -142,16 +142,15 @@ TEST(OrderedSet, FindBestFitNode) {
   for (auto& node : nodes) {
     pool.Insert(node.get());
   }
-  // FIXME(liuwei1031) this API has changed,
-  // disable these tests temporarily
-  // FindNextBestFitNode
-  // auto* n = nodes[0].get();
-  // auto* cache = pool.FindBestFitNode(n);
-  // PADDLE_ENFORCE(cache->Name() == "a");
-  // cache = pool.FindNextBestFitNode(n, cache);
-  // PADDLE_ENFORCE(cache->Name() == "c");
-  // cache = pool.FindNextBestFitNode(n, cache);
-  // PADDLE_ENFORCE(cache->Name() == "b");
+  auto* n = nodes[0].get();
+  auto* cache = pool.FindBestFitNode(n);
+  ASSERT_TRUE(cache->Name() == "a" || cache->Name() == "c");
+  auto* cache_b = pool.FindNextBestFitNode(n, cache);
+  ASSERT_TRUE(cache_b->Name() != cache->Name());
+  ASSERT_TRUE(cache_b->Name() == "a" || cache_b->Name() == "c");
+  cache = pool.FindNextBestFitNode(n, cache_b);
+  ASSERT_TRUE(cache == nullptr);
 }
 } // namespace details
...
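The re-enabled assertions above walk the pool's candidates in order: FindBestFitNode returns the first fit for a node, FindNextBestFitNode returns the fit after a given one, and nullptr signals that the candidates are exhausted. A self-contained sketch of that iteration idiom (Pool and Node below are stand-ins, not the real OrderedSet API):

#include <cassert>
#include <cstddef>
#include <string>
#include <vector>

struct Node {
  std::string name;
  const std::string& Name() const { return name; }
};

// Hypothetical pool holding candidate nodes ordered best-first.
struct Pool {
  std::vector<Node*> fits;
  Node* FindBestFitNode(Node*) { return fits.empty() ? nullptr : fits[0]; }
  Node* FindNextBestFitNode(Node*, Node* prev) {
    for (std::size_t i = 0; i + 1 < fits.size(); ++i) {
      if (fits[i] == prev) return fits[i + 1];
    }
    return nullptr;  // prev was the last candidate
  }
};

int main() {
  Node a{"a"}, c{"c"}, n{"n"};
  Pool pool{{&a, &c}};
  // Visit every fit for n: best first, then next-best, until nullptr.
  for (Node* cur = pool.FindBestFitNode(&n); cur != nullptr;
       cur = pool.FindNextBestFitNode(&n, cur)) {
    assert(cur->Name() == "a" || cur->Name() == "c");
  }
  return 0;
}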
@@ -12,9 +12,14 @@
 See the License for the specific language governing permissions and
 limitations under the License. */
+#include <iostream>
 #include <iterator>
+#include <memory>
 #include <string>
+#include <vector>
 #include "gtest/gtest.h"
+#include "paddle/fluid/framework/details/inplace_op_pass.h"
+#include "paddle/fluid/framework/ir/pass_builder.h"
 #include "paddle/fluid/framework/op_info.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/framework/operator.h"
@@ -165,118 +170,147 @@ REGISTER_OPERATOR(multi_out_grad, f::NOP, f::MultiOutGradInplaceInToOut,
 namespace paddle {
 namespace framework {
-// TEST(InferInplace, SingleOpInplaceInToOut) {
-//   ProgramDesc prog;
-//   auto* op = prog.MutableBlock(0)->AppendOp();
-//   op->SetType("single_op");
-//   op->SetInput("X", {"test2_a", "test2_b", "test2_c"});
-//   op->SetOutput("Out", {"test2_out"});
-//
-//   prog.MutableBlock(0)->Var("test2_a")->SetType(proto::VarType::LOD_TENSOR);
-//   prog.MutableBlock(0)->Var("test2_a")->SetShape({32, 64, 128, 128});
-//   prog.MutableBlock(0)->Var("test2_b")->SetType(proto::VarType::LOD_TENSOR);
-//   prog.MutableBlock(0)->Var("test2_c")->SetType(proto::VarType::LOD_TENSOR);
-//   prog.MutableBlock(0)->Var("test2_out");
-//   prog.MutableBlock(0)->Var("test2_out")->SetShape({32, 16, 128, 128});
-//
-//   auto& infer_inplace = OpInfoMap::Instance().Get(op->Type()).infer_inplace_;
-//   auto in_to_outs = infer_inplace(*op);
-//   EXPECT_EQ(in_to_outs.size(), 1ul);
-//   auto it = in_to_outs.begin();
-//   EXPECT_EQ(it->first, "test2_a");
-//   EXPECT_EQ(it->second, "test2_out");
-// }
-//
-// TEST(InferInplace, SingleGradOpInplaceInToOut) {
-//   ProgramDesc prog;
-//   auto* op = prog.MutableBlock(0)->AppendOp();
-//   op->SetType("single_op_grad");
-//   op->SetInput(GradVarName("Out"), {"test2_out"});
-//   op->SetOutput(GradVarName("X"), {"test2_a", "test2_b", "test2_c"});
-//
-//   prog.MutableBlock(0)->Var("test2_a")->SetType(proto::VarType::LOD_TENSOR);
-//   prog.MutableBlock(0)->Var("test2_a")->SetShape({32, 16, 1024, 1024});
-//   prog.MutableBlock(0)->Var("test2_b")->SetType(proto::VarType::LOD_TENSOR);
-//   prog.MutableBlock(0)->Var("test2_c")->SetType(proto::VarType::LOD_TENSOR);
-//   prog.MutableBlock(0)->Var("test2_out");
-//   prog.MutableBlock(0)->Var("test2_out")->SetShape({32, 16, 1024, 1024});
-//
-//   auto& infer_inplace = OpInfoMap::Instance().Get(op->Type()).infer_inplace_;
-//   auto in_to_outs = infer_inplace(*op);
-//   EXPECT_EQ(in_to_outs.size(), 1ul);
-//   auto it = in_to_outs.begin();
-//   EXPECT_EQ(it->first, "test2_out");
-//   EXPECT_EQ(it->second, "test2_a");
-// }
-//
-// TEST(InferInplace, MultiOutInplaceInToOut) {
-//   ProgramDesc prog;
-//   auto* op = prog.MutableBlock(0)->AppendOp();
-//   op->SetType("multi_out_op");
-//   op->SetInput("X", {"a0", "a1"});
-//   op->SetInput("Y", {"b0"});
-//   op->SetInput("Z", {"c0", "c1"});
-//   op->SetOutput("Out", {"o0"});
-//   op->SetOutput("YOut", {"y0"});
-//   op->SetOutput("ZOut", {"z0"});
-//
-//   prog.MutableBlock(0)->Var("a0")->SetType(proto::VarType::LOD_TENSOR);
-//   prog.MutableBlock(0)->Var("b0")->SetType(proto::VarType::LOD_TENSOR);
-//   prog.MutableBlock(0)->Var("c0")->SetType(proto::VarType::LOD_TENSOR);
-//   prog.MutableBlock(0)->Var("c1")->SetType(proto::VarType::LOD_TENSOR);
-//   prog.MutableBlock(0)->Var("o0");
-//   prog.MutableBlock(0)->Var("y0");
-//   prog.MutableBlock(0)->Var("z0");
-//   prog.MutableBlock(0)->Var("a0")->SetShape({32, 16, 1024, 1024});
-//   prog.MutableBlock(0)->Var("b0")->SetShape({32, 16, 1024, 1024});
-//   prog.MutableBlock(0)->Var("c0")->SetShape({32, 16, 1024, 1024});
-//   prog.MutableBlock(0)->Var("o0")->SetShape({32, 16, 1024, 1024});
-//   prog.MutableBlock(0)->Var("y0")->SetShape({32, 16, 1024, 1024});
-//   prog.MutableBlock(0)->Var("z0")->SetShape({32, 16, 1024, 1024});
-//
-//   auto& infer_inplace = OpInfoMap::Instance().Get(op->Type()).infer_inplace_;
-//   auto in_to_outs = infer_inplace(*op);
-//   EXPECT_EQ(in_to_outs.size(), 3ul);
-//   std::unordered_map<std::string, std::string> expects = {
-//       {"a0", "o0"}, {"b0", "y0"}, {"c0", "z0"},
-//   };
-//   EXPECT_TRUE(expects == in_to_outs);
-// }
-//
-// TEST(InferInplace, MultiGradInplaceInToOut) {
-//   ProgramDesc prog;
-//   auto* op = prog.MutableBlock(0)->AppendOp();
-//   op->SetType("multi_out_grad");
-//   op->SetInput(GradVarName("Out"), {"o0"});
-//   op->SetInput(GradVarName("YOut"), {"y0"});
-//   op->SetInput(GradVarName("ZOut"), {"z0"});
-//   op->SetOutput(GradVarName("X"), {"a0", "a1"});
-//   op->SetOutput(GradVarName("Y"), {"b0"});
-//   op->SetOutput(GradVarName("Z"), {"c0", "c1"});
-//
-//   prog.MutableBlock(0)->Var("a0")->SetType(proto::VarType::LOD_TENSOR);
-//   prog.MutableBlock(0)->Var("b0")->SetType(proto::VarType::LOD_TENSOR);
-//   prog.MutableBlock(0)->Var("c0")->SetType(proto::VarType::LOD_TENSOR);
-//   prog.MutableBlock(0)->Var("c1")->SetType(proto::VarType::LOD_TENSOR);
-//   prog.MutableBlock(0)->Var("o0");
-//   prog.MutableBlock(0)->Var("y0");
-//   prog.MutableBlock(0)->Var("z0");
-//   prog.MutableBlock(0)->Var("a0")->SetShape({32, 16, 1024, 1024});
-//   prog.MutableBlock(0)->Var("b0")->SetShape({32, 16, 1024, 1024});
-//   prog.MutableBlock(0)->Var("c0")->SetShape({32, 16, 1024, 1024});
-//   prog.MutableBlock(0)->Var("o0")->SetShape({32, 16, 1024, 1024});
-//   prog.MutableBlock(0)->Var("y0")->SetShape({32, 16, 1024, 1024});
-//   prog.MutableBlock(0)->Var("z0")->SetShape({32, 16, 1024, 1024});
-//
-//   auto& infer_inplace = OpInfoMap::Instance().Get(op->Type()).infer_inplace_;
-//   auto in_to_outs = infer_inplace(*op);
-//
-//   EXPECT_EQ(in_to_outs.size(), 3ul);
-//   std::unordered_map<std::string, std::string> expects = {
-//       {"o0", "a0"}, {"y0", "b0"}, {"z0", "c0"},
-//   };
-//   EXPECT_TRUE(expects == in_to_outs);
-// }
+void FakeSuccData(ProgramDesc* prog) {  // NOLINT
+  prog->MutableBlock(0)->Var("test2_a")->SetType(proto::VarType::LOD_TENSOR);
+  prog->MutableBlock(0)->Var("test2_a")->SetShape({32, 64, 128, 128});
+  prog->MutableBlock(0)->Var("test2_b")->SetType(proto::VarType::LOD_TENSOR);
+  prog->MutableBlock(0)->Var("test2_c")->SetType(proto::VarType::LOD_TENSOR);
+  prog->MutableBlock(0)->Var("test2_out");
+  prog->MutableBlock(0)->Var("test2_out")->SetShape({64, 32, 128, 128});
+}
+
+void FakeNoInplaceData(ProgramDesc* prog) {  // NOLINT
+  prog->MutableBlock(0)->Var("test2_a")->SetType(proto::VarType::LOD_TENSOR);
+  prog->MutableBlock(0)->Var("test2_a")->SetShape({32, 64, 128, 128});
+  prog->MutableBlock(0)->Var("test2_b")->SetType(proto::VarType::LOD_TENSOR);
+  prog->MutableBlock(0)->Var("test2_c")->SetType(proto::VarType::LOD_TENSOR);
+  prog->MutableBlock(0)->Var("test2_out");
+  prog->MutableBlock(0)->Var("test2_out")->SetShape({64, 31, 128, 128});
+}
+
+ir::Node* GetNodeFromGraph(ir::Graph* g, std::string name) {
+  ir::Node* op_node = nullptr;
+  for (auto& item : g->Nodes()) {
+    if (item->Name() == name) {
+      op_node = item;
+      break;
+    }
+  }
+  return op_node;
+}
+
+std::unique_ptr<ir::Graph> test_SingleOpInplaceInToOut(
+    std::unique_ptr<ir::Graph> g) {
+  std::unique_ptr<details::InplacePass> pass(new details::InplacePass());
+  ir::Node* op_node = GetNodeFromGraph(g.get(), "single_op");
+  EXPECT_NE(op_node, nullptr);
+  pass->Apply(g.get());
+  return g;
+}
+
+TEST(InferInplace, SingleOpInplaceInToOut) {
+  ProgramDesc prog;
+  auto* op = prog.MutableBlock(0)->AppendOp();
+  op->SetType("single_op");
+  op->SetInput("X", {"test2_a", "test2_b", "test2_c"});
+  op->SetOutput("Out", {"test2_out"});
+
+  FakeSuccData(&prog);
+  std::unique_ptr<ir::Graph> g(new ir::Graph(prog));
+  g = test_SingleOpInplaceInToOut(std::move(g));
+  auto op_node = GetNodeFromGraph(g.get(), "single_op");
+
+  EXPECT_EQ(op_node->outputs[0]->Name(), "test2_a");
+}
+
+TEST(InferInplace, SingleOpInplaceInToOutNoInplace) {
+  ProgramDesc prog;
+  auto* op = prog.MutableBlock(0)->AppendOp();
+  op->SetType("single_op");
+  op->SetInput("X", {"test2_a", "test2_b", "test2_c"});
+  op->SetOutput("Out", {"test2_out"});
+
+  FakeNoInplaceData(&prog);
+  std::unique_ptr<ir::Graph> g(new ir::Graph(prog));
+  g = test_SingleOpInplaceInToOut(std::move(g));
+  auto op_node = GetNodeFromGraph(g.get(), "single_op");
+
+  EXPECT_EQ(op_node->outputs[0]->Name(), "test2_out");
+}
+
+TEST(InferInplace, MultiOutInplaceInToOut) {
+  ProgramDesc prog;
+  auto* op = prog.MutableBlock(0)->AppendOp();
+  op->SetType("multi_out_op");
+  op->SetInput("X", {"a0", "a1"});
+  op->SetInput("Y", {"b0"});
+  op->SetInput("Z", {"c0", "c1"});
+  op->SetOutput("Out", {"o0"});
+  op->SetOutput("YOut", {"y0"});
+  op->SetOutput("ZOut", {"z0"});
+
+  prog.MutableBlock(0)->Var("a0")->SetType(proto::VarType::LOD_TENSOR);
+  prog.MutableBlock(0)->Var("b0")->SetType(proto::VarType::LOD_TENSOR);
+  prog.MutableBlock(0)->Var("c0")->SetType(proto::VarType::LOD_TENSOR);
+  prog.MutableBlock(0)->Var("c1")->SetType(proto::VarType::LOD_TENSOR);
+  prog.MutableBlock(0)->Var("o0");
+  prog.MutableBlock(0)->Var("y0");
+  prog.MutableBlock(0)->Var("z0");
+  prog.MutableBlock(0)->Var("a0")->SetShape({32, 16, 1024, 1024});
+  prog.MutableBlock(0)->Var("b0")->SetShape({32, 16, 1024, 1024});
+  prog.MutableBlock(0)->Var("c0")->SetShape({32, 16, 1024, 1024});
+  prog.MutableBlock(0)->Var("o0")->SetShape({32, 16, 1024, 1024});
+  prog.MutableBlock(0)->Var("y0")->SetShape({32, 16, 1024, 1024});
+  prog.MutableBlock(0)->Var("z0")->SetShape({32, 16, 1024, 1024});
+
+  std::unique_ptr<ir::Graph> g(new ir::Graph(prog));
+  std::unique_ptr<details::InplacePass> pass(new details::InplacePass());
+  pass->Apply(g.get());
+  auto op_node = GetNodeFromGraph(g.get(), "multi_out_op");
+  ASSERT_TRUE(op_node != nullptr);
+  EXPECT_EQ(op_node->outputs[0]->Name(), "a0");
+  EXPECT_EQ(op_node->outputs[1]->Name(), "b0");
+  EXPECT_EQ(op_node->outputs[2]->Name(), "c0");
+}
+
+TEST(InferInplace, MultiGradInplaceInToOut) {
+  ProgramDesc prog;
+  auto* op = prog.MutableBlock(0)->AppendOp();
+  op->SetType("multi_out_grad");
+  op->SetInput(GradVarName("Out"), {"o0"});
+  op->SetInput(GradVarName("YOut"), {"y0"});
+  op->SetInput(GradVarName("ZOut"), {"z0"});
+  op->SetOutput(GradVarName("X"), {"a0", "a1"});
+  op->SetOutput(GradVarName("Y"), {"b0"});
+  op->SetOutput(GradVarName("Z"), {"c0", "c1"});
+
+  prog.MutableBlock(0)->Var("a0")->SetType(proto::VarType::LOD_TENSOR);
+  prog.MutableBlock(0)->Var("b0")->SetType(proto::VarType::LOD_TENSOR);
+  prog.MutableBlock(0)->Var("c0")->SetType(proto::VarType::LOD_TENSOR);
+  prog.MutableBlock(0)->Var("c1")->SetType(proto::VarType::LOD_TENSOR);
+  prog.MutableBlock(0)->Var("o0");
+  prog.MutableBlock(0)->Var("y0");
+  prog.MutableBlock(0)->Var("z0");
+  prog.MutableBlock(0)->Var("a0")->SetShape({32, 16, 1024, 1024});
+  prog.MutableBlock(0)->Var("b0")->SetShape({32, 16, 1024, 1024});
+  prog.MutableBlock(0)->Var("c0")->SetShape({32, 16, 1024, 1024});
+  prog.MutableBlock(0)->Var("o0")->SetShape({32, 16, 1024, 1024});
+  prog.MutableBlock(0)->Var("y0")->SetShape({32, 16, 1024, 1024});
+  prog.MutableBlock(0)->Var("z0")->SetShape({32, 15, 1024, 1024});
+
+  std::unique_ptr<ir::Graph> g(new ir::Graph(prog));
+  std::unique_ptr<details::InplacePass> pass(new details::InplacePass());
+  pass->Apply(g.get());
+  auto op_node = GetNodeFromGraph(g.get(), "multi_out_grad");
+  ASSERT_TRUE(op_node != nullptr);
+  EXPECT_EQ(op_node->outputs[0]->Name(), "o0");
+  EXPECT_EQ(op_node->outputs[2]->Name(), "y0");
+  EXPECT_EQ(op_node->outputs[3]->Name(), "c0");
+
+  std::unordered_map<std::string, std::string> expects = {
+      {"o0", "a0"}, {"y0", "b0"}, {"z0", "c0"},
+  };
+}
 } // namespace framework
 } // namespace paddle
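Each rewritten test above follows the same flow: describe the op in a ProgramDesc, lift it into an ir::Graph, run details::InplacePass, then inspect the op node's output names to see whether the output variable was replaced by its input. A condensed restatement reusing the helpers added above (a sketch, not part of the commit; it only builds inside this test target):

TEST(InferInplace, CondensedFlowSketch) {
  ProgramDesc prog;
  auto* op = prog.MutableBlock(0)->AppendOp();
  op->SetType("single_op");
  op->SetInput("X", {"test2_a", "test2_b", "test2_c"});
  op->SetOutput("Out", {"test2_out"});
  FakeSuccData(&prog);  // shapes under which in-place reuse is legal

  std::unique_ptr<ir::Graph> g(new ir::Graph(prog));
  std::unique_ptr<details::InplacePass> pass(new details::InplacePass());
  pass->Apply(g.get());

  // The pass rewires the op to write into its input variable, so the first
  // output node is now named "test2_a" rather than "test2_out".
  auto* op_node = GetNodeFromGraph(g.get(), "single_op");
  ASSERT_TRUE(op_node != nullptr);
  EXPECT_EQ(op_node->outputs[0]->Name(), "test2_a");
}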
@@ -56,7 +56,7 @@ proto::VarType::Type GetDataTypeOfVar(const Variable* var) {
   }
 }
-static DDim GetDims(const Scope& scope, const std::string& name,
+static DDim GetDimsDebug(const Scope& scope, const std::string& name,
                     bool get_actual_dim = false) {
   Variable* var = scope.FindVar(name);
   if (var == nullptr) {
@@ -65,9 +65,9 @@ static DDim GetDims(const Scope& scope, const std::string& name,
   if (var->IsType<LoDTensor>()) {
     const LoDTensor& tensor = var->Get<LoDTensor>();
-    // if (UNLIKELY(!tensor.IsInitialized())) {
-    //   return DDim({-1});
-    // }
+    if (UNLIKELY(!tensor.IsInitialized())) {
+      return DDim({-1});
+    }
     return tensor.dims();
   } else if (var->IsType<SelectedRows>()) {
     if (get_actual_dim) {
@@ -123,7 +123,7 @@ static int GetRowSize(const Scope& scope, const std::string& name) {
   return -1;
 }
-static LoD GetLoD(const Scope& scope, const std::string& name) {
+static LoD GetLoDDebug(const Scope& scope, const std::string& name) {
   Variable* var = scope.FindVar(name);
   auto default_lod = LoD({{}});
@@ -133,9 +133,9 @@ static LoD GetLoD(const Scope& scope, const std::string& name) {
   if (var->IsType<LoDTensor>()) {
     const LoDTensor& tensor = var->Get<LoDTensor>();
-    // if (UNLIKELY(!tensor.IsInitialized())) {
-    //   return default_lod;
-    // }
+    if (UNLIKELY(!tensor.IsInitialized())) {
+      return default_lod;
+    }
     return tensor.lod();
   } else {
     return default_lod;
@@ -274,8 +274,8 @@ std::string OperatorBase::DebugStringEx(const Scope* scope) const {
         }
         std::string dtype = GetDtype(*scope, var_name);
         ss << ":" << dtype;
-        ss << "[" << GetDims(*scope, var_name, true) << "]";
-        ss << "(" << GetLoD(*scope, var_name) << ")";
+        ss << "[" << GetDimsDebug(*scope, var_name, true) << "]";
+        ss << "(" << GetLoDDebug(*scope, var_name) << ")";
       }
     }
     if (i != input.second.size() - 1) {
@@ -305,8 +305,8 @@ std::string OperatorBase::DebugStringEx(const Scope* scope) const {
         }
         std::string dtype = GetDtype(*scope, output.second[i]);
         ss << ":" << dtype;
-        ss << "[" << GetDims(*scope, var_name, true) << "]";
-        ss << "(" << GetLoD(*scope, var_name) << ")";
+        ss << "[" << GetDimsDebug(*scope, var_name, true) << "]";
+        ss << "(" << GetLoDDebug(*scope, var_name) << ")";
       }
     }
     if (i != output.second.size() - 1) {
...
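The operator.cc hunks above make two related fixes: the debug helpers are renamed to GetDimsDebug/GetLoDDebug, and the previously commented-out guards are restored so that an uninitialized LoDTensor yields a sentinel value instead of touching invalid state. A standalone sketch of the guard pattern (FakeTensor is a stand-in, not the real LoDTensor):

#include <cstdint>
#include <vector>

// Stand-in for LoDTensor: only the initialization flag and dims matter here.
struct FakeTensor {
  bool initialized = false;
  std::vector<std::int64_t> shape{32, 64};
  bool IsInitialized() const { return initialized; }
  const std::vector<std::int64_t>& dims() const { return shape; }
};

// Mirrors GetDimsDebug: never read the shape of an uninitialized tensor;
// return the sentinel {-1} instead (DDim({-1}) in the real code).
std::vector<std::int64_t> GetDimsDebugSketch(const FakeTensor& t) {
  if (!t.IsInitialized()) {
    return {-1};
  }
  return t.dims();
}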