From a77d75cd87ec91f5f1688ed81882833a3ee96c42 Mon Sep 17 00:00:00 2001
From: Zeng Jinle <32832641+sneaxiy@users.noreply.github.com>
Date: Thu, 17 Oct 2019 23:07:02 +0800
Subject: [PATCH] [Cherry-pick 1.6]Fix op run log when memory optimization
 strategy is enabled (#20696)

* fix op log bug, test=release/1.6

* add unittests, test=release/1.6
---
 paddle/fluid/framework/operator.cc            | 15 ++--
 paddle/fluid/operators/CMakeLists.txt         |  2 +
 .../fluid/operators/op_debug_string_test.cc   | 71 +++++++++++++++++++
 3 files changed, 84 insertions(+), 4 deletions(-)
 create mode 100644 paddle/fluid/operators/op_debug_string_test.cc

diff --git a/paddle/fluid/framework/operator.cc b/paddle/fluid/framework/operator.cc
index 42e70d9cb0d..79a226b0b0e 100644
--- a/paddle/fluid/framework/operator.cc
+++ b/paddle/fluid/framework/operator.cc
@@ -132,9 +132,6 @@ static LoD GetLoDDebug(const Scope& scope, const std::string& name) {
   if (var->IsType<LoDTensor>()) {
     const LoDTensor& tensor = var->Get<LoDTensor>();
-    if (UNLIKELY(!tensor.IsInitialized())) {
-      return default_lod;
-    }
     return tensor.lod();
   } else {
     return default_lod;
@@ -238,8 +235,16 @@ const std::vector<std::string>& OperatorBase::Outputs(
 std::string OperatorBase::DebugStringEx(const Scope* scope) const {
   std::stringstream ss;
   ss << "Op(" << type_ << "), inputs:{";
+
+  std::unordered_set<std::string> no_need_buffer_vars;
+  if (info_ && info_->NoNeedBufferVarsInferer()) {
+    no_need_buffer_vars =
+        Info().NoNeedBufferVarsInferer()(Inputs(), Outputs(), Attrs());
+  }
+
   for (auto it = inputs_.begin(); it != inputs_.end();) {
     auto& input = *it;
+    bool is_no_need_buffer_var = (no_need_buffer_vars.count(input.first) > 0);
     ss << input.first << "[";
     for (size_t i = 0; i < input.second.size(); ++i) {
       auto var_name = input.second[i];
@@ -252,7 +257,9 @@ std::string OperatorBase::DebugStringEx(const Scope* scope) const {
       if (row_size >= 0) {
         ss << "[row_size=" << row_size << "]";
       }
-      std::string dtype = GetDtype(*scope, var_name);
+      std::string dtype = is_no_need_buffer_var
                               ?
"unknown_dtype" + : GetDtype(*scope, var_name); ss << ":" << dtype; ss << "[" << GetDimsDebug(*scope, var_name, true) << "]"; ss << "(" << GetLoDDebug(*scope, var_name) << ")"; diff --git a/paddle/fluid/operators/CMakeLists.txt b/paddle/fluid/operators/CMakeLists.txt index 886ff49f049..8a66c8d30c6 100644 --- a/paddle/fluid/operators/CMakeLists.txt +++ b/paddle/fluid/operators/CMakeLists.txt @@ -128,3 +128,5 @@ endif() set(GLOB_OP_LIB ${OP_LIBRARY} CACHE INTERNAL "Global OP library") add_subdirectory(benchmark) + +cc_test(op_debug_string_test SRCS op_debug_string_test.cc DEPS elementwise_add_op) diff --git a/paddle/fluid/operators/op_debug_string_test.cc b/paddle/fluid/operators/op_debug_string_test.cc new file mode 100644 index 00000000000..777b0e5dfd7 --- /dev/null +++ b/paddle/fluid/operators/op_debug_string_test.cc @@ -0,0 +1,71 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+#include <string>
+#include "glog/logging.h"
+#include "gtest/gtest.h"
+#include "paddle/fluid/framework/op_registry.h"
+#include "paddle/fluid/framework/operator.h"
+
+USE_OP(elementwise_add_grad);
+
+namespace paddle {
+namespace operators {
+
+TEST(op_debug_str, test_unknown_dtype) {
+  platform::Place place = platform::CPUPlace();
+  framework::DDim dim{3, 4, 5, 6};
+  const std::string unknown_dtype = "unknown_dtype";
+
+  framework::OpDesc desc;
+  framework::Scope scope;
+
+  desc.SetType("elementwise_add_grad");
+  desc.SetInput("Y", {"Y"});
+  desc.SetInput(framework::GradVarName("Out"), {framework::GradVarName("Out")});
+  desc.SetOutput(framework::GradVarName("X"), {framework::GradVarName("X")});
+  desc.SetOutput(framework::GradVarName("Y"), {framework::GradVarName("Y")});
+  desc.SetAttr("axis", -1);
+  desc.SetAttr("use_mkldnn", false);
+  desc.SetAttr("x_data_format", "");
+  desc.SetAttr("y_data_format", "");
+
+  auto y_tensor = scope.Var("Y")->GetMutable<framework::LoDTensor>();
+  y_tensor->Resize(dim);
+  y_tensor->mutable_data<float>(place);
+
+  auto out_grad_tensor = scope.Var(framework::GradVarName("Out"))
+                             ->GetMutable<framework::LoDTensor>();
+  out_grad_tensor->Resize(dim);
+  out_grad_tensor->mutable_data<float>(place);
+
+  scope.Var(framework::GradVarName("X"))->GetMutable<framework::LoDTensor>();
+  scope.Var(framework::GradVarName("Y"))->GetMutable<framework::LoDTensor>();
+
+  auto op = framework::OpRegistry::CreateOp(desc);
+
+  auto before_run_str = op->DebugStringEx(&scope);
+  LOG(INFO) << before_run_str;
+  ASSERT_TRUE(before_run_str.find(unknown_dtype) != std::string::npos);
+
+  op->Run(scope, place);
+  platform::DeviceContextPool::Instance().Get(place)->Wait();
+
+  auto after_run_str = op->DebugStringEx(&scope);
+  LOG(INFO) << after_run_str;
+  ASSERT_TRUE(after_run_str.find(unknown_dtype) != std::string::npos);
+}
+
+}  // namespace operators
+}  // namespace paddle
-- 
GitLab