提交 ab575de7 编写于 作者: Z Zeng Jinle 提交者: gongweibao

Fix op run log when memory optimization strategy is enabled (#20695)

上级 a1cd27f1
......@@ -132,9 +132,6 @@ static LoD GetLoDDebug(const Scope& scope, const std::string& name) {
if (var->IsType<LoDTensor>()) {
const LoDTensor& tensor = var->Get<LoDTensor>();
if (UNLIKELY(!tensor.IsInitialized())) {
return default_lod;
}
return tensor.lod();
} else {
return default_lod;
......@@ -238,8 +235,16 @@ const std::vector<std::string>& OperatorBase::Outputs(
std::string OperatorBase::DebugStringEx(const Scope* scope) const {
std::stringstream ss;
ss << "Op(" << type_ << "), inputs:{";
std::unordered_set<std::string> no_need_buffer_vars;
if (info_ && info_->NoNeedBufferVarsInferer()) {
no_need_buffer_vars =
Info().NoNeedBufferVarsInferer()(Inputs(), Outputs(), Attrs());
}
for (auto it = inputs_.begin(); it != inputs_.end();) {
auto& input = *it;
bool is_no_need_buffer_var = (no_need_buffer_vars.count(input.first) > 0);
ss << input.first << "[";
for (size_t i = 0; i < input.second.size(); ++i) {
auto var_name = input.second[i];
......@@ -252,7 +257,9 @@ std::string OperatorBase::DebugStringEx(const Scope* scope) const {
if (row_size >= 0) {
ss << "[row_size=" << row_size << "]";
}
std::string dtype = GetDtype(*scope, var_name);
std::string dtype = is_no_need_buffer_var
? "unknown_dtype"
: GetDtype(*scope, var_name);
ss << ":" << dtype;
ss << "[" << GetDimsDebug(*scope, var_name, true) << "]";
ss << "(" << GetLoDDebug(*scope, var_name) << ")";
......
......@@ -128,3 +128,5 @@ endif()
set(GLOB_OP_LIB ${OP_LIBRARY} CACHE INTERNAL "Global OP library")
add_subdirectory(benchmark)
cc_test(op_debug_string_test SRCS op_debug_string_test.cc DEPS elementwise_add_op)
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <string>
#include "glog/logging.h"
#include "gtest/gtest.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"
USE_OP(elementwise_add_grad);
namespace paddle {
namespace operators {
// Checks that DebugStringEx() reports "unknown_dtype" for inputs whose
// buffers the op does not need, both before and after the op has run.
TEST(op_debug_str, test_unknown_dtype) {
  const std::string unknown_dtype = "unknown_dtype";
  platform::Place place = platform::CPUPlace();
  framework::DDim dim{3, 4, 5, 6};

  framework::Scope scope;

  // Describe an elementwise_add_grad op with its usual grad inputs/outputs.
  framework::OpDesc desc;
  desc.SetType("elementwise_add_grad");
  desc.SetInput("Y", {"Y"});
  desc.SetInput(framework::GradVarName("Out"), {framework::GradVarName("Out")});
  desc.SetOutput(framework::GradVarName("X"), {framework::GradVarName("X")});
  desc.SetOutput(framework::GradVarName("Y"), {framework::GradVarName("Y")});
  desc.SetAttr("axis", -1);
  desc.SetAttr("use_mkldnn", false);
  desc.SetAttr("x_data_format", "");
  desc.SetAttr("y_data_format", "");

  // Create an input variable in the scope with allocated float data.
  auto prepare_input = [&](const std::string& name) {
    auto* tensor = scope.Var(name)->GetMutable<framework::LoDTensor>();
    tensor->Resize(dim);
    tensor->mutable_data<float>(place);
  };
  prepare_input("Y");
  prepare_input(framework::GradVarName("Out"));

  // Output variables exist but are left unallocated; the op fills them in.
  scope.Var(framework::GradVarName("X"))->GetMutable<framework::LoDTensor>();
  scope.Var(framework::GradVarName("Y"))->GetMutable<framework::LoDTensor>();

  auto op = framework::OpRegistry::CreateOp(desc);

  auto before_run_str = op->DebugStringEx(&scope);
  LOG(INFO) << before_run_str;
  ASSERT_TRUE(before_run_str.find(unknown_dtype) != std::string::npos);

  op->Run(scope, place);
  platform::DeviceContextPool::Instance().Get(place)->Wait();

  auto after_run_str = op->DebugStringEx(&scope);
  LOG(INFO) << after_run_str;
  ASSERT_TRUE(after_run_str.find(unknown_dtype) != std::string::npos);
}
} // namespace operators
} // namespace paddle
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册