From 8965cee89f83f2d2d4d403e0908232a2810e3149 Mon Sep 17 00:00:00 2001
From: Yu Yang
Date: Tue, 28 Aug 2018 16:26:37 +0800
Subject: [PATCH] Polish PrintOp (#12895)

* Polish PrintOp

* Polish PrintOp

* Polish PrintOp

* Refine test_print_op
---
 paddle/fluid/framework/var_type.h          |   2 +-
 paddle/fluid/operators/print_op.cc         | 104 ++++++------------
 python/paddle/fluid/layers/control_flow.py |   5 +-
 .../fluid/tests/unittests/test_print_op.py |   5 +-
 4 files changed, 37 insertions(+), 79 deletions(-)

diff --git a/paddle/fluid/framework/var_type.h b/paddle/fluid/framework/var_type.h
index 429997c8b89..e9550dbfb97 100644
--- a/paddle/fluid/framework/var_type.h
+++ b/paddle/fluid/framework/var_type.h
@@ -26,7 +26,7 @@ namespace paddle {
 namespace framework {
 
 template <typename T>
-bool IsType(const std::type_index& type_index) {
+inline bool IsType(const std::type_index& type_index) {
   return type_index == std::type_index(typeid(T));
 }
 
diff --git a/paddle/fluid/operators/print_op.cc b/paddle/fluid/operators/print_op.cc
index cceac402951..e7f1caf4d3a 100644
--- a/paddle/fluid/operators/print_op.cc
+++ b/paddle/fluid/operators/print_op.cc
@@ -13,14 +13,12 @@ limitations under the License. */
 
 #include 
-#include 
-
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/framework/var_type.h"
-#include "paddle/fluid/framework/variable.h"
 
 namespace paddle {
 namespace operators {
+using framework::GradVarName;
 
 #define CLOG std::cout
 
@@ -35,7 +33,7 @@ struct Formater {
   std::type_index dtype{typeid(const char)};
   framework::LoD lod;
   int summarize;
-  void* data{nullptr};
+  void *data{nullptr};
 
   void operator()(size_t size) {
     PrintMessage();
@@ -101,7 +99,7 @@ struct Formater {
   template <typename T>
   void Display(size_t size) {
-    auto* d = reinterpret_cast<T*>(data);
+    auto *d = reinterpret_cast<T *>(data);
     CLOG << "\tdata: ";
     if (summarize != -1) {
       summarize = std::min(size, (size_t)summarize);
@@ -120,51 +118,36 @@ struct Formater {
 // TODO(ChunweiYan) there should be some other printers for TensorArray
 class TensorPrintOp : public framework::OperatorBase {
  public:
-  TensorPrintOp(const std::string& type,
-                const framework::VariableNameMap& inputs,
-                const framework::VariableNameMap& outputs,
-                const framework::AttributeMap& attrs)
+  TensorPrintOp(const std::string &type,
+                const framework::VariableNameMap &inputs,
+                const framework::VariableNameMap &outputs,
+                const framework::AttributeMap &attrs)
       : OperatorBase(type, inputs, outputs, attrs) {}
 
-  TensorPrintOp(const TensorPrintOp& o)
+  TensorPrintOp(const TensorPrintOp &o)
       : framework::OperatorBase(
-            static_cast<const framework::OperatorBase&>(o)) {
+            static_cast<const framework::OperatorBase &>(o)) {
     PADDLE_THROW("Not implemented.");
   }
 
  private:
-  void RunImpl(const framework::Scope& scope,
-               const platform::Place& place) const override {
-    const framework::Variable* in_var_ptr = nullptr;
-    std::string phase(kForward);
+  void RunImpl(const framework::Scope &scope,
+               const platform::Place &place) const override {
+    const framework::Variable *in_var_ptr = nullptr;
     std::string printed_var_name = "";
 
-    auto& inputs = Inputs();
-    if (inputs.find("In") != inputs.end() && !Inputs("In").empty()) {
-      in_var_ptr = scope.FindVar(Input("In"));
-      printed_var_name = Inputs("In").front();
-    } else if (inputs.find("In@GRAD") != inputs.end() &&
-               !Inputs("In@GRAD").empty()) {
-      in_var_ptr = scope.FindVar(Input("In@GRAD"));
-      printed_var_name = Inputs("In@GRAD").front();
-      phase = std::string(kBackward);
-    } else {
-      PADDLE_THROW("Unknown phase, should be forward or backward.");
-    }
+    in_var_ptr = scope.FindVar(Input("In"));
+    printed_var_name = Inputs("In").front();
 
     PADDLE_ENFORCE_NOT_NULL(in_var_ptr);
 
-    auto& in_tensor = in_var_ptr->Get<framework::LoDTensor>();
-    auto* out_var_ptr = scope.FindVar(Output("Out"));
-    auto& out_tensor = *out_var_ptr->GetMutable<framework::LoDTensor>();
-
-    // Just copy data from input tensor to output tensor
-    // output tensor share same memory with input tensor
-    out_tensor.ShareDataWith(in_tensor);
-    out_tensor.set_lod(in_tensor.lod());
+    auto &in_tensor = in_var_ptr->Get<framework::LoDTensor>();
 
     std::string print_phase = Attr<std::string>("print_phase");
-    if (print_phase != phase && print_phase != std::string(kBoth)) {
+    bool is_forward = Attr<bool>("is_forward");
+
+    if ((is_forward && print_phase == kBackward) ||
+        (!is_forward && print_phase == kForward)) {
       return;
     }
 
@@ -192,7 +175,7 @@ class TensorPrintOp : public framework::OperatorBase {
       formater.dtype = printed_tensor.type();
     }
     if (Attr<bool>("print_tensor_shape")) {
-      auto& dims = printed_tensor.dims();
+      auto &dims = printed_tensor.dims();
       formater.dims.resize(dims.size());
       for (int i = 0; i < dims.size(); ++i) formater.dims[i] = dims[i];
     }
@@ -200,7 +183,7 @@ class TensorPrintOp : public framework::OperatorBase {
       formater.lod = printed_tensor.lod();
     }
     formater.summarize = Attr<int>("summarize");
-    formater.data = reinterpret_cast<void*>(printed_tensor.data());
+    formater.data = reinterpret_cast<void *>(printed_tensor.data());
     formater(printed_tensor.numel());
   }
 
@@ -219,14 +202,14 @@ class PrintOpProtoAndCheckMaker : public framework::OpProtoAndCheckerMaker {
     AddAttr<bool>("print_tensor_type", "Whether to print the tensor's dtype.");
     AddAttr<bool>("print_tensor_shape", "Whether to print the tensor's shape.");
     AddAttr<bool>("print_tensor_lod", "Whether to print the tensor's lod.");
-    AddAttr<std::string>(
-        "print_phase",
-        "(string, default 'BOTH') Which phase to display including 'FORWARD' "
-        "'BACKWARD' and 'BOTH'.")
+    AddAttr<std::string>("print_phase",
+                         "(string, default 'FORWARD') Which phase to display "
+                         "including 'FORWARD' "
+                         "'BACKWARD' and 'BOTH'.")
         .SetDefault(std::string(kBoth))
         .InEnum({std::string(kForward), std::string(kBackward),
                  std::string(kBoth)});
-    AddOutput("Out", "Output tensor with same data as input tensor.");
+    AddAttr<bool>("is_forward", "Whether is forward or not").SetDefault(true);
     AddComment(R"DOC(
 Creates a print op that will print when a tensor is accessed.
@@ -238,40 +221,21 @@ tensor `t`.)DOC");
 
 class InferShapeForward : public framework::InferShapeBase {
  public:
-  void operator()(framework::InferShapeContext* context) const override {
+  void operator()(framework::InferShapeContext *context) const override {
     PADDLE_ENFORCE(context->HasInput("In"), "Input(In) should not be null.");
-    context->ShareLoD("In", /*->*/ "Out");
-    context->SetOutputDim("Out", context->GetInputDim("In"));
-  }
-};
-
-class InferShapeBackward : public framework::InferShapeBase {
- public:
-  void operator()(framework::InferShapeContext* context) const override {
-    PADDLE_ENFORCE(context->HasInput("In@GRAD"),
-                   "Input(In@GRAD) should not be null.");
-    context->ShareLoD("In@GRAD", /*->*/ "Out");
-    context->SetOutputDim("Out", context->GetInputDim("In@GRAD"));
   }
 };
 
-class InferVarType : public framework::VarTypeInference {
- public:
-  void operator()(const framework::OpDesc& op_desc,
-                  framework::BlockDesc* block) const override {}
-};
-
-class PrintOpProtoAndCheckGradOpMaker
-    : public framework::SingleGradOpDescMaker {
+class PrintOpGradientMaker : public framework::SingleGradOpDescMaker {
  public:
   using framework::SingleGradOpDescMaker::SingleGradOpDescMaker;
 
   std::unique_ptr<framework::OpDesc> Apply() const override {
-    auto* op_desc_ptr = new framework::OpDesc();
-    op_desc_ptr->SetType("print_grad");
-    op_desc_ptr->SetInput("In@GRAD", OutputGrad("Out"));
-    op_desc_ptr->SetOutput("Out", InputGrad("In"));
+    auto *op_desc_ptr = new framework::OpDesc();
+    op_desc_ptr->SetType("print");
+    op_desc_ptr->SetInput("In", InputGrad("In"));
     op_desc_ptr->SetAttrMap(Attrs());
+    op_desc_ptr->SetAttr("is_forward", false);
     return std::unique_ptr<framework::OpDesc>(op_desc_ptr);
   }
 };
@@ -282,6 +246,4 @@ class PrintOpProtoAndCheckGradOpMaker
 namespace ops = paddle::operators;
 
 REGISTER_OPERATOR(print, ops::TensorPrintOp, ops::PrintOpProtoAndCheckMaker,
-                  ops::PrintOpProtoAndCheckGradOpMaker, ops::InferShapeForward,
-                  ops::InferVarType);
-REGISTER_OPERATOR(print_grad, ops::TensorPrintOp, ops::InferShapeBackward);
+                  ops::PrintOpGradientMaker, ops::InferShapeForward);
diff --git a/python/paddle/fluid/layers/control_flow.py b/python/paddle/fluid/layers/control_flow.py
index d2954c4c220..c9a2f8a0abf 100644
--- a/python/paddle/fluid/layers/control_flow.py
+++ b/python/paddle/fluid/layers/control_flow.py
@@ -189,7 +189,6 @@ def Print(input,
                    message="The content of some_layer: ")
     '''
     helper = LayerHelper('print', **locals())
-    out = helper.create_tmp_variable(dtype=helper.input_dtype())
     helper.append_op(
         type='print',
         inputs={'In': input},
@@ -202,9 +201,7 @@ def Print(input,
             'print_tensor_shape': print_tensor_shape,
             'print_tensor_lod': print_tensor_lod,
             'print_phase': print_phase.upper()
-        },
-        outputs={'Out': out})
-    return out
+        })
 
 
 class BlockGuard(object):
diff --git a/python/paddle/fluid/tests/unittests/test_print_op.py b/python/paddle/fluid/tests/unittests/test_print_op.py
index ac682d6181c..8097b5f7343 100644
--- a/python/paddle/fluid/tests/unittests/test_print_op.py
+++ b/python/paddle/fluid/tests/unittests/test_print_op.py
@@ -35,9 +35,8 @@ class TestPrintOpCPU(unittest.TestCase):
     def build_network(self, only_forward, **kargs):
         x = layers.data('x', shape=[3], dtype='float32', lod_level=1)
         x.stop_gradient = False
-        printed = layers.Print(input=x, **kargs)
-        if only_forward: return printed
-        loss = layers.mean(printed)
+        layers.Print(input=x, **kargs)
+        loss = layers.mean(x)
         append_backward(loss=loss)
         return loss
 
-- 
GitLab
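
Usage after this patch, as a minimal illustrative sketch (not part of the patch): it assumes
the paddle.fluid 1.x API already used by the updated unit test above (layers.data,
layers.mean, append_backward). The point it shows is that Print no longer creates or
returns an "Out" variable; it appends a side-effect-only `print` op, and the backward
pass is covered by PrintOpGradientMaker cloning the op with is_forward=False, gated
at runtime by the print_phase attribute.

    import paddle.fluid as fluid
    import paddle.fluid.layers as layers
    from paddle.fluid.backward import append_backward

    main_program = fluid.Program()
    startup_program = fluid.Program()
    with fluid.program_guard(main_program, startup_program):
        x = layers.data('x', shape=[3], dtype='float32', lod_level=1)
        x.stop_gradient = False
        # Print is now purely a side effect: it returns nothing, so the
        # original tensor `x` keeps feeding the rest of the network.
        layers.Print(x, message="The content of x: ", print_phase='both')
        loss = layers.mean(x)
        # Building the backward pass re-emits the print op with
        # is_forward=False, so 'backward'/'both' phases print x's gradient.
        append_backward(loss=loss)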