diff --git a/paddle/fluid/framework/ir/attention_lstm_fuse_pass.cc b/paddle/fluid/framework/ir/attention_lstm_fuse_pass.cc index d7580a1cfd689c122ea1e7db5918e2cd8711718f..bb52d7e498e55c02ddc2cd6d07ccccd51ce4edc5 100644 --- a/paddle/fluid/framework/ir/attention_lstm_fuse_pass.cc +++ b/paddle/fluid/framework/ir/attention_lstm_fuse_pass.cc @@ -13,13 +13,10 @@ // limitations under the License. #include "paddle/fluid/framework/ir/attention_lstm_fuse_pass.h" - #include - #include "paddle/fluid/framework/ir/graph_pattern_detector.h" #include "paddle/fluid/framework/ir/graph_viz_pass.h" #include "paddle/fluid/framework/lod_tensor.h" -#include "paddle/fluid/inference/api/helper.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/ir/graph_pattern_detector.cc b/paddle/fluid/framework/ir/graph_pattern_detector.cc index 434bee4ccee1c199088d09c934fe86435ec7d095..731b89423354532f684e19305dfa87e8eb75d4b1 100644 --- a/paddle/fluid/framework/ir/graph_pattern_detector.cc +++ b/paddle/fluid/framework/ir/graph_pattern_detector.cc @@ -85,7 +85,7 @@ void GraphPatternDetector::operator()(Graph* graph, LOG(INFO) << "detect " << subgraphs.size() << " subgraph matches the pattern"; int id = 0; for (auto& g : subgraphs) { - LOG(INFO) << "optimizing #" << id++ << " subgraph"; + VLOG(3) << "optimizing #" << id++ << " subgraph"; handler(g, graph); } } diff --git a/paddle/fluid/framework/ir/graph_viz_pass.cc b/paddle/fluid/framework/ir/graph_viz_pass.cc index 4c7ffe69e933de3d52c8f762a1eeb73de17e0561..31ed98db72c8fd4af8c970861d386687962001ce 100644 --- a/paddle/fluid/framework/ir/graph_viz_pass.cc +++ b/paddle/fluid/framework/ir/graph_viz_pass.cc @@ -50,20 +50,37 @@ std::unique_ptr GraphVizPass::ApplyImpl( Dot dot; - std::vector op_attrs({Dot::Attr("style", "filled"), - Dot::Attr("shape", "box"), - Dot::Attr("fillcolor", "red")}); - std::vector var_attrs({Dot::Attr("style", "filled,rounded"), - // Dot::Attr("shape", "diamond"), - Dot::Attr("fillcolor", "yellow")}); - - std::vector marked_op_attrs({Dot::Attr("style", "filled"), - Dot::Attr("shape", "box"), - Dot::Attr("fillcolor", "lightgray")}); - std::vector marked_var_attrs( - {Dot::Attr("style", "filled,rounded"), - // Dot::Attr("shape", "diamond"), - Dot::Attr("fillcolor", "lightgray")}); + const std::vector op_attrs({ + Dot::Attr("style", "rounded,filled,bold"), // + Dot::Attr("shape", "box"), // + Dot::Attr("color", "#303A3A"), // + Dot::Attr("fontcolor", "#ffffff"), // + Dot::Attr("width", "1.3"), // + Dot::Attr("height", "0.84"), // + Dot::Attr("fontname", "Arial"), // + }); + const std::vector arg_attrs({ + Dot::Attr("shape", "box"), // + Dot::Attr("style", "rounded,filled,bold"), // + Dot::Attr("fontname", "Arial"), // + Dot::Attr("fillcolor", "#999999"), // + Dot::Attr("color", "#dddddd"), // + }); + + const std::vector param_attrs({ + Dot::Attr("shape", "box"), // + Dot::Attr("style", "rounded,filled,bold"), // + Dot::Attr("fontname", "Arial"), // + Dot::Attr("color", "#148b97"), // + Dot::Attr("fontcolor", "#ffffff"), // + }); + + const std::vector marked_op_attrs( + {Dot::Attr("style", "rounded,filled,bold"), Dot::Attr("shape", "box"), + Dot::Attr("fillcolor", "yellow")}); + const std::vector marked_var_attrs( + {Dot::Attr("style", "filled,rounded"), Dot::Attr("shape", "box"), + Dot::Attr("fillcolor", "yellow")}); auto marked_nodes = ConsumeMarkedNodes(graph.get()); // Create nodes @@ -74,9 +91,17 @@ std::unique_ptr GraphVizPass::ApplyImpl( marked_nodes.count(n) ? 
marked_op_attrs : op_attrs; dot.AddNode(node_id, attr, node_id); } else if (n->IsVar()) { - decltype(op_attrs) attr = - marked_nodes.count(n) ? marked_var_attrs : var_attrs; - dot.AddNode(node_id, attr, node_id); + decltype(op_attrs)* attr; + if (marked_nodes.count(n)) { + attr = &marked_var_attrs; + } else if (const_cast(n)->Var() && + const_cast(n)->Var()->Persistable()) { + attr = ¶m_attrs; + } else { + attr = &arg_attrs; + } + + dot.AddNode(node_id, *attr, node_id); } node2dot[n] = node_id; } diff --git a/paddle/fluid/inference/analysis/analyzer.cc b/paddle/fluid/inference/analysis/analyzer.cc index ca834406451d53fd44887300561a6327d97cafcd..1fd884435d173800563ea37809003ed3aee16c7c 100644 --- a/paddle/fluid/inference/analysis/analyzer.cc +++ b/paddle/fluid/inference/analysis/analyzer.cc @@ -106,7 +106,6 @@ void Analyzer::Run(Argument* argument) { } } passes.push_back("graph_viz_pass"); - // Ugly support fluid-to-ir-pass argument->Set(kFluidToIrPassesAttr, new std::vector(passes)); for (auto& x : data_) { diff --git a/paddle/fluid/inference/analysis/analyzer_tester.cc b/paddle/fluid/inference/analysis/analyzer_tester.cc index 94be6733f63488634ee1302e015a57c9a8892d84..4cf26d3c70eafd951d14c26335416ec2c71c001d 100644 --- a/paddle/fluid/inference/analysis/analyzer_tester.cc +++ b/paddle/fluid/inference/analysis/analyzer_tester.cc @@ -16,6 +16,7 @@ #include #include +#include // NOLINT #include "paddle/fluid/framework/ir/fuse_pass_base.h" #include "paddle/fluid/framework/ir/pass.h" #include "paddle/fluid/inference/analysis/ut_helper.h" @@ -24,12 +25,12 @@ #include "paddle/fluid/inference/api/paddle_inference_api.h" #include "paddle/fluid/inference/api/paddle_inference_pass.h" #include "paddle/fluid/inference/utils/singleton.h" -#include "paddle/fluid/platform/profiler.h" DEFINE_string(infer_ditu_rnn_model, "", "model path for ditu RNN"); DEFINE_string(infer_ditu_rnn_data, "", "data path for ditu RNN"); DEFINE_int32(batch_size, 10, "batch size."); DEFINE_int32(repeat, 1, "Running the inference program repeat times."); +DEFINE_int32(num_threads, 1, "Running the inference program in multi-threads."); namespace paddle { namespace inference { @@ -220,39 +221,6 @@ void PrepareInputs(std::vector *input_slots, DataRecord *data, } } -std::string DescribeTensor(const PaddleTensor &tensor) { - std::stringstream os; - os << "Tensor [" << tensor.name << "]\n"; - os << " - type: "; - switch (tensor.dtype) { - case PaddleDType::FLOAT32: - os << "float32"; - break; - case PaddleDType::INT64: - os << "int64"; - break; - default: - os << "unset"; - } - os << '\n'; - - os << " - shape: " << to_string(tensor.shape) << '\n'; - os << " - lod: "; - for (auto &l : tensor.lod) { - os << to_string(l) << "; "; - } - os << "\n"; - os << " - data: "; - - int dim = std::accumulate(tensor.shape.begin(), tensor.shape.end(), 1, - [](int a, int b) { return a * b; }); - for (int i = 0; i < dim; i++) { - os << static_cast(tensor.data.data())[i] << " "; - } - os << '\n'; - return os.str(); -} - } // namespace const float ditu_rnn_target_data[] = { @@ -266,11 +234,29 @@ const float ditu_rnn_target_data[] = { 10.7286, 12.0595, 10.6672, 0, 0, 0, 0, 0, 93.5771, 3.84641, 0, 0, 0, 0, 0, 0, 169.426, 0, 0, 0, 0, 0, 0, 0}; +void CompareResult(const std::vector &outputs, + const std::vector &base_outputs) { + PADDLE_ENFORCE_GT(outputs.size(), 0); + PADDLE_ENFORCE_EQ(outputs.size(), base_outputs.size()); + for (size_t i = 0; i < outputs.size(); i++) { + auto &out = outputs[i]; + auto &base_out = base_outputs[i]; + size_t size = 
std::accumulate(out.shape.begin(), out.shape.end(), 1, + [](int a, int b) { return a * b; }); + size_t size1 = std::accumulate(base_out.shape.begin(), base_out.shape.end(), + 1, [](int a, int b) { return a * b; }); + PADDLE_ENFORCE_EQ(size, size1); + PADDLE_ENFORCE_GT(size, 0); + float *data = static_cast(out.data.data()); + float *base_data = static_cast(base_out.data.data()); + for (size_t j = 0; j < size; j++) { + EXPECT_NEAR(data[j], base_data[j], 1e-3); + } + } +} // Test with a really complicate model. -void TestDituRNNPrediction(const std::string &model_path, - const std::string &data_path, int batch_size, - bool use_analysis, bool activate_ir, - int num_times = 1) { +void TestDituRNNPrediction(bool use_analysis, bool activate_ir, + int num_threads) { AnalysisConfig config; config.prog_file = FLAGS_infer_ditu_rnn_model + "/__model__"; config.param_file = FLAGS_infer_ditu_rnn_model + "/param"; @@ -281,6 +267,8 @@ void TestDituRNNPrediction(const std::string &model_path, PADDLE_ENFORCE(config.ir_mode == AnalysisConfig::IrPassMode::kExclude); // default config.ir_passes.clear(); // Do not exclude any pass. + int batch_size = FLAGS_batch_size; + int num_times = FLAGS_repeat; auto base_predictor = CreatePaddlePredictor(config); @@ -288,40 +276,55 @@ void TestDituRNNPrediction(const std::string &model_path, CreatePaddlePredictor( config); std::vector input_slots; - DataRecord data(data_path, batch_size); + DataRecord data(FLAGS_infer_ditu_rnn_data, batch_size); // Prepare inputs. PrepareInputs(&input_slots, &data, batch_size); std::vector outputs, base_outputs; base_predictor->Run(input_slots, &base_outputs); - Timer timer; - timer.tic(); - for (int i = 0; i < num_times; i++) { - predictor->Run(input_slots, &outputs); - } LOG(INFO) << "===========profile result==========="; - LOG(INFO) << "batch_size: " << batch_size << ", repeat: " << num_times - << ", latency: " << timer.toc() / num_times << "ms"; - LOG(INFO) << "====================================="; - - PADDLE_ENFORCE_GT(outputs.size(), 0); - PADDLE_ENFORCE_EQ(outputs.size(), base_outputs.size()); - for (size_t i = 0; i < outputs.size(); i++) { - auto &out = outputs[i]; - auto &base_out = base_outputs[i]; - size_t size = std::accumulate(out.shape.begin(), out.shape.end(), 1, - [](int a, int b) { return a * b; }); - size_t size1 = std::accumulate(base_out.shape.begin(), base_out.shape.end(), - 1, [](int a, int b) { return a * b; }); - PADDLE_ENFORCE_EQ(size, size1); - PADDLE_ENFORCE_GT(size, 0); - float *data = static_cast(out.data.data()); - float *base_data = static_cast(base_out.data.data()); - for (size_t j = 0; j < size; j++) { - EXPECT_NEAR(data[j], base_data[j], 1e-3); + if (num_threads == 1) { + // Run and time the single-threaded case. + Timer timer; + timer.tic(); + for (int i = 0; i < num_times; i++) { + predictor->Run(input_slots, &outputs); + } + PrintTime(batch_size, num_times, 1, 0, timer.toc() / num_times); + CompareResult(outputs, base_outputs); + } else { + std::vector threads; + std::vector> predictors; + // TODO(yanchunwei): Bug here, the analyzer phase can't be parallelized + // because AttentionLSTM's hard-coded node ids will be damaged. + for (int tid = 0; tid < num_threads; ++tid) { + predictors.emplace_back( + CreatePaddlePredictor( + config)); + } + for (int tid = 0; tid < num_threads; ++tid) { + threads.emplace_back([&, tid]() { + // Each thread should have local input_slots and outputs. 
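+ // Only predictors[tid] and the read-only base_outputs/batch_size/num_times are shared across threads; everything created below is thread-local.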
+ std::vector input_slots; + DataRecord data(FLAGS_infer_ditu_rnn_data, batch_size); + PrepareInputs(&input_slots, &data, batch_size); + std::vector outputs; + Timer timer; + timer.tic(); + for (int i = 0; i < num_times; i++) { + predictors[tid]->Run(input_slots, &outputs); + } + PrintTime(batch_size, num_times, num_threads, tid, + timer.toc() / num_times); + CompareResult(outputs, base_outputs); + }); + } + for (int i = 0; i < num_threads; ++i) { + threads[i].join(); } } + LOG(INFO) << "====================================="; if (use_analysis && activate_ir) { AnalysisPredictor *analysis_predictor = @@ -350,25 +353,26 @@ void TestDituRNNPrediction(const std::string &model_path, } } -// Directly infer with the original model. -TEST(Analyzer, DituRNN_without_analysis) { - TestDituRNNPrediction(FLAGS_infer_ditu_rnn_model, FLAGS_infer_ditu_rnn_data, - FLAGS_batch_size, false, false, FLAGS_repeat); +// Inference with analysis and IR, easy for profiling independently. +TEST(Analyzer, DituRNN) { + TestDituRNNPrediction(true, true, FLAGS_num_threads); } -// Inference with the original model with the analysis turned on, the analysis -// module will transform the program to a data flow graph. -TEST(Analyzer, DituRNN_with_analysis) { - LOG(INFO) << "ditu rnn with analysis"; - TestDituRNNPrediction(FLAGS_infer_ditu_rnn_model, FLAGS_infer_ditu_rnn_data, - FLAGS_batch_size, true, false, FLAGS_repeat); -} - -// Inference with analysis and IR. The IR module will fuse some large kernels. -TEST(Analyzer, DituRNN_with_analysis_with_IR) { - LOG(INFO) << "ditu rnn with analysis and IR fuse"; - TestDituRNNPrediction(FLAGS_infer_ditu_rnn_model, FLAGS_infer_ditu_rnn_data, - FLAGS_batch_size, true, true, FLAGS_repeat); +// Other unit-tests of DituRNN, test different options of use_analysis, +// activate_ir and multi-threads. +TEST(Analyzer, DituRNN_tests) { + int num_threads[2] = {1, 4}; + for (auto i : num_threads) { + // Directly infer with the original model. + TestDituRNNPrediction(false, false, i); + // Inference with the original model with the analysis turned on, the + // analysis + // module will transform the program to a data flow graph. + TestDituRNNPrediction(true, false, i); + // Inference with analysis and IR. The IR module will fuse some large + // kernels. 
+ TestDituRNNPrediction(true, true, i); + } } } // namespace analysis diff --git a/paddle/fluid/inference/api/analysis_predictor.cc b/paddle/fluid/inference/api/analysis_predictor.cc index a8fa677202d8429c274a6e3fdfd18ef5d48620c2..79eeea88ea83ad862b5e2ac1390dae377b676685 100644 --- a/paddle/fluid/inference/api/analysis_predictor.cc +++ b/paddle/fluid/inference/api/analysis_predictor.cc @@ -35,7 +35,6 @@ bool AnalysisPredictor::Init( } else { place_ = paddle::platform::CPUPlace(); } - PADDLE_ENFORCE(!parent_scope); if (parent_scope) { scope_ = parent_scope; sub_scope_ = &(parent_scope->NewScope()); diff --git a/paddle/fluid/inference/api/helper.h b/paddle/fluid/inference/api/helper.h index ce4728ab8046f886fc40af3644d855a4f971fb71..2c2ac656e8005369bb0e9033236a431cb09caa15 100644 --- a/paddle/fluid/inference/api/helper.h +++ b/paddle/fluid/inference/api/helper.h @@ -14,6 +14,7 @@ #pragma once +#include #include #include #include @@ -88,5 +89,45 @@ static void TensorAssignData(PaddleTensor *tensor, } } +std::string DescribeTensor(const PaddleTensor &tensor) { + std::stringstream os; + os << "Tensor [" << tensor.name << "]\n"; + os << " - type: "; + switch (tensor.dtype) { + case PaddleDType::FLOAT32: + os << "float32"; + break; + case PaddleDType::INT64: + os << "int64"; + break; + default: + os << "unset"; + } + os << '\n'; + + os << " - shape: " << to_string(tensor.shape) << '\n'; + os << " - lod: "; + for (auto &l : tensor.lod) { + os << to_string(l) << "; "; + } + os << "\n"; + os << " - data: "; + + int dim = std::accumulate(tensor.shape.begin(), tensor.shape.end(), 1, + [](int a, int b) { return a * b; }); + for (int i = 0; i < dim; i++) { + os << static_cast(tensor.data.data())[i] << " "; + } + os << '\n'; + return os.str(); +} + +void PrintTime(int batch_size, int repeat, int num_threads, int tid, + double latency) { + LOG(INFO) << "batch_size: " << batch_size << ", repeat: " << repeat + << ", threads: " << num_threads << ", thread id: " << tid + << ", latency: " << latency << "ms"; +} + } // namespace inference } // namespace paddle diff --git a/paddle/fluid/operators/flatten_op.cc b/paddle/fluid/operators/flatten_op.cc index fdda01381e117cecffb2a05f8399f3ad82a46339..8e80dc0e641c443923076c31e269689b5bc134a7 100644 --- a/paddle/fluid/operators/flatten_op.cc +++ b/paddle/fluid/operators/flatten_op.cc @@ -157,6 +157,116 @@ class FlattenGradOp : public framework::OperatorBase { } }; +// FIXME(zcd): flatten2 adds an intermediate output(XShape) based on flatten, +// the XShape is used to carry the shape and lod of X which will be used in +// flatten_grad, in this way, the framework can reuse the memory of X +// immediately the flatten2_op is finished. 
+// Considering compatibility issues, we could not fix flatten2_op +class Flatten2OpInferShape : public FlattenOpInferShape { + public: + void operator()(framework::InferShapeContext *ctx) const override { + FlattenOpInferShape::operator()(ctx); + PADDLE_ENFORCE(ctx->HasOutput("XShape"), + "Output (XShape) of Flatten op should not be null."); + const auto &in_dims = ctx->GetInputDim("X"); + std::vector xshape_dims(in_dims.size() + 1); + xshape_dims[0] = 0; + for (int i = 0; i < in_dims.size(); ++i) { + xshape_dims[i + 1] = in_dims[i]; + } + ctx->SetOutputDim("XShape", framework::make_ddim(xshape_dims)); + ctx->ShareLoD("X", "XShape"); + } +}; + +class Flatten2Op : public framework::OperatorBase { + public: + using OperatorBase::OperatorBase; + + private: + void RunImpl(const framework::Scope &scope, + const platform::Place &place) const override { + auto &axis = Attr("axis"); + auto in_dims = + scope.FindVar(Input("X"))->Get().dims(); + const auto &out_dims = FlattenOpInferShape::GetOutputShape(axis, in_dims); + + framework::AttributeMap attrs; + attrs["shape"] = out_dims; + attrs["inplace"] = false; + // Invoke Reshape Op + auto reshape_op = framework::OpRegistry::CreateOp( + "reshape2", {{"X", {Input("X")}}, {"Shape", {}}}, + {{"Out", {Output("Out")}}, {"XShape", {Output("XShape")}}}, attrs); + reshape_op->Run(scope, place); + } +}; + +class Flatten2OpMaker : public FlattenOpMaker { + public: + void Make() override { + FlattenOpMaker::Make(); + AddOutput("XShape", + "XShape is just used to store the shape and lod of X, which will " + "be used in FlattenGradOp.") + .AsIntermediate(); + } +}; + +class Flatten2GradOpMaker : public framework::SingleGradOpDescMaker { + public: + using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; + + std::unique_ptr Apply() const override { + auto *grad_op = new framework::OpDesc(); + grad_op->SetType("flatten2_grad"); + grad_op->SetInput("XShape", Output("XShape")); + grad_op->SetInput(framework::GradVarName("Out"), OutputGrad("Out")); + grad_op->SetOutput(framework::GradVarName("X"), InputGrad("X")); + grad_op->SetAttrMap(Attrs()); + return std::unique_ptr(grad_op); + } +}; + +class Flatten2GradInferShape : public framework::InferShapeBase { + public: + void operator()(framework::InferShapeContext *context) const override { + PADDLE_ENFORCE(context->HasInput("XShape"), + "Input(XShape) shouldn't be null."); + PADDLE_ENFORCE(context->HasInput(framework::GradVarName("Out")), + "Input(Out@GRAD) shouldn't be null."); + auto xshape_dims = context->GetInputDim("XShape"); + auto x_dims = framework::slice_ddim(xshape_dims, 1, xshape_dims.size()); + context->SetOutputDim(framework::GradVarName("X"), x_dims); + context->ShareLoD("XShape", framework::GradVarName("X")); + } +}; + +class Flatten2GradOp : public framework::OperatorBase { + public: + using OperatorBase::OperatorBase; + + private: + void RunImpl(const framework::Scope &scope, + const platform::Place &place) const override { + auto dx_name = Output(framework::GradVarName("X")); + auto dout_name = Input(framework::GradVarName("Out")); + auto xshape_name = Input("XShape"); + auto xshape_dims = + scope.FindVar(xshape_name)->Get().dims(); + auto x_dims = framework::slice_ddim(xshape_dims, 1, xshape_dims.size()); + + framework::AttributeMap attrs; + attrs["shape"] = framework::vectorize2int(x_dims); + attrs["inplace"] = false; + + auto reshape_op = framework::OpRegistry::CreateOp( + "reshape2", {{"X", {dout_name}}, {"Shape", {}}}, + {{"Out", {dx_name}}, {"XShape", {xshape_name}}}, attrs); + 
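+ // dX takes dOut's data reshaped back to X's original dims, which are recovered from XShape by dropping its leading 0.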
reshape_op->Run(scope, place); + } +}; + } // namespace operators } // namespace paddle @@ -167,3 +277,8 @@ REGISTER_OPERATOR(flatten, ops::FlattenOp, ops::FlattenOpMaker, ops::FlattenOpInferShape, paddle::framework::DefaultGradOpDescMaker); REGISTER_OPERATOR(flatten_grad, ops::FlattenGradOp, ops::FlattenGradInferShape); + +REGISTER_OPERATOR(flatten2, ops::Flatten2Op, ops::Flatten2OpMaker, + ops::Flatten2OpInferShape, ops::Flatten2GradOpMaker); +REGISTER_OPERATOR(flatten2_grad, ops::Flatten2GradOp, + ops::Flatten2GradInferShape); diff --git a/paddle/fluid/operators/reshape_op.cc b/paddle/fluid/operators/reshape_op.cc index a1dfe39c3a4f84f5e4aaa2306813a7decf0e49ea..d72f85f2c44db2fa887732cfc05e1376a6a79e4a 100644 --- a/paddle/fluid/operators/reshape_op.cc +++ b/paddle/fluid/operators/reshape_op.cc @@ -246,6 +246,88 @@ class ReshapeGradKernel { } }; +// FIXME(zcd): reshape2 adds an intermediate output(XShape) based on reshape, +// the XShape is used to carry the shape and lod of X which will be used in +// reshape_grad, in this way, the framework can reuse the memory of X +// immediately the reshape_op is finished. +// Considering compatibility issues, we could not fix reshape_op +class Reshape2Op : public ReshapeOp { + public: + Reshape2Op(const std::string &type, const framework::VariableNameMap &inputs, + const framework::VariableNameMap &outputs, + const framework::AttributeMap &attrs) + : ReshapeOp(type, inputs, outputs, attrs) {} + + void InferShape(framework::InferShapeContext *ctx) const override { + ReshapeOp::InferShape(ctx); + PADDLE_ENFORCE(ctx->HasOutput("XShape"), + "Output(XShape) of ReshapeOp should not be null."); + const auto &x_dims = ctx->GetInputDim("X"); + std::vector xshape_dims(x_dims.size() + 1); + xshape_dims[0] = 0; + for (int i = 0; i < x_dims.size(); ++i) { + xshape_dims[i + 1] = x_dims[i]; + } + ctx->SetOutputDim("XShape", framework::make_ddim(xshape_dims)); + ctx->ShareLoD("X", /*->*/ "XShape"); + } +}; + +class Reshape2OpMaker : public ReshapeOpMaker { + public: + void Make() override { + ReshapeOpMaker::Make(); + AddOutput("XShape", + "XShape is just used to store the shape and lod of X, which will " + "be used in FlattenGradOp.") + .AsIntermediate(); + } +}; + +class Reshape2GradMaker : public framework::SingleGradOpDescMaker { + public: + using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; + + std::unique_ptr Apply() const override { + auto *grad_op = new framework::OpDesc(); + grad_op->SetType("reshape2_grad"); + grad_op->SetInput("XShape", Output("XShape")); + grad_op->SetInput(framework::GradVarName("Out"), OutputGrad("Out")); + grad_op->SetOutput(framework::GradVarName("X"), InputGrad("X")); + grad_op->SetAttrMap(Attrs()); + return std::unique_ptr(grad_op); + } +}; + +class Reshape2GradOp : public framework::OperatorWithKernel { + public: + Reshape2GradOp(const std::string &type, + const framework::VariableNameMap &inputs, + const framework::VariableNameMap &outputs, + const framework::AttributeMap &attrs) + : OperatorWithKernel(type, inputs, outputs, attrs) {} + + void InferShape(framework::InferShapeContext *ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("XShape"), "Input(XShape) shouldn't be null."); + PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")), + "Input(Out@GRAD) shouldn't be null."); + auto xshape_dims = ctx->GetInputDim("XShape"); + auto x_dims = framework::slice_ddim(xshape_dims, 1, xshape_dims.size()); + ctx->SetOutputDim(framework::GradVarName("X"), x_dims); + ctx->ShareLoD("XShape", 
framework::GradVarName("X")); + } + + protected: + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext &ctx) const override { + return framework::OpKernelType( + framework::ToDataType( + ctx.Input(framework::GradVarName("Out")) + ->type()), + ctx.device_context()); + } +}; + } // namespace operators } // namespace paddle namespace ops = paddle::operators; @@ -261,6 +343,17 @@ REGISTER_OP_CPU_KERNEL_FUNCTOR(reshape_grad, float, ops::ReshapeGradKernel, ops::ReshapeGradKernel, int64_t, ops::ReshapeGradKernel); +REGISTER_OPERATOR(reshape2, ops::Reshape2Op, ops::Reshape2OpMaker, + ops::Reshape2GradMaker); +REGISTER_OPERATOR(reshape2_grad, ops::Reshape2GradOp); +REGISTER_OP_CPU_KERNEL_FUNCTOR(reshape2, float, ops::ReshapeKernel, double, + ops::ReshapeKernel, int, ops::ReshapeKernel, + int64_t, ops::ReshapeKernel); +REGISTER_OP_CPU_KERNEL_FUNCTOR(reshape2_grad, float, ops::ReshapeGradKernel, + double, ops::ReshapeGradKernel, int, + ops::ReshapeGradKernel, int64_t, + ops::ReshapeGradKernel); + #ifdef PADDLE_WITH_CUDA REGISTER_OP_CUDA_KERNEL_FUNCTOR(reshape, float, ops::ReshapeKernel, double, ops::ReshapeKernel, int, ops::ReshapeKernel, @@ -269,4 +362,11 @@ REGISTER_OP_CUDA_KERNEL_FUNCTOR(reshape_grad, float, ops::ReshapeGradKernel, double, ops::ReshapeGradKernel, int, ops::ReshapeGradKernel, int64_t, ops::ReshapeGradKernel); +REGISTER_OP_CUDA_KERNEL_FUNCTOR(reshape2, float, ops::ReshapeKernel, double, + ops::ReshapeKernel, int, ops::ReshapeKernel, + int64_t, ops::ReshapeKernel); +REGISTER_OP_CUDA_KERNEL_FUNCTOR(reshape2_grad, float, ops::ReshapeGradKernel, + double, ops::ReshapeGradKernel, int, + ops::ReshapeGradKernel, int64_t, + ops::ReshapeGradKernel); #endif diff --git a/paddle/fluid/operators/squeeze_op.cc b/paddle/fluid/operators/squeeze_op.cc index 8a683116b8054de12fc4419b5aa5fbc019b675bb..e389c6a65e1e8220685294931c4d08e6fd928b7f 100644 --- a/paddle/fluid/operators/squeeze_op.cc +++ b/paddle/fluid/operators/squeeze_op.cc @@ -126,15 +126,15 @@ class SqueezeOpMaker : public framework::OpProtoAndCheckerMaker { .SetDefault({}); AddComment(R"DOC( Squeeze Operator. - - Remove single-dimensional entries from the shape of a tensor. - Takes a parameter axes with a list of axes to squeeze. - If axes is not provided, all the single dimensions will be removed from the shape. + + Remove single-dimensional entries from the shape of a tensor. + Takes a parameter axes with a list of axes to squeeze. + If axes is not provided, all the single dimensions will be removed from the shape. If an axis is selected with shape entry not equal to one, an error is raised. - + Examples: Case 1: - Given + Given X.shape = (1, 3, 1, 5) and axes = [0] @@ -144,7 +144,7 @@ class SqueezeOpMaker : public framework::OpProtoAndCheckerMaker { Case 2: Given X.shape = (1, 3, 1, 5) - and + and axes = [] we get: Out.shape = (3, 5) @@ -181,6 +181,113 @@ class SqueezeGradOp : public framework::OperatorBase { } }; +// FIXME(zcd): squeeze2 adds an intermediate output(XShape) based on squeeze, +// the XShape is used to carry the shape and lod of X which will be used in +// squeeze_grad, in this way, the framework can reuse the memory of X +// immediately the squeeze2_op is finished. 
+// Considering compatibility issues, we could not fix squeeze2_op +class Squeeze2OpMaker : public SqueezeOpMaker { + public: + void Make() override { + SqueezeOpMaker::Make(); + AddOutput("XShape", + "XShape is just used to store the shape and lod of X, which will " + "be used in SqueezeGradOp.") + .AsIntermediate(); + } +}; + +class Squeeze2OpInferShape : public SqueezeOpInferShape { + public: + void operator()(framework::InferShapeContext *ctx) const override { + SqueezeOpInferShape::operator()(ctx); + PADDLE_ENFORCE(ctx->HasOutput("XShape"), + "Output(XShape) of Squeeze operator should not be null."); + const auto &x_dims = ctx->GetInputDim("X"); + std::vector xshape_dims(x_dims.size() + 1); + xshape_dims[0] = 0; + for (int i = 0; i < x_dims.size(); ++i) { + xshape_dims[i + 1] = x_dims[i]; + } + ctx->SetOutputDim("XShape", framework::make_ddim(xshape_dims)); + ctx->ShareLoD("X", /*->*/ "XShape"); + } +}; + +class Squeeze2Op : public framework::OperatorBase { + public: + using OperatorBase::OperatorBase; + + private: + void RunImpl(const framework::Scope &scope, + const platform::Place &place) const override { + auto &axes = Attr>("axes"); + auto x_dims = scope.FindVar(Input("X"))->Get().dims(); + auto out_dims = Squeeze2OpInferShape::GetOutputShape(axes, x_dims); + + framework::AttributeMap attrs; + attrs["shape"] = framework::vectorize2int(out_dims); + // Invoke Reshape Op + auto reshape_op = framework::OpRegistry::CreateOp( + "reshape2", {{"X", {Input("X")}}, {"Shape", {}}}, + {{"Out", {Output("Out")}}, {"XShape", {Output("XShape")}}}, attrs); + reshape_op->Run(scope, place); + } +}; + +class Squeeze2GradOpMaker : public framework::SingleGradOpDescMaker { + public: + using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; + + std::unique_ptr Apply() const override { + auto *grad_op = new framework::OpDesc(); + grad_op->SetType("squeeze2_grad"); + grad_op->SetInput("XShape", Output("XShape")); + grad_op->SetInput(framework::GradVarName("Out"), OutputGrad("Out")); + grad_op->SetOutput(framework::GradVarName("X"), InputGrad("X")); + grad_op->SetAttrMap(Attrs()); + return std::unique_ptr(grad_op); + } +}; + +class Squeeze2GradInferShape : public framework::InferShapeBase { + public: + void operator()(framework::InferShapeContext *context) const override { + PADDLE_ENFORCE(context->HasInput("XShape"), + "Input(XShape) shouldn't be null."); + PADDLE_ENFORCE(context->HasInput(framework::GradVarName("Out")), + "Input(Out@GRAD) shouldn't be null."); + auto xshape_dims = context->GetInputDim("XShape"); + auto x_dims = framework::slice_ddim(xshape_dims, 1, xshape_dims.size()); + context->SetOutputDim(framework::GradVarName("X"), x_dims); + context->ShareLoD("XShape", framework::GradVarName("X")); + } +}; + +class Squeeze2GradOp : public framework::OperatorBase { + public: + using OperatorBase::OperatorBase; + + private: + void RunImpl(const framework::Scope &scope, + const platform::Place &place) const override { + auto dx_name = Output(framework::GradVarName("X")); + auto dout_name = Input(framework::GradVarName("Out")); + auto xshape_name = Input("XShape"); + auto xshape_dims = + scope.FindVar(xshape_name)->Get().dims(); + auto x_dims = framework::slice_ddim(xshape_dims, 1, xshape_dims.size()); + + framework::AttributeMap attrs; + attrs["shape"] = framework::vectorize2int(x_dims); + + auto reshape_op = framework::OpRegistry::CreateOp( + "reshape2", {{"X", {dout_name}}, {"Shape", {}}}, + {{"Out", {dx_name}}, {"XShape", {xshape_name}}}, attrs); + reshape_op->Run(scope, place); + } +}; + 
} // namespace operators } // namespace paddle @@ -192,3 +299,8 @@ REGISTER_OPERATOR(squeeze, ops::SqueezeOp, ops::SqueezeOpMaker, ops::SqueezeOpInferShape, paddle::framework::DefaultGradOpDescMaker); REGISTER_OPERATOR(squeeze_grad, ops::SqueezeGradOp, ops::SqueezeGradInferShape); + +REGISTER_OPERATOR(squeeze2, ops::Squeeze2Op, ops::Squeeze2OpMaker, + ops::Squeeze2OpInferShape, ops::Squeeze2GradOpMaker); +REGISTER_OPERATOR(squeeze2_grad, ops::Squeeze2GradOp, + ops::Squeeze2GradInferShape); diff --git a/paddle/fluid/operators/transpose_op.cc b/paddle/fluid/operators/transpose_op.cc index 60556a564c25c08612447ebd47a4b432b8a12d29..6a9fc6611a8f8eaa6749aefac0673ccabaebbcfe 100644 --- a/paddle/fluid/operators/transpose_op.cc +++ b/paddle/fluid/operators/transpose_op.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/transpose_op.h" +#include #include namespace paddle { @@ -24,7 +25,7 @@ class TransposeOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - void InferShape(framework::InferShapeContext* ctx) const override { + void InferShape(framework::InferShapeContext *ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null"); PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) should not be null"); auto x_dims = ctx->GetInputDim("X"); @@ -90,7 +91,7 @@ The behavior of this operator is similar to how `numpy.transpose` works. 2 &5 \end{pmatrix}$$ -- Given a input tensor with shape $(N, C, H, W)$ and the `axes` is +- Given a input tensor with shape $(N, C, H, W)$ and the `axes` is $[0, 2, 3, 1]$, then shape of the output tensor will be: $(N, H, W, C)$. )DOC"); @@ -101,7 +102,7 @@ class TransposeOpGrad : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - void InferShape(framework::InferShapeContext* ctx) const override { + void InferShape(framework::InferShapeContext *ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null"); PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")), "Input(Out@GRAD) should not be null"); @@ -113,6 +114,93 @@ class TransposeOpGrad : public framework::OperatorWithKernel { } }; +// FIXME(zcd): transpose2 adds an intermediate output(XShape) based on +// transpose, the XShape is used to carry the shape and lod of X which +// will be used in transpose_grad, in this way, the framework can reuse +// the memory of X immediately the transpose2_op is finished. 
+// Considering compatibility issues, we could not fix transpose2_op +class Transpose2Op : public TransposeOp { + public: + Transpose2Op(const std::string &type, + const framework::VariableNameMap &inputs, + const framework::VariableNameMap &outputs, + const framework::AttributeMap &attrs) + : TransposeOp(type, inputs, outputs, attrs) {} + + void InferShape(framework::InferShapeContext *ctx) const override { + TransposeOp::InferShape(ctx); + PADDLE_ENFORCE(ctx->HasOutput("XShape"), + "Output(XShape) should not be null"); + const auto &in_dims = ctx->GetInputDim("X"); + std::vector x_shape_dim(in_dims.size() + 1); + x_shape_dim[0] = 0; + for (int i = 0; i < in_dims.size(); ++i) { + x_shape_dim[i + 1] = in_dims[i]; + } + ctx->SetOutputDim("XShape", framework::make_ddim(x_shape_dim)); + ctx->ShareLoD("X", /*->*/ "XShape"); + } + + protected: + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext &ctx) const override { + return framework::OpKernelType( + framework::ToDataType(ctx.Input("X")->type()), + ctx.device_context()); + } +}; + +class Transpose2OpMaker : public TransposeOpMaker { + public: + void Make() override { + TransposeOpMaker::Make(); + AddOutput("XShape", "(Tensor)The output tensor.").AsIntermediate(); + } +}; + +class Transpose2GradMaker : public framework::SingleGradOpDescMaker { + public: + using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; + + std::unique_ptr Apply() const override { + auto *grad_op = new framework::OpDesc(); + grad_op->SetType("transpose2_grad"); + grad_op->SetInput("XShape", Output("XShape")); + grad_op->SetInput(framework::GradVarName("Out"), OutputGrad("Out")); + grad_op->SetOutput(framework::GradVarName("X"), InputGrad("X")); + grad_op->SetAttrMap(Attrs()); + return std::unique_ptr(grad_op); + } +}; + +class Transpose2OpGrad : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + void InferShape(framework::InferShapeContext *ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("XShape"), "Input(XShape) should not be null"); + PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")), + "Input(Out@GRAD) should not be null"); + if (ctx->HasOutput(framework::GradVarName("X"))) { + auto xshape_dim = ctx->GetInputDim("XShape"); + auto x_shape_dim = + framework::slice_ddim(xshape_dim, 1, xshape_dim.size()); + ctx->SetOutputDim(framework::GradVarName("X"), x_shape_dim); + ctx->ShareLoD("XShape", framework::GradVarName("X")); + } + } + + protected: + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext &ctx) const override { + return framework::OpKernelType( + framework::ToDataType( + ctx.Input(framework::GradVarName("Out")) + ->type()), + ctx.device_context()); + } +}; + } // namespace operators } // namespace paddle @@ -120,8 +208,20 @@ namespace ops = paddle::operators; REGISTER_OPERATOR(transpose, ops::TransposeOp, ops::TransposeOpMaker, paddle::framework::DefaultGradOpDescMaker); REGISTER_OPERATOR(transpose_grad, ops::TransposeOpGrad); + REGISTER_OP_CPU_KERNEL( transpose, ops::TransposeKernel); REGISTER_OP_CPU_KERNEL( transpose_grad, ops::TransposeGradKernel); + +REGISTER_OPERATOR(transpose2, ops::Transpose2Op, ops::Transpose2OpMaker, + ops::Transpose2GradMaker); +REGISTER_OPERATOR(transpose2_grad, ops::Transpose2OpGrad); + +REGISTER_OP_CPU_KERNEL( + transpose2, + ops::TransposeKernel); +REGISTER_OP_CPU_KERNEL( + transpose2_grad, + ops::TransposeGradKernel); diff --git a/paddle/fluid/operators/transpose_op.cu.cc 
b/paddle/fluid/operators/transpose_op.cu.cc index bcd1fb631394bc33b6fc162cfa7cbb20d55a654b..c1b5a8b31be243fab3af06a18c8e51986c953700 100644 --- a/paddle/fluid/operators/transpose_op.cu.cc +++ b/paddle/fluid/operators/transpose_op.cu.cc @@ -21,3 +21,10 @@ REGISTER_OP_CUDA_KERNEL( REGISTER_OP_CUDA_KERNEL( transpose_grad, ops::TransposeGradKernel); + +REGISTER_OP_CUDA_KERNEL( + transpose2, + ops::TransposeKernel); +REGISTER_OP_CUDA_KERNEL( + transpose2_grad, + ops::TransposeGradKernel); diff --git a/paddle/fluid/operators/unsqueeze_op.cc b/paddle/fluid/operators/unsqueeze_op.cc index 0fc8d54f6400c9dfb6af1e764ed44e95195bfe6e..405943add238ac2d245df11127bfadb4899e855f 100644 --- a/paddle/fluid/operators/unsqueeze_op.cc +++ b/paddle/fluid/operators/unsqueeze_op.cc @@ -127,13 +127,13 @@ class UnsqueezeOpMaker : public framework::OpProtoAndCheckerMaker { }); AddComment(R"DOC( Unsqueeze Operator. - - Insert single-dimensional entries to the shape of a tensor. - Takes one required argument axes, a list of dimensions that will be inserted. - Dimension indices in axes are as seen in the output tensor. - For example: - Given a tensor such that tensor with shape [3, 4, 5], + Insert single-dimensional entries to the shape of a tensor. + Takes one required argument axes, a list of dimensions that will be inserted. + Dimension indices in axes are as seen in the output tensor. + + For example: + Given a tensor such that tensor with shape [3, 4, 5], then Unsqueeze(tensor, axes=[0, 4]) has shape [1, 3, 4, 5, 1] )DOC"); } @@ -168,6 +168,112 @@ class UnsqueezeGradOp : public framework::OperatorBase { } }; +// FIXME(zcd): unsqueeze2 adds an intermediate output(XShape) based on +// unsqueeze, the XShape is used to carry the shape and lod of X which +// will be used in unsqueeze_grad, in this way, the framework can reuse +// the memory of X immediately the unsqueeze2_op is finished. +// Considering compatibility issues, we could not fix unsqueeze2_op +class Unsqueeze2OpInferShape : public UnsqueezeOpInferShape { + public: + void operator()(framework::InferShapeContext *ctx) const override { + UnsqueezeOpInferShape::operator()(ctx); + PADDLE_ENFORCE(ctx->HasOutput("XShape"), + "Output(XShape) of Unsqueeze operator should not be null."); + const auto &x_dims = ctx->GetInputDim("X"); + std::vector xshape_dims(x_dims.size() + 1); + xshape_dims[0] = 0; + for (int i = 0; i < x_dims.size(); ++i) { + xshape_dims[i + 1] = x_dims[i]; + } + ctx->SetOutputDim("XShape", framework::make_ddim(xshape_dims)); + ctx->ShareLoD("X", /*->*/ "XShape"); + } +}; + +class Unsqueeze2OpMaker : public UnsqueezeOpMaker { + public: + void Make() override { + UnsqueezeOpMaker::Make(); + AddOutput("XShape", + "XShape is just used to store the shape and lod of X, which will " + "be used in UnsqueezeGradOp.") + .AsIntermediate(); + } +}; + +class Unsqueeze2Op : public framework::OperatorBase { + public: + using OperatorBase::OperatorBase; + + private: + void RunImpl(const framework::Scope &scope, + const platform::Place &place) const override { + auto &axes = Attr>("axes"); + auto x_dims = scope.FindVar(Input("X"))->Get().dims(); + auto out_dims = Unsqueeze2OpInferShape::GetOutputShape(axes, x_dims); + + framework::AttributeMap attrs; + attrs["shape"] = framework::vectorize2int(out_dims); + // Invoke Reshape op. 
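+ // reshape2 computes Out and also emits XShape, whose dims and LoD record X's original shape for unsqueeze2_grad.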
+ auto reshape_op = framework::OpRegistry::CreateOp( + "reshape2", {{"X", {Input("X")}}, {"Shape", {}}}, + {{"Out", {Output("Out")}}, {"XShape", {Output("XShape")}}}, attrs); + reshape_op->Run(scope, place); + } +}; + +class Unsqueeze2GradOpMaker : public framework::SingleGradOpDescMaker { + public: + using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; + + std::unique_ptr Apply() const override { + auto *grad_op = new framework::OpDesc(); + grad_op->SetType("unsqueeze2_grad"); + grad_op->SetInput("XShape", Output("XShape")); + grad_op->SetInput(framework::GradVarName("Out"), OutputGrad("Out")); + grad_op->SetOutput(framework::GradVarName("X"), InputGrad("X")); + grad_op->SetAttrMap(Attrs()); + return std::unique_ptr(grad_op); + } +}; + +class Unsqueeze2GradInferShape : public framework::InferShapeBase { + public: + void operator()(framework::InferShapeContext *context) const override { + PADDLE_ENFORCE(context->HasInput("XShape"), + "Input(XShape) shouldn't be null."); + PADDLE_ENFORCE(context->HasInput(framework::GradVarName("Out")), + "Input(Out@GRAD) shouldn't be null."); + auto xshape_dims = context->GetInputDim("XShape"); + auto x_dims = framework::slice_ddim(xshape_dims, 1, xshape_dims.size()); + context->SetOutputDim(framework::GradVarName("X"), x_dims); + context->ShareLoD("XShape", framework::GradVarName("X")); + } +}; + +class Unsqueeze2GradOp : public framework::OperatorBase { + public: + using OperatorBase::OperatorBase; + + private: + void RunImpl(const framework::Scope &scope, + const platform::Place &place) const override { + auto dx_name = Output(framework::GradVarName("X")); + auto dout_name = Input(framework::GradVarName("Out")); + auto xshape_name = Input("XShape"); + auto xshape_dims = + scope.FindVar(xshape_name)->Get().dims(); + auto x_dims = framework::slice_ddim(xshape_dims, 1, xshape_dims.size()); + + framework::AttributeMap attrs; + attrs["shape"] = framework::vectorize2int(x_dims); + + auto reshape_op = framework::OpRegistry::CreateOp( + "reshape2", {{"X", {dout_name}}, {"Shape", {}}}, + {{"Out", {dx_name}}, {"XShape", {xshape_name}}}, attrs); + reshape_op->Run(scope, place); + } +}; } // namespace operators } // namespace paddle @@ -180,3 +286,8 @@ REGISTER_OPERATOR(unsqueeze, ops::UnsqueezeOp, ops::UnsqueezeOpMaker, paddle::framework::DefaultGradOpDescMaker); REGISTER_OPERATOR(unsqueeze_grad, ops::UnsqueezeGradOp, ops::UnsqueezeGradInferShape); + +REGISTER_OPERATOR(unsqueeze2, ops::Unsqueeze2Op, ops::Unsqueeze2OpMaker, + ops::Unsqueeze2OpInferShape, ops::Unsqueeze2GradOpMaker); +REGISTER_OPERATOR(unsqueeze2_grad, ops::Unsqueeze2GradOp, + ops::Unsqueeze2GradInferShape); diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index d8c7cc08b652f91456f557b0296e85b9aebc9dd0..5f49d5bbff53096ece140a185f73722870924677 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -4025,10 +4025,12 @@ def transpose(x, perm, name=None): helper = LayerHelper('transpose', **locals()) out = helper.create_tmp_variable(x.dtype) + x_shape = helper.create_tmp_variable(x.dtype) helper.append_op( - type='transpose', + type='transpose2', inputs={'X': [x]}, - outputs={'Out': [out]}, + outputs={'Out': [out], + 'XShape': [x_shape]}, attrs={'axis': perm}) return out @@ -4520,13 +4522,15 @@ def reshape(x, shape, actual_shape=None, act=None, inplace=True, name=None): "Each dimension size given in shape must not be negtive " "except one unknown dimension.") - helper = LayerHelper("reshape", **locals()) + helper = 
LayerHelper("reshape2", **locals()) out = helper.create_tmp_variable(dtype=x.dtype) + x_shape = helper.create_tmp_variable(dtype=x.dtype) helper.append_op( - type="reshape", + type="reshape2", inputs=inputs, attrs={"shape": shape}, - outputs={"Out": out}) + outputs={"Out": out, + "XShape": x_shape}) return helper.append_activation(out) @@ -4570,11 +4574,13 @@ def squeeze(input, axes, name=None): """ helper = LayerHelper("squeeze", **locals()) out = helper.create_tmp_variable(dtype=input.dtype) + x_shape = helper.create_tmp_variable(dtype=input.dtype) helper.append_op( - type="squeeze", + type="squeeze2", inputs={"X": input}, attrs={"axes": axes}, - outputs={"Out": out}) + outputs={"Out": out, + "XShape": x_shape}) return out @@ -4605,11 +4611,13 @@ def unsqueeze(input, axes, name=None): """ helper = LayerHelper("unsqueeze", **locals()) out = helper.create_tmp_variable(dtype=input.dtype) + x_shape = helper.create_tmp_variable(dtype=input.dtype) helper.append_op( - type="unsqueeze", + type="unsqueeze2", inputs={"X": input}, attrs={"axes": axes}, - outputs={"Out": out}) + outputs={"Out": out, + "XShape": x_shape}) return out @@ -5811,10 +5819,12 @@ def flatten(x, axis=1, name=None): raise ValueError("The axis should be a int, and in range [0, rank(x)]") out = helper.create_tmp_variable(x.dtype) + x_shape = helper.create_tmp_variable(x.dtype) helper.append_op( - type='flatten', + type='flatten2', inputs={"X": x}, - outputs={'Out': out}, + outputs={'Out': out, + 'XShape': x_shape}, attrs={"axis": axis}) return out diff --git a/python/paddle/fluid/tests/unittests/op_test.py b/python/paddle/fluid/tests/unittests/op_test.py index 868df5d79424302700d9f82083cd7e4320c707ea..56a242b996f67aa4b9c858ab8aaeb1c1cd3bcf60 100644 --- a/python/paddle/fluid/tests/unittests/op_test.py +++ b/python/paddle/fluid/tests/unittests/op_test.py @@ -249,7 +249,7 @@ class OpTest(unittest.TestCase): outs, _ = self._calc_output(place) return outs - def _calc_output(self, place, parallel=False): + def _calc_output(self, place, parallel=False, no_check_set=None): program = Program() block = program.global_block() @@ -273,6 +273,8 @@ class OpTest(unittest.TestCase): # if not, fill the fetch_list by the user configured outputs in test. 
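+ # Outputs listed in no_check_set (e.g. the XShape output of the new *2 ops) are skipped: only their shape and lod are meaningful, so their data is never compared.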
if len(fetch_list) == 0: for var_name, var in six.iteritems(outputs): + if no_check_set is not None and var_name in no_check_set: + continue if isinstance(var, list): for v in var: fetch_list.append(v) @@ -291,11 +293,17 @@ class OpTest(unittest.TestCase): return_numpy=False) return outs, fetch_list - def check_output_with_place(self, place, atol, equal_nan=False): - outs, fetch_list = self._calc_output(place) + def check_output_with_place(self, + place, + atol, + no_check_set=None, + equal_nan=False): + outs, fetch_list = self._calc_output(place, no_check_set=no_check_set) for out_name, out_dup in Operator.get_op_outputs(self.op_type): if out_name not in self.outputs: continue + if no_check_set is not None and out_name in no_check_set: + continue def find_actual(target_name, fetch_list): found = [ @@ -360,10 +368,10 @@ class OpTest(unittest.TestCase): places.append(core.CUDAPlace(0)) return places - def check_output(self, atol=1e-5, equal_nan=False): + def check_output(self, atol=1e-5, no_check_set=None, equal_nan=False): places = self._get_places() for place in places: - self.check_output_with_place(place, atol, equal_nan) + self.check_output_with_place(place, atol, no_check_set, equal_nan) def check_output_customized(self, checker): places = self._get_places() diff --git a/python/paddle/fluid/tests/unittests/test_flatten_op.py b/python/paddle/fluid/tests/unittests/test_flatten_op.py index 17b01e03124e8007c51107b414c628d4bfc49c79..effa2a148eef8b0047b12c676803abb2871e8118 100644 --- a/python/paddle/fluid/tests/unittests/test_flatten_op.py +++ b/python/paddle/fluid/tests/unittests/test_flatten_op.py @@ -22,14 +22,17 @@ from op_test import OpTest class TestFlattenOp(OpTest): def setUp(self): - self.op_type = "flatten" + self.op_type = "flatten2" self.init_test_case() self.inputs = {"X": np.random.random(self.in_shape).astype("float32")} self.init_attrs() - self.outputs = {"Out": self.inputs["X"].reshape(self.new_shape)} + self.outputs = { + "Out": self.inputs["X"].reshape(self.new_shape), + "XShape": np.random.random(self.in_shape).astype("float32") + } def test_check_output(self): - self.check_output() + self.check_output(no_check_set=["XShape"]) def test_check_grad(self): self.check_grad(["X"], "Out") diff --git a/python/paddle/fluid/tests/unittests/test_reshape_op.py b/python/paddle/fluid/tests/unittests/test_reshape_op.py index 1de35dc35b0176b77eb2d9b25cd6ee4e645e56c3..0557593657e2e480a509902a07f25723b2c710b0 100644 --- a/python/paddle/fluid/tests/unittests/test_reshape_op.py +++ b/python/paddle/fluid/tests/unittests/test_reshape_op.py @@ -22,106 +22,39 @@ from op_test import OpTest class TestReshapeOp(OpTest): def setUp(self): - ori_shape = (2, 25) - new_shape = (5, 10) - - self.op_type = "reshape" - self.inputs = {"X": np.random.random(ori_shape).astype("float32")} - self.attrs = {"shape": new_shape} - self.outputs = {"Out": self.inputs["X"].reshape(new_shape)} - - def test_check_output(self): - self.check_output() - - def test_check_grad(self): - self.check_grad(["X"], "Out") - - -class TestReshapeOpDimInfer1(OpTest): - def setUp(self): - ori_shape = (5, 10) - new_shape = (5, -1, 5) - - self.op_type = "reshape" - self.inputs = {"X": np.random.random(ori_shape).astype("float32")} - self.attrs = {"shape": new_shape} - self.outputs = {"Out": self.inputs["X"].reshape(self.attrs["shape"])} - - def test_check_output(self): - self.check_output() - - def test_check_grad(self): - self.check_grad(["X"], "Out") - - -class TestReshapeOpDimInfer2(OpTest): - def setUp(self): - ori_shape = (2, 2, 
6) - new_shape = (2, 0, 3, -1) - infered_shape = (2, 2, 3, -1) - - self.op_type = "reshape" - self.inputs = {"X": np.random.random(ori_shape).astype("float32")} - self.attrs = {"shape": new_shape} - self.outputs = {"Out": self.inputs["X"].reshape(infered_shape)} - - def test_check_output(self): - self.check_output() - - def test_check_grad(self): - self.check_grad(["X"], "Out") - - -class TestReshapeOpInplace(OpTest): - def setUp(self): - ori_shape = (2, 25) - new_shape = (5, 10) - - self.op_type = "reshape" - self.inputs = {"X": np.random.random(ori_shape).astype("float32")} - self.attrs = {"shape": new_shape} - self.outputs = {"Out": self.inputs["X"].reshape(new_shape)} - - def test_check_output(self): - self.check_output() - - def test_check_grad(self): - self.check_grad(["X"], "Out") - - -class TestReshapeOpDimInferInplace1(OpTest): - def setUp(self): - ori_shape = (5, 10) - new_shape = (5, -1, 5) + self.init_data() + self.op_type = "reshape2" + self.inputs = {"X": np.random.random(self.ori_shape).astype("float32")} + self.attrs = {"shape": self.new_shape} + self.outputs = { + "Out": self.inputs["X"].reshape(self.infered_shape), + 'XShape': np.random.random(self.ori_shape).astype("float32") + } - self.op_type = "reshape" - self.inputs = {"X": np.random.random(ori_shape).astype("float32")} - self.attrs = {"shape": new_shape} - self.outputs = {"Out": self.inputs["X"].reshape(new_shape)} + def init_data(self): + self.ori_shape = (2, 25) + self.new_shape = (5, 10) + self.infered_shape = (5, 10) def test_check_output(self): - self.check_output() + self.check_output(no_check_set=['XShape']) def test_check_grad(self): self.check_grad(["X"], "Out") -class TestReshapeOpDimInferInplace2(OpTest): - def setUp(self): - ori_shape = (2, 2, 6) - new_shape = (2, 0, 3, -1) - infered_shape = (2, 2, 3, -1) - - self.op_type = "reshape" - self.inputs = {"X": np.random.random(ori_shape).astype("float32")} - self.attrs = {"shape": new_shape} - self.outputs = {"Out": self.inputs["X"].reshape(infered_shape)} +class TestReshapeOpDimInfer1(TestReshapeOp): + def init_data(self): + self.ori_shape = (5, 10) + self.new_shape = (5, -1, 5) + self.infered_shape = (5, -1, 5) - def test_check_output(self): - self.check_output() - def test_check_grad(self): - self.check_grad(["X"], "Out") +class TestReshapeOpDimInfer2(TestReshapeOp): + def init_data(self): + self.ori_shape = (2, 2, 6) + self.new_shape = (2, 0, 3, -1) + self.infered_shape = (2, 2, 3, -1) class TestReshapeOpWithInputShape(OpTest): @@ -130,20 +63,23 @@ class TestReshapeOpWithInputShape(OpTest): new_shape = (0, -1, 5) actual_shape = (2, 3, 5) - self.op_type = "reshape" + self.op_type = "reshape2" self.inputs = { "X": np.random.random(ori_shape).astype("float32"), "Shape": np.array( actual_shape, dtype="int32") } self.attrs = {"shape": new_shape} - self.outputs = {"Out": self.inputs["X"].reshape(actual_shape)} + self.outputs = { + "Out": self.inputs["X"].reshape(actual_shape), + 'XShape': np.random.random(ori_shape).astype("float32") + } def test_check_output(self): - self.check_output() + self.check_output(no_check_set=['XShape']) def test_check_grad(self): - self.check_grad(["X"], "Out") + self.check_grad(["X"], "Out", sum_outputs=["Out"]) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_squeeze_op.py b/python/paddle/fluid/tests/unittests/test_squeeze_op.py index 2be8e24a0fae6945351eb767ac924d7ca70848ab..204a4bb40196bd1fc2f5861aa31cf9560ea4d349 100644 --- a/python/paddle/fluid/tests/unittests/test_squeeze_op.py +++ 
b/python/paddle/fluid/tests/unittests/test_squeeze_op.py @@ -23,14 +23,17 @@ from op_test import OpTest # Correct: General. class TestSqueezeOp(OpTest): def setUp(self): - self.op_type = "squeeze" + self.op_type = "squeeze2" self.init_test_case() self.inputs = {"X": np.random.random(self.ori_shape).astype("float32")} self.init_attrs() - self.outputs = {"Out": self.inputs["X"].reshape(self.new_shape)} + self.outputs = { + "Out": self.inputs["X"].reshape(self.new_shape), + "XShape": np.random.random(self.ori_shape).astype("float32") + } def test_check_output(self): - self.check_output() + self.check_output(no_check_set=['XShape']) def test_check_grad(self): self.check_grad(["X"], "Out") diff --git a/python/paddle/fluid/tests/unittests/test_transpose_op.py b/python/paddle/fluid/tests/unittests/test_transpose_op.py index 0853f80b82030679d140f7fabdd42557c2374599..c30da2389d50d3b6bdf1f911aaed6ed71f274153 100644 --- a/python/paddle/fluid/tests/unittests/test_transpose_op.py +++ b/python/paddle/fluid/tests/unittests/test_transpose_op.py @@ -22,16 +22,19 @@ from op_test import OpTest class TestTransposeOp(OpTest): def setUp(self): self.initTestCase() - self.op_type = "transpose" + self.op_type = "transpose2" self.inputs = {'X': np.random.random(self.shape).astype("float32")} self.attrs = {'axis': list(self.axis)} - self.outputs = {'Out': self.inputs['X'].transpose(self.axis)} + self.outputs = { + 'XShape': np.random.random(self.shape).astype("float32"), + 'Out': self.inputs['X'].transpose(self.axis) + } def test_check_output(self): - self.check_output() + self.check_output(no_check_set=['XShape']) def test_check_grad(self): - self.check_grad(['X'], 'Out') + self.check_grad(['X'], 'Out', sum_outputs=['Out']) def initTestCase(self): self.shape = (3, 4) diff --git a/python/paddle/fluid/tests/unittests/test_unsqueeze_op.py b/python/paddle/fluid/tests/unittests/test_unsqueeze_op.py index a324438ba5a3c3b57fd956bd11189ef7d50267e2..14dd2bb06f9a18d0b15a4aee4e9e6bfdf8c41206 100644 --- a/python/paddle/fluid/tests/unittests/test_unsqueeze_op.py +++ b/python/paddle/fluid/tests/unittests/test_unsqueeze_op.py @@ -24,13 +24,16 @@ from op_test import OpTest class TestUnsqueezeOp(OpTest): def setUp(self): self.init_test_case() - self.op_type = "unsqueeze" + self.op_type = "unsqueeze2" self.inputs = {"X": np.random.random(self.ori_shape).astype("float32")} self.init_attrs() - self.outputs = {"Out": self.inputs["X"].reshape(self.new_shape)} + self.outputs = { + "Out": self.inputs["X"].reshape(self.new_shape), + "XShape": np.random.random(self.ori_shape).astype("float32") + } def test_check_output(self): - self.check_output() + self.check_output(no_check_set=["XShape"]) def test_check_grad(self): self.check_grad(["X"], "Out")
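A note on the common pattern: every new *2 operator above records X's original dims (and LoD) in the intermediate XShape output as [0, d0, d1, ...], and the matching grad op recovers them by dropping the leading 0 via framework::slice_ddim(xshape_dims, 1, xshape_dims.size()), so the framework may reuse X's memory as soon as the forward op finishes. Below is a minimal standalone Python sketch of that convention (illustration only; the helper names are hypothetical and not Paddle API).

# XShape convention shared by reshape2 / flatten2 / squeeze2 / unsqueeze2 /
# transpose2 (plain Python illustration, not Paddle code).

def encode_xshape(x_dims):
    # Forward op: XShape's dims are [0, d0, d1, ...]; only its dims/LoD matter,
    # which is why the updated unit tests pass no_check_set=["XShape"].
    return [0] + list(x_dims)

def decode_xshape(xshape_dims):
    # Grad op: drop the leading 0 to recover X's original dims.
    return list(xshape_dims[1:])

x_dims = [2, 3, 4]
assert encode_xshape(x_dims) == [0, 2, 3, 4]
assert decode_xshape(encode_xshape(x_dims)) == x_dims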