From 8a1eeec579ed5192be19abe06f25e95194f21a84 Mon Sep 17 00:00:00 2001
From: Sylwester Fraczek
Date: Thu, 15 Nov 2018 12:46:24 +0100
Subject: [PATCH] add mkldnn prop_kind phase for inference-only case to
 pooling and activations (#14278)

* add is_test to pooling and activations

  add prop_kind support for activation, conv and pooling layers
  add a pass that sets is_test to true
  add a transpiler version of the is_test pass

  test=develop

* patch test and pass

  test=develop

* add pass to analyzer.h

  test=develop

* add is_test attr description & pass only on mkldnn in:
  activation_op.cc batch_norm_op.cc conv_op.cc dropout_op.cc lrn_op.cc
  pool_op.cc sequence_pool_op.cc softmax_op.cc

* fix is_test handling for activation, pool and conv

* change description of is_test for all layers again

* remove GetAttr(use_mkldnn) from pass

* rename correct_mkldnn_test_phase to is_test and remove dependency on MKLDNN

  test=develop

* review fix: magic number

* merge two if(..)s into one

* check is_test once and pass the mkldnn forward prop kind

* dereference shared_ptr with * (without get())

  test=develop

* add is_test_pass back

  test=develop
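For reference, a minimal sketch of how this change is meant to be used from
Python. The transpiler class is the one extended at the end of this patch;
the surrounding setup (an already-built `inference_program`) is an assumption,
not part of the patch:

    import paddle.fluid as fluid

    # `inference_program` is assumed to be a trained Program prepared for
    # inference. The transpiler sets is_test=True on every op that supports
    # it, which lets MKL-DNN kernels below choose
    # mkldnn::prop_kind::forward_inference instead of forward_training.
    t = fluid.transpiler.InferenceTranspiler()
    t.transpile(inference_program, fluid.CPUPlace())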
---
 paddle/fluid/framework/ir/CMakeLists.txt       |   2 +
 paddle/fluid/framework/ir/is_test_pass.cc      |  57 +++++++++
 paddle/fluid/framework/ir/is_test_pass.h       |  31 +++++
 .../fluid/framework/ir/is_test_pass_tester.cc  | 117 ++++++++++++++++++
 .../fluid/inference/api/paddle_pass_builder.h  |   1 +
 .../fluid/operators/activation_mkldnn_op.cc    |  19 ++-
 paddle/fluid/operators/activation_op.cc        |  33 ++---
 paddle/fluid/operators/batch_norm_op.cc        |   5 +-
 paddle/fluid/operators/conv_mkldnn_op.cc       |  36 +++---
 paddle/fluid/operators/conv_op.cc              |   5 +-
 paddle/fluid/operators/dropout_op.cc           |   5 +-
 paddle/fluid/operators/fake_quantize_op.cc     |   9 +-
 paddle/fluid/operators/lrn_op.cc               |   4 +-
 paddle/fluid/operators/pool_mkldnn_op.cc       |  38 ++++--
 paddle/fluid/operators/pool_op.cc              |   5 +
 paddle/fluid/operators/sequence_pool_op.cc     |   5 +-
 paddle/fluid/operators/softmax_op.cc           |  17 +--
 paddle/fluid/operators/while_op.cc             |   5 +-
 .../fluid/transpiler/inference_transpiler.py   |  32 +++++
 19 files changed, 362 insertions(+), 64 deletions(-)
 create mode 100644 paddle/fluid/framework/ir/is_test_pass.cc
 create mode 100644 paddle/fluid/framework/ir/is_test_pass.h
 create mode 100644 paddle/fluid/framework/ir/is_test_pass_tester.cc

diff --git a/paddle/fluid/framework/ir/CMakeLists.txt b/paddle/fluid/framework/ir/CMakeLists.txt
index 504f7e6d6c1..883575e41db 100644
--- a/paddle/fluid/framework/ir/CMakeLists.txt
+++ b/paddle/fluid/framework/ir/CMakeLists.txt
@@ -41,6 +41,7 @@ pass_library(seq_concat_fc_fuse_pass inference)
 pass_library(multi_batch_merge_pass base)
 pass_library(conv_bn_fuse_pass inference)
 pass_library(seqconv_eltadd_relu_fuse_pass inference)
+pass_library(is_test_pass base)
 if(WITH_MKLDNN)
   pass_library(mkldnn_placement_pass base)
   pass_library(depthwise_conv_mkldnn_pass base)
@@ -62,6 +63,7 @@ cc_test(graph_helper_test SRCS graph_helper_test.cc DEPS graph graph_helper op_r
 cc_test(graph_to_program_pass_test SRCS graph_to_program_pass_test.cc DEPS graph_to_program_pass)
 cc_test(test_graph_pattern_detector SRCS graph_pattern_detector_tester.cc DEPS graph_pattern_detector)
 cc_test(test_fc_fuse_pass SRCS fc_fuse_pass_tester.cc DEPS fc_fuse_pass framework_proto)
+cc_test(test_is_test_pass SRCS is_test_pass_tester.cc DEPS is_test_pass)
 if (WITH_MKLDNN)
   cc_test(test_depthwise_conv_mkldnn_pass SRCS depthwise_conv_mkldnn_pass_tester.cc DEPS depthwise_conv_mkldnn_pass)
   cc_test(test_conv_relu_mkldnn_fuse_pass SRCS conv_relu_mkldnn_fuse_pass_tester.cc DEPS conv_relu_mkldnn_fuse_pass)
diff --git a/paddle/fluid/framework/ir/is_test_pass.cc b/paddle/fluid/framework/ir/is_test_pass.cc
new file mode 100644
index 00000000000..292f232ffce
--- /dev/null
+++ b/paddle/fluid/framework/ir/is_test_pass.cc
@@ -0,0 +1,57 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "paddle/fluid/framework/ir/is_test_pass.h"
+#include <algorithm>
+#include <utility>
+
+namespace paddle {
+namespace framework {
+namespace ir {
+
+std::unique_ptr<ir::Graph> IsTestPass::ApplyImpl(
+    std::unique_ptr<ir::Graph> graph) const {
+  VLOG(3) << "Sets is_test attribute to true and, if it is missing, inserts "
+             "it for activations and pooling.";
+  auto op_list = {"pool2d",      "sigmoid",      "logsigmoid",
+                  "softshrink",  "exp",          "brelu",
+                  "pow",         "leaky_relu",   "stanh",
+                  "relu",        "tanh",         "tanh_shrink",
+                  "sqrt",        "abs",          "ceil",
+                  "elu",         "floor",        "cos",
+                  "sin",         "round",        "reciprocal",
+                  "hard_shrink", "hard_sigmoid", "relu6",
+                  "soft_relu",   "swish",        "thresholded_relu",
+                  "log",         "square",       "softplus",
+                  "softsign"};
+  for (const Node* n : graph->Nodes()) {
+    if (n->IsOp()) {
+      auto* op = n->Op();
+      if (op->HasAttr("is_test")) {
+        op->SetAttr("is_test", true);
+      } else if (std::find(begin(op_list), end(op_list), op->Type()) !=
+                 end(op_list)) {
+        op->MutableAttrMap()->insert(
+            std::pair<std::string, Attribute>("is_test", true));
+      }
+    }
+  }
+  return graph;
+}
+
+}  // namespace ir
+}  // namespace framework
+}  // namespace paddle
+
+REGISTER_PASS(is_test_pass, paddle::framework::ir::IsTestPass);
diff --git a/paddle/fluid/framework/ir/is_test_pass.h b/paddle/fluid/framework/ir/is_test_pass.h
new file mode 100644
index 00000000000..99e76ca4a3d
--- /dev/null
+++ b/paddle/fluid/framework/ir/is_test_pass.h
@@ -0,0 +1,31 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#pragma once
+
+#include "paddle/fluid/framework/ir/pass.h"
+
+namespace paddle {
+namespace framework {
+namespace ir {
+
+class IsTestPass : public Pass {
+ protected:
+  std::unique_ptr<ir::Graph> ApplyImpl(
+      std::unique_ptr<ir::Graph> graph) const override;
+};
+
+}  // namespace ir
+}  // namespace framework
+}  // namespace paddle
diff --git a/paddle/fluid/framework/ir/is_test_pass_tester.cc b/paddle/fluid/framework/ir/is_test_pass_tester.cc
new file mode 100644
index 00000000000..cd2cb0c9f8a
--- /dev/null
+++ b/paddle/fluid/framework/ir/is_test_pass_tester.cc
@@ -0,0 +1,117 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/fluid/framework/ir/is_test_pass.h"
+
+#include <gtest/gtest.h>
+
+namespace paddle {
+namespace framework {
+namespace ir {
+
+enum class ISTEST_STATE { FALSE, TRUE, UNSET };
+
+void SetOp(ProgramDesc* prog, const std::string& type, const std::string& name,
+           const std::vector<std::string>& inputs,
+           const std::vector<std::string>& outputs, bool use_mkldnn = false,
+           ISTEST_STATE is_test = ISTEST_STATE::UNSET) {
+  auto* op = prog->MutableBlock(0)->AppendOp();
+  op->SetType(type);
+  op->SetAttr("name", name);
+  op->SetInput("X", inputs);
+  op->SetOutput("Out", outputs);
+  op->SetAttr("use_mkldnn", use_mkldnn);
+  if (is_test == ISTEST_STATE::UNSET)
+    op->MutableAttrMap()->erase("is_test");
+  else if (is_test == ISTEST_STATE::FALSE)
+    op->SetAttr("is_test", false);
+  else
+    op->SetAttr("is_test", true);
+}
+
+// a->pool2d->b
+// b->relu->c
+// (c, weights1)->conv2d->d
+//
+// d->pool2d->e
+// e->hard_sigmoid->f
+// (f, weights2)->conv2d->g
+//
+// g->pool2d->h
+// h->tanh->i
+// (i, weights3)->conv2d->j
+ProgramDesc BuildProgramDesc() {
+  ProgramDesc prog;
+  for (auto& v :
+       std::vector<std::string>({"a", "b", "c", "d", "e", "f", "g", "h", "i",
+                                 "j", "weights1", "weights2", "weights3"})) {
+    auto* var = prog.MutableBlock(0)->Var(v);
+    var->SetType(proto::VarType::SELECTED_ROWS);
+    if (v == "weights1" || v == "weights2" || v == "weights3") {
+      var->SetPersistable(true);
+    }
+  }
+
+  SetOp(&prog, "pool2d", "pooling1", std::vector<std::string>({"a"}),
+        std::vector<std::string>({"b"}), true, ISTEST_STATE::TRUE);
+  SetOp(&prog, "relu", "activation1", std::vector<std::string>({"b"}),
+        std::vector<std::string>({"c"}), true, ISTEST_STATE::TRUE);
+  SetOp(&prog, "conv2d", "conv1", std::vector<std::string>({"c", "weights1"}),
+        std::vector<std::string>({"d"}), true, ISTEST_STATE::TRUE);
+
+  SetOp(&prog, "pool2d", "pooling2", std::vector<std::string>({"d"}),
+        std::vector<std::string>({"e"}), false, ISTEST_STATE::FALSE);
+  SetOp(&prog, "hard_sigmoid", "activation2", std::vector<std::string>({"e"}),
+        std::vector<std::string>({"f"}), false, ISTEST_STATE::FALSE);
+  SetOp(&prog, "conv2d", "conv2", std::vector<std::string>({"f", "weights2"}),
+        std::vector<std::string>({"g"}), false, ISTEST_STATE::FALSE);
+
+  SetOp(&prog, "pool2d", "pooling3", std::vector<std::string>({"g"}),
+        std::vector<std::string>({"h"}), false, ISTEST_STATE::UNSET);
+  SetOp(&prog, "tanh", "activation3", std::vector<std::string>({"h"}),
+        std::vector<std::string>({"i"}), true, ISTEST_STATE::UNSET);
+  SetOp(&prog, "conv2d", "conv3", std::vector<std::string>({"i", "weights3"}),
+        std::vector<std::string>({"j"}), false, ISTEST_STATE::UNSET);
+
+  return prog;
+}
+
+TEST(IsTestPass, basic) {
+  auto prog = BuildProgramDesc();
+
+  std::unique_ptr<ir::Graph> graph(new ir::Graph(prog));
+
+  auto pass = PassRegistry::Instance().Get("is_test_pass");
+
+  graph = pass->Apply(std::move(graph));
+
+  for (auto* node : graph->Nodes()) {
+    if (node->IsOp()) {
+      auto* op = node->Op();
+      auto op_name = boost::get<std::string>(op->GetAttr("name"));
+      if (op_name == "conv3") {
+        ASSERT_FALSE(op->HasAttr("is_test"));
+      } else {
+        ASSERT_TRUE(op->HasAttr("is_test"));
+        EXPECT_TRUE(boost::get<bool>(op->GetAttr("is_test")));
+      }
+    }
+  }
+}
+
+}  // namespace ir
+}  // namespace framework
+}  // namespace paddle
+
+USE_PASS(is_test_pass);
diff --git a/paddle/fluid/inference/api/paddle_pass_builder.h b/paddle/fluid/inference/api/paddle_pass_builder.h
index 80658d30850..825bee833bf 100644
--- a/paddle/fluid/inference/api/paddle_pass_builder.h
+++ b/paddle/fluid/inference/api/paddle_pass_builder.h
@@ -86,6 +86,7 @@ class CpuPassStrategy : public PassStrategy {
         "fc_fuse_pass",                  //
         "conv_bn_fuse_pass",             //
         "conv_eltwiseadd_bn_fuse_pass",  //
+        "is_test_pass",                  //
     });
   }
diff --git a/paddle/fluid/operators/activation_mkldnn_op.cc b/paddle/fluid/operators/activation_mkldnn_op.cc
index 137bca5e2b8..64649b1a5e4 100644
--- a/paddle/fluid/operators/activation_mkldnn_op.cc
+++ b/paddle/fluid/operators/activation_mkldnn_op.cc
@@ -71,6 +71,10 @@ class MKLDNNActivationGradKernel
                        diff_y->format() != memory::format::format_undef,
                    "Wrong layout/format set for Input OutGrad tensor");
 
+    PADDLE_ENFORCE(
+        !ctx.Attr<bool>("is_test"),
+        "is_test attribute should be set to False in training phase.");
+
     Functor functor;
 
     auto attrs = functor.GetAttrs();
@@ -115,11 +119,15 @@ void eltwise_forward(const framework::ExecutionContext &ctx,
   const std::string key_fwd = key_with_layout + "@eltwise_fwd";
   const std::string key_fwd_pd = key_with_layout + "@eltwise_fwd_pd";
 
+  bool is_test = ctx.Attr<bool>("is_test");
+
   // save input data and layout to be referred in backward path
   auto p_src_data = std::make_shared<const T *>(x_data);
-  dev_ctx.SetBlob(key_src_data, p_src_data);
   auto p_src_layout = std::make_shared<memory::format>(src_format);
-  dev_ctx.SetBlob(key_src_layout, p_src_layout);
+  if (!is_test) {
+    dev_ctx.SetBlob(key_src_data, p_src_data);
+    dev_ctx.SetBlob(key_src_layout, p_src_layout);
+  }
 
   auto p_fwd = std::static_pointer_cast<mkldnn::eltwise_forward>(
       dev_ctx.GetBlob(key_fwd));
@@ -136,14 +144,17 @@ void eltwise_forward(const framework::ExecutionContext &ctx,
     dev_ctx.SetBlob(key_src_mem, src_memory);
 
     // create primitive descriptor for activation forward and save it
+    auto mkldnn_forward_prop_kind = is_test
+                                        ? mkldnn::prop_kind::forward_inference
+                                        : mkldnn::prop_kind::forward_training;
     auto forward_desc = mkldnn::eltwise_forward::desc(
-        mkldnn::prop_kind::forward_training, algorithm,
+        mkldnn_forward_prop_kind, algorithm,
         src_memory->get_primitive_desc().desc(), alpha, beta);
     auto forward_pd = std::make_shared<mkldnn::eltwise_forward::primitive_desc>(
         forward_desc, mkldnn_engine);
 
     // save prim desc into global device context to be referred in backward path
-    dev_ctx.SetBlob(key_fwd_pd, forward_pd);
+    if (!is_test) dev_ctx.SetBlob(key_fwd_pd, forward_pd);
 
     // create mkldnn memory for output y
     dst_memory =
diff --git a/paddle/fluid/operators/activation_op.cc b/paddle/fluid/operators/activation_op.cc
index ea260a3e92b..bb9ea3f3ba0 100644
--- a/paddle/fluid/operators/activation_op.cc
+++ b/paddle/fluid/operators/activation_op.cc
@@ -22,18 +22,23 @@ namespace operators {
 
 using paddle::framework::Tensor;
 
-#define REGISTER_ACTIVATION_OP_MAKER(OP_NAME, OP_COMMENT)               \
-  class OP_NAME##OpMaker                                                \
-      : public ::paddle::framework::OpProtoAndCheckerMaker {            \
-   public:                                                              \
-    void Make() override {                                              \
-      AddInput("X", "Input of " #OP_NAME " operator");                  \
-      AddOutput("Out", "Output of " #OP_NAME " operator");              \
-      AddAttr<bool>("use_mkldnn",                                       \
-                    "(bool, default false) Only used in mkldnn kernel") \
-          .SetDefault(false);                                           \
-      AddComment(#OP_COMMENT);                                          \
-    }                                                                   \
+#define REGISTER_ACTIVATION_OP_MAKER(OP_NAME, OP_COMMENT)                    \
+  class OP_NAME##OpMaker                                                     \
+      : public ::paddle::framework::OpProtoAndCheckerMaker {                 \
+   public:                                                                   \
+    void Make() override {                                                   \
+      AddInput("X", "Input of " #OP_NAME " operator");                       \
+      AddOutput("Out", "Output of " #OP_NAME " operator");                   \
+      AddAttr<bool>("use_mkldnn",                                            \
+                    "(bool, default false) Only used in mkldnn kernel")      \
+          .SetDefault(false);                                                \
+      AddAttr<bool>(                                                         \
+          "is_test",                                                         \
+          "(bool, default false) Set to true for inference only, false "     \
+          "for training. Some layers may run faster when this is true.")     \
+          .SetDefault(false);                                                \
+      AddComment(#OP_COMMENT);                                               \
+    }                                                                        \
   }
 
 #define REGISTER_ACTIVATION_OP_GRAD_MAKER(OP_NAME, KERNEL_TYPE)    \
@@ -269,7 +274,7 @@ class SoftShrinkOpMaker : public framework::OpProtoAndCheckerMaker {
 :strong:`Softshrink Activation Operator`
 
 ..  math::
-    out = \begin{cases} 
+    out = \begin{cases}
          x - \lambda, \text{if } x > \lambda \\
          x + \lambda, \text{if } x < -\lambda \\
          0,  \text{otherwise}
@@ -435,7 +440,7 @@ class HardSigmoidOpMaker : public framework::OpProtoAndCheckerMaker {
     AddComment(R"DOC(
 HardSigmoid Activation Operator.
 
-Segment-wise linear approximation of sigmoid(https://arxiv.org/abs/1603.00391), 
+Segment-wise linear approximation of sigmoid(https://arxiv.org/abs/1603.00391),
 which is much faster than sigmoid.
 
 $out = \max(0, \min(1, slope * x + shift))$
diff --git a/paddle/fluid/operators/batch_norm_op.cc b/paddle/fluid/operators/batch_norm_op.cc
index cf245f5038f..2463c939bc5 100644
--- a/paddle/fluid/operators/batch_norm_op.cc
+++ b/paddle/fluid/operators/batch_norm_op.cc
@@ -113,7 +113,10 @@ class BatchNormOp : public framework::OperatorWithKernel {
 class BatchNormOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
   void Make() override {
-    AddAttr<bool>("is_test", "").SetDefault(false);
+    AddAttr<bool>("is_test",
+                  "(bool, default false) Set to true for inference only, false "
Some layers may run faster when this is true.") + .SetDefault(false); AddAttr("momentum", "").SetDefault(0.9); AddAttr("epsilon", "") .SetDefault(1e-5) diff --git a/paddle/fluid/operators/conv_mkldnn_op.cc b/paddle/fluid/operators/conv_mkldnn_op.cc index f2cc6642ee6..c3c7c90f150 100644 --- a/paddle/fluid/operators/conv_mkldnn_op.cc +++ b/paddle/fluid/operators/conv_mkldnn_op.cc @@ -383,20 +383,22 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel { // create a conv primitive descriptor and save it for usage in backward std::shared_ptr conv_pd; + auto fwd_prop_kind = is_test ? mkldnn::prop_kind::forward_inference + : mkldnn::prop_kind::forward_training; if (bias) { bias_tz = paddle::framework::vectorize2int(bias->dims()); auto bias_md = platform::MKLDNNMemDesc( bias_tz, platform::MKLDNNGetDataType(), memory::format::x); - conv_pd = ConvFwdPrimitiveDesc(src_md, weights_md, bias_md, dst_md, - strides, paddings, mkldnn_engine, - fuse_relu, fuse_residual_conn); + conv_pd = ConvFwdPrimitiveDesc( + src_md, weights_md, bias_md, dst_md, strides, paddings, mkldnn_engine, + fuse_relu, fuse_residual_conn, fwd_prop_kind); } else { - conv_pd = - ConvFwdPrimitiveDesc(src_md, weights_md, dst_md, strides, paddings, - mkldnn_engine, fuse_relu, fuse_residual_conn); + conv_pd = ConvFwdPrimitiveDesc(src_md, weights_md, dst_md, strides, + paddings, mkldnn_engine, fuse_relu, + fuse_residual_conn, fwd_prop_kind); } // Save conv_pd/src_memory/weights_memory for backward pass - dev_ctx.SetBlob(key_conv_pd, conv_pd); + if (!is_test) dev_ctx.SetBlob(key_conv_pd, conv_pd); ConvMKLDNNHandler handler(conv_pd, dev_ctx, mkldnn_engine, key); @@ -510,14 +512,14 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel { const memory::desc& dst, const std::vector& strides, const std::vector& paddings, const mkldnn::engine& engine, const bool fuse_relu, - const bool fuse_residual_conn) const { + const bool fuse_residual_conn, + mkldnn::prop_kind fwd_prop_kind) const { memory::dims stride_dims = {strides[0], strides[1]}; memory::dims padding_dims = {paddings[0], paddings[1]}; auto conv_desc = mkldnn::convolution_forward::desc( - mkldnn::prop_kind::forward, mkldnn::convolution_direct, src, weights, - dst, stride_dims, padding_dims, padding_dims, - mkldnn::padding_kind::zero); + fwd_prop_kind, mkldnn::convolution_direct, src, weights, dst, + stride_dims, padding_dims, padding_dims, mkldnn::padding_kind::zero); mkldnn::primitive_attr conv_attr = CreatePostOps(fuse_relu, fuse_residual_conn); @@ -535,14 +537,14 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel { const std::vector& strides, const std::vector& paddings, const mkldnn::engine& engine, const bool fuse_relu, - const bool fuse_residual_conn) const { + const bool fuse_residual_conn, + mkldnn::prop_kind fwd_prop_kind) const { memory::dims stride_dims = {strides[0], strides[1]}; memory::dims padding_dims = {paddings[0], paddings[1]}; auto conv_desc = mkldnn::convolution_forward::desc( - mkldnn::prop_kind::forward, mkldnn::convolution_direct, src, weights, - bias, dst, stride_dims, padding_dims, padding_dims, - mkldnn::padding_kind::zero); + fwd_prop_kind, mkldnn::convolution_direct, src, weights, bias, dst, + stride_dims, padding_dims, padding_dims, mkldnn::padding_kind::zero); mkldnn::primitive_attr conv_attr = CreatePostOps(fuse_relu, fuse_residual_conn); @@ -587,6 +589,10 @@ class ConvMKLDNNGradOpKernel : public paddle::framework::OpKernel { output_grad->format() != memory::format::format_undef, "Wrong layout/format set for output_grad 
tensor"); + PADDLE_ENFORCE( + !ctx.Attr("is_test"), + "is_test attribute should be set to False in training phase."); + if (!input_grad && !filter_grad) return; std::vector strides = ctx.Attr>("strides"); diff --git a/paddle/fluid/operators/conv_op.cc b/paddle/fluid/operators/conv_op.cc index 4d370746382..1ac4bef615a 100644 --- a/paddle/fluid/operators/conv_op.cc +++ b/paddle/fluid/operators/conv_op.cc @@ -109,7 +109,10 @@ framework::OpKernelType ConvOp::GetExpectedKernelType( } void Conv2DOpMaker::Make() { - AddAttr("is_test", "").SetDefault(false); + AddAttr("is_test", + "(bool, default false) Set to true for inference only, false " + "for training. Some layers may run faster when this is true.") + .SetDefault(false); AddInput( "Input", "(Tensor) The input tensor of convolution operator. " diff --git a/paddle/fluid/operators/dropout_op.cc b/paddle/fluid/operators/dropout_op.cc index 3c28ef30922..dd3474dd252 100644 --- a/paddle/fluid/operators/dropout_op.cc +++ b/paddle/fluid/operators/dropout_op.cc @@ -49,7 +49,10 @@ class DropoutOpMaker : public framework::OpProtoAndCheckerMaker { PADDLE_ENFORCE(drop_p >= 0.0f && drop_p <= 1.0f, "'dropout_prob' must be between 0.0 and 1.0."); }); - AddAttr("is_test", "True if in test phase.").SetDefault(false); + AddAttr("is_test", + "(bool, default false) Set to true for inference only, false " + "for training. Some layers may run faster when this is true.") + .SetDefault(false); AddAttr("fix_seed", "A flag indicating whether to use a fixed seed to generate " "random mask. NOTE: DO NOT set this flag to true in " diff --git a/paddle/fluid/operators/fake_quantize_op.cc b/paddle/fluid/operators/fake_quantize_op.cc index e608eba05d5..43af83fd693 100644 --- a/paddle/fluid/operators/fake_quantize_op.cc +++ b/paddle/fluid/operators/fake_quantize_op.cc @@ -138,7 +138,7 @@ class FakeQuantizeAbsMaxOpMaker : public framework::OpProtoAndCheckerMaker { AddComment(R"DOC( FakeQuantize operator -$$scale = max(abs(X))$$ +$$scale = max(abs(X))$$ $$range = 2^{bit_length - 1} - 1$$ $$Out = round(X/scale * range)$$ @@ -199,11 +199,14 @@ class FakeQuantizeRangeAbsMaxOpMaker PADDLE_ENFORCE(bit_length >= 1 && bit_length <= 16, "'bit_length' should be between 1 and 16."); }); - AddAttr("is_test", "").SetDefault(false); + AddAttr("is_test", + "(bool, default false) Set to true for inference only, false " + "for training. Some layers may run faster when this is true.") + .SetDefault(false); AddComment(R"DOC( FakeQuantize operator is used in static quantization. -$$scale = max(max(abs(x)), history_abs_max)$$ +$$scale = max(max(abs(x)), history_abs_max)$$ $$range = 2^{bit_length - 1} - 1$$ $$Out = round(X/scale * range)$$ diff --git a/paddle/fluid/operators/lrn_op.cc b/paddle/fluid/operators/lrn_op.cc index 61c3cb34a24..8994f270860 100644 --- a/paddle/fluid/operators/lrn_op.cc +++ b/paddle/fluid/operators/lrn_op.cc @@ -229,8 +229,8 @@ class LRNOpMaker : public framework::OpProtoAndCheckerMaker { "the input will be transformed automatically. ") .SetDefault("AnyLayout"); AddAttr("is_test", - "Turns on memory optimization that optimizes away " - "unnecessary memory allocations. Used by MKLDNN.") + "(bool, default false) Set to true for inference only, false " + "for training. 
       .SetDefault(false);
 
   AddComment(R"DOC(
diff --git a/paddle/fluid/operators/pool_mkldnn_op.cc b/paddle/fluid/operators/pool_mkldnn_op.cc
index 56cef91e29c..0a9a29956af 100644
--- a/paddle/fluid/operators/pool_mkldnn_op.cc
+++ b/paddle/fluid/operators/pool_mkldnn_op.cc
@@ -87,6 +87,7 @@ class PoolMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
     std::vector<int> ksize = ctx.Attr<std::vector<int>>("ksize");
     std::vector<int> strides = ctx.Attr<std::vector<int>>("strides");
     std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
+    bool is_test = ctx.Attr<bool>("is_test");
     if (ctx.Attr<bool>("global_pooling")) {
       for (size_t i = 0; i < ksize.size(); ++i) {
         paddings[i] = 0;
@@ -142,16 +143,10 @@ class PoolMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
       std::shared_ptr<mkldnn::pooling_forward::primitive_desc> pool_pd =
           CreatePrimitiveDesc(src_md, dst_md, strides, padding_left_top,
                               padding_right_bottom, ksize, pooling_type,
-                              mkldnn_engine, ceil_mode);
+                              mkldnn_engine, ceil_mode, is_test);
 
       // save pool_pd into global device context to be referred in backward path
-      dev_ctx.SetBlob(key_pool_pd, pool_pd);
-
-      std::shared_ptr<mkldnn::memory> workspace_memory =
-          CreateWorkspaceMemory(pool_pd, pooling_type, mkldnn_engine);
-
-      // save pool_workspace_memory to be referred in backward path
-      dev_ctx.SetBlob(key_pool_workspace_memory, workspace_memory);
+      if (!is_test) dev_ctx.SetBlob(key_pool_pd, pool_pd);
 
       auto src_memory = std::make_shared<memory>(pool_pd->src_primitive_desc(),
                                                  to_void_cast<T>(input_data));
@@ -161,9 +156,19 @@ class PoolMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
       dev_ctx.SetBlob(key_pool_src_mem_p, src_memory);
       dev_ctx.SetBlob(key_pool_dst_mem_p, dst_memory);
 
-      pool_p = std::make_shared<mkldnn::pooling_forward>(
-          *pool_pd, *(src_memory.get()), *(dst_memory.get()),
-          *workspace_memory);
+      if (is_test) {
+        pool_p = std::make_shared<mkldnn::pooling_forward>(*pool_pd, *src_memory,
+                                                           *dst_memory);
+      } else {
+        std::shared_ptr<mkldnn::memory> workspace_memory =
+            CreateWorkspaceMemory(pool_pd, pooling_type, mkldnn_engine);
+
+        // save pool_workspace_memory to be referred in backward path
+        dev_ctx.SetBlob(key_pool_workspace_memory, workspace_memory);
+
+        pool_p = std::make_shared<mkldnn::pooling_forward>(
+            *pool_pd, *src_memory, *dst_memory, *workspace_memory);
+      }
 
       dev_ctx.SetBlob(key_pool_p, pool_p);
 
@@ -201,9 +206,12 @@ class PoolMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
       const std::vector<int>& stride, const std::vector<int>& padding_left_top,
       const std::vector<int>& padding_right_bot, const std::vector<int>& kernel,
       const std::string& pooling_type, const mkldnn::engine& engine,
-      bool ceil_mode) const {
+      bool ceil_mode, bool is_test) const {
+    auto mkldnn_forward_prop_kind = is_test
+                                        ? mkldnn::prop_kind::forward_inference
+                                        : mkldnn::prop_kind::forward_training;
     auto pool_desc = mkldnn::pooling_forward::desc(
-        mkldnn::prop_kind::forward,
+        mkldnn_forward_prop_kind,
         pooling_type == "max" ? mkldnn::algorithm::pooling_max
                               : mkldnn::algorithm::pooling_avg,
         src, dst, stride, kernel, padding_left_top, padding_right_bot,
@@ -248,6 +256,10 @@ class PoolMKLDNNGradOpKernel : public paddle::framework::OpKernel<T> {
                        out_grad->format() != memory::format::format_undef,
                    "Wrong layout/format set for Input output_grad tensor");
 
+    PADDLE_ENFORCE(
+        !ctx.Attr<bool>("is_test"),
+        "is_test attribute should be set to False in training phase.");
+
     std::string pooling_type = ctx.Attr<std::string>("pooling_type");
     std::vector<int> ksize = ctx.Attr<std::vector<int>>("ksize");
     std::vector<int> strides = ctx.Attr<std::vector<int>>("strides");
diff --git a/paddle/fluid/operators/pool_op.cc b/paddle/fluid/operators/pool_op.cc
index 46a95350a72..52b607df744 100644
--- a/paddle/fluid/operators/pool_op.cc
+++ b/paddle/fluid/operators/pool_op.cc
@@ -206,6 +206,11 @@ void Pool2dOpMaker::Make() {
       "Defaults to \"NHWC\". Specify the data format of the output data, "
       "the input will be transformed automatically. ")
       .SetDefault("AnyLayout");
+  AddAttr<bool>("is_test",
+                "(bool, default false) Set to true for inference only, false "
+                "for training. Some layers may run faster when this is true.")
+      .SetDefault(false);
+
   // TODO(dzhwinter): need to registered layout transform function
 
   AddComment(R"DOC(
diff --git a/paddle/fluid/operators/sequence_pool_op.cc b/paddle/fluid/operators/sequence_pool_op.cc
index 217bb1610fd..7e80b8db5e9 100644
--- a/paddle/fluid/operators/sequence_pool_op.cc
+++ b/paddle/fluid/operators/sequence_pool_op.cc
@@ -47,7 +47,10 @@ class SequencePoolOpMaker : public framework::OpProtoAndCheckerMaker {
               "(Tensor<int>) This tensor is used for the sequence max-pooling "
               "to record the max indexes.")
         .AsIntermediate();
-    AddAttr<bool>("is_test", "").SetDefault(false);
+    AddAttr<bool>("is_test",
+                  "(bool, default false) Set to true for inference only, false "
+                  "for training. Some layers may run faster when this is true.")
+        .SetDefault(false);
     AddAttr<std::string>(
         "pooltype",
         "(string, default 'AVERAGE') the pooling pooltype of SequencePoolOp.")
diff --git a/paddle/fluid/operators/softmax_op.cc b/paddle/fluid/operators/softmax_op.cc
index 9e21b6c824b..091ce4e6e8e 100644
--- a/paddle/fluid/operators/softmax_op.cc
+++ b/paddle/fluid/operators/softmax_op.cc
@@ -96,20 +96,21 @@ class SoftmaxOpMaker : public framework::OpProtoAndCheckerMaker {
                   "(bool, default false) Only used in mkldnn kernel")
         .SetDefault(false);
     AddAttr<bool>("is_test",
-                  "Disable epsilon adding to softmax results. Used by MKLDNN.")
+                  "(bool, default false) Set to true for inference only, false "
+                  "for training. Some layers may run faster when this is true.")
         .SetDefault(false);
     AddComment(R"DOC(
 Softmax Operator.
 
-The input of the softmax operator is a tensor of any rank. The output tensor 
+The input of the softmax operator is a tensor of any rank. The output tensor
 has the same shape as the input.
 
-The input tensor will first be logically flattened to a 2-D matrix. The matrix's 
-second dimension(row length) is as same as the last dimension of the input 
-tensor, and the first dimension(column length) is the product of all other 
-dimensions of the input tensor. For each row of the matrix, the softmax operator 
-squashes the K-dimensional(K is the width of the matrix, which is also the size 
-of the input tensor's last dimension) vector of arbitrary real values to a 
+The input tensor will first be logically flattened to a 2-D matrix. The matrix's
+second dimension(row length) is the same as the last dimension of the input
+tensor, and the first dimension(column length) is the product of all other
+dimensions of the input tensor. For each row of the matrix, the softmax operator
+squashes the K-dimensional(K is the width of the matrix, which is also the size
+of the input tensor's last dimension) vector of arbitrary real values to a
 K-dimensional vector of real values in the range [0, 1] that add up to 1.
 It computes the exponential of the given dimension and the sum of exponential
 values of all the other dimensions in the K-dimensional vector input.
diff --git a/paddle/fluid/operators/while_op.cc b/paddle/fluid/operators/while_op.cc
index aa6af055dec..2b56514fe08 100644
--- a/paddle/fluid/operators/while_op.cc
+++ b/paddle/fluid/operators/while_op.cc
@@ -92,7 +92,10 @@ class WhileOpMaker : public framework::OpProtoAndCheckerMaker {
              "variables generated in the i'th step.");
     AddAttr<framework::BlockDesc *>(kStepBlock,
                                     "The step block inside WhileOp");
-    AddAttr<bool>("is_test", "True if in test phase.").SetDefault(false);
+    AddAttr<bool>("is_test",
+                  "(bool, default false) Set to true for inference only, false "
+                  "for training. Some layers may run faster when this is true.")
+        .SetDefault(false);
     AddComment(R"DOC(
 )DOC");
   }
diff --git a/python/paddle/fluid/transpiler/inference_transpiler.py b/python/paddle/fluid/transpiler/inference_transpiler.py
index 9a13cecc646..ccf7af334d0 100644
--- a/python/paddle/fluid/transpiler/inference_transpiler.py
+++ b/python/paddle/fluid/transpiler/inference_transpiler.py
@@ -73,6 +73,38 @@ class InferenceTranspiler(object):
                 program)  # ResNet residual block merging
         self._fuse_bn_relu_mkldnn(program)
 
+        self._is_test_pass(program)
+
+    def _is_test_pass(self, program):
+        '''
+        Transpile the program by setting is_test to True for all layers that
+        already have the attribute, and by inserting is_test = True for
+        pooling and activation layers. As a result some operators may run
+        faster.
+
+        :param program: program to transpile
+        :type program: Program
+        '''
+        self.block = program.block(0)
+
+        i = 0
+        while i < len(self.block.ops):
+            current_op = self.block.ops[i]
+            if current_op.has_attr("is_test"):
+                current_op._set_attr("is_test", True)
+            elif current_op.type in [
+                    "pool2d", "sigmoid", "logsigmoid", "softshrink", "exp",
+                    "brelu", "pow", "leaky_relu", "stanh", "relu", "tanh",
+                    "tanh_shrink", "sqrt", "abs", "ceil", "elu", "floor", "cos",
+                    "sin", "round", "reciprocal", "hard_shrink", "hard_sigmoid",
+                    "relu6", "soft_relu", "swish", "thresholded_relu", "log",
+                    "square", "softplus", "softsign"
+            ]:
+                current_op._set_attr("is_test", True)
+            i = i + 1
+        # TODO(luotao): use clone() method to flush the program.desc in force,
+        # since some large program.desc will not be flushed immediately.
+        # And a better solution will be considered later.
+        program = program.clone()
+
     def _depthwise_conv_mkldnn(self, program):
         '''
         Transpile the program by replacing depthwise_conv2d to conv2d for MKLDNN program.
-- 
GitLab
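To check the effect of the pass end to end, a small verification sketch in
Python. The `build_program` helper is hypothetical (stands in for whatever
constructs your inference Program); the transpiler call and attribute
accessors follow the Python file changed above, but treat the exact setup as
an assumption rather than part of the patch:

    import paddle.fluid as fluid

    program = build_program()  # hypothetical: any inference Program
    t = fluid.transpiler.InferenceTranspiler()
    t.transpile(program, fluid.CPUPlace())

    # After the pass, every op that carries is_test should have it set to
    # True, mirroring the C++ is_test_pass and its unit test above.
    for op in program.block(0).ops:
        if op.has_attr("is_test"):
            assert op.attr("is_test")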