未验证 提交 9b06dd86 编写于 作者: C chenhaoze 提交者: GitHub

Add three passes and api reference of paddle_pass_builder. test=develop (#23741)

* Add three passes and api reference of paddle_pass_builder.h
上级 fbdf7791
......@@ -72,6 +72,17 @@ void ConvElementwiseAdd2ActFusePass::ApplyImpl(ir::Graph* graph) const {
std::string act_op_type = act_op->Op()->Type();
std::string act_op_out = act_out->Name();
auto elementwise_add_out_shape = elementwise_add_out->Var()->GetShape();
auto add_in_y_1_shape = elementwise_add_in_y_1->Var()->GetShape();
if (elementwise_add_out_shape != add_in_y_1_shape) {
VLOG(3)
<< "The inputs X and Y's shapes of elementwise_add op are different.";
VLOG(3) << "conv_elementwise_add2_act_fuse_pass doesn't support this "
"pattern. Fusion will not apply.";
return;
}
auto new_op_proto = PrepareOpDesc(base_op_desc, bias_name, bias1_name,
act_op_type, act_op_out);
framework::OpDesc new_op_desc(new_op_proto, nullptr);
......
......@@ -1621,6 +1621,7 @@ PDNode *patterns::ConvElementwiseaddAct::operator()(PDNode *conv_in) {
auto elementwise_add_op = pattern->NewNode(elementwise_add_op_repr())
->assert_is_op("elementwise_add");
auto elementwise_add_in_y = pattern->NewNode(elementwise_add_in_y_repr())
->assert_is_persistable_var()
->assert_is_op_input("elementwise_add", "Y")
->AsInput();
auto elementwise_add_out = pattern->NewNode(elementwise_add_out_repr())
......@@ -1668,6 +1669,7 @@ PDNode *patterns::ConvElementwiseadd2Act::operator()(PDNode *conv_in) {
auto elementwise_add_op = pattern->NewNode(elementwise_add_op_repr())
->assert_is_op("elementwise_add");
auto elementwise_add_in_y = pattern->NewNode(elementwise_add_in_y_repr())
->assert_is_persistable_var()
->assert_is_op_input("elementwise_add", "Y")
->AsInput();
auto elementwise_add_out = pattern->NewNode(elementwise_add_out_repr())
......
......@@ -18,50 +18,80 @@
#include <string>
#include <vector>
/*! \file */
/*! \namespace paddle */
///
/// \file paddle_pass_builder.h
///
/// \brief Class PaddlePassBuilder and its subclasses (pass strategies).
/// \section sec_intro Introduction
/// This class aims to build passes for paddle and define passes' strategies.
///
/// \author paddle-infer@baidu.com
/// \date 2020-3-23
/// \since 1.7
/// \namespace paddle
namespace paddle {
/** This is a pass builder based on string. It is part of inference API.
*/
/// \class PaddlePassBuilder
/// \brief This class build passes based on vector<string> input. It is part of
/// inference API. Users can build passes, insert new passes, delete passes
/// using this class and its functions.
///
/// Example Usage:
/// Build a new pass.
/// \code{cpp}
/// const vector<string> passes(1, "conv_relu_mkldnn_fuse_pass");
/// PaddlePassBuilder builder(passes);
/// \endcode
class PaddlePassBuilder {
public:
/// \brief Constructor of the class. It stores the input passes.
/// \param[in] passes passes' types.
explicit PaddlePassBuilder(const std::vector<std::string> &passes)
: passes_(passes) {}
/// \brief Stores the input passes.
/// \param[in] passes passes' types.
void SetPasses(std::initializer_list<std::string> passes) {
passes_ = passes;
}
/** Append a pass to the end of the passes. */
/// \brief Append a pass to the end of the passes.
/// \param[in] pass_type the type of the new pass.
void AppendPass(const std::string &pass_type);
/** Insert a pass to a specific position.
* @param idx the position to insert.
* @param pass_type the pass key.
*/
/// \brief Insert a pass to a specific position.
/// \param[in] idx the position to insert.
/// \param[in] pass_type the type of insert pass.
void InsertPass(size_t idx, const std::string &pass_type);
/** Delete the `idx`-th pass. */
/// \brief Delete the pass at certain position 'idx'.
/// \param[in] idx the position to delete.
void DeletePass(size_t idx);
/** Delete all the passes that has type `pass_type`. */
/// \brief Delete all passes that has a certain type 'pass_type'.
/// \param[in] pass_type the certain pass type to be deleted.
void DeletePass(const std::string &pass_type);
/// \brief Delete all the passes.
void ClearPasses();
/** Append an analysis pass. */
/// \brief Append an analysis pass.
/// \param[in] pass the type of the new analysis pass.
void AppendAnalysisPass(const std::string &pass);
/** Visualize the computation graph after each pass by generating a DOT
* language file, one can draw them with the Graphviz toolkit.
*/
/// \brief Visualize the computation graph after each pass by generating a DOT
/// language file, one can draw them with the Graphviz toolkit.
void TurnOnDebug();
/** Human-readible information. */
/// \brief Human-readable information of the passes.
std::string DebugString();
/// \brief Get information of passes.
/// \return Return list of the passes.
const std::vector<std::string> &AllPasses() const { return passes_; }
/// \brief Get information of analysis passes.
/// \return Return list of analysis passes.
std::vector<std::string> AnalysisPasses() const {
auto passes = analysis_passes_;
// To make sure the ir_graph_to_program should be the last pass so any
......@@ -71,88 +101,121 @@ class PaddlePassBuilder {
}
protected:
/// \cond Protected
std::vector<std::string> analysis_passes_{
{"ir_graph_build_pass", "ir_graph_clean_pass", "ir_analysis_pass",
"ir_params_sync_among_devices_pass", "adjust_cudnn_workspace_size_pass",
"inference_op_replace_pass"}};
std::vector<std::string> passes_;
/// \endcond
};
/// \class PassStrategy
/// \brief This class defines the pass strategies like whether to use gpu/cuDNN
/// kernel/MKLDNN.
class PassStrategy : public PaddlePassBuilder {
 public:
  /// \brief Constructor of PassStrategy class. It works the same as
  /// PaddlePassBuilder class.
  /// \param[in] passes passes' types.
  explicit PassStrategy(const std::vector<std::string> &passes)
      : PaddlePassBuilder(passes) {}

  /// \brief Enable the use of cuDNN kernel.
  virtual void EnableCUDNN() {}

  /// \brief Enable the use of MKLDNN.
  /// The MKLDNN control exists in both CPU and GPU mode, because there can
  /// still be some CPU kernels running in GPU mode.
  virtual void EnableMKLDNN() {}

  /// \brief Enable MKLDNN quantize optimization.
  virtual void EnableMkldnnQuantizer() {}

  /// \brief Check if we are using gpu.
  /// \return A bool variable implying whether we are in gpu mode.
  bool use_gpu() const { return use_gpu_; }

  /// \brief Default destructor.
  virtual ~PassStrategy() = default;

 protected:
  /// \cond Protected
  bool use_gpu_{false};
  bool use_mkldnn_{false};
  /// \endcond
};
/// \class CpuPassStrategy
/// \brief The CPU passes controller, it is used in AnalysisPredictor with CPU
/// mode.
class CpuPassStrategy : public PassStrategy {
 public:
  /// \brief Default constructor of CpuPassStrategy.
  CpuPassStrategy();

  /// \brief Construct by copying another CpuPassStrategy object.
  /// \param[in] other The CpuPassStrategy object we want to copy.
  explicit CpuPassStrategy(const CpuPassStrategy &other)
      : PassStrategy(other.AllPasses()) {
    use_gpu_ = other.use_gpu_;
    use_mkldnn_ = other.use_mkldnn_;
    use_mkldnn_quantizer_ = other.use_mkldnn_quantizer_;
  }

  /// \brief Default destructor.
  virtual ~CpuPassStrategy() = default;

  /// \brief Enable the use of cuDNN kernel.
  void EnableCUDNN() override;

  /// \brief Enable the use of MKLDNN.
  void EnableMKLDNN() override;

  /// \brief Enable MKLDNN quantize optimization.
  void EnableMkldnnQuantizer() override;

 protected:
  /// \cond Protected
  bool use_mkldnn_quantizer_{false};
  /// \endcond
};
/// \class GpuPassStrategy
/// \brief The GPU passes controller, it is used in AnalysisPredictor with GPU
/// mode.
class GpuPassStrategy : public PassStrategy {
 public:
  /// \brief Default constructor of GpuPassStrategy.
  GpuPassStrategy();

  /// \brief Construct by copying another GpuPassStrategy object.
  /// \param[in] other The GpuPassStrategy object we want to copy.
  explicit GpuPassStrategy(const GpuPassStrategy &other)
      : PassStrategy(other.AllPasses()) {
    // A GpuPassStrategy is GPU-mode by definition, regardless of `other`.
    use_gpu_ = true;
    use_cudnn_ = other.use_cudnn_;
  }

  /// \brief Enable the use of cuDNN kernel.
  void EnableCUDNN() override;

  /// \brief Not supported in GPU mode yet.
  void EnableMKLDNN() override;

  /// \brief Not supported in GPU mode yet.
  void EnableMkldnnQuantizer() override;

  /// \brief Default destructor.
  virtual ~GpuPassStrategy() = default;

 protected:
  /// \cond Protected
  bool use_cudnn_{false};
  /// \endcond
};
/// \brief List of tensorRT subgraph passes.
extern const std::vector<std::string> kTRTSubgraphPasses;
/// \brief List of lite subgraph passes.
extern const std::vector<std::string> kLiteSubgraphPasses;
} // namespace paddle
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from inference_pass_test import InferencePassTest
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.core import AnalysisConfig
"""Test for fusion of conv, elementwise_add and 2 act."""
class ConvElementwiseAdd2ActFusePassTest(InferencePassTest):
    """Exercises the conv_elementwise_add2_act fuse pass: a conv2d whose
    output feeds an elementwise_add with an external residual input,
    followed by a relu activation."""

    def setUp(self):
        # Build the program under test: conv2d -> elementwise_add(+relu).
        with fluid.program_guard(self.main_program, self.startup_program):
            image = fluid.data(
                name="data", shape=[-1, 3, 100, 100], dtype="float32")
            residual = fluid.data(
                name="add_y2", shape=[1, 3, 98, 98], dtype="float32")
            conv_result = fluid.layers.conv2d(
                input=image, num_filters=3, filter_size=3, bias_attr=None)
            fused_result = fluid.layers.elementwise_add(
                residual, conv_result, act="relu")
        self.feeds = {
            "data": np.random.random((1, 3, 100, 100)).astype("float32"),
            "add_y2": np.random.random((1, 3, 98, 98)).astype("float32"),
        }
        self.fetch_list = [fused_result]
        self.enable_mkldnn = False

    def test_check_output(self):
        # The fuse pass under test is CUDA-only, so skip on CPU builds.
        if core.is_compiled_with_cuda():
            self.check_output_with_option([True])
if __name__ == "__main__":
unittest.main()
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from inference_pass_test import InferencePassTest
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.core import AnalysisConfig
"""Test for fusion of conv, elementwise_add and act."""
class ConvElementwiseAddActFusePassTest(InferencePassTest):
    """Exercises the conv_elementwise_add_act fuse pass: a conv2d with a
    learnable bias and a relu activation fused into one op."""

    def setUp(self):
        # Build the program under test: conv2d(bias, act="relu").
        with fluid.program_guard(self.main_program, self.startup_program):
            image = fluid.data(
                name="data", shape=[-1, 3, 100, 100], dtype="float32")
            bias_attr = fluid.ParamAttr(
                initializer=fluid.initializer.Xavier(uniform=False),
                learning_rate=0.001)
            conv_result = fluid.layers.conv2d(
                input=image,
                num_filters=3,
                filter_size=3,
                bias_attr=bias_attr,
                act="relu")
        self.feeds = {
            "data": np.random.random((1, 3, 100, 100)).astype("float32"),
        }
        self.fetch_list = [conv_result]
        self.enable_mkldnn = False

    def test_check_output(self):
        # The fuse pass under test is CUDA-only, so skip on CPU builds.
        if core.is_compiled_with_cuda():
            self.check_output_with_option([True])
if __name__ == "__main__":
unittest.main()
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from inference_pass_test import InferencePassTest
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.core import AnalysisConfig
"""Test for fusion of conv and elementwise_add."""
class ConvElementwiseAddFusePassTest(InferencePassTest):
    """Exercises the conv_elementwise_add fuse pass: a conv2d with a
    learnable bias and no activation."""

    def setUp(self):
        # Build the program under test: conv2d(bias), no activation.
        with fluid.program_guard(self.main_program, self.startup_program):
            image = fluid.data(
                name="data", shape=[-1, 3, 100, 100], dtype="float32")
            bias_attr = fluid.ParamAttr(
                initializer=fluid.initializer.Xavier(uniform=False),
                learning_rate=0.001)
            conv_result = fluid.layers.conv2d(
                input=image, num_filters=3, filter_size=3,
                bias_attr=bias_attr)
        self.feeds = {
            "data": np.random.random((1, 3, 100, 100)).astype("float32"),
        }
        self.fetch_list = [conv_result]
        self.enable_mkldnn = False

    def test_check_output(self):
        # The fuse pass under test is CUDA-only, so skip on CPU builds.
        if core.is_compiled_with_cuda():
            self.check_output_with_option([True])
if __name__ == "__main__":
unittest.main()
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册