Commit a04d9981, authored by: M minqiyang

Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into fix_python35_CI_random_fail

...@@ -104,6 +104,7 @@ visualDL --logdir=scratch_log --port=8080 ...@@ -104,6 +104,7 @@ visualDL --logdir=scratch_log --port=8080
# Visit http://127.0.0.1:8080
```
If `TypeError: __init__() got an unexpected keyword argument 'file'` appears, it is because the installed protobuf is older than 3.5; running `pip install --upgrade protobuf` resolves it.
If you still run into installation problems inside a virtual environment, try the methods below.
......
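The FAQ entry above points at an outdated protobuf as the cause of the `TypeError`. A quick, generic way to check which protobuf version is actually installed before and after the suggested upgrade (a sketch, not part of this patch):

```
# Print the installed protobuf version; per the FAQ above it should be 3.5 or newer.
import google.protobuf

print(google.protobuf.__version__)
```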
...@@ -43,6 +43,7 @@ paddle.fluid.Executor.run ArgSpec(args=['self', 'program', 'feed', 'fetch_list', ...@@ -43,6 +43,7 @@ paddle.fluid.Executor.run ArgSpec(args=['self', 'program', 'feed', 'fetch_list',
paddle.fluid.global_scope ArgSpec(args=[], varargs=None, keywords=None, defaults=None) paddle.fluid.global_scope ArgSpec(args=[], varargs=None, keywords=None, defaults=None)
paddle.fluid.scope_guard ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None) paddle.fluid.scope_guard ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None)
paddle.fluid.Trainer.__init__ ArgSpec(args=['self', 'train_func', 'optimizer_func', 'param_path', 'place', 'parallel', 'checkpoint_config'], varargs=None, keywords=None, defaults=(None, None, False, None)) paddle.fluid.Trainer.__init__ ArgSpec(args=['self', 'train_func', 'optimizer_func', 'param_path', 'place', 'parallel', 'checkpoint_config'], varargs=None, keywords=None, defaults=(None, None, False, None))
paddle.fluid.Trainer.save_inference_model ArgSpec(args=['self', 'param_path', 'feeded_var_names', 'target_var_indexes'], varargs=None, keywords=None, defaults=None)
paddle.fluid.Trainer.save_params ArgSpec(args=['self', 'param_path'], varargs=None, keywords=None, defaults=None) paddle.fluid.Trainer.save_params ArgSpec(args=['self', 'param_path'], varargs=None, keywords=None, defaults=None)
paddle.fluid.Trainer.stop ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None) paddle.fluid.Trainer.stop ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
paddle.fluid.Trainer.test ArgSpec(args=['self', 'reader', 'feed_order'], varargs=None, keywords=None, defaults=None) paddle.fluid.Trainer.test ArgSpec(args=['self', 'reader', 'feed_order'], varargs=None, keywords=None, defaults=None)
......
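The new `Trainer.save_inference_model` entry above takes a `param_path` directory, the `feeded_var_names` of the inputs, and `target_var_indexes` selecting which of `train_func`'s return values to expose as fetch targets. A minimal sketch of a call matching this signature (it mirrors the fit_a_line test further down; the model definition here is illustrative only):

```
import paddle.fluid as fluid

def train_program():
    # Tiny linear-regression program; index 1 of the returned list is the prediction.
    x = fluid.layers.data(name='x', shape=[13], dtype='float32')
    y = fluid.layers.data(name='y', shape=[1], dtype='float32')
    y_predict = fluid.layers.fc(input=x, size=1, act=None)
    avg_loss = fluid.layers.mean(
        fluid.layers.square_error_cost(input=y_predict, label=y))
    return [avg_loss, y_predict]

def optimizer_func():
    return fluid.optimizer.SGD(learning_rate=0.001)

trainer = fluid.Trainer(
    train_func=train_program, optimizer_func=optimizer_func, place=fluid.CPUPlace())
# Save an inference model that feeds 'x' and fetches train_func's output at index 1.
trainer.save_inference_model("fit_a_line.inference_model", ['x'], [1])
```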
...@@ -11,6 +11,7 @@ ...@@ -11,6 +11,7 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
#include "paddle/fluid/framework/ir/fc_lstm_fuse_pass.h" #include "paddle/fluid/framework/ir/fc_lstm_fuse_pass.h"
#include <string> #include <string>
#include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/lod_tensor.h"
......
...@@ -6,6 +6,7 @@ cc_library(analysis SRCS pass_manager.cc node.cc data_flow_graph.cc graph_traits ...@@ -6,6 +6,7 @@ cc_library(analysis SRCS pass_manager.cc node.cc data_flow_graph.cc graph_traits
analyzer.cc analyzer.cc
helper.cc helper.cc
# passes # passes
analysis_pass.cc
fluid_to_data_flow_graph_pass.cc fluid_to_data_flow_graph_pass.cc
data_flow_graph_to_fluid_pass.cc data_flow_graph_to_fluid_pass.cc
dfg_graphviz_draw_pass.cc dfg_graphviz_draw_pass.cc
...@@ -105,6 +106,6 @@ if (NOT EXISTS ${TEXT_CLASSIFICATION_INSTALL_DIR} AND WITH_TESTING AND WITH_INFE ...@@ -105,6 +106,6 @@ if (NOT EXISTS ${TEXT_CLASSIFICATION_INSTALL_DIR} AND WITH_TESTING AND WITH_INFE
inference_download_and_uncompress(${TEXT_CLASSIFICATION_INSTALL_DIR} ${TEXT_CLASSIFICATION_MODEL_URL} "text-classification-Senta.tar.gz") inference_download_and_uncompress(${TEXT_CLASSIFICATION_INSTALL_DIR} ${TEXT_CLASSIFICATION_MODEL_URL} "text-classification-Senta.tar.gz")
endif() endif()
inference_analysis_test(test_text_classification SRCS test_text_classification.cc inference_analysis_test(test_text_classification SRCS analyzer_text_classification_tester.cc
EXTRA_DEPS paddle_inference_api paddle_fluid_api analysis_predictor EXTRA_DEPS paddle_inference_api paddle_fluid_api analysis_predictor
ARGS --infer_model=${TEXT_CLASSIFICATION_INSTALL_DIR}/text-classification-Senta) ARGS --infer_model=${TEXT_CLASSIFICATION_INSTALL_DIR}/text-classification-Senta)
...@@ -12,4 +12,4 @@ ...@@ -12,4 +12,4 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
#include "paddle/fluid/inference/analysis/pass.h" #include "paddle/fluid/inference/analysis/analysis_pass.h"
...@@ -28,10 +28,10 @@ namespace paddle { ...@@ -28,10 +28,10 @@ namespace paddle {
namespace inference { namespace inference {
namespace analysis { namespace analysis {
class Pass { class AnalysisPass {
public: public:
Pass() = default; AnalysisPass() = default;
virtual ~Pass() = default; virtual ~AnalysisPass() = default;
// Mutable Pass. // Mutable Pass.
virtual bool Initialize(Argument *argument) { return false; } virtual bool Initialize(Argument *argument) { return false; }
// Readonly Pass. // Readonly Pass.
...@@ -42,23 +42,16 @@ class Pass { ...@@ -42,23 +42,16 @@ class Pass {
virtual bool Finalize() { return false; } virtual bool Finalize() { return false; }
// Get a Pass appropriate to print the Node this pass operates on. // Get a Pass appropriate to print the Node this pass operates on.
virtual Pass *CreatePrinterPass(std::ostream &os, virtual AnalysisPass *CreatePrinterPass(std::ostream &os,
const std::string &banner) const { const std::string &banner) const {
return nullptr; return nullptr;
} }
// Create a debugger Pass that draw the DFG by graphviz toolkit. // Create a debugger Pass that draw the DFG by graphviz toolkit.
virtual Pass *CreateGraphvizDebugerPass() const { return nullptr; } virtual AnalysisPass *CreateGraphvizDebugerPass() const { return nullptr; }
virtual void Run() { LOG(FATAL) << "not valid"; }
// Run on a single Node.
virtual void Run(Node *x) { LOG(FATAL) << "not valid"; }
// Run on a single Function.
virtual void Run(Function *x) { LOG(FATAL) << "not valid"; }
// Run on a single FunctionBlock.
virtual void Run(FunctionBlock *x) { LOG(FATAL) << "not valid"; }
// Run on a single DataFlowGraph. // Run on a single DataFlowGraph.
virtual void Run(DataFlowGraph *x) { LOG(FATAL) << "not valid"; } virtual void Run(DataFlowGraph *x) = 0;
// Human-readable short representation. // Human-readable short representation.
virtual std::string repr() const = 0; virtual std::string repr() const = 0;
...@@ -66,29 +59,8 @@ class Pass { ...@@ -66,29 +59,8 @@ class Pass {
virtual std::string description() const { return "No DOC"; } virtual std::string description() const { return "No DOC"; }
}; };
// NodePass process on any Node types.
class NodePass : public Pass {
public:
virtual void Run(Node *node) = 0;
};
// NodePass process on any Function node types.
class FunctionPass : public Pass {
public:
virtual void Run(Function *node) = 0;
};
// NodePass process on any FunctionBlock node types.
class FunctionBlockPass : public Pass {
public:
virtual void Run(FunctionBlock *node) = 0;
};
// GraphPass processes on any GraphType. // GraphPass processes on any GraphType.
class DataFlowGraphPass : public Pass { class DataFlowGraphPass : public AnalysisPass {};
public:
virtual void Run(DataFlowGraph *graph) = 0;
};
} // namespace analysis } // namespace analysis
} // namespace inference } // namespace inference
......
...@@ -15,6 +15,7 @@ ...@@ -15,6 +15,7 @@
#include "paddle/fluid/inference/analysis/analyzer.h" #include "paddle/fluid/inference/analysis/analyzer.h"
#include <string> #include <string>
#include <vector> #include <vector>
#include "paddle/fluid/inference/analysis/data_flow_graph_to_fluid_pass.h" #include "paddle/fluid/inference/analysis/data_flow_graph_to_fluid_pass.h"
#include "paddle/fluid/inference/analysis/dfg_graphviz_draw_pass.h" #include "paddle/fluid/inference/analysis/dfg_graphviz_draw_pass.h"
#include "paddle/fluid/inference/analysis/fluid_to_data_flow_graph_pass.h" #include "paddle/fluid/inference/analysis/fluid_to_data_flow_graph_pass.h"
...@@ -58,7 +59,7 @@ class DfgPassManagerImpl final : public DfgPassManager { ...@@ -58,7 +59,7 @@ class DfgPassManagerImpl final : public DfgPassManager {
std::string description() const override { return "DFG pass manager."; } std::string description() const override { return "DFG pass manager."; }
private: private:
void AddPass(const std::string& name, Pass* pass) { void AddPass(const std::string& name, AnalysisPass* pass) {
VLOG(3) << "Adding pass " << name; VLOG(3) << "Adding pass " << name;
Register(name, pass); Register(name, pass);
AddGraphvizDebugerPass(pass); AddGraphvizDebugerPass(pass);
...@@ -87,7 +88,7 @@ class DfgPassManagerImpl final : public DfgPassManager { ...@@ -87,7 +88,7 @@ class DfgPassManagerImpl final : public DfgPassManager {
} }
// Add the graphviz debuger pass if the parent pass has one. // Add the graphviz debuger pass if the parent pass has one.
void AddGraphvizDebugerPass(Pass* pass) { void AddGraphvizDebugerPass(AnalysisPass* pass) {
auto* debuger_pass = pass->CreateGraphvizDebugerPass(); auto* debuger_pass = pass->CreateGraphvizDebugerPass();
if (debuger_pass) { if (debuger_pass) {
Register(debuger_pass->repr(), debuger_pass); Register(debuger_pass->repr(), debuger_pass);
......
...@@ -36,8 +36,11 @@ limitations under the License. */ ...@@ -36,8 +36,11 @@ limitations under the License. */
*/ */
#include <gflags/gflags.h> #include <gflags/gflags.h>
#include <string>
#include <vector>
#include "paddle/fluid/inference/analysis/analysis_pass.h"
#include "paddle/fluid/inference/analysis/flags.h" #include "paddle/fluid/inference/analysis/flags.h"
#include "paddle/fluid/inference/analysis/pass.h"
#include "paddle/fluid/inference/analysis/pass_manager.h" #include "paddle/fluid/inference/analysis/pass_manager.h"
namespace paddle { namespace paddle {
......
...@@ -12,13 +12,14 @@ ...@@ -12,13 +12,14 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
#include "paddle/fluid/inference/analysis/analyzer.h"
#include <gflags/gflags.h> #include <gflags/gflags.h>
#include <glog/logging.h> // use glog instead of PADDLE_ENFORCE to avoid importing other paddle header files. #include <glog/logging.h> // use glog instead of PADDLE_ENFORCE to avoid importing other paddle header files.
#include <gtest/gtest.h> #include <gtest/gtest.h>
#include "paddle/fluid/framework/ir/pass.h" #include "paddle/fluid/framework/ir/pass.h"
#include "paddle/fluid/inference/analysis/analyzer.h"
#include "paddle/fluid/inference/analysis/ut_helper.h" #include "paddle/fluid/inference/analysis/ut_helper.h"
#include "paddle/fluid/inference/api/paddle_inference_api.h" #include "paddle/fluid/inference/api/paddle_inference_api.h"
#include "paddle/fluid/inference/api/paddle_inference_pass.h"
#include "paddle/fluid/inference/api/timer.h" #include "paddle/fluid/inference/api/timer.h"
DEFINE_string(infer_model, "", "Directory of the inference model."); DEFINE_string(infer_model, "", "Directory of the inference model.");
...@@ -100,10 +101,3 @@ void Main(int batch_size) { ...@@ -100,10 +101,3 @@ void Main(int batch_size) {
TEST(text_classification, basic) { Main(FLAGS_batch_size); } TEST(text_classification, basic) { Main(FLAGS_batch_size); }
} // namespace paddle } // namespace paddle
USE_PASS(fc_fuse_pass);
USE_PASS(seq_concat_fc_fuse_pass);
USE_PASS(fc_lstm_fuse_pass);
USE_PASS(graph_viz_pass);
USE_PASS(infer_clean_graph_pass);
USE_PASS(attention_lstm_fuse_pass);
...@@ -263,7 +263,7 @@ class DFG_DebuggerPass : public DFG_GraphvizDrawPass { ...@@ -263,7 +263,7 @@ class DFG_DebuggerPass : public DFG_GraphvizDrawPass {
}; };
} // namespace } // namespace
Pass *DataFlowGraphToFluidPass::CreateGraphvizDebugerPass() const { AnalysisPass *DataFlowGraphToFluidPass::CreateGraphvizDebugerPass() const {
return new DFG_DebuggerPass(DFG_GraphvizDrawPass::Config( return new DFG_DebuggerPass(DFG_GraphvizDrawPass::Config(
FLAGS_IA_graphviz_log_root, FLAGS_IA_graphviz_log_root,
"data_flow_graph_to_fluid_graphviz_debugger")); "data_flow_graph_to_fluid_graphviz_debugger"));
......
...@@ -21,8 +21,8 @@ ...@@ -21,8 +21,8 @@
#include <string> #include <string>
#include "paddle/fluid/framework/program_desc.h" #include "paddle/fluid/framework/program_desc.h"
#include "paddle/fluid/inference/analysis/analysis_pass.h"
#include "paddle/fluid/inference/analysis/data_flow_graph.h" #include "paddle/fluid/inference/analysis/data_flow_graph.h"
#include "paddle/fluid/inference/analysis/pass.h"
namespace paddle { namespace paddle {
namespace inference { namespace inference {
...@@ -42,7 +42,7 @@ class DataFlowGraphToFluidPass final : public DataFlowGraphPass { ...@@ -42,7 +42,7 @@ class DataFlowGraphToFluidPass final : public DataFlowGraphPass {
return "Transform a DFG to a Fluid ProgramDesc"; return "Transform a DFG to a Fluid ProgramDesc";
} }
Pass *CreateGraphvizDebugerPass() const override; AnalysisPass *CreateGraphvizDebugerPass() const override;
protected: protected:
// Add a Fluid Op into the ProgramDesc. // Add a Fluid Op into the ProgramDesc.
......
...@@ -21,8 +21,8 @@ limitations under the License. */ ...@@ -21,8 +21,8 @@ limitations under the License. */
#include <fstream> #include <fstream>
#include <string> #include <string>
#include "paddle/fluid/inference/analysis/analysis_pass.h"
#include "paddle/fluid/inference/analysis/dot.h" #include "paddle/fluid/inference/analysis/dot.h"
#include "paddle/fluid/inference/analysis/pass.h"
namespace paddle { namespace paddle {
namespace inference { namespace inference {
......
...@@ -66,7 +66,7 @@ class DFG_DebuggerPass : public DFG_GraphvizDrawPass { ...@@ -66,7 +66,7 @@ class DFG_DebuggerPass : public DFG_GraphvizDrawPass {
}; };
} }
Pass *FluidToDataFlowGraphPass::CreateGraphvizDebugerPass() const { AnalysisPass *FluidToDataFlowGraphPass::CreateGraphvizDebugerPass() const {
return new DFG_DebuggerPass(DFG_GraphvizDrawPass::Config( return new DFG_DebuggerPass(DFG_GraphvizDrawPass::Config(
FLAGS_IA_graphviz_log_root, "fluid-to-dfg-debuger")); FLAGS_IA_graphviz_log_root, "fluid-to-dfg-debuger"));
} }
......
...@@ -22,8 +22,8 @@ ...@@ -22,8 +22,8 @@
#include <string> #include <string>
#include "paddle/fluid/framework/program_desc.h" #include "paddle/fluid/framework/program_desc.h"
#include "paddle/fluid/inference/analysis/analysis_pass.h"
#include "paddle/fluid/inference/analysis/data_flow_graph.h" #include "paddle/fluid/inference/analysis/data_flow_graph.h"
#include "paddle/fluid/inference/analysis/pass.h"
namespace paddle { namespace paddle {
namespace inference { namespace inference {
...@@ -46,7 +46,7 @@ class FluidToDataFlowGraphPass final : public DataFlowGraphPass { ...@@ -46,7 +46,7 @@ class FluidToDataFlowGraphPass final : public DataFlowGraphPass {
return "transform a fluid ProgramDesc to a data flow graph."; return "transform a fluid ProgramDesc to a data flow graph.";
} }
Pass *CreateGraphvizDebugerPass() const override; AnalysisPass *CreateGraphvizDebugerPass() const override;
private: private:
framework::proto::ProgramDesc const *desc_; framework::proto::ProgramDesc const *desc_;
......
...@@ -14,15 +14,17 @@ ...@@ -14,15 +14,17 @@
#pragma once #pragma once
#include <string>
#include <vector>
#include "paddle/fluid/framework/ir/fuse_pass_base.h" #include "paddle/fluid/framework/ir/fuse_pass_base.h"
#include "paddle/fluid/inference/analysis/analysis_pass.h"
#include "paddle/fluid/inference/analysis/flags.h" #include "paddle/fluid/inference/analysis/flags.h"
#include "paddle/fluid/inference/analysis/ir_pass_manager.h" #include "paddle/fluid/inference/analysis/ir_pass_manager.h"
#include "paddle/fluid/inference/analysis/pass.h"
namespace paddle { namespace paddle {
namespace inference { namespace inference {
namespace analysis { namespace analysis {
using namespace framework;
static const char kFluidToIrPassesAttr[] = "__fluid_to_ir_passes__"; static const char kFluidToIrPassesAttr[] = "__fluid_to_ir_passes__";
...@@ -48,7 +50,8 @@ class FluidToIrPass final : public DataFlowGraphPass { ...@@ -48,7 +50,8 @@ class FluidToIrPass final : public DataFlowGraphPass {
ANALYSIS_ARGUMENT_CHECK_FIELD(argument->fluid_model_program_path); ANALYSIS_ARGUMENT_CHECK_FIELD(argument->fluid_model_program_path);
// Load program. // Load program.
auto program = LoadProgramDesc(*argument->fluid_model_program_path); auto program = LoadProgramDesc(*argument->fluid_model_program_path);
argument->origin_program_desc.reset(new proto::ProgramDesc(program)); argument->origin_program_desc.reset(
new framework::proto::ProgramDesc(program));
// Create main data flow graph. // Create main data flow graph.
if (!argument->main_dfg) { if (!argument->main_dfg) {
argument->main_dfg.reset(new DataFlowGraph); argument->main_dfg.reset(new DataFlowGraph);
...@@ -78,12 +81,13 @@ class FluidToIrPass final : public DataFlowGraphPass { ...@@ -78,12 +81,13 @@ class FluidToIrPass final : public DataFlowGraphPass {
IRPassManager ir_passes(argument_->Get<ProgramDesc>("ir_program_desc"), IRPassManager ir_passes(argument_->Get<ProgramDesc>("ir_program_desc"),
nullptr); nullptr);
// Pass the scope from analysis to IR if needed. // Pass the scope from analysis to IR if needed.
if (argument_->Has(ir::kParamScopeAttr)) { if (argument_->Has(framework::ir::kParamScopeAttr)) {
// Here the address is passed, attention that IR doesn't own the scope, so // Here the address is passed, attention that IR doesn't own the scope, so
// the real scope in analysis should live during the IR phase. // the real scope in analysis should live during the IR phase.
ir_passes.graph().Set( ir_passes.graph().Set(
ir::kParamScopeAttr, framework::ir::kParamScopeAttr,
new Scope *(&argument_->Get<Scope>(ir::kParamScopeAttr))); new framework::Scope *(&argument_->Get<framework::Scope>(
framework::ir::kParamScopeAttr)));
} }
if (FLAGS_IA_enable_ir) { if (FLAGS_IA_enable_ir) {
...@@ -95,12 +99,12 @@ class FluidToIrPass final : public DataFlowGraphPass { ...@@ -95,12 +99,12 @@ class FluidToIrPass final : public DataFlowGraphPass {
PADDLE_ENFORCE(argument_->main_dfg.get()); PADDLE_ENFORCE(argument_->main_dfg.get());
argument_->main_dfg->Build(ir_passes.graph()); argument_->main_dfg->Build(ir_passes.graph());
// inherit the arguments from ir. // inherit the arguments from ir.
if (ir_passes.graph().Has(ir::kFuseStatisAttr)) { if (ir_passes.graph().Has(framework::ir::kFuseStatisAttr)) {
argument_->Set( argument_->Set(
ir::kFuseStatisAttr, framework::ir::kFuseStatisAttr,
new std::unordered_map<std::string, int>( new std::unordered_map<std::string, int>(
ir_passes.graph().Get<std::unordered_map<std::string, int>>( ir_passes.graph().Get<std::unordered_map<std::string, int>>(
ir::kFuseStatisAttr))); framework::ir::kFuseStatisAttr)));
} }
} }
...@@ -112,7 +116,7 @@ class FluidToIrPass final : public DataFlowGraphPass { ...@@ -112,7 +116,7 @@ class FluidToIrPass final : public DataFlowGraphPass {
private: private:
// Load parameters from a single file or from a directory. // Load parameters from a single file or from a directory.
bool LoadParams(Scope *scope, const std::string &dir, bool LoadParams(framework::Scope *scope, const std::string &dir,
const std::string &prog_file, const std::string &param_file); const std::string &prog_file, const std::string &param_file);
private: private:
......
...@@ -19,7 +19,7 @@ ...@@ -19,7 +19,7 @@
#pragma once #pragma once
#include <string> #include <string>
#include "paddle/fluid/inference/analysis/pass.h" #include "paddle/fluid/inference/analysis/analysis_pass.h"
namespace paddle { namespace paddle {
namespace inference { namespace inference {
......
...@@ -40,17 +40,6 @@ void DfgPassManager::RunAll() { ...@@ -40,17 +40,6 @@ void DfgPassManager::RunAll() {
} }
} }
void NodePassManager::RunAll() {
PADDLE_ENFORCE(argument_);
PADDLE_ENFORCE(argument_->main_dfg.get());
auto trait = GraphTraits<DataFlowGraph>(*argument_->main_dfg).nodes_in_DFS();
for (auto& node : trait) {
for (auto& pass : data_) {
pass->Run(&node);
}
}
}
} // namespace analysis } // namespace analysis
} // namespace inference } // namespace inference
} // namespace paddle } // namespace paddle
...@@ -33,7 +33,7 @@ limitations under the License. */ ...@@ -33,7 +33,7 @@ limitations under the License. */
#include <string> #include <string>
#include "paddle/fluid/framework/program_desc.h" #include "paddle/fluid/framework/program_desc.h"
#include "paddle/fluid/inference/analysis/pass.h" #include "paddle/fluid/inference/analysis/analysis_pass.h"
namespace paddle { namespace paddle {
namespace inference { namespace inference {
...@@ -43,7 +43,7 @@ namespace analysis { ...@@ -43,7 +43,7 @@ namespace analysis {
* PassManager is the base class for all pass managers, a pass manager has * PassManager is the base class for all pass managers, a pass manager has
* several Pass-es registered, and execute them in the linear order. * several Pass-es registered, and execute them in the linear order.
*/ */
class PassManager : public OrderedRegistry<Pass> { class PassManager : public OrderedRegistry<AnalysisPass> {
public: public:
PassManager() = default; PassManager() = default;
// Call all the passes' Initialize methods. The desc and data_flow_graph are // Call all the passes' Initialize methods. The desc and data_flow_graph are
...@@ -89,18 +89,6 @@ class DfgPassManager : public PassManager { ...@@ -89,18 +89,6 @@ class DfgPassManager : public PassManager {
virtual ~DfgPassManager() = default; virtual ~DfgPassManager() = default;
}; };
/*
* A pass manager that process a Node each time.
*/
class NodePassManager : public PassManager {
public:
NodePassManager() = default;
void RunAll() override;
virtual ~NodePassManager() = default;
};
} // namespace analysis } // namespace analysis
} // namespace inference } // namespace inference
} // namespace paddle } // namespace paddle
...@@ -34,28 +34,6 @@ class TestDfgPassManager final : public DfgPassManager { ...@@ -34,28 +34,6 @@ class TestDfgPassManager final : public DfgPassManager {
std::string description() const override { return "test doc"; } std::string description() const override { return "test doc"; }
}; };
class TestNodePassManager final : public NodePassManager {
public:
virtual ~TestNodePassManager() = default;
std::string repr() const override { return "test-node-pass-manager"; }
std::string description() const override { return "test doc"; }
};
class TestNodePass final : public NodePass {
public:
virtual ~TestNodePass() = default;
bool Initialize(Argument* argument) override { return true; }
void Run(Node* node) override {
LOG(INFO) << "- Processing node " << node->repr();
}
std::string repr() const override { return "test-node"; }
std::string description() const override { return "some doc"; }
};
TEST(PassManager, DFG_pass_manager) { TEST(PassManager, DFG_pass_manager) {
TestDfgPassManager manager; TestDfgPassManager manager;
DFG_GraphvizDrawPass::Config config("./", "dfg.dot"); DFG_GraphvizDrawPass::Config config("./", "dfg.dot");
...@@ -71,19 +49,6 @@ TEST(PassManager, DFG_pass_manager) { ...@@ -71,19 +49,6 @@ TEST(PassManager, DFG_pass_manager) {
manager.RunAll(); manager.RunAll();
} }
TEST(PassManager, Node_pass_manager) {
Argument argument(FLAGS_inference_model_dir);
// Pre-process: initialize the DFG with the ProgramDesc first.
FluidToDataFlowGraphPass pass0;
pass0.Initialize(&argument);
pass0.Run(argument.main_dfg.get());
TestNodePassManager manager;
manager.Register("test-node-pass", new TestNodePass);
ASSERT_TRUE(manager.Initialize(&argument));
manager.RunAll();
}
} // namespace analysis } // namespace analysis
} // namespace inference } // namespace inference
} // namespace paddle } // namespace paddle
...@@ -68,7 +68,7 @@ class DfgDebuggerPass : public DFG_GraphvizDrawPass { ...@@ -68,7 +68,7 @@ class DfgDebuggerPass : public DFG_GraphvizDrawPass {
} }
}; };
Pass *TensorRTSubgraphNodeMarkPass::CreateGraphvizDebugerPass() const { AnalysisPass *TensorRTSubgraphNodeMarkPass::CreateGraphvizDebugerPass() const {
DFG_GraphvizDrawPass::Config config(FLAGS_IA_graphviz_log_root, DFG_GraphvizDrawPass::Config config(FLAGS_IA_graphviz_log_root,
"tensorrt_marked_node"); "tensorrt_marked_node");
return new DfgDebuggerPass(config); return new DfgDebuggerPass(config);
......
...@@ -20,7 +20,7 @@ ...@@ -20,7 +20,7 @@
#pragma once #pragma once
#include <string> #include <string>
#include "paddle/fluid/inference/analysis/pass.h" #include "paddle/fluid/inference/analysis/analysis_pass.h"
#include "paddle/fluid/inference/analysis/subgraph_splitter.h" #include "paddle/fluid/inference/analysis/subgraph_splitter.h"
namespace paddle { namespace paddle {
...@@ -48,7 +48,7 @@ class TensorRTSubgraphNodeMarkPass : public DataFlowGraphPass { ...@@ -48,7 +48,7 @@ class TensorRTSubgraphNodeMarkPass : public DataFlowGraphPass {
return "tensorrt sub-graph mark pass"; return "tensorrt sub-graph mark pass";
} }
Pass* CreateGraphvizDebugerPass() const override; AnalysisPass* CreateGraphvizDebugerPass() const override;
bool Finalize() override; bool Finalize() override;
private: private:
......
...@@ -15,8 +15,8 @@ limitations under the License. */ ...@@ -15,8 +15,8 @@ limitations under the License. */
#pragma once #pragma once
#include <string> #include <string>
#include "paddle/fluid/inference/analysis/analysis_pass.h"
#include "paddle/fluid/inference/analysis/node.h" #include "paddle/fluid/inference/analysis/node.h"
#include "paddle/fluid/inference/analysis/pass.h"
#include "paddle/fluid/inference/analysis/subgraph_splitter.h" #include "paddle/fluid/inference/analysis/subgraph_splitter.h"
namespace paddle { namespace paddle {
......
...@@ -44,19 +44,7 @@ function(inference_api_test TARGET_NAME) ...@@ -44,19 +44,7 @@ function(inference_api_test TARGET_NAME)
endfunction(inference_api_test) endfunction(inference_api_test)
cc_library(paddle_inference_api SRCS api.cc api_impl.cc helper.cc DEPS lod_tensor) cc_library(paddle_inference_api SRCS api.cc api_impl.cc helper.cc DEPS lod_tensor)
cc_library(analysis_predictor SRCS analysis_predictor.cc DEPS paddle_inference_api cc_library(analysis_predictor SRCS analysis_predictor.cc DEPS paddle_inference_api analysis)
analysis
ir_pass_manager
pass
fc_fuse_pass
fc_lstm_fuse_pass
seq_concat_fc_fuse_pass
graph_viz_pass
infer_clean_graph_pass
graph_pattern_detector
infer_clean_graph_pass
attention_lstm_fuse_pass
)
cc_test(test_paddle_inference_api cc_test(test_paddle_inference_api
SRCS api_tester.cc SRCS api_tester.cc
......
...@@ -30,14 +30,7 @@ void FusionGRUOp::InferShape(framework::InferShapeContext* ctx) const { ...@@ -30,14 +30,7 @@ void FusionGRUOp::InferShape(framework::InferShapeContext* ctx) const {
"Input(WeightX) of GRU should not be null."); "Input(WeightX) of GRU should not be null.");
PADDLE_ENFORCE(ctx->HasInput("WeightH"), PADDLE_ENFORCE(ctx->HasInput("WeightH"),
"Input(WeightH) of GRU should not be null."); "Input(WeightH) of GRU should not be null.");
PADDLE_ENFORCE(ctx->HasOutput("XX"), "Output(XX) of GRU should not be null."); PADDLE_ENFORCE(ctx->HasOutput("XX"), "Output(XX) of GRU should not be null.");
PADDLE_ENFORCE(ctx->HasOutput("ReorderedH0"),
"Output(ReorderedH0) of GRU should not be null.");
PADDLE_ENFORCE(ctx->HasOutput("BatchedInput"),
"Output(BatchedInput) of GRU should not be null.");
PADDLE_ENFORCE(ctx->HasOutput("BatchedOut"),
"Output(BatchedOut) of GRU should not be null.");
PADDLE_ENFORCE(ctx->HasOutput("Hidden"), PADDLE_ENFORCE(ctx->HasOutput("Hidden"),
"Output(Hidden) of GRU should not be null."); "Output(Hidden) of GRU should not be null.");
...@@ -80,15 +73,20 @@ void FusionGRUOp::InferShape(framework::InferShapeContext* ctx) const { ...@@ -80,15 +73,20 @@ void FusionGRUOp::InferShape(framework::InferShapeContext* ctx) const {
} }
framework::DDim out_dims({x_dims[0], frame_size}); framework::DDim out_dims({x_dims[0], frame_size});
ctx->SetOutputDim("Hidden", out_dims); ctx->SetOutputDim("Hidden", out_dims);
ctx->SetOutputDim("BatchedInput", {x_dims[0], wx_dims[1]});
ctx->SetOutputDim("BatchedOut", out_dims);
ctx->ShareLoD("X", "Hidden"); ctx->ShareLoD("X", "Hidden");
int xx_width; int xx_width;
if (ctx->Attrs().Get<bool>("use_seq")) { if (ctx->Attrs().Get<bool>("use_seq")) {
xx_width = wx_dims[1]; xx_width = wx_dims[1];
} else { } else {
xx_width = x_dims[1] > wx_dims[1] ? wx_dims[1] : x_dims[1]; xx_width = x_dims[1] > wx_dims[1] ? wx_dims[1] : x_dims[1];
PADDLE_ENFORCE(ctx->HasOutput("ReorderedH0"),
"Output(ReorderedH0) of GRU should not be null.");
PADDLE_ENFORCE(ctx->HasOutput("BatchedInput"),
"Output(BatchedInput) of GRU should not be null.");
PADDLE_ENFORCE(ctx->HasOutput("BatchedOut"),
"Output(BatchedOut) of GRU should not be null.");
ctx->SetOutputDim("BatchedInput", {x_dims[0], wx_dims[1]});
ctx->SetOutputDim("BatchedOut", out_dims);
} }
ctx->SetOutputDim("XX", {x_dims[0], xx_width}); ctx->SetOutputDim("XX", {x_dims[0], xx_width});
ctx->ShareLoD("X", "XX"); ctx->ShareLoD("X", "XX");
......
...@@ -38,16 +38,6 @@ void FusionLSTMOp::InferShape(framework::InferShapeContext* ctx) const { ...@@ -38,16 +38,6 @@ void FusionLSTMOp::InferShape(framework::InferShapeContext* ctx) const {
"Output(Hidden) of LSTM should not be null."); "Output(Hidden) of LSTM should not be null.");
PADDLE_ENFORCE(ctx->HasOutput("Cell"), PADDLE_ENFORCE(ctx->HasOutput("Cell"),
"Output(Cell) of LSTM should not be null."); "Output(Cell) of LSTM should not be null.");
PADDLE_ENFORCE(ctx->HasOutput("BatchedInput"),
"Output(BatchedInput) of LSTM should not be null.");
PADDLE_ENFORCE(ctx->HasOutput("BatchedHidden"),
"Output(BatchedHidden) of LSTM should not be null.");
PADDLE_ENFORCE(ctx->HasOutput("BatchedCell"),
"Output(BatchedCell) of LSTM should not be null.");
PADDLE_ENFORCE(ctx->HasOutput("ReorderedH0"),
"Output(ReorderedH0) of LSTM should not be null.");
PADDLE_ENFORCE(ctx->HasOutput("ReorderedC0"),
"Output(ReorderedC0) of LSTM should not be null.");
auto x_dims = ctx->GetInputDim("X"); auto x_dims = ctx->GetInputDim("X");
PADDLE_ENFORCE_EQ(x_dims.size(), 2, "Input(X)'s rank must be 2."); PADDLE_ENFORCE_EQ(x_dims.size(), 2, "Input(X)'s rank must be 2.");
...@@ -88,9 +78,8 @@ void FusionLSTMOp::InferShape(framework::InferShapeContext* ctx) const { ...@@ -88,9 +78,8 @@ void FusionLSTMOp::InferShape(framework::InferShapeContext* ctx) const {
PADDLE_ENFORCE_EQ(b_dims.size(), 2, "The rank of Input(Bias) should be 2."); PADDLE_ENFORCE_EQ(b_dims.size(), 2, "The rank of Input(Bias) should be 2.");
PADDLE_ENFORCE_EQ(b_dims[0], 1, PADDLE_ENFORCE_EQ(b_dims[0], 1,
"The first dimension of Input(Bias) should be 1."); "The first dimension of Input(Bias) should be 1.");
PADDLE_ENFORCE_EQ(
auto use_peepholes = ctx->Attrs().Get<bool>("use_peepholes"); b_dims[1], (ctx->Attrs().Get<bool>("use_peepholes") ? 7 : 4) * frame_size,
PADDLE_ENFORCE_EQ(b_dims[1], (use_peepholes ? 7 : 4) * frame_size,
"The second dimension of Input(Bias) should be " "The second dimension of Input(Bias) should be "
"7 * %d if enable peepholes connection or" "7 * %d if enable peepholes connection or"
"4 * %d if disable peepholes", "4 * %d if disable peepholes",
...@@ -99,17 +88,26 @@ void FusionLSTMOp::InferShape(framework::InferShapeContext* ctx) const { ...@@ -99,17 +88,26 @@ void FusionLSTMOp::InferShape(framework::InferShapeContext* ctx) const {
framework::DDim out_dims({x_dims[0], frame_size}); framework::DDim out_dims({x_dims[0], frame_size});
ctx->SetOutputDim("Hidden", out_dims); ctx->SetOutputDim("Hidden", out_dims);
ctx->SetOutputDim("Cell", out_dims); ctx->SetOutputDim("Cell", out_dims);
ctx->SetOutputDim("BatchedInput", {x_dims[0], wx_dims[1]});
ctx->SetOutputDim("BatchedHidden", out_dims);
ctx->SetOutputDim("BatchedCell", out_dims);
ctx->ShareLoD("X", "Hidden"); ctx->ShareLoD("X", "Hidden");
ctx->ShareLoD("X", "Cell"); ctx->ShareLoD("X", "Cell");
int xx_width; int xx_width;
if (ctx->Attrs().Get<bool>("use_seq")) { if (ctx->Attrs().Get<bool>("use_seq")) {
xx_width = wx_dims[1]; xx_width = wx_dims[1];
} else { } else {
xx_width = x_dims[1] > wx_dims[1] ? wx_dims[1] : x_dims[1]; xx_width = x_dims[1] > wx_dims[1] ? wx_dims[1] : x_dims[1];
PADDLE_ENFORCE(ctx->HasOutput("BatchedInput"),
"Output(BatchedInput) of LSTM should not be null.");
PADDLE_ENFORCE(ctx->HasOutput("BatchedHidden"),
"Output(BatchedHidden) of LSTM should not be null.");
PADDLE_ENFORCE(ctx->HasOutput("BatchedCell"),
"Output(BatchedCell) of LSTM should not be null.");
PADDLE_ENFORCE(ctx->HasOutput("ReorderedH0"),
"Output(ReorderedH0) of LSTM should not be null.");
PADDLE_ENFORCE(ctx->HasOutput("ReorderedC0"),
"Output(ReorderedC0) of LSTM should not be null.");
ctx->SetOutputDim("BatchedInput", {x_dims[0], wx_dims[1]});
ctx->SetOutputDim("BatchedHidden", out_dims);
ctx->SetOutputDim("BatchedCell", out_dims);
} }
ctx->SetOutputDim("XX", {x_dims[0], xx_width}); ctx->SetOutputDim("XX", {x_dims[0], xx_width});
ctx->ShareLoD("X", "XX"); ctx->ShareLoD("X", "XX");
...@@ -242,8 +240,8 @@ class FuisonLSTMKernel : public framework::OpKernel<T> { ...@@ -242,8 +240,8 @@ class FuisonLSTMKernel : public framework::OpKernel<T> {
auto* xx = ctx.Output<LoDTensor>("XX"); \ auto* xx = ctx.Output<LoDTensor>("XX"); \
auto* hidden_out = ctx.Output<LoDTensor>("Hidden"); \ auto* hidden_out = ctx.Output<LoDTensor>("Hidden"); \
auto* cell_out = ctx.Output<LoDTensor>("Cell"); \ auto* cell_out = ctx.Output<LoDTensor>("Cell"); \
bool use_peepholes = ctx.Attr<bool>("use_peepholes"); \ bool is_reverse = ctx.Attr<bool>("is_reverse"); \
bool is_reverse = ctx.Attr<bool>("is_reverse"); bool use_peepholes = ctx.Attr<bool>("use_peepholes");
#define INIT_BASE_SIZES \ #define INIT_BASE_SIZES \
auto x_dims = x->dims(); /* T x M*/ \ auto x_dims = x->dims(); /* T x M*/ \
...@@ -254,172 +252,183 @@ class FuisonLSTMKernel : public framework::OpKernel<T> { ...@@ -254,172 +252,183 @@ class FuisonLSTMKernel : public framework::OpKernel<T> {
const int D3 = D * 3; \ const int D3 = D * 3; \
const int D4 = wh_dims[1]; const int D4 = wh_dims[1];
#define INIT_BASE_INPUT_DATAS \
const T* x_data = x->data<T>(); \
const T* wx_data = wx->data<T>(); \
const T* wh_data = wh->data<T>(); \
/* diagonal weight*/ \
const T* wc_data = bias->data<T>() + D4; \
/* for peephole only*/ \
Tensor checked_cell; \
T* checked_cell_data = nullptr; \
auto place = ctx.GetPlace(); \
if (use_peepholes) { \
/* w_ic * Ct-1, w_fc * Ct-1 ; w_oc * Ct => ih*/ \
checked_cell_data = checked_cell.mutable_data<T>({2, D}, place); \
}
/// Compute LSTM
#define GEMM_WH_ADDON(bs, prev, out) \
blas.GEMM(CblasNoTrans, CblasNoTrans, bs, D4, D, static_cast<T>(1), prev, D, \
wh_data, D4, static_cast<T>(1), out, D4)
// gates: W_ch, W_ih, W_fh, W_oh
#define GET_Ct(ct_1, gates, ct) \
/* C_t = C_t-1 * fgated + cand_gated * igated*/ \
act_cand(D, gates, gates); \
blas.VMUL(D, gates, gates + D, gates + D); \
blas.VMUL(D, ct_1, gates + D2, gates + D2); \
blas.VADD(D, gates + D, gates + D2, ct)
#define GET_Ht(ct, gates, ht) \
/* H_t = act_cell(C_t) * ogated */ \
act_cell(D, ct, gates + D2); \
blas.VMUL(D, gates + D2, gates + D3, ht)
#define GET_Ct_NOH0C0(gates, ct) \
/* C_t = igated * cgated*/ \
act_gate(D, gates + D, gates + D); \
act_cand(D, gates, gates); \
blas.VMUL(D, gates, gates + D, ct)
#define COMPUTE_CtHt_NOH0C0(gates, ct, ht) \
GET_Ct_NOH0C0(gates, ct); \
act_gate(D, gates + D3, gates + D3); \
GET_Ht(ct, gates, ht)
#define COMPUTE_CtHt_PEEPHOLE_NOH0C0(gates, ct, ht) \
GET_Ct_NOH0C0(gates, ct); \
/* get outgated, put W_oc * C_t on igated */ \
blas.VMUL(D, wc_data + D2, ct, gates + D); \
blas.VADD(D, gates + D, gates + D3, gates + D3); \
act_gate(D, gates + D3, gates + D3); \
GET_Ht(ct, gates, ht)
#define COMPUTE_CtHt(gates, ct_1, ct, ht) \
act_gate(D3, gates + D, gates + D); \
GET_Ct(ct_1, gates, ct); \
GET_Ht(ct, gates, ht)
#define COMPUTE_CtHt_PEEPHOLE(gates, ct_1, ct, ht) \
/* get fgated and igated*/ \
blas.VMUL(D, wc_data, ct_1, checked_cell_data); \
blas.VMUL(D, wc_data + D, ct_1, checked_cell_data + D); \
blas.VADD(D2, checked_cell_data, gates + D, gates + D); \
act_gate(D2, gates + D, gates + D); \
GET_Ct(ct_1, gates, ct); \
/* get ogated*/ \
blas.VMUL(D, wc_data + D2, ct, gates + D); \
blas.VADD(D, gates + D, gates + D3, gates + D3); \
act_gate(D, gates + D3, gates + D3); \
GET_Ht(ct, gates, ht)
void SeqCompute(const framework::ExecutionContext& ctx) const { void SeqCompute(const framework::ExecutionContext& ctx) const {
using DeviceContext = paddle::platform::CPUDeviceContext; using DeviceContext = paddle::platform::CPUDeviceContext;
INIT_BASE_INPUT_OUTPUT INIT_BASE_INPUT_OUTPUT
INIT_BASE_SIZES INIT_BASE_SIZES
INIT_VEC_FUNC INIT_VEC_FUNC
INIT_BASE_INPUT_DATAS
auto x_lod = x->lod(); auto x_lod = x->lod();
const int total_T = x_dims[0]; const int total_T = x_dims[0];
const int N = x_lod[0].size() - 1; // batch size const int N = x_lod[0].size() - 1;
const T* x_data = x->data<T>();
const T* h0_data = h0 ? h0->data<T>() : nullptr; const T* h0_data = h0 ? h0->data<T>() : nullptr;
const T* c0_data = c0 ? c0->data<T>() : nullptr; const T* c0_data = c0 ? c0->data<T>() : nullptr;
const T* bias_data = bias->data<T>(); T* xx_data = xx->mutable_data<T>(place);
const T* wc_data = bias_data + D4; // w_ic, w_fc, w_oc T* h_out_data = hidden_out->mutable_data<T>(place);
const T* wx_data = wx->data<T>(); T* c_out_data = cell_out->mutable_data<T>(place);
const T* wh_data = wh->data<T>();
T* xx_data = xx->mutable_data<T>(ctx.GetPlace());
T* hidden_out_data = hidden_out->mutable_data<T>(ctx.GetPlace());
T* cell_out_data = cell_out->mutable_data<T>(ctx.GetPlace());
// use local variable
framework::DDim check_dims({3, D});
Tensor checked_cell; // w_ic * Ct-1, w_fc * Ct-1, w_oc * Ct
auto checked_cell_data =
checked_cell.mutable_data<T>(check_dims, ctx.GetPlace());
auto blas = math::GetBlas<DeviceContext, T>(ctx); auto blas = math::GetBlas<DeviceContext, T>(ctx);
math::FCCompute<DeviceContext, T>(blas, total_T, D4, M, x_data, wx_data, math::FCCompute<DeviceContext, T>(blas, total_T, D4, M, x_data, wx_data,
xx_data, bias->data<T>()); xx_data, bias->data<T>());
int xx_offset = D4; int xx_offset = D4;
int gate_offset = D; int gate_offset = D;
if (is_reverse) { if (is_reverse) {
const int offset = (total_T - 1) * D; const int offset = (total_T - 1) * D;
xx_data = xx_data + offset * 4; xx_data = xx_data + offset * 4;
hidden_out_data = hidden_out_data + offset; h_out_data = h_out_data + offset;
cell_out_data = cell_out_data + offset; c_out_data = c_out_data + offset;
xx_offset = -D4; xx_offset = -D4;
gate_offset = -D; gate_offset = -D;
} }
auto move_step = [&]() { #define MOVE_ONE_STEP \
xx_data = xx_data + xx_offset; prev_h_data = h_out_data; \
hidden_out_data = hidden_out_data + gate_offset; prev_c_data = c_out_data; \
cell_out_data = cell_out_data + gate_offset; xx_data = xx_data + xx_offset; \
}; h_out_data = h_out_data + gate_offset; \
c_out_data = c_out_data + gate_offset
for (int i = 0; i < N; ++i) {
int bid = is_reverse ? N - 1 - i : i; #define PROCESS_H0C0_DEFINES \
int seq_len = x_lod[0][bid + 1] - x_lod[0][bid]; int bid = is_reverse ? N - 1 - i : i; \
const T* prev_c_data = nullptr; int seq_len = x_lod[0][bid + 1] - x_lod[0][bid]; \
const T* prev_h_data = nullptr; const T* prev_c_data = nullptr; \
const T* prev_h_data = nullptr; \
int tstart = 0; int tstart = 0
if (h0_data) {
prev_h_data = h0_data + bid * D; #define PROCESS_H0C0_PEEPHOLE \
prev_c_data = c0_data + bid * D; PROCESS_H0C0_DEFINES; \
} else { if (h0_data) { \
// If step == 0 and there is no initialized hidden state, that is to say prev_h_data = h0_data + bid * D; \
// the H0 is zeros. Then W_h * H_t-1 can be skipped prev_c_data = c0_data + bid * D; \
} else { \
COMPUTE_CtHt_PEEPHOLE_NOH0C0(xx_data, c_out_data, h_out_data); \
MOVE_ONE_STEP; \
tstart = 1; \
}
// ~C_t #define PROCESS_H0C0 \
act_cand(D, xx_data, xx_data); PROCESS_H0C0_DEFINES; \
if (use_peepholes) { if (h0_data) { \
// I_t, F_t prev_h_data = h0_data + bid * D; \
act_gate(D2, xx_data + D, xx_data + D); prev_c_data = c0_data + bid * D; \
} else { } else { \
// I_t, F_t, O_t COMPUTE_CtHt_NOH0C0(xx_data, c_out_data, h_out_data); \
act_gate(D3, xx_data + D, xx_data + D); MOVE_ONE_STEP; \
tstart = 1; \
} }
// C_t = I_t * ~C_t
blas.VMUL(D, xx_data, xx_data + D, cell_out_data);
if (use_peepholes) { if (use_peepholes) {
// + W_oc * C_t for peephole connection for (int i = 0; i < N; ++i) {
blas.VMUL(D, wc_data + D2, cell_out_data, checked_cell_data + D2); PROCESS_H0C0_PEEPHOLE
blas.VADD(D, xx_data + D3, checked_cell_data + D2, xx_data + D3); for (int step = tstart; step < seq_len; ++step) {
// O_t GEMM_WH_ADDON(1, prev_h_data, xx_data);
act_gate(D, xx_data + D3, xx_data + D3); COMPUTE_CtHt_PEEPHOLE(xx_data, prev_c_data, c_out_data, h_out_data);
MOVE_ONE_STEP;
} }
// hidden out= act_state(cellout) * outgate
act_cell(D, cell_out_data, xx_data + D2);
// H_t = O_t * act_state(C_t)
blas.VMUL(D, xx_data + D2, xx_data + D3, hidden_out_data);
// prev
prev_h_data = hidden_out_data;
prev_c_data = cell_out_data;
tstart = 1;
move_step();
} }
for (int step = tstart; step < seq_len; ++step) {
// + W_h * H_t-1
blas.GEMM(CblasNoTrans, CblasNoTrans, 1, D4, D, static_cast<T>(1),
prev_h_data, D, wh_data, D4, static_cast<T>(1), xx_data, D4);
// ~C_t
act_cand(D, xx_data, xx_data);
if (use_peepholes) {
// + W_ic|W_fc * C_t-1 for peephole connection
blas.VMUL(D, wc_data, prev_c_data, checked_cell_data);
blas.VMUL(D, wc_data + D, prev_c_data, checked_cell_data + D);
blas.VADD(D2, xx_data + D, checked_cell_data, xx_data + D);
// I_t, F_t
act_gate(D2, xx_data + D, xx_data + D);
} else { } else {
// I_t, F_t, O_t for (int i = 0; i < N; ++i) {
act_gate(D3, xx_data + D, xx_data + D); PROCESS_H0C0
for (int step = tstart; step < seq_len; ++step) {
GEMM_WH_ADDON(1, prev_h_data, xx_data);
COMPUTE_CtHt(xx_data, prev_c_data, c_out_data, h_out_data);
MOVE_ONE_STEP;
} }
// F_t * C_t-1
blas.VMUL(D, xx_data + D2, prev_c_data, xx_data + D2);
// I_t * ~C_t
blas.VMUL(D, xx_data, xx_data + D, xx_data + D);
// C_t = F_t * C_t-1 + I_t * ~C_t
blas.VADD(D, xx_data + D, xx_data + D2, cell_out_data);
if (use_peepholes) {
// + W_oc * C_t for peephole connection
blas.VMUL(D, wc_data + D2, cell_out_data, checked_cell_data + D2);
blas.VADD(D, xx_data + D3, checked_cell_data + D2, xx_data + D3);
// O_t
act_gate(D, xx_data + D3, xx_data + D3);
} }
}
// hidden out= act_state(cellout) * outgate #undef PROCESS_H0C0_DEFINES
act_cell(D, cell_out_data, xx_data + D2); #undef PROCESS_H0C0_PEEPHOLE
// H_t = O_t * act_state(C_t) #undef PROCESS_H0C0
blas.VMUL(D, xx_data + D2, xx_data + D3, hidden_out_data); #undef MOVE_ONE_STEP
// prev
prev_h_data = hidden_out_data;
prev_c_data = cell_out_data;
move_step();
} // for each step in batch
} // for each batch
} }
void BatchCompute(const framework::ExecutionContext& ctx) const { void BatchCompute(const framework::ExecutionContext& ctx) const {
using DeviceContext = platform::CPUDeviceContext; using DeviceContext = platform::CPUDeviceContext;
INIT_BASE_INPUT_OUTPUT INIT_BASE_INPUT_OUTPUT
if (x->lod()[0].size() == 2) { // batch size == 1 if (x->lod()[0].size() == 2) {
SeqCompute(ctx); SeqCompute(ctx);
return; return;
} }
INIT_BASE_SIZES INIT_BASE_SIZES
INIT_VEC_FUNC INIT_VEC_FUNC
INIT_BASE_INPUT_DATAS
auto* reordered_h0 = ctx.Output<Tensor>("ReorderedH0"); auto* reordered_h0 = ctx.Output<Tensor>("ReorderedH0");
auto* reordered_c0 = ctx.Output<Tensor>("ReorderedC0"); auto* reordered_c0 = ctx.Output<Tensor>("ReorderedC0");
auto* batched_input = ctx.Output<LoDTensor>("BatchedInput"); auto* batched_input = ctx.Output<LoDTensor>("BatchedInput");
auto* batched_c_out = ctx.Output<LoDTensor>("BatchedCell"); auto* batched_c_out = ctx.Output<LoDTensor>("BatchedCell");
auto* batched_h_out = ctx.Output<LoDTensor>("BatchedHidden"); auto* batched_h_out = ctx.Output<LoDTensor>("BatchedHidden");
const T* x_data = x->data<T>();
const T* wx_data = wx->data<T>();
const T* wh_data = wh->data<T>();
const T* bias_data = bias->data<T>();
const T* wc_data = bias_data + D4; // w_ic, w_fc, w_oc
auto place = ctx.GetPlace();
T* xx_data = xx->mutable_data<T>(place); T* xx_data = xx->mutable_data<T>(place);
T* batched_input_data = batched_input->mutable_data<T>(place); T* batched_input_data = batched_input->mutable_data<T>(place);
T* batched_c_out_data = batched_c_out->mutable_data<T>(place); T* batched_c_out_data = batched_c_out->mutable_data<T>(place);
...@@ -427,12 +436,6 @@ class FuisonLSTMKernel : public framework::OpKernel<T> { ...@@ -427,12 +436,6 @@ class FuisonLSTMKernel : public framework::OpKernel<T> {
hidden_out->mutable_data<T>(place); hidden_out->mutable_data<T>(place);
cell_out->mutable_data<T>(place); cell_out->mutable_data<T>(place);
// use local variable
framework::DDim check_dims({3, D});
Tensor checked_cell; // w_ic * Ct-1, w_fc * Ct-1, w_oc * Ct
auto checked_cell_data =
checked_cell.mutable_data<T>(check_dims, ctx.GetPlace());
math::LoDTensor2BatchFunctor<DeviceContext, T> to_batch; math::LoDTensor2BatchFunctor<DeviceContext, T> to_batch;
auto& dev_ctx = ctx.template device_context<DeviceContext>(); auto& dev_ctx = ctx.template device_context<DeviceContext>();
auto blas = math::GetBlas<DeviceContext, T>(dev_ctx); auto blas = math::GetBlas<DeviceContext, T>(dev_ctx);
...@@ -454,27 +457,17 @@ class FuisonLSTMKernel : public framework::OpKernel<T> { ...@@ -454,27 +457,17 @@ class FuisonLSTMKernel : public framework::OpKernel<T> {
reordered_h0->Resize({max_bs, D}); reordered_h0->Resize({max_bs, D});
reordered_c0->Resize({max_bs, D}); reordered_c0->Resize({max_bs, D});
T* prev_batch_h_data = nullptr;
T* prev_batch_c_data = nullptr;
T* cur_batch_in_data = batched_input_data;
T* cur_batch_h_out_data = batched_h_out_data;
T* cur_batch_c_out_data = batched_c_out_data;
auto move_step = [&](int bs) {
cur_batch_in_data += bs * D4;
cur_batch_c_out_data += bs * D;
cur_batch_h_out_data += bs * D;
};
int tstart = 0; int tstart = 0;
T* prev_h_data = nullptr;
T* prev_c_data = nullptr;
if (h0) { if (h0) {
// reorder h0, c0 // reorder h0, c0
T* reordered_h0_data = reordered_h0->mutable_data<T>(place); T* reordered_h0_data = reordered_h0->mutable_data<T>(place);
T* reordered_c0_data = reordered_c0->mutable_data<T>(place); T* reordered_c0_data = reordered_c0->mutable_data<T>(place);
const T* h0_data = h0->data<T>(); const T* h0_data = h0->data<T>();
const T* c0_data = c0->data<T>(); const T* c0_data = c0->data<T>();
prev_batch_h_data = reordered_h0_data; prev_h_data = reordered_h0_data;
prev_batch_c_data = reordered_c0_data; prev_c_data = reordered_c0_data;
size_t sz = sizeof(T) * D; size_t sz = sizeof(T) * D;
for (int i = 0; i < max_bs; ++i) { for (int i = 0; i < max_bs; ++i) {
std::memcpy(reordered_h0_data, h0_data + seq_order[i] * D, sz); std::memcpy(reordered_h0_data, h0_data + seq_order[i] * D, sz);
...@@ -483,123 +476,80 @@ class FuisonLSTMKernel : public framework::OpKernel<T> { ...@@ -483,123 +476,80 @@ class FuisonLSTMKernel : public framework::OpKernel<T> {
reordered_c0_data += D; reordered_c0_data += D;
} }
} else { } else {
// Compute with no H0/C0 // compute without h0, c0
T* cur_in_data = cur_batch_in_data; T* cur_in_data = batched_input_data;
T* cur_c_out_data = cur_batch_c_out_data; T* cur_h_out_data = batched_h_out_data;
T* cur_h_out_data = cur_batch_h_out_data; T* cur_c_out_data = batched_c_out_data;
for (int i = 0; i < max_bs; ++i) {
// If step == 0 and there is no initialized hidden state, that is to say GET_Ct_NOH0C0(cur_in_data, cur_c_out_data);
// the H0 is zeros. Then W_h * H_t-1 can be skiped
for (int i = 0; i < max_bs; ++i) { // iterate each data in 1st batch
// ~C_t
act_cand(D, cur_in_data, cur_in_data);
if (use_peepholes) { if (use_peepholes) {
// I_t, F_t blas.VMUL(D, wc_data + D2, cur_c_out_data, cur_in_data + D);
act_gate(D2, cur_in_data + D, cur_in_data + D); blas.VADD(D, cur_in_data + D, cur_in_data + D3, cur_in_data + D3);
} else {
// I_t, F_t, O_t
act_gate(D3, cur_in_data + D, cur_in_data + D);
} }
// C_t = I_t * ~C_t
blas.VMUL(D, cur_in_data, cur_in_data + D, cur_c_out_data);
if (use_peepholes) {
// + W_oc * C_t for peephole connection
blas.VMUL(D, wc_data + D2, cur_c_out_data, checked_cell_data + D2);
blas.VADD(D, cur_in_data + D3, checked_cell_data + D2,
cur_in_data + D3);
// O_t
act_gate(D, cur_in_data + D3, cur_in_data + D3); act_gate(D, cur_in_data + D3, cur_in_data + D3);
} GET_Ht(cur_c_out_data, cur_in_data, cur_h_out_data);
// hidden out= act_state(cellout) * outgate
act_cell(D, cur_c_out_data, cur_in_data + D2);
// H_t = O_t * act_state(C_t)
blas.VMUL(D, cur_in_data + D2, cur_in_data + D3, cur_h_out_data);
// move to next data in the same batch
cur_in_data += D4; cur_in_data += D4;
cur_c_out_data += D; cur_c_out_data += D;
cur_h_out_data += D; cur_h_out_data += D;
} }
// move to data for next timestep
prev_batch_h_data = cur_batch_h_out_data;
prev_batch_c_data = cur_batch_c_out_data;
move_step(max_bs);
tstart = 1; tstart = 1;
prev_h_data = batched_h_out_data;
prev_c_data = batched_c_out_data;
} }
const auto& batch_starts = batched_lod[0]; const auto& batch_starts = batched_lod[0];
const int max_seq_len = batch_starts.size() - 1; const int max_seq_len = batch_starts.size() - 1;
for (int step = tstart; step < max_seq_len; ++step) { const int offset = tstart * max_bs * D;
const int cur_bs = batch_starts[step + 1] - batch_starts[step]; batched_input_data = batched_input_data + offset * 4;
// + W_h * H_t-1 batched_h_out_data = batched_h_out_data + offset;
blas.GEMM(CblasNoTrans, CblasNoTrans, cur_bs, D4, D, static_cast<T>(1), batched_c_out_data = batched_c_out_data + offset;
prev_batch_h_data, D, wh_data, D4, static_cast<T>(1),
cur_batch_in_data, D4); #define DEFINE_CUR \
T* cur_in_data = batched_input_data; \
T* cur_in_data = cur_batch_in_data; T* cur_prev_c_data = prev_c_data; \
T* cur_c_out_data = cur_batch_c_out_data; T* cur_c_out_data = batched_c_out_data; \
T* cur_h_out_data = cur_batch_h_out_data; T* cur_h_out_data = batched_h_out_data
T* prev_c_data = prev_batch_c_data; // NULL if no C0 in step0
T* prev_h_data = prev_batch_h_data; // NULL if no H0 in step0 #define MOVE_ONE_BATCH \
auto next_data_in_batch = [&]() { cur_in_data += D4; \
cur_in_data += D4; cur_prev_c_data += D; \
cur_c_out_data += D; cur_c_out_data += D; \
cur_h_out_data += D; cur_h_out_data += D
prev_c_data = prev_c_data ? prev_c_data + D : nullptr;
prev_h_data = prev_h_data ? prev_h_data + D : nullptr; #define MOVE_ONE_STEP \
}; prev_c_data = batched_c_out_data; \
prev_h_data = batched_h_out_data; \
for (int i = 0; i < cur_bs; ++i) { // iterate each data in same batch batched_c_out_data = cur_c_out_data; \
// ~C_t batched_h_out_data = cur_h_out_data; \
act_cand(D, cur_in_data, cur_in_data); batched_input_data = cur_in_data
if (use_peepholes) { if (use_peepholes) {
// + W_ic|W_fc * C_t-1 for peephole connection for (int step = tstart; step < max_seq_len; ++step) {
blas.VMUL(D, wc_data, prev_c_data, checked_cell_data); const int cur_bs = batch_starts[step + 1] - batch_starts[step];
blas.VMUL(D, wc_data + D, prev_c_data, checked_cell_data + D); GEMM_WH_ADDON(cur_bs, prev_h_data, batched_input_data);
blas.VADD(D2, cur_in_data + D, checked_cell_data, cur_in_data + D); DEFINE_CUR;
// I_t, F_t for (int i = 0; i < cur_bs; ++i) {
act_gate(D2, cur_in_data + D, cur_in_data + D); COMPUTE_CtHt_PEEPHOLE(cur_in_data, cur_prev_c_data, cur_c_out_data,
} else { cur_h_out_data);
// I_t, F_t, O_t MOVE_ONE_BATCH;
act_gate(D3, cur_in_data + D, cur_in_data + D);
} }
MOVE_ONE_STEP;
// F_t * C_t-1
blas.VMUL(D, cur_in_data + D2, prev_c_data, cur_in_data + D2);
// I_t * ~C_t
blas.VMUL(D, cur_in_data, cur_in_data + D, cur_in_data + D);
// C_t = F_t * C_t-1 + I_t * ~C_t
blas.VADD(D, cur_in_data + D, cur_in_data + D2, cur_c_out_data);
if (use_peepholes) {
// + W_oc * C_t for peephole connection
blas.VMUL(D, wc_data + D2, cur_c_out_data, checked_cell_data + D2);
blas.VADD(D, cur_in_data + D3, checked_cell_data + D2,
cur_in_data + D3);
// O_t
act_gate(D, cur_in_data + D3, cur_in_data + D3);
} }
} else {
// hidden out= act_state(cellout) * outgate for (int step = tstart; step < max_seq_len; ++step) {
act_cell(D, cur_c_out_data, cur_in_data + D2); const int cur_bs = batch_starts[step + 1] - batch_starts[step];
// H_t = O_t * act_state(C_t) GEMM_WH_ADDON(cur_bs, prev_h_data, batched_input_data);
blas.VMUL(D, cur_in_data + D2, cur_in_data + D3, cur_h_out_data); DEFINE_CUR;
for (int i = 0; i < cur_bs; ++i) {
// move to next data in same batch COMPUTE_CtHt(cur_in_data, cur_prev_c_data, cur_c_out_data,
next_data_in_batch(); cur_h_out_data);
MOVE_ONE_BATCH;
}
MOVE_ONE_STEP;
} }
// move to data for next timestep
prev_batch_h_data = cur_batch_h_out_data;
prev_batch_c_data = cur_batch_c_out_data;
move_step(cur_bs);
} }
#undef MOVE_ONE_STEP
#undef MOVE_ONE_BATCH
#undef DEFINE_CUR
math::Batch2LoDTensorFunctor<DeviceContext, T> to_seq; math::Batch2LoDTensorFunctor<DeviceContext, T> to_seq;
batched_h_out->set_lod(batched_lod); batched_h_out->set_lod(batched_lod);
...@@ -615,6 +565,16 @@ class FuisonLSTMKernel : public framework::OpKernel<T> { ...@@ -615,6 +565,16 @@ class FuisonLSTMKernel : public framework::OpKernel<T> {
BatchCompute(ctx); BatchCompute(ctx);
} }
} }
#undef COMPUTE_CtHt_PEEPHOLE
#undef COMPUTE_CtHt
#undef GET_Ct_NOH0C0
#undef COMPUTE_CtHt_NOH0C0
#undef COMPUTE_CtHt_PEEPHOLE_NOH0C0
#undef GET_Ht
#undef GET_Ct
#undef GEMM_WH_ADDON
#undef INIT_BASE_INPUT_DATAS
#undef INIT_BASE_SIZES #undef INIT_BASE_SIZES
#undef INIT_BASE_INPUT_OUTPUT #undef INIT_BASE_INPUT_OUTPUT
#undef INIT_VEC_FUNC #undef INIT_VEC_FUNC
......
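The GET_Ct / GET_Ht / COMPUTE_CtHt* macros defined in the fused-LSTM kernel above operate on a gate buffer laid out as four D-wide blocks, candidate/input/forget/output (so D4 = 4·D), per the comment "gates: W_ch, W_ih, W_fh, W_oh". Written out, following the formulas in the macro comments (the peephole terms apply only when `use_peepholes` is set):

$$\tilde c_t = \mathrm{act\_cand}(g_c), \quad i_t = \mathrm{act\_gate}(g_i + w_{ic} \odot c_{t-1}), \quad f_t = \mathrm{act\_gate}(g_f + w_{fc} \odot c_{t-1})$$
$$c_t = f_t \odot c_{t-1} + i_t \odot \tilde c_t, \quad o_t = \mathrm{act\_gate}(g_o + w_{oc} \odot c_t), \quad h_t = o_t \odot \mathrm{act\_cell}(c_t)$$

Here GEMM_WH_ADDON adds \(H_{t-1} W_h\) into the gate buffer before each step, the peephole weights \(w_{ic}, w_{fc}, w_{oc}\) are read from the bias tensor at offset D4, and the *_NOH0C0 variants used at the first step without an initial state reduce to \(c_t = i_t \odot \tilde c_t\).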
...@@ -121,6 +121,12 @@ static inline void* GetDsoHandleFromSearchPath(const std::string& search_root, ...@@ -121,6 +121,12 @@ static inline void* GetDsoHandleFromSearchPath(const std::string& search_root,
if (nullptr == dso_handle) { if (nullptr == dso_handle) {
LOG(WARNING) << "Failed to find dynamic library: " << dlPath << " (" LOG(WARNING) << "Failed to find dynamic library: " << dlPath << " ("
<< dlerror() << ")"; << dlerror() << ")";
if (dlPath.find("nccl") != std::string::npos) {
std::cout
<< "You may need to install 'nccl2' from NVIDIA official website: "
<< "https://developer.nvidia.com/nccl/nccl-download"
<< "before install PaddlePaddle" << std::endl;
}
dlPath = dso_name; dlPath = dso_name;
dso_handle = GetDsoHandleFromDefaultPath(dlPath, dynload_flags); dso_handle = GetDsoHandleFromDefaultPath(dlPath, dynload_flags);
} }
......
...@@ -500,7 +500,7 @@ EOF ...@@ -500,7 +500,7 @@ EOF
EOF EOF
if [[ ${WITH_GPU} == "ON" ]]; then if [[ ${WITH_GPU} == "ON" ]]; then
NCCL_DEPS="apt-get install -y --allow-downgrades libnccl2=2.1.2-1+cuda${CUDA_MAJOR} libnccl-dev=2.1.2-1+cuda${CUDA_MAJOR} &&" NCCL_DEPS="apt-get install -y --allow-downgrades libnccl2=2.2.13-1+cuda${CUDA_MAJOR} libnccl-dev=2.2.13-1+cuda${CUDA_MAJOR} &&"
else else
NCCL_DEPS="" NCCL_DEPS=""
fi fi
......
...@@ -104,7 +104,7 @@ def batch_images_from_tar(data_file, ...@@ -104,7 +104,7 @@ def batch_images_from_tar(data_file,
pickle.dump( pickle.dump(
output, output,
open('%s/batch_%d' % (out_path, file_id), 'wb'), open('%s/batch_%d' % (out_path, file_id), 'wb'),
protocol=pickle.HIGHEST_PROTOCOL) protocol=2)
file_id += 1 file_id += 1
data = [] data = []
labels = [] labels = []
...@@ -113,9 +113,7 @@ def batch_images_from_tar(data_file, ...@@ -113,9 +113,7 @@ def batch_images_from_tar(data_file,
output['label'] = labels output['label'] = labels
output['data'] = data output['data'] = data
pickle.dump( pickle.dump(
output, output, open('%s/batch_%d' % (out_path, file_id), 'wb'), protocol=2)
open('%s/batch_%d' % (out_path, file_id), 'wb'),
protocol=pickle.HIGHEST_PROTOCOL)
with open(meta_file, 'a') as meta: with open(meta_file, 'a') as meta:
for file in os.listdir(out_path): for file in os.listdir(out_path):
......
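`batch_images_from_tar` above now pins `protocol=2` instead of `pickle.HIGHEST_PROTOCOL`. Protocol 2 is the newest pickle protocol that Python 2 can read, so a batch file written under Python 3 (where HIGHEST_PROTOCOL is 3 or higher) stays loadable from Python 2; presumably that is the motivation here. A small stand-alone sketch of the same dump/load pattern with stand-in data:

```
import pickle

output = {'label': [0, 1], 'data': [[0.1] * 3072, [0.2] * 3072]}  # stand-in batch
with open('batch_0', 'wb') as f:
    pickle.dump(output, f, protocol=2)  # readable from both Python 2 and Python 3

with open('batch_0', 'rb') as f:
    print(pickle.load(f)['label'])
```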
...@@ -47,14 +47,14 @@ def train_program(): ...@@ -47,14 +47,14 @@ def train_program():
loss = fluid.layers.square_error_cost(input=y_predict, label=y) loss = fluid.layers.square_error_cost(input=y_predict, label=y)
avg_loss = fluid.layers.mean(loss) avg_loss = fluid.layers.mean(loss)
return avg_loss return [avg_loss, y_predict]
def optimizer_func(): def optimizer_func():
return fluid.optimizer.SGD(learning_rate=0.001) return fluid.optimizer.SGD(learning_rate=0.001)
def train(use_cuda, train_program, params_dirname): def train(use_cuda, train_program, params_dirname, inference_model_dirname):
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
trainer = fluid.Trainer( trainer = fluid.Trainer(
...@@ -74,6 +74,8 @@ def train(use_cuda, train_program, params_dirname): ...@@ -74,6 +74,8 @@ def train(use_cuda, train_program, params_dirname):
''' '''
if params_dirname is not None: if params_dirname is not None:
trainer.save_params(params_dirname) trainer.save_params(params_dirname)
trainer.save_inference_model(inference_model_dirname,
['x'], [1])
trainer.stop()
trainer.train(
...@@ -99,15 +101,55 @@ def infer(use_cuda, inference_program, params_dirname=None):
print("infer results: ", results[0])
def infer_by_saved_model(use_cuda, save_dirname=None):
if save_dirname is None:
return
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
exe = fluid.Executor(place)
inference_scope = fluid.core.Scope()
with fluid.scope_guard(inference_scope):
# Use fluid.io.load_inference_model to obtain the inference program desc,
# the feed_target_names (the names of variables that will be feeded
# data using feed operators), and the fetch_targets (variables that
# we want to obtain data from using fetch operators).
[inference_program, feed_target_names,
fetch_targets] = fluid.io.load_inference_model(save_dirname, exe)
# The input's dimension should be 2-D and the second dim is 13
# The input data should be >= 0
batch_size = 10
test_reader = paddle.batch(
paddle.dataset.uci_housing.test(), batch_size=batch_size)
test_data = next(test_reader())
test_feat = numpy.array(
[data[0] for data in test_data]).astype("float32")
test_label = numpy.array(
[data[1] for data in test_data]).astype("float32")
assert feed_target_names[0] == 'x'
results = exe.run(inference_program,
feed={feed_target_names[0]: numpy.array(test_feat)},
fetch_list=fetch_targets)
print("infer shape: ", results[0].shape)
print("infer results: ", results[0])
print("ground truth: ", test_label)
def main(use_cuda):
if use_cuda and not fluid.core.is_compiled_with_cuda():
return
# Directory for saving the trained model
params_dirname = "fit_a_line.inference.model"
params_dirname = "fit_a_line.model"
inference_model_dirname = "fit_a_line.inference_model"
train(use_cuda, train_program, params_dirname)
train(use_cuda, train_program, params_dirname, inference_model_dirname)
infer(use_cuda, inference_program, params_dirname)
infer_by_saved_model(use_cuda, inference_model_dirname)
class TestFitALine(unittest.TestCase):
...
...@@ -55,6 +55,7 @@ class TestDistRunnerBase(object):
pserver_prog = t.get_pserver_program(args.current_endpoint)
startup_prog = t.get_startup_program(args.current_endpoint,
pserver_prog)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(startup_prog)
...@@ -147,6 +148,8 @@ def runtime_main(test_class):
import paddle.compat as cpt
import socket
from contextlib import closing
class TestDistBase(unittest.TestCase):
...@@ -156,13 +159,19 @@ class TestDistBase(unittest.TestCase):
def setUp(self):
self._trainers = 2
self._pservers = 2
self._ps_endpoints = "127.0.0.1:9123,127.0.0.1:9124"
self._ps_endpoints = "127.0.0.1:%s,127.0.0.1:%s" % (
self._find_free_port(), self._find_free_port())
self._python_interp = "python"
self._sync_mode = True
self._mem_opt = False
self._use_reduce = False
self._setup_config()
def _find_free_port(self):
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
s.bind(('', 0))
return s.getsockname()[1]
def start_pserver(self, model_file, check_error_log):
ps0_ep, ps1_ep = self._ps_endpoints.split(",")
ps_cmd = "%s %s --role pserver --endpoints %s --trainer_id 0 --current_endpoint %s --trainers %d --is_dist"
...
...@@ -53,12 +53,11 @@ class TestFusionLSTMOp(OpTest):
self.M = 8
self.D = 16
self.has_initial_state = False
self.use_peepholes = False
self.is_reverse = False
self.act_gate = 'sigmoid'
self.act_cell = 'tanh'
self.act_cand = 'tanh'
self.use_peepholes = False
self.use_seq = False
self.set_conf()
T = sum(self.lod[0])
...@@ -108,7 +107,6 @@ class TestFusionLSTMOp(OpTest):
}
self.attrs = {
'use_peepholes': self.use_peepholes,
'use_seq': self.use_seq,
'is_reverse': self.is_reverse,
'gate_activation': self.act_gate,
'cell_activation': self.act_cell,
...@@ -178,50 +176,18 @@ class TestFusionLSTMOpPeepholesReverse(TestFusionLSTMOp):
self.is_reverse = True
class TestFusionLSTMOpPoopholesBS1(TestFusionLSTMOp):
class TestFusionLSTMOpPeepholesInitReverse(TestFusionLSTMOp):
def set_conf(self):
self.use_peepholes = True
self.lod = [[3]]
self.D = 16
class TestFusionLSTMOpSeqInit(TestFusionLSTMOp):
def set_conf(self):
self.use_seq = True
self.has_initial_state = True
class TestFusionLSTMOpSeqReverse(TestFusionLSTMOp):
def set_conf(self):
self.use_seq = True
self.is_reverse = True
class TestFusionLSTMOpSeqInitReverse(TestFusionLSTMOp):
def set_conf(self):
self.use_seq = True
self.has_initial_state = True
self.is_reverse = True
class TestFusionLSTMOpSeqPeepholes(TestFusionLSTMOp):
class TestFusionLSTMOpPeepholesBS1(TestFusionLSTMOp):
def set_conf(self):
self.use_seq = True
self.use_peepholes = True
self.lod = [[2]]
self.D = 8
class TestFusionLSTMOpSeqPeepholesInit(TestFusionLSTMOp):
def set_conf(self):
self.use_seq = True
self.use_peepholes = True
self.has_initial_state = True
class TestFusionLSTMOpSeqPeepholesReverse(TestFusionLSTMOp):
def set_conf(self):
self.use_seq = True
self.use_peepholes = True
self.is_reverse = True
if __name__ == '__main__':
...
...@@ -85,6 +85,7 @@ class TestFetchOp(unittest.TestCase):
assert not math.isnan(np.sum(ret[i])) and \
not math.isinf(np.sum(ret[i]))
@unittest.skip(reason="CI timeout")
def test_fetch_op(self):
tst_reader = paddle.batch(flowers.test(use_xmap=False), batch_size=16)
tst_reader_iter = tst_reader()
...@@ -139,6 +140,7 @@ class TestFeedParallel(unittest.TestCase):
if batch_id == 2:
break
@unittest.skip(reason="CI timeout")
def test_feed_op(self):
os.environ['CPU_NUM'] = str(4)
if core.is_compiled_with_cuda():
...
...@@ -431,6 +431,28 @@ class Trainer(object):
exe = executor.Executor(self.place)
io.save_persistables(exe, dirname=param_path)
def save_inference_model(self, param_path, feeded_var_names,
target_var_indexes):
"""
Save model for cpp inference into :code:`param_path`.
Args:
param_path(str): The path to save parameters.
feeded_var_names(list(str)): The name of the vars that you
need to feed in before run program.
target_var_indexes(list(int)): the index of target var that
you need to return in trainer.train_func.
Returns:
None
"""
with self._prog_and_scope_guard():
exe = executor.Executor(self.place)
target_vars = [
self.train_func_outputs[index] for index in target_var_indexes
]
io.save_inference_model(param_path, feeded_var_names, target_vars,
exe)
@contextlib.contextmanager
def _prog_and_scope_guard(self):
with framework.program_guard(
...
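As a quick orientation for the new `Trainer.save_inference_model` API above: `target_var_indexes` selects fetch targets by their position in the list returned from `train_func`, which is why the fit_a_line test changes `train_program` to return `[avg_loss, y_predict]` and then passes `[1]` to pick `y_predict`. A minimal save/load sketch, assuming `trainer` is the `fluid.Trainer` built in that test; the directory name, the `'x'` feed name, and the 13-feature input shape come from that test, not from the API itself:

```
import numpy
import paddle.fluid as fluid

# Export a self-contained inference program; index 1 refers to y_predict,
# the second element of the list returned by train_program above.
trainer.save_inference_model("fit_a_line.inference_model",
                             feeded_var_names=['x'],
                             target_var_indexes=[1])

# Load and run it with a plain Executor, no Trainer required.
place = fluid.CPUPlace()
exe = fluid.Executor(place)
[program, feed_names, fetch_targets] = fluid.io.load_inference_model(
    "fit_a_line.inference_model", exe)
x = numpy.random.random((10, 13)).astype("float32")  # UCI housing: 13 features
results = exe.run(program,
                  feed={feed_names[0]: x},
                  fetch_list=fetch_targets)
print("predictions:", results[0])
```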