diff --git a/paddle/scripts/check_env.sh b/benchmark/paddle/image/check_env.sh
similarity index 100%
rename from paddle/scripts/check_env.sh
rename to benchmark/paddle/image/check_env.sh
diff --git a/doc/fluid/api/initializer.rst b/doc/fluid/api/initializer.rst
index ee69925fda6b3fc850cfb632e8edd359e7fcff9c..f186c9c85a640da49d95a1a62c721b09b3007d83 100644
--- a/doc/fluid/api/initializer.rst
+++ b/doc/fluid/api/initializer.rst
@@ -33,3 +33,45 @@ Xavier
     :members:
     :noindex:
 
+MSRA
+------
+
+.. autoclass:: paddle.fluid.initializer.MSRA
+    :members:
+    :noindex:
+
+ConstantInitializer
+-------------------
+
+.. autoclass:: paddle.fluid.initializer.ConstantInitializer
+    :members:
+    :noindex:
+
+UniformInitializer
+------------------
+
+.. autoclass:: paddle.fluid.initializer.UniformInitializer
+    :members:
+    :noindex:
+
+NormalInitializer
+-----------------
+
+.. autoclass:: paddle.fluid.initializer.NormalInitializer
+    :members:
+    :noindex:
+
+XavierInitializer
+-----------------
+
+.. autoclass:: paddle.fluid.initializer.XavierInitializer
+    :members:
+    :noindex:
+
+MSRAInitializer
+-----------------
+
+.. autoclass:: paddle.fluid.initializer.MSRAInitializer
+    :members:
+    :noindex:
diff --git a/doc/fluid/api/layers.rst b/doc/fluid/api/layers.rst
index 5c02886efd7d11e9520910526fb90ec01e123bae..3790f09c84563fe541bd8d0bc08e23b19d4287ca 100644
--- a/doc/fluid/api/layers.rst
+++ b/doc/fluid/api/layers.rst
@@ -815,3 +815,8 @@ zeros
 .. autofunction:: paddle.fluid.layers.zeros
     :noindex:
 
+topk
+----
+
+.. autofunction:: paddle.fluid.layers.topk
+    :noindex:
diff --git a/doc/fluid/design/dist_train/mpi_enabled_design.md b/doc/fluid/design/dist_train/mpi_enabled_design.md
new file mode 100644
index 0000000000000000000000000000000000000000..4ad3afc7b7522c60460c6f1f387f9415d3738778
--- /dev/null
+++ b/doc/fluid/design/dist_train/mpi_enabled_design.md
@@ -0,0 +1,46 @@
+# MPI-enabled PaddlePaddle Design Doc
+
+# Background
+When we do distributed multi-GPU training, the communication overhead between servers becomes the major bottleneck, for the following reasons:
+1. Data must be copied at least once from GPU to CPU memory before it is ready to transfer, and on the pserver side copying the data from CPU back to GPU introduces further overhead.
+2. GPU->CPU data transfer is 10 times slower than data transfer between GPUs or between PCIe devices.
+3. TCP connections cannot make full use of RDMA 100Gb devices.
+
+We will add the OpenMPI API to PaddlePaddle, which brings two benefits:
+1. Enable RDMA with PaddlePaddle, which brings a high-performance, low-latency network.
+2. Enable GPUDirect with PaddlePaddle, which brings the highest-throughput, lowest-latency GPU reads and writes.
+
+# Change list
+* Compile args: add compile args to enable MPI support.
+* Execute args: add execute args that assign when and how to use MPI operations.
+* New ops: add new ops ```mpi_send_op``` and ```mpi_listenandserve_op``` to support MPI send and receive.
+* Transpiler optimization: add ```mpi_send_op``` and ```mpi_listenandserve_op``` to the running graph.
+* MPI utils package: an MPI utils package that wraps the low-level API.
+
+## Compile args
+Because MPI and CUDA require hardware support, we will add compile args to enable MPI support and to control compilation. A ```WITH_MPI``` compile arg controls whether MPI is used. If ```WITH_MPI``` is ```ON```, the build system will look for the OpenMPI installation during configuration, so the OpenMPI environment should be prepared before compiling.
+
+## Execute args
+Launch the script using the ```mpirun``` launcher, for example: ```mpirun -np 3 -hosts node1,node2,node3 python train.py```. By doing this, we can number the actors (trainer/pserver/master) with 0 .. (n-1). Each node's number is the rank (an integer) of the calling process within the communicator group, and MPI processes identify each other by this Rank ID. We have to create a mapping between PaddlePaddle's nodes and their Rank IDs so that we can communicate with the correct destinations when using MPI operations.
+
+## New ops
+We won't replace all the gRPC requests with MPI requests: the standard gRPC library is still used for all administrative operations, while the MPI API is used to transfer tensors or SelectedRows to pservers. Based on this idea, we create two new operators to handle sends and receives: ```mpi_send_op``` and ```mpi_listenandserve_op```. They are similar to [send_op](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/operators/send_op.cc) and [listen_and_serv_op](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/operators/listen_and_serv_op.cc); we will also build a new module that packages the MPI send and receive process.
+
+### mpi_send_op
+Very similar to ```send_op```, but it replaces the gRPC code used to send gradients with ```mpi_module``` and, at the same time, wraps the call with ```framework::Async```.
+
+### mpi_listenandserve_op
+Very similar to ```listen_and_serv_op```, but it replaces the gRPC code used to receive gradients with ```mpi_module``` and, at the same time, wraps the call with ```framework::Async```.
+
+## Transpiler optimization
+**We can read the environment variables ```OMPI_COMM_WORLD_SIZE``` and ```OMPI_COMM_WORLD_RANK``` to decide whether to use MPI: when the job is launched with OpenMPI, these variables must exist in the environment.**
+If the use of MPI is confirmed, the distribute_transpiler will rewrite ```send_op``` to ```mpi_send_op```, and likewise ```listenandserve_op``` to ```mpi_listenandserve_op```.
+
+## MPI utils package
+This package wraps the low-level OpenMPI API for PaddlePaddle.
+The APIs included in this package are:
+* MPI send and receive module. We will build a new module that packages the MPI send and receive process. MPI send and receive differ from gRPC: the MPI [receive](https://www.open-mpi.org/doc/v1.8/man3/MPI_Irecv.3.php) must know the receive buffer size and the receive buffer element type in advance. For this reason, we have to communicate twice: the first communication sends metadata about the gradient through gRPC, and the second is the real communication, which sends the gradient data through MPI to ```mpi_listenandserve_op```.
+The detailed flow is below:
+![](https://github.com/seiriosPlus/Paddle/blob/mpi_enabled/doc/fluid/design/dist_train/src/mpi_module.png)
+* MPI global configuration, which stores the Rank IDs and the node mapping in global variables, for example:
+gRPC client : MPI nodes :``` 127.0.0.1:32004 : 3 ```
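+
+As an illustration of the environment-based detection described in the "Transpiler optimization" section, the following minimal Python sketch shows how a process launched by ```mpirun``` could discover whether MPI is available and what its Rank ID is. The helper names are hypothetical and are not part of this design:
+
+```python
+import os
+
+def mpi_enabled():
+    # mpirun exports these variables for every process it launches; if they
+    # are missing, the job was not started by OpenMPI and the transpiler
+    # should keep the pure-gRPC operators.
+    return ("OMPI_COMM_WORLD_SIZE" in os.environ and
+            "OMPI_COMM_WORLD_RANK" in os.environ)
+
+def rank_and_size():
+    # Rank ID and world size, used to build the mapping between
+    # PaddlePaddle nodes (trainer/pserver/master) and MPI ranks.
+    return (int(os.environ["OMPI_COMM_WORLD_RANK"]),
+            int(os.environ["OMPI_COMM_WORLD_SIZE"]))
+```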
diff --git a/doc/fluid/design/dist_train/src/mpi_module.png b/doc/fluid/design/dist_train/src/mpi_module.png
new file mode 100644
index 0000000000000000000000000000000000000000..e6b6a3e5d6f68baeeb67d7f71154bd8d85f32b6f
Binary files /dev/null and b/doc/fluid/design/dist_train/src/mpi_module.png differ
diff --git a/doc/v2/api/data/data_reader.rst b/doc/v2/api/data/data_reader.rst
index 2ccfec9c284877a7576e9751526b169a4ac78d8e..d7c896a6270b488ca4449e5211d0d0879eda6ac5 100644
--- a/doc/v2/api/data/data_reader.rst
+++ b/doc/v2/api/data/data_reader.rst
@@ -6,7 +6,43 @@ Data Reader Interface
 DataTypes
 =========
 
-.. automodule:: paddle.v2.data_type
+.. autofunction:: paddle.v2.data_type.dense_array
+    :noindex:
+
+.. autofunction:: paddle.v2.data_type.integer_value
+    :noindex:
+
+.. autofunction:: paddle.v2.data_type.integer_value_sequence
+    :noindex:
+
+.. autofunction:: paddle.v2.data_type.integer_value_sub_sequence
+    :noindex:
+
+.. autofunction:: paddle.v2.data_type.sparse_binary_vector
+    :noindex:
+
+.. autofunction:: paddle.v2.data_type.sparse_binary_vector_sequence
+    :noindex:
+
+.. autofunction:: paddle.v2.data_type.sparse_binary_vector_sub_sequence
+    :noindex:
+
+.. autofunction:: paddle.v2.data_type.sparse_float_vector
+    :noindex:
+
+.. autofunction:: paddle.v2.data_type.sparse_float_vector_sequence
+    :noindex:
+
+.. autofunction:: paddle.v2.data_type.sparse_float_vector_sub_sequence
+    :noindex:
+
+.. autofunction:: paddle.v2.data_type.sparse_non_value_slot
+    :noindex:
+
+.. autofunction:: paddle.v2.data_type.sparse_value_slot
+    :noindex:
+
+.. autoclass:: paddle.v2.data_type.InputType
     :members:
     :noindex:
diff --git a/paddle/fluid/framework/CMakeLists.txt b/paddle/fluid/framework/CMakeLists.txt
index 1f3ca24df16cf080d325fbdc0d613a828e384b2a..340b891e41671df7e61a4a66ec538d4603bb9842 100644
--- a/paddle/fluid/framework/CMakeLists.txt
+++ b/paddle/fluid/framework/CMakeLists.txt
@@ -102,7 +102,7 @@ cc_test(init_test SRCS init_test.cc DEPS init)
 cc_test(op_kernel_type_test SRCS op_kernel_type_test.cc DEPS place device_context framework_proto)
 cc_test(cow_ptr_tests SRCS details/cow_ptr_test.cc)
-cc_test(channel_test SRCS channel_test.cc)
+# cc_test(channel_test SRCS channel_test.cc)
 cc_test(tuple_test SRCS tuple_test.cc )
 cc_test(concurrency_test SRCS concurrency_test.cc DEPS go_op channel_close_op channel_create_op channel_send_op channel_recv_op sum_op select_op elementwise_add_op compare_op
diff --git a/paddle/fluid/framework/details/broadcast_op_handle_test.cc b/paddle/fluid/framework/details/broadcast_op_handle_test.cc
index dfc52b012f8b6bf5cf1a3feab90dc1ec7842ad6c..bcd61335be0f7fe64563ee65daaf9de0760c9b1a 100644
--- a/paddle/fluid/framework/details/broadcast_op_handle_test.cc
+++ b/paddle/fluid/framework/details/broadcast_op_handle_test.cc
@@ -77,14 +77,9 @@ struct TestBroadcastOpHandle {
     local_scopes_[input_scope_idx]->Var("input");
 
     op_handle_.reset(new BroadcastOpHandle(local_scopes_, gpu_list_));
-
-    vars_.emplace_back(new VarHandle());
-    VarHandle* in_var_handle = static_cast<VarHandle*>(vars_.back().get());
-    in_var_handle->place_ = gpu_list_[input_scope_idx];
-    in_var_handle->name_ = "input";
-    in_var_handle->version_ = 1;
-    in_var_handle->scope_idx_ = input_scope_idx;
-    in_var_handle->generated_op_ = nullptr;
+    auto* in_var_handle =
+        new VarHandle(1, input_scope_idx, "input", gpu_list_[input_scope_idx]);
+    vars_.emplace_back(in_var_handle);
     op_handle_->AddInput(in_var_handle);
 
     // add dummy var
@@ -96,12 +91,8 @@ struct TestBroadcastOpHandle {
 
     for (size_t j = 0; j < gpu_list_.size(); ++j) {
       op_handle_->dev_ctxes_[gpu_list_[j]] = ctxs_[j].get();
-      vars_.emplace_back(new VarHandle());
-      VarHandle* out_var_handle = static_cast<VarHandle*>(vars_.back().get());
-      out_var_handle->place_ = gpu_list_[j];
-      out_var_handle->name_ = "out";
-      out_var_handle->version_ = 2;
-      out_var_handle->scope_idx_ = j;
+      VarHandle* out_var_handle = new VarHandle(2, j, "out", gpu_list_[j]);
+      vars_.emplace_back(out_var_handle);
       op_handle_->AddOutput(out_var_handle);
     }
 
diff --git a/paddle/fluid/framework/details/gather_op_handle_test.cc b/paddle/fluid/framework/details/gather_op_handle_test.cc
index
10839f239d59e97946575297a6d125968a1458f4..2da8c89d2df73215b748f102d9bbfc5b742cf97f 100644 --- a/paddle/fluid/framework/details/gather_op_handle_test.cc +++ b/paddle/fluid/framework/details/gather_op_handle_test.cc @@ -79,13 +79,8 @@ struct TestGatherOpHandle { // add input for (size_t j = 0; j < gpu_list_.size(); ++j) { op_handle_->dev_ctxes_[gpu_list_[j]] = ctxs_[j].get(); - vars_.emplace_back(new VarHandle()); - VarHandle* in_var_handle = static_cast(vars_.back().get()); - in_var_handle->place_ = gpu_list_[j]; - in_var_handle->name_ = "input"; - in_var_handle->version_ = 1; - in_var_handle->scope_idx_ = j; - in_var_handle->generated_op_ = nullptr; + auto* in_var_handle = new VarHandle(1, j, "input", gpu_list_[j]); + vars_.emplace_back(in_var_handle); op_handle_->AddInput(in_var_handle); } @@ -97,12 +92,9 @@ struct TestGatherOpHandle { op_handle_->AddInput(in_dummy_var_handle); // add output - vars_.emplace_back(new VarHandle()); - VarHandle* out_var_handle = static_cast(vars_.back().get()); - out_var_handle->place_ = gpu_list_[input_scope_idx]; - out_var_handle->name_ = "out"; - out_var_handle->version_ = 2; - out_var_handle->scope_idx_ = input_scope_idx; + auto* out_var_handle = + new VarHandle(2, input_scope_idx, "out", gpu_list_[input_scope_idx]); + vars_.emplace_back(out_var_handle); op_handle_->AddOutput(out_var_handle); // add dummy var diff --git a/paddle/fluid/framework/details/multi_devices_graph_builder.cc b/paddle/fluid/framework/details/multi_devices_graph_builder.cc index 5a95cbc53625888bac539f91af391ff0babec17b..4d76dbf7f6ffcf6c82ebf7defd9334bbe64a451c 100644 --- a/paddle/fluid/framework/details/multi_devices_graph_builder.cc +++ b/paddle/fluid/framework/details/multi_devices_graph_builder.cc @@ -177,13 +177,9 @@ std::unique_ptr MultiDevSSAGraphBuilder::Build( auto &prev_grad = vars[vars.size() - 1]; op_handle->AddInput(prev_grad.get()); - vars.emplace_back(new VarHandle); - auto &var = vars.back(); - var->place_ = p; - var->name_ = og; - var->version_ = vars.size() - 1; - - op_handle->AddOutput(var.get()); + auto var = new VarHandle(vars.size() - 1, i, og, p); + vars.emplace_back(var); + op_handle->AddOutput(var); } #else PADDLE_ENFORCE("Not implemented"); diff --git a/paddle/fluid/framework/details/nccl_all_reduce_op_handle.cc b/paddle/fluid/framework/details/nccl_all_reduce_op_handle.cc index 1e48f75958a3ada4d1cd5c8d0f920da4fed2157e..e587210b357ea6caa3272903d8aa6b3e4b2e8228 100644 --- a/paddle/fluid/framework/details/nccl_all_reduce_op_handle.cc +++ b/paddle/fluid/framework/details/nccl_all_reduce_op_handle.cc @@ -73,8 +73,9 @@ void NCCLAllReduceOpHandle::RunImpl() { for (size_t i = 0; i < local_scopes_.size(); ++i) { auto *s = local_scopes_[i]; + auto &local_scope = *s->FindVar(kLocalExecScopeName)->Get(); - auto &lod_tensor = s->FindVar(var_name)->Get(); + auto &lod_tensor = local_scope.FindVar(var_name)->Get(); lod_tensors.emplace_back(lod_tensor); } @@ -110,17 +111,21 @@ void NCCLAllReduceOpHandle::RunImpl() { } }); } else { // Special handle CPU only Operator's gradient. 
Like CRF - auto &trg = - *this->local_scopes_[0]->Var()->GetMutable(); + auto &trg = *this->local_scopes_[0] + ->FindVar(kLocalExecScopeName) + ->Get() + ->Var() + ->GetMutable(); // Reduce All Tensor to trg in CPU ReduceLoDTensor func(lod_tensors, &trg); VisitDataType(ToDataType(lod_tensors[0].type()), func); for (size_t i = 0; i < local_scopes_.size(); ++i) { - auto &scope = local_scopes_[i]; + auto &scope = + *local_scopes_[i]->FindVar(kLocalExecScopeName)->Get(); auto &p = places_[i]; - auto *var = scope->FindVar(var_name); + auto *var = scope.FindVar(var_name); auto *dev_ctx = dev_ctxes_[p]; RunAndRecordEvent(p, [&trg, var, dev_ctx, p] { diff --git a/paddle/fluid/framework/details/scale_loss_grad_op_handle.cc b/paddle/fluid/framework/details/scale_loss_grad_op_handle.cc index 7fb9f99a8a1bc044e2f25f373265a5ec9f7d76d5..7a65ee62c9bfc0dad2ebee3be21de825fa405d73 100644 --- a/paddle/fluid/framework/details/scale_loss_grad_op_handle.cc +++ b/paddle/fluid/framework/details/scale_loss_grad_op_handle.cc @@ -30,10 +30,11 @@ ScaleLossGradOpHandle::~ScaleLossGradOpHandle() {} void ScaleLossGradOpHandle::RunImpl() { std::string var_name = static_cast(this->outputs_[0])->name_; + auto &local_scope = *scope_->FindVar(kLocalExecScopeName)->Get(); - float *tmp = - scope_->FindVar(var_name)->GetMutable()->mutable_data( - make_ddim({1}), place_); + float *tmp = local_scope.FindVar(var_name) + ->GetMutable() + ->mutable_data(make_ddim({1}), place_); if (platform::is_cpu_place(place_)) { *tmp = coeff_; diff --git a/paddle/fluid/framework/details/ssa_graph_builder.cc b/paddle/fluid/framework/details/ssa_graph_builder.cc index be5fb7577581fd99b1b7b80ccdd2acb8d3a91f01..25e8c77bb489546092b2a93e052da7dd0dd5edf4 100644 --- a/paddle/fluid/framework/details/ssa_graph_builder.cc +++ b/paddle/fluid/framework/details/ssa_graph_builder.cc @@ -54,13 +54,8 @@ VarHandle *SSAGraphBuilder::CreateOrGetLatestVarHandle( auto &var_holder = var_holders[each_var_name]; VarHandle *var = nullptr; if (var_holder.empty()) { - var_holder.emplace_back(new VarHandle); - auto &init_var = var_holder[0]; - init_var->place_ = place; - init_var->name_ = each_var_name; - init_var->generated_op_ = nullptr; - init_var->version_ = 0; - var = init_var.get(); + var = new VarHandle(0, place_offset, each_var_name, place); + var_holder.emplace_back(var); } else { var = var_holder.rbegin()->get(); } @@ -73,12 +68,9 @@ void SSAGraphBuilder::CreateOpOutput(SSAGraph *graph, OpHandleBase *op_handle, size_t place_offset) { auto &vars = graph->vars_[place_offset][each_var_name]; size_t version = vars.size(); - vars.emplace_back(new VarHandle()); - auto &var = vars.back(); - var->version_ = version; - var->name_ = each_var_name; - var->place_ = place; - op_handle->AddOutput(var.get()); + auto var = new VarHandle(version, place_offset, each_var_name, place); + vars.emplace_back(var); + op_handle->AddOutput(var); } template diff --git a/paddle/fluid/framework/details/var_handle.h b/paddle/fluid/framework/details/var_handle.h index 871e41343f53b801a22d3a450f0906f37fb372d1..2b887c67e6fc6ea78e42fbb9fd170f740db27d97 100644 --- a/paddle/fluid/framework/details/var_handle.h +++ b/paddle/fluid/framework/details/var_handle.h @@ -16,6 +16,7 @@ #include #include #include +#include #include "paddle/fluid/platform/place.h" @@ -33,10 +34,10 @@ struct VarHandleBase { // The operator who generate this variable. nullptr if the variable // is a root node. - OpHandleBase *generated_op_; + OpHandleBase* generated_op_{nullptr}; // Operators which depend on this variable ready. 
- std::unordered_set pending_ops_; + std::unordered_set pending_ops_; }; // VarHandle is actually a single version of Runtime Variable. @@ -47,6 +48,13 @@ struct VarHandleBase { struct VarHandle : public VarHandleBase { std::string DebugString() const override; + VarHandle(size_t version, size_t scope_index, std::string name, + platform::Place place) + : version_(version), + scope_idx_(scope_index), + name_(std::move(name)), + place_(std::move(place)) {} + // version field currently is not used, however, just store the version to // debug easily. size_t version_; diff --git a/paddle/fluid/framework/parallel_executor.cc b/paddle/fluid/framework/parallel_executor.cc index 0962f40c4a64f18f7105626c54a83f1c5b299c50..106b5f866ed5225d67082310e308984d8b3f19ed 100644 --- a/paddle/fluid/framework/parallel_executor.cc +++ b/paddle/fluid/framework/parallel_executor.cc @@ -63,13 +63,14 @@ ParallelExecutor::ParallelExecutor( // Step 1. Bcast the params to devs. // Create local scopes if (local_scopes.empty()) { - for (size_t i = 0; i < member_->places_.size(); ++i) { - member_->local_scopes_.push_back(&scope->NewScope()); + member_->local_scopes_.emplace_back(member_->global_scope_); + for (size_t i = 1; i < member_->places_.size(); ++i) { + member_->local_scopes_.emplace_back(&scope->NewScope()); } } else { PADDLE_ENFORCE_EQ(member_->places_.size(), local_scopes.size()); for (size_t i = 0; i < member_->places_.size(); ++i) { - member_->local_scopes_.push_back(local_scopes[i]); + member_->local_scopes_.emplace_back(local_scopes[i]); } } @@ -159,7 +160,9 @@ void ParallelExecutor::Run(const std::vector &fetch_tensors, const std::string &fetched_var_name) { platform::RecordBlock b(0); // Create local scopes. - for (auto &scope : member_->local_scopes_) { + for (auto it = member_->local_scopes_.rbegin(); + it != member_->local_scopes_.rend(); ++it) { + auto &scope = *it; Scope &local_scope = scope->NewScope(); *scope->Var(details::kLocalExecScopeName)->GetMutable() = &local_scope; @@ -173,7 +176,7 @@ void ParallelExecutor::Run(const std::vector &fetch_tensors, InitializeVariable(scope->Var(std::get<0>(name_type_pair)), std::get<1>(name_type_pair)); } else { - InitializeVariable(scope->Var(std::get<0>(name_type_pair)), + InitializeVariable(local_scope.Var(std::get<0>(name_type_pair)), std::get<1>(name_type_pair)); } } diff --git a/paddle/fluid/operators/beam_search_decode_op.cc b/paddle/fluid/operators/beam_search_decode_op.cc index 718f469d38c3c6b7272c1531fae0a1e9ad2e8e3e..4a8dfd4b54227070c2143b180f8ab92753885550 100644 --- a/paddle/fluid/operators/beam_search_decode_op.cc +++ b/paddle/fluid/operators/beam_search_decode_op.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/beam_search_decode_op.h" +#include #include "paddle/fluid/platform/device_context.h" namespace paddle { diff --git a/paddle/fluid/operators/beam_search_decode_op.h b/paddle/fluid/operators/beam_search_decode_op.h index 3cc6ed310575473fae8e91a8507fb9146107e841..4cb0457d9285e20d4b6a2f9987b7fdb1c6ac157f 100644 --- a/paddle/fluid/operators/beam_search_decode_op.h +++ b/paddle/fluid/operators/beam_search_decode_op.h @@ -14,6 +14,7 @@ limitations under the License. 
*/ #pragma once +#include #include "paddle/fluid/framework/lod_tensor_array.h" #include "paddle/fluid/framework/op_registry.h" @@ -87,7 +88,7 @@ struct BeamSearchDecoder { */ std::vector> PackTwoSteps( const LoDTensor& cur_ids, const LoDTensor& cur_scores, - std::vector>& prefixes_list, + std::vector>* prefixes_list, std::vector>* sentence_vector_list) const; /** @@ -140,7 +141,7 @@ Sentence BeamSearchDecoder::MakeSentence(const BeamNode* node) const { template std::vector> BeamSearchDecoder::PackTwoSteps( const LoDTensor& cur_ids, const LoDTensor& cur_scores, - std::vector>& prefixes_list, + std::vector>* prefixes_list, std::vector>* sentence_vector_list) const { std::vector> result; @@ -153,7 +154,7 @@ std::vector> BeamSearchDecoder::PackTwoSteps( // if prefixes size is 0, it means this is the first step. In this step, // all candidate id is the start of candidate sentences. - if (prefixes_list.empty()) { + if (prefixes_list->empty()) { PADDLE_ENFORCE_EQ(cur_ids.lod().at(kSourceLevel).back(), cur_ids.lod().at(kSentenceLevel).back(), "in the first step"); @@ -162,7 +163,7 @@ std::vector> BeamSearchDecoder::PackTwoSteps( cur_ids.data()[id_idx], cur_scores.data()[id_idx]))); } } else { - BeamNodeVector& prefixes = prefixes_list[src_idx]; + BeamNodeVector& prefixes = prefixes_list->at(src_idx); SentenceVector& sentence_vector = (*sentence_vector_list)[src_idx]; PADDLE_ENFORCE_EQ(src_end - src_start, prefixes.size(), @@ -262,7 +263,7 @@ void BeamSearchDecoder::PackAllSteps(const LoDTensorArray& step_ids, for (size_t step_id = 0; step_id < step_num; ++step_id) { beamnode_vector_list = PackTwoSteps(step_ids.at(step_id), step_scores.at(step_id), - beamnode_vector_list, &sentence_vector_list); + &beamnode_vector_list, &sentence_vector_list); } // append last beam_node to result for (size_t src_idx = 0; src_idx < src_num; ++src_idx) { diff --git a/paddle/fluid/operators/beam_search_decode_op_test.cc b/paddle/fluid/operators/beam_search_decode_op_test.cc index c3faf46e09bb40d01049fd9cfd79836c1d2bd5bb..36f9594969c416c694928811012baf94332bbd91 100644 --- a/paddle/fluid/operators/beam_search_decode_op_test.cc +++ b/paddle/fluid/operators/beam_search_decode_op_test.cc @@ -125,7 +125,7 @@ TEST(BeamSearchDecodeOp, PackTwoStepsFistStep) { BeamSearchDecoder helper; beamnode_vector_list = helper.PackTwoSteps( - ids[0], scores[0], beamnode_vector_list, &sentence_vector_list); + ids[0], scores[0], &beamnode_vector_list, &sentence_vector_list); ASSERT_EQ(beamnode_vector_list.size(), 2UL); ASSERT_EQ(beamnode_vector_list[0].size(), 2UL); ASSERT_EQ(beamnode_vector_list[1].size(), 4UL); @@ -167,7 +167,7 @@ TEST(BeamSearchDecodeOp, PackTwoSteps) { BeamSearchDecoder helper1; beamnode_vector_list = helper1.PackTwoSteps( - ids[0], scores[0], beamnode_vector_list, &sentence_vector_list); + ids[0], scores[0], &beamnode_vector_list, &sentence_vector_list); ASSERT_EQ(sentence_vector_list[0].size(), 1UL); ASSERT_EQ(sentence_vector_list[1].size(), 0UL); diff --git a/paddle/fluid/operators/beam_search_op.cc b/paddle/fluid/operators/beam_search_op.cc index e848b1f12cb9f1ce1d37e0e0233bfc361dc35a33..fdab4e92f47c7c8f241d93268a73dcb8c2eb2dc6 100644 --- a/paddle/fluid/operators/beam_search_op.cc +++ b/paddle/fluid/operators/beam_search_op.cc @@ -14,7 +14,10 @@ limitations under the License. 
*/ #include "paddle/fluid/operators/beam_search_op.h" +#include #include +#include +#include #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/op_registry.h" diff --git a/paddle/fluid/operators/beam_search_op.h b/paddle/fluid/operators/beam_search_op.h index b333ef4e6c73be15dfea2cadb153d2484b3daaf7..0a481a85ce6fbb582b8c0e12710455aaaac72aa1 100644 --- a/paddle/fluid/operators/beam_search_op.h +++ b/paddle/fluid/operators/beam_search_op.h @@ -18,6 +18,8 @@ limitations under the License. */ #include "gtest/gtest.h" #endif +#include +#include #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/operator.h" diff --git a/paddle/fluid/operators/chunk_eval_op.cc b/paddle/fluid/operators/chunk_eval_op.cc index 77d3cffe7c19affe66223363eba26e2d77cdcd43..95440ff89e883e754795c67cd58a08f1131df368 100644 --- a/paddle/fluid/operators/chunk_eval_op.cc +++ b/paddle/fluid/operators/chunk_eval_op.cc @@ -13,6 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/chunk_eval_op.h" +#include +#include namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/chunk_eval_op.h b/paddle/fluid/operators/chunk_eval_op.h index 9e97f7c7762ed6bded94be35ae8a094466e0aec0..8631415062db839476e2536a9836e4b9f069a3e2 100644 --- a/paddle/fluid/operators/chunk_eval_op.h +++ b/paddle/fluid/operators/chunk_eval_op.h @@ -14,6 +14,9 @@ limitations under the License. */ #pragma once #include +#include +#include + #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" @@ -36,11 +39,11 @@ class ChunkEvalKernel : public framework::OpKernel { }; void GetSegments(const int64_t* label, int length, - std::vector& segments, int num_chunk_types, + std::vector* segments, int num_chunk_types, int num_tag_types, int other_chunk_type, int tag_begin, int tag_inside, int tag_end, int tag_single) const { - segments.clear(); - segments.reserve(length); + segments->clear(); + segments->reserve(length); int chunk_start = 0; bool in_chunk = false; int tag = -1; @@ -58,7 +61,7 @@ class ChunkEvalKernel : public framework::OpKernel { i - 1, // end prev_type, }; - segments.push_back(segment); + segments->push_back(segment); in_chunk = false; } if (ChunkBegin(prev_tag, prev_type, tag, type, other_chunk_type, @@ -73,7 +76,7 @@ class ChunkEvalKernel : public framework::OpKernel { length - 1, // end type, }; - segments.push_back(segment); + segments->push_back(segment); } } @@ -177,8 +180,8 @@ class ChunkEvalKernel : public framework::OpKernel { for (int i = 0; i < num_sequences; ++i) { int seq_length = lod[0][i + 1] - lod[0][i]; EvalOneSeq(inference_data + lod[0][i], label_data + lod[0][i], seq_length, - output_segments, label_segments, *num_infer_chunks_data, - *num_label_chunks_data, *num_correct_chunks_data, + &output_segments, &label_segments, num_infer_chunks_data, + num_label_chunks_data, num_correct_chunks_data, num_chunk_types, num_tag_types, other_chunk_type, tag_begin, tag_inside, tag_end, tag_single, excluded_chunk_types); } @@ -197,10 +200,10 @@ class ChunkEvalKernel : public framework::OpKernel { } void EvalOneSeq(const int64_t* output, const int64_t* label, int length, - std::vector& output_segments, - std::vector& label_segments, - int64_t& num_output_segments, int64_t& num_label_segments, - int64_t& num_correct, int num_chunk_types, int num_tag_types, + std::vector* output_segments, + std::vector* label_segments, + int64_t* num_output_segments, 
int64_t* num_label_segments, + int64_t* num_correct, int num_chunk_types, int num_tag_types, int other_chunk_type, int tag_begin, int tag_inside, int tag_end, int tag_single, const std::set& excluded_chunk_types) const { @@ -209,25 +212,29 @@ class ChunkEvalKernel : public framework::OpKernel { GetSegments(label, length, label_segments, num_chunk_types, num_tag_types, other_chunk_type, tag_begin, tag_inside, tag_end, tag_single); size_t i = 0, j = 0; - while (i < output_segments.size() && j < label_segments.size()) { - if (output_segments[i] == label_segments[j] && - excluded_chunk_types.count(output_segments[i].type) != 1) { - ++num_correct; + while (i < output_segments->size() && j < label_segments->size()) { + if (output_segments->at(i) == label_segments->at(j) && + excluded_chunk_types.count(output_segments->at(i).type) != 1) { + ++(*num_correct); } - if (output_segments[i].end < label_segments[j].end) { + if (output_segments->at(i).end < label_segments->at(j).end) { ++i; - } else if (output_segments[i].end > label_segments[j].end) { + } else if (output_segments->at(i).end > label_segments->at(j).end) { ++j; } else { ++i; ++j; } } - for (auto& segment : label_segments) { - if (excluded_chunk_types.count(segment.type) != 1) ++num_label_segments; + for (auto& segment : (*label_segments)) { + if (excluded_chunk_types.count(segment.type) != 1) { + ++(*num_label_segments); + } } - for (auto& segment : output_segments) { - if (excluded_chunk_types.count(segment.type) != 1) ++num_output_segments; + for (auto& segment : (*output_segments)) { + if (excluded_chunk_types.count(segment.type) != 1) { + ++(*num_output_segments); + } } } }; diff --git a/paddle/fluid/operators/conv_mkldnn_op.cc b/paddle/fluid/operators/conv_mkldnn_op.cc index 0a8a5d4c71c4510f04eea2f7ef12f836d1fd9c9b..63d371310d2a26a1460e527fc51923dfd6e0b8bc 100644 --- a/paddle/fluid/operators/conv_mkldnn_op.cc +++ b/paddle/fluid/operators/conv_mkldnn_op.cc @@ -73,9 +73,11 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel { dst_tz, mkldnn::memory::data_type::f32, mkldnn::memory::format::nchw); auto src_memory = - mkldnn::memory({src_md, mkldnn_engine}, (void*)input_data); + mkldnn::memory({src_md, mkldnn_engine}, + reinterpret_cast(const_cast(input_data))); auto weights_memory = - mkldnn::memory({weights_md, mkldnn_engine}, (void*)filter_data); + mkldnn::memory({weights_md, mkldnn_engine}, + reinterpret_cast(const_cast(filter_data))); auto dst_memory = mkldnn::memory({dst_md, mkldnn_engine}, output_data); std::shared_ptr conv_pd = @@ -180,8 +182,9 @@ class ConvMKLDNNGradOpKernel : public paddle::framework::OpKernel { dst_tz, mkldnn::memory::data_type::f32, mkldnn::memory::format::nchw); // create memory - auto diff_dst_memory = mkldnn::memory({diff_weights_md, mkldnn_engine}, - (void*)output_grad_data); + auto diff_dst_memory = mkldnn::memory( + {diff_weights_md, mkldnn_engine}, + reinterpret_cast(const_cast(output_grad_data))); // Retrieve conv_pd from device context auto conv_pd = std::static_pointer_cast( @@ -198,10 +201,12 @@ class ConvMKLDNNGradOpKernel : public paddle::framework::OpKernel { mkldnn_engine); // create memory - auto diff_weights_memory = mkldnn::memory( - {diff_weights_md, mkldnn_engine}, (void*)filter_grad_data); + auto diff_weights_memory = + mkldnn::memory({diff_weights_md, mkldnn_engine}, + reinterpret_cast(filter_grad_data)); auto src_memory = - mkldnn::memory({src_md, mkldnn_engine}, (void*)input_data); + mkldnn::memory({src_md, mkldnn_engine}, + reinterpret_cast(const_cast(input_data))); // 
create backward conv primitive for weights auto conv_bwd_weights_prim = mkldnn::convolution_backward_weights( @@ -220,10 +225,12 @@ class ConvMKLDNNGradOpKernel : public paddle::framework::OpKernel { strides, paddings, *conv_pd, mkldnn_engine); // create memory - auto diff_src_memory = - mkldnn::memory({diff_src_md, mkldnn_engine}, (void*)input_grad_data); + auto diff_src_memory = mkldnn::memory( + {diff_src_md, mkldnn_engine}, + reinterpret_cast(const_cast(input_grad_data))); auto weights_memory = - mkldnn::memory({weights_md, mkldnn_engine}, (void*)filter_data); + mkldnn::memory({weights_md, mkldnn_engine}, + reinterpret_cast(const_cast(filter_data))); // create backward conv primitive for data auto conv_bwd_data_prim = mkldnn::convolution_backward_data( diff --git a/paddle/fluid/operators/conv_op.h b/paddle/fluid/operators/conv_op.h index 12b45f1d65019f623268cb9da9004bac5e1f72a3..d6f86a5c88e37970379da0afe2a1d46e18b653f4 100644 --- a/paddle/fluid/operators/conv_op.h +++ b/paddle/fluid/operators/conv_op.h @@ -14,6 +14,7 @@ limitations under the License. */ #pragma once +#include #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/math/depthwise_conv.h" @@ -41,9 +42,10 @@ inline int ConvOutputSize(int input_size, int filter_size, int dilation, return output_size; } -inline bool IsExpand(std::vector& filter_dim, - std::vector& strides, std::vector& paddings, - std::vector& dilations) { +inline bool IsExpand(const std::vector& filter_dim, + const std::vector& strides, + const std::vector& paddings, + const std::vector& dilations) { bool filter_1 = true, strides_1 = true, padding_0 = true, dilation_1 = true; for (size_t j = 0; j < strides.size(); ++j) { filter_1 = filter_1 && (static_cast(filter_dim[j + 2]) == 1); diff --git a/paddle/fluid/operators/detection_map_op.cc b/paddle/fluid/operators/detection_map_op.cc index 93ef15b9332168a9c62abfd4d0827207173ece45..38f43b6d031372948bd82c686a2d9ce5f8ecd07c 100644 --- a/paddle/fluid/operators/detection_map_op.cc +++ b/paddle/fluid/operators/detection_map_op.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/detection_map_op.h" +#include namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/detection_map_op.h b/paddle/fluid/operators/detection_map_op.h index 8c15bfa36bfe72586cfcbdbd8efc4542253adaca..431812e2bfcf926cadf8d7be6a7d1a79e78c7762 100644 --- a/paddle/fluid/operators/detection_map_op.h +++ b/paddle/fluid/operators/detection_map_op.h @@ -13,6 +13,11 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #pragma once +#include +#include +#include +#include +#include #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" @@ -82,7 +87,7 @@ class DetectionMAPOpKernel : public framework::OpKernel { std::vector>> gt_boxes; std::vector>>> detect_boxes; - GetBoxes(*in_label, *in_detect, gt_boxes, detect_boxes); + GetBoxes(*in_label, *in_detect, >_boxes, detect_boxes); std::map label_pos_count; std::map>> true_pos; @@ -95,20 +100,20 @@ class DetectionMAPOpKernel : public framework::OpKernel { } if (in_pos_count != nullptr && state) { - GetInputPos(*in_pos_count, *in_true_pos, *in_false_pos, label_pos_count, - true_pos, false_pos, class_num); + GetInputPos(*in_pos_count, *in_true_pos, *in_false_pos, &label_pos_count, + &true_pos, &false_pos, class_num); } CalcTrueAndFalsePositive(gt_boxes, detect_boxes, evaluate_difficult, - overlap_threshold, label_pos_count, true_pos, - false_pos); + overlap_threshold, &label_pos_count, &true_pos, + &false_pos); int background_label = ctx.Attr("background_label"); T map = CalcMAP(ap_type, label_pos_count, true_pos, false_pos, background_label); - GetOutputPos(ctx, label_pos_count, true_pos, false_pos, *out_pos_count, - *out_true_pos, *out_false_pos, class_num); + GetOutputPos(ctx, label_pos_count, true_pos, false_pos, out_pos_count, + out_true_pos, out_false_pos, class_num); T* map_data = out_map->mutable_data(ctx.GetPlace()); map_data[0] = map; @@ -155,7 +160,7 @@ class DetectionMAPOpKernel : public framework::OpKernel { void GetBoxes(const framework::LoDTensor& input_label, const framework::LoDTensor& input_detect, - std::vector>>& gt_boxes, + std::vector>>* gt_boxes, std::vector>>>& detect_boxes) const { auto labels = framework::EigenTensor::From(input_label); @@ -179,7 +184,7 @@ class DetectionMAPOpKernel : public framework::OpKernel { box.is_difficult = true; boxes[label].push_back(box); } - gt_boxes.push_back(boxes); + gt_boxes->push_back(boxes); } auto detect_index = detect_lod[0]; @@ -200,9 +205,9 @@ class DetectionMAPOpKernel : public framework::OpKernel { const std::map& label_pos_count, const std::map>>& true_pos, const std::map>>& false_pos, - framework::Tensor& output_pos_count, - framework::LoDTensor& output_true_pos, - framework::LoDTensor& output_false_pos, const int class_num) const { + framework::Tensor* output_pos_count, + framework::LoDTensor* output_true_pos, + framework::LoDTensor* output_false_pos, const int class_num) const { int true_pos_count = 0; int false_pos_count = 0; for (auto it = true_pos.begin(); it != true_pos.end(); ++it) { @@ -214,12 +219,12 @@ class DetectionMAPOpKernel : public framework::OpKernel { false_pos_count += fp.size(); } - int* pos_count_data = output_pos_count.mutable_data( + int* pos_count_data = output_pos_count->mutable_data( framework::make_ddim({class_num, 1}), ctx.GetPlace()); - T* true_pos_data = output_true_pos.mutable_data( + T* true_pos_data = output_true_pos->mutable_data( framework::make_ddim({true_pos_count, 2}), ctx.GetPlace()); - T* false_pos_data = output_false_pos.mutable_data( + T* false_pos_data = output_false_pos->mutable_data( framework::make_ddim({false_pos_count, 2}), ctx.GetPlace()); true_pos_count = 0; false_pos_count = 0; @@ -261,21 +266,21 @@ class DetectionMAPOpKernel : public framework::OpKernel { framework::LoD false_pos_lod; false_pos_lod.emplace_back(false_pos_starts); - output_true_pos.set_lod(true_pos_lod); - output_false_pos.set_lod(false_pos_lod); + output_true_pos->set_lod(true_pos_lod); + output_false_pos->set_lod(false_pos_lod); return; } 
void GetInputPos(const framework::Tensor& input_pos_count, const framework::LoDTensor& input_true_pos, const framework::LoDTensor& input_false_pos, - std::map& label_pos_count, - std::map>>& true_pos, - std::map>>& false_pos, + std::map* label_pos_count, + std::map>>* true_pos, + std::map>>* false_pos, const int class_num) const { const int* pos_count_data = input_pos_count.data(); for (int i = 0; i < class_num; ++i) { - label_pos_count[i] = pos_count_data[i]; + (*label_pos_count)[i] = pos_count_data[i]; } auto SetData = [](const framework::LoDTensor& pos_tensor, @@ -291,8 +296,8 @@ class DetectionMAPOpKernel : public framework::OpKernel { } }; - SetData(input_true_pos, true_pos); - SetData(input_false_pos, false_pos); + SetData(input_true_pos, *true_pos); + SetData(input_false_pos, *false_pos); return; } @@ -301,9 +306,9 @@ class DetectionMAPOpKernel : public framework::OpKernel { const std::vector>>>& detect_boxes, bool evaluate_difficult, float overlap_threshold, - std::map& label_pos_count, - std::map>>& true_pos, - std::map>>& false_pos) const { + std::map* label_pos_count, + std::map>>* true_pos, + std::map>>* false_pos) const { int batch_size = gt_boxes.size(); for (int n = 0; n < batch_size; ++n) { auto image_gt_boxes = gt_boxes[n]; @@ -320,10 +325,10 @@ class DetectionMAPOpKernel : public framework::OpKernel { continue; } int label = it->first; - if (label_pos_count.find(label) == label_pos_count.end()) { - label_pos_count[label] = count; + if (label_pos_count->find(label) == label_pos_count->end()) { + (*label_pos_count)[label] = count; } else { - label_pos_count[label] += count; + (*label_pos_count)[label] += count; } } } @@ -338,8 +343,8 @@ class DetectionMAPOpKernel : public framework::OpKernel { int label = it->first; for (size_t i = 0; i < pred_boxes.size(); ++i) { auto score = pred_boxes[i].first; - true_pos[label].push_back(std::make_pair(score, 0)); - false_pos[label].push_back(std::make_pair(score, 1)); + (*true_pos)[label].push_back(std::make_pair(score, 0)); + (*false_pos)[label].push_back(std::make_pair(score, 1)); } } continue; @@ -351,8 +356,8 @@ class DetectionMAPOpKernel : public framework::OpKernel { if (image_gt_boxes.find(label) == image_gt_boxes.end()) { for (size_t i = 0; i < pred_boxes.size(); ++i) { auto score = pred_boxes[i].first; - true_pos[label].push_back(std::make_pair(score, 0)); - false_pos[label].push_back(std::make_pair(score, 1)); + (*true_pos)[label].push_back(std::make_pair(score, 0)); + (*false_pos)[label].push_back(std::make_pair(score, 1)); } continue; } @@ -381,17 +386,17 @@ class DetectionMAPOpKernel : public framework::OpKernel { (!evaluate_difficult && !matched_bboxes[max_idx].is_difficult); if (match_evaluate_difficult) { if (!visited[max_idx]) { - true_pos[label].push_back(std::make_pair(score, 1)); - false_pos[label].push_back(std::make_pair(score, 0)); + (*true_pos)[label].push_back(std::make_pair(score, 1)); + (*false_pos)[label].push_back(std::make_pair(score, 0)); visited[max_idx] = true; } else { - true_pos[label].push_back(std::make_pair(score, 0)); - false_pos[label].push_back(std::make_pair(score, 1)); + (*true_pos)[label].push_back(std::make_pair(score, 0)); + (*false_pos)[label].push_back(std::make_pair(score, 1)); } } } else { - true_pos[label].push_back(std::make_pair(score, 0)); - false_pos[label].push_back(std::make_pair(score, 1)); + (*true_pos)[label].push_back(std::make_pair(score, 0)); + (*false_pos)[label].push_back(std::make_pair(score, 1)); } } } diff --git a/paddle/fluid/operators/edit_distance_op.cu 
b/paddle/fluid/operators/edit_distance_op.cu index 3b89ad5d49c339cf05abc0f8577e895f30dddfd4..913a9145420dae7c4f6a4df10c0330636b5796b0 100644 --- a/paddle/fluid/operators/edit_distance_op.cu +++ b/paddle/fluid/operators/edit_distance_op.cu @@ -14,6 +14,7 @@ limitations under the License. */ #include #include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/edit_distance_op.h" #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/platform/cuda_helper.h" #include "paddle/fluid/platform/gpu_info.h" diff --git a/paddle/fluid/operators/listen_and_serv_op.cc b/paddle/fluid/operators/listen_and_serv_op.cc index 5d293665f0bcc098126ad3ec6c9bf34ff54c3b6f..a4c925b538ef916e88ec06cea6de57f31eaf069b 100644 --- a/paddle/fluid/operators/listen_and_serv_op.cc +++ b/paddle/fluid/operators/listen_and_serv_op.cc @@ -12,6 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#include #include #include // NOLINT #include @@ -67,7 +68,7 @@ ListenAndServOp::ListenAndServOp(const std::string &type, const framework::AttributeMap &attrs) : OperatorBase(type, inputs, outputs, attrs) {} -int ListenAndServOp::GetSelectedPort() { +int ListenAndServOp::GetSelectedPort() const { return rpc_service_->GetSelectedPort(); } @@ -99,7 +100,7 @@ void ListenAndServOp::RunImpl(const framework::Scope &scope, framework::Executor executor(dev_place); std::vector block_list; for (size_t blkid = 1; blkid < num_blocks; ++blkid) { - if (blkid != prefetch_block->ID()) { + if (blkid != static_cast(prefetch_block->ID())) { block_list.push_back(blkid); } } @@ -121,10 +122,14 @@ void ListenAndServOp::RunImpl(const framework::Scope &scope, rpc_service_->SetProgram(program); // start the server listening after all member initialized. server_thread_.reset(new std::thread(RunServer, rpc_service_)); - // FIXME(typhoonzero): do we need to wait until the server port is ready? + VLOG(3) << "wait server thread to become ready..."; sleep(5); + // Write to a file of server selected port for python use. + std::ofstream port_file; + port_file.open("/tmp/paddle.selected_port"); + port_file << rpc_service_->GetSelectedPort(); + port_file.close(); - // TODO(typhoonzero): change this to a while_op for every cluster-batch. 
bool exit_flag = false; // Record received sparse variables, so that // we could reset those after execute optimize program @@ -175,7 +180,7 @@ void ListenAndServOp::RunImpl(const framework::Scope &scope, parallel_blkids.push_back(1); double ts = detail::GetTimestamp(); for (size_t blkid = 2; blkid < num_blocks; ++blkid) { - if (blkid != prefetch_block->ID()) { + if (blkid != static_cast(prefetch_block->ID())) { if (program->Block(blkid).Parent() != last_parent_blkid) { ParallelExecuteBlocks(parallel_blkids, &executor, optimize_prepared, program, &recv_scope); diff --git a/paddle/fluid/operators/listen_and_serv_op.h b/paddle/fluid/operators/listen_and_serv_op.h index 759b2a462ba5b938991aa86be9b9dc3e59fe3f7e..9744921cef7c0f13c94b7fe729561de8e181650c 100644 --- a/paddle/fluid/operators/listen_and_serv_op.h +++ b/paddle/fluid/operators/listen_and_serv_op.h @@ -39,7 +39,7 @@ class ListenAndServOp : public framework::OperatorBase { const framework::VariableNameMap &outputs, const framework::AttributeMap &attrs); - int GetSelectedPort(); + int GetSelectedPort() const; void Stop() override; diff --git a/paddle/fluid/operators/send_recv_op_test.cc b/paddle/fluid/operators/send_recv_op_test.cc index 3bf5d57809019d3ae469471c2ee2e7aac70b9faf..a342874f97460cf624ff0047915d33ba4161f19b 100644 --- a/paddle/fluid/operators/send_recv_op_test.cc +++ b/paddle/fluid/operators/send_recv_op_test.cc @@ -139,7 +139,6 @@ void StartServerNet(bool is_sparse) { attrs.insert({"PrefetchBlock", prefetch_block}); listen_and_serv_op = f::OpRegistry::CreateOp("listen_and_serv", {{"X", {"x1"}}}, {}, attrs); - LOG(INFO) << "selected port before run " << selected_port; listen_and_serv_op->Run(scope, place); LOG(INFO) << "server exit"; } @@ -158,16 +157,13 @@ TEST(SendRecvOp, CPUDense) { selected_port = static_cast( listen_and_serv_op.get()) ->GetSelectedPort(); - LOG(INFO) << "selected port " << selected_port; std::string endpoint = paddle::string::Sprintf("127.0.0.1:%d", selected_port); attrs.insert({"endpoints", std::vector({endpoint})}); attrs.insert({"epmap", std::vector({endpoint})}); auto send_op = f::OpRegistry::CreateOp( "send", {{"X", {"x1"}}}, {{"Out", {"Out"}}, {"RPCClient", {"RPC_CLIENT_VAR"}}}, attrs); - LOG(INFO) << "before run " << endpoint; send_op->Run(scope, place); - LOG(INFO) << "end run"; auto in_var = scope.Var("x1"); auto tensor = in_var->GetMutable(); @@ -180,7 +176,6 @@ TEST(SendRecvOp, CPUDense) { for (int64_t i = 0; i < target->numel(); ++i) { EXPECT_EQ(expected[i] * 2, actual[i]); } - LOG(INFO) << "before stop"; listen_and_serv_op->Stop(); server_thread.join(); listen_and_serv_op.reset(nullptr); @@ -199,7 +194,6 @@ TEST(SendRecvOp, CPUSparse) { selected_port = static_cast( listen_and_serv_op.get()) ->GetSelectedPort(); - LOG(INFO) << "selected port " << selected_port; std::string endpoint = paddle::string::Sprintf("127.0.0.1:%d", selected_port); attrs.insert({"endpoints", std::vector({endpoint})}); attrs.insert({"epmap", std::vector({endpoint})}); diff --git a/paddle/fluid/operators/softmax_mkldnn_op.cc b/paddle/fluid/operators/softmax_mkldnn_op.cc index dc2f1763446b2aaf72b20c72e8e37ec920abd120..d00bd1447e6114b6000b65799abb566a2a510127 100644 --- a/paddle/fluid/operators/softmax_mkldnn_op.cc +++ b/paddle/fluid/operators/softmax_mkldnn_op.cc @@ -73,6 +73,15 @@ class SoftmaxMKLDNNKernel : public paddle::framework::OpKernel { softmax_dst_memory); std::vector pipeline{softmax}; stream(stream::kind::eager).submit(pipeline).wait(); + + const bool is_test = ctx.Attr("is_test"); + if (!is_test) { + T 
threshold = exp(-64); + for (size_t i = 0; i < dst_tz[0] * dst_tz[1]; ++i) { + output_data[i] = + output_data[i] < threshold ? threshold : output_data[i]; + } + } } }; diff --git a/paddle/fluid/operators/softmax_op.cc b/paddle/fluid/operators/softmax_op.cc index 6bdefc0f23910c90f3878d8f2634ca6e03c6f736..e1f286f9ba42ff22fffbfc012832dd751a37c1d0 100644 --- a/paddle/fluid/operators/softmax_op.cc +++ b/paddle/fluid/operators/softmax_op.cc @@ -97,6 +97,9 @@ class SoftmaxOpMaker : public framework::OpProtoAndCheckerMaker { AddAttr("use_mkldnn", "(bool, default false) Only used in mkldnn kernel") .SetDefault(false); + AddAttr("is_test", + "Disable epsilon adding to softmax results. Used by MKLDNN.") + .SetDefault(false); AddComment(R"DOC( Softmax Operator. diff --git a/paddle/fluid/operators/top_k_op.h b/paddle/fluid/operators/top_k_op.h index 9f8482adedb4c29e32d4109941a2752d942ae49f..d44eeae8e6ff9ac87ab093d04e3f5427743f0c08 100644 --- a/paddle/fluid/operators/top_k_op.h +++ b/paddle/fluid/operators/top_k_op.h @@ -24,7 +24,6 @@ namespace paddle { namespace operators { using Tensor = framework::Tensor; -using LoDTensor = framework::LoDTensor; template @@ -36,9 +35,9 @@ class TopkKernel : public framework::OpKernel { void Compute(const framework::ExecutionContext& ctx) const override { // Get the top k elements of each row of input tensor // FIXME: only deal with matrix(2d tensor). - auto* input = ctx.Input("X"); - auto* output = ctx.Output("Out"); - auto* indices = ctx.Output("Indices"); + auto* input = ctx.Input("X"); + auto* output = ctx.Output("Out"); + auto* indices = ctx.Output("Indices"); // k is determined by Attr const size_t k = static_cast(ctx.Attr("k")); diff --git a/paddle/fluid/platform/nccl_helper.h b/paddle/fluid/platform/nccl_helper.h index ca9ab2c7aecff47924f0198802d710b7661f5576..0013597fd516d15c7d502370eec77e1a6a5dca88 100644 --- a/paddle/fluid/platform/nccl_helper.h +++ b/paddle/fluid/platform/nccl_helper.h @@ -39,20 +39,19 @@ inline ncclDataType_t ToNCCLDataType(std::type_index type) { class NCCLGroupGuard { public: + static std::mutex &NCCLMutex() { + static std::mutex mtx; + return mtx; + } + inline NCCLGroupGuard() { - mutex().lock(); + NCCLMutex().lock(); PADDLE_ENFORCE(dynload::ncclGroupStart()); } inline ~NCCLGroupGuard() { PADDLE_ENFORCE(dynload::ncclGroupEnd()); - mutex().unlock(); - } - - private: - static std::mutex &mutex() { - static std::mutex mtx; - return mtx; + NCCLMutex().unlock(); } }; @@ -68,26 +67,6 @@ struct NCCLContext { int device_id() const { return boost::get(ctx_->GetPlace()).device; } - - static void InitNCCLContext(std::unordered_map *contexts, - const std::vector &places) { - std::vector comms; - std::vector devs; - comms.resize(contexts->size()); - devs.reserve(contexts->size()); - - for (auto &p : places) { - devs.push_back(boost::get(p).device); - } - - PADDLE_ENFORCE(platform::dynload::ncclCommInitAll( - &comms[0], static_cast(contexts->size()), &devs[0])); - - int i = 0; - for (auto &dev_id : devs) { - contexts->at(dev_id).comm_ = comms[i++]; - } - } }; struct NCCLContextMap { @@ -107,12 +86,12 @@ struct NCCLContextMap { "NCCL Context Map does not support contain two or more same device"); if (places.size() > 1) { - std::vector comms; - comms.resize(order_.size()); - - PADDLE_ENFORCE(platform::dynload::ncclCommInitAll( - &comms[0], static_cast(order_.size()), &order_[0])); - + std::unique_ptr comms(new ncclComm_t[order_.size()]); + { + std::lock_guard guard(NCCLGroupGuard::NCCLMutex()); + PADDLE_ENFORCE(platform::dynload::ncclCommInitAll( + 
comms.get(), static_cast<int>(order_.size()), order_.data()));
+      }
       int i = 0;
       for (auto &dev_id : order_) {
         contexts_.at(dev_id).comm_ = comms[i++];
@@ -120,6 +99,9 @@
     }
   }
 
+  NCCLContextMap(const NCCLContextMap &other) = delete;
+  NCCLContextMap &operator=(const NCCLContextMap &other) = delete;
+
   CUDADeviceContext *DevCtx(int dev_id) const { return at(dev_id).ctx_.get(); }
 
   CUDADeviceContext *DevCtx(platform::Place p) const {
diff --git a/python/paddle/fluid/__init__.py b/python/paddle/fluid/__init__.py
index f757411b853bacb9e03fc42fa2ef6593c3cde00f..e9ca0d45f98bd27692a15060310d4e8cd1e8b181 100644
--- a/python/paddle/fluid/__init__.py
+++ b/python/paddle/fluid/__init__.py
@@ -37,6 +37,7 @@ from distribute_transpiler import DistributeTranspiler
 from distribute_transpiler_simple import SimpleDistributeTranspiler
 from concurrency import (Go, make_channel, channel_send, channel_recv,
                          channel_close, Select)
+from inference_transpiler import InferenceTranspiler
 import clip
 from memory_optimization_transpiler import memory_optimize, release_memory
 import profiler
@@ -66,6 +67,7 @@ __all__ = framework.__all__ + executor.__all__ + concurrency.__all__ + [
     'clip',
     'SimpleDistributeTranspiler',
     'DistributeTranspiler',
+    'InferenceTranspiler',
     'memory_optimize',
     'release_memory',
     'profiler',
diff --git a/python/paddle/fluid/inference_transpiler.py b/python/paddle/fluid/inference_transpiler.py
new file mode 100644
index 0000000000000000000000000000000000000000..39b01610f96018e1775405a30147e77006cecc16
--- /dev/null
+++ b/python/paddle/fluid/inference_transpiler.py
@@ -0,0 +1,240 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import numpy as np
+from framework import Program
+from executor import global_scope
+from . import core
+
+
+class InferenceTranspiler:
+    def transpile(self, program, place, scope=None):
+        '''
+        Transpile the program. Currently, only the fusing of batch
+        normalization is supported.
+
+        :param program: program to transpile
+        :type program: Program
+        :param place: inference place
+        :type place: Place
+        :param scope: inference scope
+        :type scope: Scope or None
+        '''
+        if not isinstance(program, Program):
+            raise TypeError("program should be a Program")
+        if not isinstance(place, core.CPUPlace) and not isinstance(
+                place, core.CUDAPlace):
+            raise TypeError("place should be a CPUPlace or a CUDAPlace")
+        if scope is None:
+            scope = global_scope()
+        if not isinstance(scope, core.Scope):
+            raise TypeError("scope should be a Scope or None")
+        self.fuse_batch_norm(program, place, scope)
+
+    def fuse_batch_norm(self, program, place, scope):
+        '''
+        Transpile the program by fusing batch normalization.
+
+        A batch normalization that follows a convolution or fully connected
+        layer can be integrated into that layer. Doing so gives a forward
+        acceleration, especially in environments like mobile or embedded.
+
+        For input X:
+        - Conv process:        X = input * W + bias
+        - Batch norm process:  X' = (X - mean) / std
+        - Scale process:       Y = a * X' + b
+
+        After fusing into one operation:
+
+        Y = (input * W + bias - mean) / std * a + b
+          = input * a * W / std + ((bias - mean) / std * a + b)
+
+        The operator transformation is:
+        - before:
+          - conv->batch_norm->any_other_op (bias == 0)
+          - conv->elementwise_add->batch_norm->any_other_op (bias != 0)
+        - after:
+          - conv->elementwise_add->any_other_op
+
+        The transpile stages are:
+        1. insert an elementwise_add op when bias == 0.
+        2. fuse the batch_norm's parameters into the conv and elementwise_add operators.
+        3. remove the batch_norm ops which are not used by any other op.
+        4. adjust the input of any_other_op to be the output of the elementwise_add operator.
+        5. remove unused variables.
+
+        :param program: program to transpile
+        :type program: Program
+        :param place: inference place
+        :type place: Place
+        :param scope: inference scope
+        :type scope: Scope
+        '''
+        self.scope = scope
+        self.place = place
+        self.block = program.block(0)
+        self.input_map = {}  # store the input names that should be adjusted
+
+        i = 0
+        while i < len(self.block.ops):
+            current_op = self.block.ops[i]
+            # TODO(luotao1): consider only conv2d now. fc would be dealt with later.
+            if current_op.type in ['conv2d']:
+                # TODO(luotao1): consider the single-chain network now.
+                # For a branch network, we couldn't use block.ops[i + 1] as
+                # the judgment condition.
+                next_op = self.block.ops[i + 1]
+                # conv2d without bias
+                if (next_op.type == 'batch_norm'):
+                    # insert bias op
+                    bias_op = self._insert_bias_op(i + 1, current_op, next_op)
+                    # fuse batch_norm
+                    self._fuse_param(current_op, next_op, bias_op, 0)
+                    # remove batch_norm_op
+                    self.block.remove_op(i + 2)
+                    i = i + 1
+                # conv2d with bias, the next_op.type is elementwise_add
+                elif (next_op.type == 'elementwise_add'):
+                    next_next_op = self.block.ops[i + 2]
+                    if (next_next_op.type == 'batch_norm'):
+                        # fuse batch_norm
+                        self._fuse_param(current_op, next_next_op, next_op, 1)
+                        # remove batch_norm_op
+                        self.block.remove_op(i + 2)
+                        i = i + 1
+            i = i + 1
+
+        self._adjust_input()
+        self._remove_unused_var()
+        # TODO(luotao): use the clone() method to flush the program.desc by force,
+        # since some large program.desc will not be flushed immediately.
+        # A better solution will be considered later.
+        program = program.clone()
+
+    # ====================== private transpiler functions =====================
+    def _insert_bias_op(self, index, current_op, bn_op):
+        '''
+        Construct an elementwise_add operator for adding the bias
+        and insert it into the program.
+
+        :param index: insert location of bias_op
+        :type index: Int
+        :param current_op: current operator (conv or fc)
+        :type current_op: Operator
+        :param bn_op: batch norm operator
+        :type bn_op: Operator
+        :return: bias_op
+        :rtype: Operator
+        '''
+        # The inputs of bias_op are current_op's output and the Bias of bn_op.
+        # The output of bias_op is bn_op's output.
+        x_var = self.block.var(current_op.output("Output")[0])
+        y_var = self.block.var(bn_op.input("Bias")[0])
+        out_var = self.block.var(bn_op.output("Y")[0])
+
+        bias_op = self.block.insert_op(
+            index,
+            type="elementwise_add",
+            inputs={"X": x_var,
+                    "Y": y_var},
+            outputs={"Out": out_var},
+            attrs={"axis": 1})  # dim_start=1
+        return bias_op
+
+    def _fuse_param(self, current_op, bn_op, bias_op, with_bias):
+        '''
+        fuse the batch_norm op's parameters into current_op (conv or fc)
+
+        :param current_op: current operator (conv or fc)
+        :type current_op: Operator
+        :param bn_op: batch norm operator
+        :type bn_op: Operator
+        :param bias_op: elementwise_add operator for adding bias
+        :type bias_op: Operator
+        :param with_bias: If the current operator has bias, with_bias = 1; otherwise 0.
+        :type with_bias: Int
+        '''
+
+        def _update_param(op, old_param_name, new_param):
+            # To keep the original variables the same as before, create new
+            # variables in the scope to store the new parameters.
+            old_param_name = old_param_name[0]
+            old_var = self.block.vars[old_param_name]
+            new_param_name = old_param_name + '_fuse_bn'
+            new_var = self.block.create_parameter(
+                name=new_param_name.encode('ascii'),
+                type=old_var.type,
+                dtype=old_var.dtype,
+                shape=old_var.shape)
+            op.rename_input(old_param_name, new_param_name)
+            self.scope.var(new_param_name)
+
+            tensor = self.scope.find_var(new_param_name).get_tensor()
+            tensor.set(np.array(new_param), self.place)
+
+        def _load_param(param_name):
+            return np.array(self.scope.find_var(param_name[0]).get_tensor())
+
+        bias_bn = _load_param(bn_op.input("Bias"))  # Bias
+        scale_bn = _load_param(bn_op.input("Scale"))  # Scale
+        mean_bn = _load_param(bn_op.input("Mean"))  # Mean
+        var_bn = _load_param(bn_op.input("Variance"))  # Variance
+
+        # TODO(luotao1): consider only conv2d now. fc would be dealt with later.
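+        # Using the derivation in the class docstring with
+        # tmp = scale_bn / sqrt(var_bn + 1e-5): the fused conv weight becomes
+        # W' = W * tmp (per output channel), and the fused bias becomes
+        # b' = (bias - mean_bn) * tmp + bias_bn.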
+        current_param = _load_param(current_op.input("Filter"))
+        std_bn = np.float32(np.sqrt(np.add(var_bn, 1e-5)))
+        tmp = np.float32(np.divide(scale_bn, std_bn))
+
+        # add the bias of batch_norm_op to conv2d
+        if with_bias:
+            bias = _load_param(bias_op.input("Y"))
+        else:
+            bias = np.zeros(bias_bn.shape)
+        bias = np.float32(
+            np.add(np.multiply(np.subtract(bias, mean_bn), tmp), bias_bn))
+
+        # re-compute the weight of conv2d
+        tmp = tmp.reshape(tmp.shape[0], -1)
+        dst_param = current_param.reshape((tmp.shape[0], -1))
+        dst_param = np.float32(np.multiply(dst_param, tmp))
+        dst_param = dst_param.reshape(current_param.shape)
+
+        # update the parameters
+        _update_param(current_op, current_op.input("Filter"), dst_param)
+        _update_param(bias_op, bias_op.input("Y"), bias)
+
+        # collect the renamed input
+        self.input_map[bn_op.output("Y")[0]] = bias_op.output("Out")[0]
+
+    def _adjust_input(self):
+        for i in range(len(self.block.ops)):
+            current_op = self.block.ops[i]
+            for input_arg in current_op.input_arg_names:
+                if input_arg in self.input_map:
+                    current_op.rename_input(input_arg,
+                                            self.input_map[input_arg])
+
+    def _remove_unused_var(self):
+        '''
+        remove unused variables in the program
+        '''
+        args = []
+        for i in range(len(self.block.ops)):
+            current_op = self.block.ops[i]
+            args += current_op.input_arg_names
+            args += current_op.output_arg_names
+        args = list(set(args))  # unique the input and output arguments
+
+        for var in self.block.vars.keys():
+            if var not in args:
+                self.block.remove_var(var)
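For reference, a minimal end-to-end sketch of driving the new transpiler, mirroring the call pattern used in the image-classification test later in this patch; the network shapes are illustrative, and it assumes the transpiler runs on an inference program (batch_norm in test mode):

```python
import paddle.fluid as fluid

main = fluid.Program()
startup = fluid.Program()
with fluid.program_guard(main, startup):
    img = fluid.layers.data(name='img', shape=[3, 32, 32], dtype='float32')
    conv = fluid.layers.conv2d(
        input=img, num_filters=8, filter_size=3, act=None, bias_attr=False)
    out = fluid.layers.batch_norm(input=conv, is_test=True)

place = fluid.CPUPlace()
fluid.Executor(place).run(startup)  # materialize parameters in the global scope

t = fluid.InferenceTranspiler()
t.transpile(main, place)  # conv2d -> elementwise_add, batch_norm folded away
```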
diff --git a/python/paddle/fluid/layers/control_flow.py b/python/paddle/fluid/layers/control_flow.py
index b9a53eda9144e9e56cf9bc626db40cf4225bd87f..4b707973e27391a6bdcba138934f62a255e04bb2 100644
--- a/python/paddle/fluid/layers/control_flow.py
+++ b/python/paddle/fluid/layers/control_flow.py
@@ -32,7 +32,6 @@ __all__ = [
     'Switch',
     'lod_rank_table',
     'max_sequence_len',
-    'topk',
     'lod_tensor_to_array',
     'array_to_lod_tensor',
     'increment',
@@ -751,43 +750,6 @@ def max_sequence_len(rank_table):
     return res


-def topk(input, k):
-    """
-    **topk**
-
-    This function performs the operation that selects the k entries in the input
-    vector and outputs their values and indices as vectors. Thus topk_out[j] is
-    the j-th largest entry in input, and its index is topk_indices[j]
-
-    Args:
-        input (Variable|list): The input tensor that has all the data.
-        k (int): The number of top elements that the function will pick.
-
-    Returns:
-        Variable: The variable of type array that contains the k largest entries
-                  from input.
-        Variable: The variable of type array that contains the indices of k
-                  largest entries from input.
-
-    Examples:
-        .. code-block:: python
-
-            x = fluid.layers.data(name='x', shape=[10])
-            k = 5
-            array = fluid.layers.topk(x, k)
-    """
-    helper = LayerHelper('topk', **locals())
-    topk_out = helper.create_tmp_variable(dtype=input.dtype)
-    topk_indices = helper.create_tmp_variable(dtype='int64')
-    helper.append_op(
-        type='top_k',
-        inputs={'X': [input]},
-        outputs={'Out': [topk_out],
-                 'Indices': [topk_indices]},
-        attrs={'k': k})
-    return topk_out, topk_indices
-
-
 def lod_tensor_to_array(x, table):
     """
     Convert a LOD_TENSOR to an LOD_TENSOR_ARRAY.
diff --git a/python/paddle/fluid/layers/io.py b/python/paddle/fluid/layers/io.py
index e7d6c4e2521bee133c4794ed1db669b02fc2152b..ead57ac370d1bec13c1b21e83dd4be1a7331f87e 100644
--- a/python/paddle/fluid/layers/io.py
+++ b/python/paddle/fluid/layers/io.py
@@ -13,7 +13,7 @@
 # limitations under the License.

 from .. import core
-from ..framework import convert_np_dtype_to_dtype_, default_main_program, default_startup_program
+from ..framework import convert_np_dtype_to_dtype_, default_main_program, default_startup_program, Program
 from ..unique_name import generate as unique_name
 from control_flow import BlockGuard
 from ..layer_helper import LayerHelper
@@ -158,6 +158,7 @@ class ListenAndServ(object):
         main_program = self.helper.main_program
         current_block = main_program.current_block()
         parent_block = self.parent_block()
+        empty_block = Program().global_block()

         parent_block.append_op(
             type='listen_and_serv',
@@ -166,11 +167,12 @@ class ListenAndServ(object):
             attrs={
                 'endpoint': self.endpoint,
                 'Fanin': self.fan_in,
-                'OptimizeBlock': current_block
+                'OptimizeBlock': current_block,
+                'PrefetchBlock': empty_block
             })


-def Send(endpoints, send_vars, get_vars):
+def Send(endpoints, send_vars, get_vars=None):
     """
     Send layer
@@ -184,7 +186,6 @@ def Send(endpoints, send_vars, get_vars):
        side when server have finished running server side program.
     """
     assert (type(send_vars) == list)
-    assert (type(get_vars) == list)

     epmap = endpoints.split(",")
     endpoints = list(set(epmap))
@@ -192,6 +193,11 @@ def Send(endpoints, send_vars, get_vars):
     helper = LayerHelper("Send", **locals())
     rpc_client_var = default_main_program().global_block().create_var(
         name="RPC_CLIENT_VAR", persistable=True, type=core.VarDesc.VarType.RAW)
+    if not get_vars:
+        get_vars = []
+        for s in send_vars:
+            v = helper.create_tmp_variable(dtype=s.dtype, stop_gradient=True)
+            get_vars.append(v)

     helper.append_op(
         type="send",
@@ -200,6 +206,7 @@ def Send(endpoints, send_vars, get_vars):
                  "RPCClient": rpc_client_var},
         attrs={"endpoints": endpoints,
                "epmap": epmap})
+    return get_vars


 def Recv(endpoints, get_vars):
diff --git a/python/paddle/fluid/layers/learning_rate_scheduler.py b/python/paddle/fluid/layers/learning_rate_scheduler.py
index 65b95a58d6546ed6d6b264443a7c802e16eef23f..d13c54daa5a985e2e1bf9357630fe29d24a17bb4 100644
--- a/python/paddle/fluid/layers/learning_rate_scheduler.py
+++ b/python/paddle/fluid/layers/learning_rate_scheduler.py
@@ -20,7 +20,7 @@ from ..initializer import init_on_cpu

 __all__ = [
     'exponential_decay', 'natural_exp_decay', 'inverse_time_decay',
-    'polynomial_decay', 'piecewise_decay'
+    'polynomial_decay', 'piecewise_decay', 'noam_decay'
 ]
 """
 When training a model, it's often useful to decay the
@@ -32,14 +32,41 @@ strategy according to this module.
 """


-def _decay_step_counter():
+def _decay_step_counter(begin=0):
     # the first global step is zero in learning rate decay
     global_step = nn.autoincreased_step_counter(
-        counter_name='@LR_DECAY_COUNTER@', begin=0, step=1)
+        counter_name='@LR_DECAY_COUNTER@', begin=begin, step=1)
     global_step = tensor.cast(global_step, 'float32')
     return global_step


+def noam_decay(d_model, warmup_steps):
+    """Apply Noam decay to the learning rate.
+    ```python
+    lr_value = np.power(d_model, -0.5) * np.min([
+        np.power(current_steps, -0.5),
+        np.power(warmup_steps, -1.5) * current_steps
+    ])
+    ```
+
+    Args:
+        d_model(Variable): The dimensionality of the input and output of the model.
+        warmup_steps(Variable): The number of warmup steps, a hyperparameter.
+
+    Reference:
+        Attention Is All You Need (https://arxiv.org/pdf/1706.03762.pdf)
+
+    Returns:
+        The decayed learning rate.
+    """
+    global_step = _decay_step_counter(1)
+
+    with init_on_cpu():
+        a = global_step**-0.5
+        b = (warmup_steps**-1.5) * global_step
+        lr_value = (d_model**-0.5) * ops.elementwise_min(a, b)
+
+    return lr_value
+
+
 def exponential_decay(learning_rate, decay_steps, decay_rate, staircase=False):
     """Applies exponential decay to the learning rate.
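For intuition, the same schedule in plain Python; the `d_model=512, warmup_steps=4000` values below are the Transformer paper's defaults, used here only as an example and not part of this patch:

```python
def noam_lr(d_model, warmup_steps, step):
    # linear warmup for `warmup_steps` steps, then decay proportional to step**-0.5
    return d_model ** -0.5 * min(step ** -0.5, warmup_steps ** -1.5 * step)

for step in (1, 1000, 4000, 40000):
    print(step, noam_lr(512, 4000, step))
```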
diff --git a/python/paddle/fluid/layers/metric.py b/python/paddle/fluid/layers/metric.py
index f66dccfa2d040ea0a9d29daeaa1d2da640525959..cab2eb55510542bdd4dd7eca7667601697759181 100644
--- a/python/paddle/fluid/layers/metric.py
+++ b/python/paddle/fluid/layers/metric.py
@@ -20,6 +20,7 @@ from ..layer_helper import LayerHelper
 from ..initializer import Normal, Constant
 from ..framework import Variable
 from ..param_attr import ParamAttr
+import nn

 __all__ = ['accuracy', 'auc']

@@ -27,17 +28,10 @@ __all__ = ['accuracy', 'auc']
 def accuracy(input, label, k=1, correct=None, total=None):
     """
     This function computes the accuracy using the input and label.
-    The output is the top_k inputs and their indices.
+    It uses the top k inputs and their indices to compute the accuracy.
     """
     helper = LayerHelper("accuracy", **locals())
-    topk_out = helper.create_tmp_variable(dtype=input.dtype)
-    topk_indices = helper.create_tmp_variable(dtype="int64")
-    helper.append_op(
-        type="top_k",
-        inputs={"X": [input]},
-        outputs={"Out": [topk_out],
-                 "Indices": [topk_indices]},
-        attrs={"k": k})
+    topk_out, topk_indices = nn.topk(input, k=k)
     acc_out = helper.create_tmp_variable(dtype="float32")
     if correct is None:
         correct = helper.create_tmp_variable(dtype="int64")
@@ -68,12 +62,7 @@ def auc(input, label, curve='ROC', num_thresholds=200):
     helper = LayerHelper("auc", **locals())
     topk_out = helper.create_tmp_variable(dtype=input.dtype)
     topk_indices = helper.create_tmp_variable(dtype="int64")
-    helper.append_op(
-        type="top_k",
-        inputs={"X": [input]},
-        outputs={"Out": [topk_out],
-                 "Indices": [topk_indices]},
-        attrs={"k": k})
+    topk_out, topk_indices = nn.topk(input, k=k)
     auc_out = helper.create_tmp_variable(dtype="float32")
     if correct is None:
         correct = helper.create_tmp_variable(dtype="int64")
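A short sketch of the `accuracy` call affected by this refactor; the small network here is illustrative only:

```python
import paddle.fluid as fluid

img = fluid.layers.data(name='img', shape=[784], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
predict = fluid.layers.fc(input=img, size=10, act='softmax')
# internally builds nn.topk(predict, k=5) now, instead of a hand-appended top_k op
top5_acc = fluid.layers.accuracy(input=predict, label=label, k=5)
```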
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index 3c5923788f3a5e7962504b900959112848bbdfea..f962e62df5e0700e7c35945193d4273cf312ceab 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -60,6 +60,7 @@ __all__ = [
     'edit_distance',
     'l2_normalize',
     'matmul',
+    'topk',
     'warpctc',
     'sequence_reshape',
     'transpose',
@@ -88,6 +89,7 @@ def fc(input,
        bias_attr=None,
        use_mkldnn=False,
        act=None,
+       is_test=False,
        name=None):
     """
     **Fully Connected Layer**
@@ -134,6 +136,7 @@ def fc(input,
         bias_attr (ParamAttr|list of ParamAttr, default None): The parameter
             attribute for the bias of this layer. If it is set to None, no bias
             will be added to the output units.
         act (str, default None): Activation to be applied to the output of this layer.
+        is_test(bool): A flag indicating whether execution is in the test phase.
         use_mkldnn(bool): Use mkldnn kernel or not, it is valid only when the mkldnn
             library is installed. Default: False
         name (str, default None): The name of this layer.
@@ -2544,6 +2547,53 @@ def matmul(x, y, transpose_x=False, transpose_y=False, name=None):
     return out


+def topk(input, k):
+    """
+    This operator is used to find the values and indices of the k largest entries
+    along the last dimension.
+
+    If the input is a vector (rank=1), it finds the k largest entries in the vector
+    and outputs their values and indices as vectors. Thus values[j] is the j-th
+    largest entry in the input, and its index is indices[j].
+
+    If the input is a Tensor with higher rank, this operator computes the top k
+    entries along the last dimension.
+
+    Args:
+        input(Variable): The input variable which can be a vector or a Tensor
+            with higher rank.
+        k(int): An integer value to specify the top k largest elements.
+
+    Returns:
+        values(Variable): The k largest elements along each last dimensional
+            slice.
+        indices(Variable): The indices of values within the last dimension of
+            input.
+
+    Examples:
+        .. code-block:: python
+
+            top5_values, top5_indices = layers.topk(input, k=5)
+    """
+    shape = input.shape
+    if k < 1 or k >= shape[-1]:
+        raise ValueError("k must be greater than 0 and less than %d." %
+                         (shape[-1]))
+
+    helper = LayerHelper("top_k", **locals())
+    values = helper.create_tmp_variable(dtype=input.dtype)
+    indices = helper.create_tmp_variable(dtype="int64")
+    helper.append_op(
+        type="top_k",
+        inputs={"X": [input]},
+        outputs={"Out": [values],
+                 "Indices": [indices]},
+        attrs={"k": k})
+    values.stop_gradient = True
+    indices.stop_gradient = True
+    return values, indices
+
+
 def edit_distance(input, label, normalized=True, ignored_tokens=None,
                   name=None):
     """
@@ -2685,15 +2735,7 @@ def ctc_greedy_decoder(input, blank, name=None):
         cost = fluid.layers.ctc_greedy_decoder(input=x, blank=0)
     """
     helper = LayerHelper("ctc_greedy_decoder", **locals())
-    # top 1 op
-    topk_out = helper.create_tmp_variable(dtype=input.dtype)
-    topk_indices = helper.create_tmp_variable(dtype="int64")
-    helper.append_op(
-        type="top_k",
-        inputs={"X": [input]},
-        outputs={"Out": [topk_out],
-                 "Indices": [topk_indices]},
-        attrs={"k": 1})
+    _, topk_indices = topk(input, k=1)

     # ctc align op
     ctc_out = helper.create_tmp_variable(dtype="int64")
diff --git a/python/paddle/fluid/parallel_executor.py b/python/paddle/fluid/parallel_executor.py
index 07cc1e29341bd497e88097a9ee5653631b79d734..fbdd6fd449625a21f91758dc12490b02070aea1a 100644
--- a/python/paddle/fluid/parallel_executor.py
+++ b/python/paddle/fluid/parallel_executor.py
@@ -16,6 +16,7 @@ import core
 import multiprocessing
 import framework
 import executor
+import warnings
 import sys

 __all__ = ['ParallelExecutor']
@@ -62,8 +63,8 @@ class ParallelExecutor(object):
                 main_program=test_program,
                 share_vars_from=train_exe)

-            train_loss, = train_exe.run([loss.name], feed_dict=feed_dict)
-            test_loss, = test_exe.run([loss.name], feed_dict=feed_dict)
+            train_loss, = train_exe.run([loss.name], feed=feed_dict)
+            test_loss, = test_exe.run([loss.name], feed=feed_dict)
         """
         self._places = []
@@ -103,8 +104,8 @@ class ParallelExecutor(object):

         self.persistable_vars = [
             v.name
-            for v in filter(lambda var: \
-                var.persistable and var.type != core.VarDesc.VarType.RAW,
+            for v in filter(
+                lambda var: var.persistable and var.type != core.VarDesc.VarType.RAW,
                 main.list_vars())
         ]
@@ -163,7 +164,7 @@ class ParallelExecutor(object):
         Returns: fetched result list.
         """
-        if feed is None:
+        if feed is None and feed_dict is not None:
             feed = feed_dict
             print >> sys.stderr, "`feed_dict` is deprecated. Please use `feed=`"
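To make the relocated API concrete, a minimal construction-time sketch of the new `fluid.layers.topk`; the data layer is illustrative:

```python
import paddle.fluid as fluid

scores = fluid.layers.data(name='scores', shape=[1000], dtype='float32')
# values: the 5 largest entries of each row; indices: their positions (int64).
# Both outputs have stop_gradient=True, so no gradient flows through top_k.
top5_values, top5_indices = fluid.layers.topk(scores, k=5)
```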
diff --git a/python/paddle/fluid/tests/book/test_image_classification.py b/python/paddle/fluid/tests/book/test_image_classification.py
index e8bb082be196b6342b1719235f1264bbe3d776ac..0027b651e88b68950e77e03399b3987aa0120192 100644
--- a/python/paddle/fluid/tests/book/test_image_classification.py
+++ b/python/paddle/fluid/tests/book/test_image_classification.py
@@ -22,10 +22,17 @@ import sys
 import numpy
 import unittest
 import os
+import numpy as np


 def resnet_cifar10(input, depth=32):
-    def conv_bn_layer(input, ch_out, filter_size, stride, padding, act='relu'):
+    def conv_bn_layer(input,
+                      ch_out,
+                      filter_size,
+                      stride,
+                      padding,
+                      act='relu',
+                      bias_attr=False):
         tmp = fluid.layers.conv2d(
             input=input,
             filter_size=filter_size,
@@ -33,7 +40,7 @@ def resnet_cifar10(input, depth=32):
             stride=stride,
             padding=padding,
             act=None,
-            bias_attr=False)
+            bias_attr=bias_attr)
         return fluid.layers.batch_norm(input=tmp, act=act)

     def shortcut(input, ch_in, ch_out, stride):
@@ -44,7 +51,7 @@ def resnet_cifar10(input, depth=32):

     def basicblock(input, ch_in, ch_out, stride):
         tmp = conv_bn_layer(input, ch_out, 3, stride, 1)
-        tmp = conv_bn_layer(tmp, ch_out, 3, 1, 1, act=None)
+        tmp = conv_bn_layer(tmp, ch_out, 3, 1, 1, act=None, bias_attr=True)
         short = shortcut(input, ch_in, ch_out, stride)
         return fluid.layers.elementwise_add(x=tmp, y=short, act='relu')

@@ -219,11 +226,26 @@ def infer(use_cuda, save_dirname=None):
         batch_size = 1
         tensor_img = numpy.random.rand(batch_size, 3, 32, 32).astype("float32")

+        # Use the inference transpiler to speed up inference
+        inference_transpiler_program = inference_program.clone()
+        t = fluid.InferenceTranspiler()
+        t.transpile(inference_transpiler_program, place)
+
         # Construct feed as a dictionary of {feed_target_name: feed_target_data}
         # and results will contain a list of data corresponding to fetch_targets.
         results = exe.run(inference_program,
                           feed={feed_target_names[0]: tensor_img},
                           fetch_list=fetch_targets)
+
+        transpiler_results = exe.run(inference_transpiler_program,
+                                     feed={feed_target_names[0]: tensor_img},
+                                     fetch_list=fetch_targets)
+
+        assert len(results[0]) == len(transpiler_results[0])
+        for i in range(len(results[0])):
+            np.testing.assert_almost_equal(
+                results[0][i], transpiler_results[0][i], decimal=6)
+
         print("infer results: ", results[0])
diff --git a/python/paddle/fluid/tests/unittests/CMakeLists.txt b/python/paddle/fluid/tests/unittests/CMakeLists.txt
index 8f17eeea139c86055f0d4a06b21cbf66d8395cdc..d9190408e151283ece8460286dd67818dd39da3e 100644
--- a/python/paddle/fluid/tests/unittests/CMakeLists.txt
+++ b/python/paddle/fluid/tests/unittests/CMakeLists.txt
@@ -65,6 +65,7 @@ list(REMOVE_ITEM TEST_OPS test_registry)
 list(REMOVE_ITEM TEST_OPS test_fetch_var)
 list(REMOVE_ITEM TEST_OPS test_parallel_op)
 list(REMOVE_ITEM TEST_OPS test_dynrnn_static_input)
+list(REMOVE_ITEM TEST_OPS test_dist_train)

 # tests that can be bundled together in one python process for speed.
 if(WITH_FAST_BUNDLE_TEST)
@@ -103,3 +104,4 @@ py_test_modules(test_registry MODULES test_registry)
 py_test_modules(test_fetch_var MODULES test_fetch_var)
 py_test_modules(test_dynrnn_static_input MODULES test_dynrnn_static_input)
 py_test_modules(test_parallel_op MODULES test_parallel_op)
+py_test_modules(test_dist_train MODULES test_dist_train)
diff --git a/python/paddle/fluid/tests/unittests/test_recv_op.py b/python/paddle/fluid/tests/unittests/test_dist_train.py
similarity index 57%
rename from python/paddle/fluid/tests/unittests/test_recv_op.py
rename to python/paddle/fluid/tests/unittests/test_dist_train.py
index 2ebceca7e4b7b824194d94180462870e6cfe6d21..c7fdd06f105e3b5fd906d3524d41df8f84160e63 100644
--- a/python/paddle/fluid/tests/unittests/test_recv_op.py
+++ b/python/paddle/fluid/tests/unittests/test_dist_train.py
@@ -15,31 +15,42 @@

 import unittest
 import paddle.fluid as fluid
+import paddle.fluid.core as core
 import paddle.fluid.layers as layers
 import numpy
 from multiprocessing import Process
+from threading import Thread
 import os, sys
 import time


-class TestRecvOp(unittest.TestCase):
-    def no_test_send(self):
+class TestSendOp(unittest.TestCase):
+    def test_send(self):
         # Run init_serv in a thread
         place = fluid.CPUPlace()
+        # NOTE: python thread will not work here due to GIL.
         p = Process(target=self.init_serv, args=(place, ))
         p.daemon = True
         p.start()
-        time.sleep(1)
-        self.init_client(place)
+
+        time.sleep(10)
+        with open("/tmp/paddle.selected_port", "r") as fn:
+            selected_port = int(fn.readlines()[0])
+        self.init_client(place, selected_port)
+
+        self.run_local(place)
+        self.assertTrue(numpy.allclose(self.local_out, self.dist_out))
+
+        # FIXME(typhoonzero): find a way to gracefully shut down the server.
         os.system("kill -9 %d" % p.pid)
         p.join()

     def init_serv(self, place):
         main = fluid.Program()
+
         with fluid.program_guard(main):
             serv = layers.ListenAndServ(
-                "127.0.0.1:6174", ["X"], optimizer_mode=False)
+                "127.0.0.1:0", ["X"], optimizer_mode=False)
             with serv.do():
                 x = layers.data(
                     shape=[32, 32],
@@ -50,10 +61,29 @@
                 o = layers.scale(x=x, scale=10.0)
                 main.global_block().create_var(
                     name=o.name, persistable=False, dtype=o.dtype, shape=o.shape)
+
+        self.server_exe = fluid.Executor(place)
+        self.server_exe.run(main)
+
+    def init_client(self, place, port):
+        main = fluid.Program()
+        with fluid.program_guard(main):
+            x = layers.data(
+                shape=[32, 32],
+                dtype='float32',
+                name='X',
+                append_batch_size=False)
+            fluid.initializer.Constant(value=2.3)(x, main.global_block())
+            get_var = main.global_block().create_var(
+                name="scale_0.tmp_0",  # server side var
+                dtype="float32",
+                persistable=False,
+                shape=[32, 32])
+            o = layers.Send("127.0.0.1:%d" % port, [x], [get_var])
         exe = fluid.Executor(place)
-        exe.run(main)
+        self.dist_out = exe.run(main, fetch_list=o)  # o is a list

-    def init_client(self, place):
+    def run_local(self, place):
         main = fluid.Program()
         with fluid.program_guard(main):
             x = layers.data(
@@ -61,10 +91,10 @@
                 dtype='float32',
                 name='X',
                 append_batch_size=False)
-            fluid.initializer.Constant(value=1.0)(x, main.global_block())
-            layers.Send("127.0.0.1:6174", [x], [x])
+            fluid.initializer.Constant(value=2.3)(x, main.global_block())
+            o = layers.scale(x=x, scale=10.0)
         exe = fluid.Executor(place)
-        exe.run(main)
+        self.local_out = exe.run(main, fetch_list=[o])


 if __name__ == "__main__":
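With the `io.py` change above, `get_vars` is now optional; a hedged sketch of the shorter client-side graph construction (the endpoint and shapes are placeholders, and a matching server must be listening before the program is actually run):

```python
import paddle.fluid as fluid
import paddle.fluid.layers as layers

main = fluid.Program()
with fluid.program_guard(main):
    x = layers.data(
        shape=[32, 32], dtype='float32', name='X', append_batch_size=False)
    # get_vars omitted: Send creates one temporary per send_var and returns them
    fetched = layers.Send("127.0.0.1:6174", [x])
```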
diff --git a/python/paddle/fluid/tests/unittests/test_layers.py b/python/paddle/fluid/tests/unittests/test_layers.py
index a1be2d671ddc5c689b16319fcf5bf12dca5dde7e..17d6afdee161426e5da398ffa2ec148a027c905e 100644
--- a/python/paddle/fluid/tests/unittests/test_layers.py
+++ b/python/paddle/fluid/tests/unittests/test_layers.py
@@ -350,6 +350,15 @@ class TestBook(unittest.TestCase):
         self.assertIsNotNone(smooth_label)
         print(str(program))

+    def test_topk(self):
+        program = Program()
+        with program_guard(program):
+            data = layers.data(name="label", shape=[200], dtype="float32")
+            values, indices = layers.topk(data, k=5)
+            self.assertIsNotNone(values)
+            self.assertIsNotNone(indices)
+        print(str(program))
+

 if __name__ == '__main__':
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor.py b/python/paddle/fluid/tests/unittests/test_parallel_executor.py
index 3ddafbbc57b29d506158bcb57188ab96f814e0d3..c783a142467f3f6a9cd210425acfc526a32a6f71 100644
--- a/python/paddle/fluid/tests/unittests/test_parallel_executor.py
+++ b/python/paddle/fluid/tests/unittests/test_parallel_executor.py
@@ -200,14 +200,29 @@ class TestParallelExecutorBase(unittest.TestCase):
     def check_network_convergence(self,
                                   method,
                                   memory_opt=True,
-                                  iter=10,
+                                  iter=50,
                                   batch_size=None,
                                   allow_op_delay=False,
-                                  feed_dict=None):
+                                  feed_dict=None,
+                                  seed=None,
+                                  use_parallel_executor=True):
+        def run_executor(exe, feed, fetch_list, program=None):
+            if isinstance(exe, fluid.ParallelExecutor):
+                res = exe.run(fetch_list=fetch_list, feed=feed)
+            elif isinstance(exe, fluid.Executor):
+                if program is None:
+                    program = fluid.default_main_program()
+                res = exe.run(program=program, feed=feed, fetch_list=fetch_list)
+            else:
+                raise ValueError('Unknown executor type')
+            return res
+
         main = fluid.Program()
         startup = fluid.Program()
         startup.random_seed = 1  # Fix random seed
         with fluid.program_guard(main, startup):
+            if seed is not None:
+                startup.random_seed = seed
             loss = method(use_feed=feed_dict is not None)
             adam = fluid.optimizer.Adam()
             adam.minimize(loss)
@@ -217,18 +232,24 @@ class TestParallelExecutorBase(unittest.TestCase):
         startup_exe = fluid.Executor(place)
         startup_exe.run(startup)

-        exe = fluid.ParallelExecutor(
-            True, loss_name=loss.name, allow_op_delay=allow_op_delay)
+        if use_parallel_executor:
+            exe = fluid.ParallelExecutor(
+                True, loss_name=loss.name, allow_op_delay=allow_op_delay)
+        else:
+            exe = fluid.Executor(place=place)
+
         if batch_size is not None:
             batch_size *= fluid.core.get_cuda_device_count()
         begin = time.time()
-        first_loss, = exe.run([loss.name], feed=feed_dict)
+        first_loss, = run_executor(
+            exe=exe, feed=feed_dict, fetch_list=[loss.name])
         first_loss = numpy.array(first_loss)

         for i in xrange(iter):
-            exe.run([], feed=feed_dict)
+            run_executor(exe=exe, feed=feed_dict, fetch_list=[])

-        last_loss, = exe.run([loss.name], feed=feed_dict)
+        last_loss, = run_executor(
+            exe=exe, feed=feed_dict, fetch_list=[loss.name])
         end = time.time()

         if batch_size is not None:
@@ -239,6 +260,7 @@ class TestParallelExecutorBase(unittest.TestCase):
         print first_loss, last_loss
         # self.assertGreater(first_loss[0], last_loss[0])
+        return first_loss, last_loss


 class TestMNIST(TestParallelExecutorBase):
@@ -268,6 +290,27 @@ class TestMNIST(TestParallelExecutorBase):
             simple_fc_net, feed_dict={"image": img,
                                       "label": label})

+    def test_simple_fc_parallel_accuracy(self):
+        img = numpy.zeros(shape=[32, 784], dtype='float32')
+        label = numpy.ones(shape=[32, 1], dtype='int64')
+        single_first_loss, single_last_loss = self.check_network_convergence(
+            method=simple_fc_net,
+            seed=1000,
+            feed_dict={"image": img,
+                       "label": label},
+            use_parallel_executor=False)
+        parallel_first_loss, parallel_last_loss = self.check_network_convergence(
+            method=simple_fc_net,
+            seed=1000,
+            feed_dict={"image": img,
+                       "label": label},
+            use_parallel_executor=True)
+
+        for p_f in parallel_first_loss:
+            self.assertAlmostEquals(p_f, single_first_loss[0], delta=1e-6)
+        for p_l in parallel_last_loss:
+            self.assertAlmostEquals(p_l, single_last_loss[0], delta=1e-6)
+
     def test_batchnorm_fc(self):
         self.check_network_convergence(fc_with_batchnorm)
         img = numpy.zeros(shape=[32, 784], dtype='float32')
@@ -496,10 +539,10 @@ class ParallelExecutorTestingDuringTraining(unittest.TestCase):
             share_vars_from=train_exe)

         for i in xrange(5):
-            test_loss, = test_exe.run([loss.name], feed_dict=feed_dict)
+            test_loss, = test_exe.run([loss.name], feed=feed_dict)
             test_loss = numpy.array(test_loss)

-            train_loss, = train_exe.run([loss.name], feed_dict=feed_dict)
+            train_loss, = train_exe.run([loss.name], feed=feed_dict)
             train_loss = numpy.array(train_loss)
             self.assertTrue(
                 numpy.allclose(