diff --git a/benchmark/fluid/mnist.py b/benchmark/fluid/mnist.py index 1e2185dfac1072d1f1046f4616a9d53a8fc76061..400200c4745017bd9d160bb9e415fde041c0a6c8 100644 --- a/benchmark/fluid/mnist.py +++ b/benchmark/fluid/mnist.py @@ -159,6 +159,7 @@ def run_benchmark(model, args): paddle.dataset.mnist.train(), batch_size=args.batch_size) accuracy = fluid.metrics.Accuracy() + train_exe = fluid.ParallelExecutor(use_cuda=True, loss_name=avg_cost.name) iters, num_samples, start_time = 0, 0, time.time() for pass_id in range(args.pass_num): accuracy.reset() @@ -175,17 +176,20 @@ def run_benchmark(model, args): y_data = np.array(map(lambda x: x[1], data)).astype("int64") y_data = y_data.reshape([len(y_data), 1]) - outs = exe.run( - fluid.default_main_program(), + outs = train_exe.run( feed={"pixel": img_data, "label": y_data}, - fetch_list=[avg_cost, batch_acc, batch_size_tensor] + fetch_list=[ + avg_cost.name, batch_acc.name, batch_size_tensor.name + ] ) # The accuracy is the accumulation of batches, but not the current batch. - accuracy.update(value=outs[1], weight=outs[2]) + accuracy.update( + value=np.array(np.mean(outs[1])), + weight=np.mean(np.array(outs[2]))) iters += 1 num_samples += len(y_data) - loss = np.array(outs[0]) - acc = np.array(outs[1]) + loss = np.mean(np.array(outs[0])) + acc = np.mean(np.array(outs[1])) train_losses.append(loss) train_accs.append(acc) print("Pass: %d, Iter: %d, Loss: %f, Accuracy: %f" % diff --git a/benchmark/fluid/resnet.py b/benchmark/fluid/resnet.py index 831fa2c019fc2868cd85b1ca7b2c8c76a2f1628c..0fd7258a804e7c93b0b03da140140394bf90004a 100644 --- a/benchmark/fluid/resnet.py +++ b/benchmark/fluid/resnet.py @@ -241,6 +241,7 @@ def run_benchmark(model, args): exe = fluid.Executor(place) exe.run(fluid.default_startup_program()) accuracy = fluid.average.WeightedAverage() + train_exe = fluid.ParallelExecutor(use_cuda=True, loss_name=avg_cost.name) if args.use_fake_data: data = train_reader().next() image = np.array(map(lambda x: x[0].reshape(dshape), data)).astype( @@ -264,14 +265,17 @@ def run_benchmark(model, args): data)).astype('float32') label = np.array(map(lambda x: x[1], data)).astype('int64') label = label.reshape([-1, 1]) - loss, acc, weight = exe.run( - fluid.default_main_program(), + loss, acc, weight = train_exe.run( feed={'data': image, 'label': label}, - fetch_list=[avg_cost, batch_acc, batch_size_tensor]) + fetch_list=[ + avg_cost.name, batch_acc.name, batch_size_tensor.name + ]) iters += 1 num_samples += len(label) - accuracy.add(value=acc, weight=weight) + accuracy.add(value=np.array(np.mean(acc)), weight=np.mean(weight)) + loss = np.mean(np.array(loss)) + acc = np.mean(np.array(acc)) train_losses.append(loss) train_accs.append(acc) print("Pass: %d, Iter: %d, Loss: %f, Accuracy: %f" % diff --git a/benchmark/fluid/vgg.py b/benchmark/fluid/vgg.py index 53e34e0cbd15914791c305db6797f826ebfae34e..2a9566a45c3804183e05db9298cec4f670225a6f 100644 --- a/benchmark/fluid/vgg.py +++ b/benchmark/fluid/vgg.py @@ -169,6 +169,7 @@ def main(): iters, num_samples, start_time = 0, 0, time.time() accuracy = fluid.average.WeightedAverage() + train_exe = fluid.ParallelExecutor(use_cuda=True, loss_name=avg_cost.name) for pass_id in range(args.pass_num): accuracy.reset() train_accs = [] @@ -184,14 +185,17 @@ def main(): y_data = np.array(map(lambda x: x[1], data)).astype("int64") y_data = y_data.reshape([-1, 1]) - loss, acc, weight = exe.run( - fluid.default_main_program(), + loss, acc, weight = train_exe.run( feed={"pixel": img_data, "label": y_data}, - 
fetch_list=[avg_cost, batch_acc, batch_size_tensor]) - accuracy.add(value=acc, weight=weight) + fetch_list=[ + avg_cost.name, batch_acc.name, batch_size_tensor.name + ]) + accuracy.add(value=np.array(np.mean(acc)), weight=np.mean(weight)) iters += 1 num_samples += len(y_data) + loss = np.mean(np.array(loss)) + acc = np.mean(np.array(acc)) print( "Pass = %d, Iter = %d, Loss = %f, Accuracy = %f" % (pass_id, iters, loss, acc) diff --git a/cmake/external/eigen.cmake b/cmake/external/eigen.cmake index edc93c2773f46ec9e0bf898557c55c93274e6a01..e029300eee9b99582f085f6b650e03f7dacc091a 100644 --- a/cmake/external/eigen.cmake +++ b/cmake/external/eigen.cmake @@ -21,11 +21,12 @@ else() ExternalProject_Add( extern_eigen3 ${EXTERNAL_PROJECT_LOG_ARGS} - GIT_REPOSITORY "https://github.com/RLovelett/eigen.git" + GIT_REPOSITORY "https://github.com/eigenteam/eigen-git-mirror" # eigen on cuda9.1 missing header of math_funtions.hpp # https://stackoverflow.com/questions/43113508/math-functions-hpp-not-found-when-using-cuda-with-eigen GIT_TAG 917060c364181f33a735dc023818d5a54f60e54c PREFIX ${EIGEN_SOURCE_DIR} + DOWNLOAD_NAME "eigen" UPDATE_COMMAND "" CONFIGURE_COMMAND "" BUILD_COMMAND "" diff --git a/cmake/external/snappy.cmake b/cmake/external/snappy.cmake index 80282329c6ac65fbd1493a6838efca4bd9cadaad..af09ed4d5d6e21cc50aba5198a7e9ea56f49451a 100644 --- a/cmake/external/snappy.cmake +++ b/cmake/external/snappy.cmake @@ -47,8 +47,6 @@ ExternalProject_Add( -DCMAKE_INSTALL_LIBDIR:PATH=${SNAPPY_INSTALL_DIR}/lib -DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON -DCMAKE_BUILD_TYPE:STRING=${THIRD_PARTY_BUILD_TYPE} - BUILD_COMMAND make -j8 - INSTALL_COMMAND make install ) add_library(snappy STATIC IMPORTED GLOBAL) diff --git a/cmake/external/snappystream.cmake b/cmake/external/snappystream.cmake index 20a96430823d07a07d4bb4602e7fc0cfe55c3bf2..6df636d7fa8757ade73892bda03a80ba9767472b 100644 --- a/cmake/external/snappystream.cmake +++ b/cmake/external/snappystream.cmake @@ -46,8 +46,6 @@ ExternalProject_Add( -DCMAKE_INSTALL_PREFIX:PATH=${SNAPPYSTREAM_INSTALL_DIR} -DCMAKE_INSTALL_LIBDIR:PATH=${SNAPPYSTREAM_INSTALL_DIR}/lib -DCMAKE_BUILD_TYPE:STRING=${THIRD_PARTY_BUILD_TYPE} - BUILD_COMMAND make -j8 - INSTALL_COMMAND make install DEPENDS snappy ) diff --git a/cmake/inference_lib.cmake b/cmake/inference_lib.cmake index cc758019827b9a5416a801e4da43d754d4492a73..1144ca7f43b9ffd288b06672dbce82d5fae26347 100644 --- a/cmake/inference_lib.cmake +++ b/cmake/inference_lib.cmake @@ -70,6 +70,12 @@ copy(glog_lib DSTS ${dst_dir} ${dst_dir}/lib ) +set(dst_dir "${CMAKE_INSTALL_PREFIX}/third_party/boost/") +copy(boost_lib + SRCS ${BOOST_INCLUDE_DIR}/boost + DSTS ${dst_dir} +) + if(NOT PROTOBUF_FOUND) set(dst_dir "${CMAKE_INSTALL_PREFIX}/third_party/install/protobuf") copy(protobuf_lib @@ -92,6 +98,14 @@ elseif (WITH_MKLML) ) endif() +if(WITH_MKLDNN) + set(dst_dir "${CMAKE_INSTALL_PREFIX}/third_party/install/mkldnn") + copy(mkldnn_lib + SRCS ${MKLDNN_INC_DIR} ${MKLDNN_SHARED_LIB} + DSTS ${dst_dir} ${dst_dir}/lib + ) +endif() + if(NOT MOBILE_INFERENCE AND NOT RPI) set(dst_dir "${CMAKE_INSTALL_PREFIX}/third_party/install/snappy") copy(snappy_lib @@ -142,4 +156,10 @@ copy(string_lib DSTS ${dst_dir}/${module} ${dst_dir}/${module}/tinyformat ) +set(module "pybind") +copy(pybind_lib + SRCS ${CMAKE_CURRENT_BINARY_DIR}/paddle/fluid/${module}/pybind.h + DSTS ${dst_dir}/${module} +) + add_custom_target(inference_lib_dist DEPENDS ${inference_lib_dist_dep}) diff --git a/doc/fluid/design/concepts/functions_operators_layers.md 
b/doc/fluid/design/concepts/functions_operators_layers.md
index 30bc488a18a28d349645d9d2502aae6691a69931..1f86b99e5197c3e0b85fd76fe704520ef21b06d3 100644
--- a/doc/fluid/design/concepts/functions_operators_layers.md
+++ b/doc/fluid/design/concepts/functions_operators_layers.md
@@ -40,7 +40,7 @@ template class FCOp : public OperatorBase {
  public:
   void Run(...) {
-    add(mul(Input("X"), Input("W")), Input("b");
+    add(mul(Input("X"), Input("W")), Input("b"));
   }
 };
 REGISTER_OP(FCOp, "fc");
diff --git a/doc/fluid/design/dist_train/async_update.md b/doc/fluid/design/dist_train/async_update.md
index 6a0835b761b69030ba30697e6e8863928efbf57f..248d2ec18dafdecac9184527638754b6ba4d85b8 100644
--- a/doc/fluid/design/dist_train/async_update.md
+++ b/doc/fluid/design/dist_train/async_update.md
@@ -4,34 +4,37 @@
 For the typical synchronous distributed training, some significant steps are as follows:
 
-1. A Trainer will compute the gradients and SEND them to the Parameter Server(PServer) nodes.
-1. After the PServer node received gradients came from all the Trainers, It will aggregate the
+1. A trainer process will compute the gradients and **send** them to the parameter server (PS) nodes.
+1. After the PS node has received the gradients from all the trainers, it will aggregate the
 gradient variables for the same parameter into one gradient variable and then apply the
 aggregated gradient to the respective parameter, finally using an optimization algorithm
 (SGD, Momentum...) to update the parameters.
-1. The Trainer would wait for the PServers finished the optimize stage, and GET the parameters from PServer,
+1. The trainer would wait until the PS has finished the optimize stage, and then **get** the parameters from the PS,
 so all the trainers would get the same parameters.
 
-In the synchronously distributed training, there should be a `Barrier` to synchronise the
-parameters after the optimizing stage. The performance of a distributed training job would
-depend on the slowest node if there were hundreds or thousands of training nodes in a
-Job, the performance of synchronously distributed training might be very poor because of
-the slow node. So this design doc would introduce an approach to implement
-*asynchronously* distributed training in PaddlePaddle Fluid.
+In Synchronous Distributed Training, there is a **barrier** on each PS to wait until all trainer processes
+have completed running the current mini-batch. After that, all trainers can continue to run the next
+mini-batch. So the overall performance of Synchronous Distributed Training depends
+on the slowest node.
+
+In Asynchronous Distributed Training, we don't need to wait for a global mini-batch: the optimizer on
+the PS will run immediately when a gradient is uploaded to the PS from any one trainer. This mode can
+achieve better scalability and throughput. In this design doc, we will introduce how to
+implement Asynchronous Distributed Training based on PaddlePaddle Fluid.
 
 ## Design
 
-As the figure above, we describe a global view of asynchronously update process and use
+As the figure above shows, we describe a global view of the asynchronous update process and use
 the parameter `w1` as an example to introduce the steps:
 1. Each gradient variable may be distributed on a different GPU card; aggregate
 them once they are all calculated.
-1. Split the gradient variable into multiple blocks according to the number of PServer
+1. Split the gradient variable into multiple blocks according to the number of PS
 instances and then send them.
-1. PServer would run an `Optimize Block` using a specified optimize algorithm to update
+1. The PS would run an `Optimize Block` using a specified optimization algorithm to update
 the specified parameter.
-1. The trainer will fetch latest parameter from PServer before running forward Op which depends
+1. The trainer will fetch the latest parameter from the PS before running the forward Op which depends
 on the specified parameter.
 1. Broadcast the received variable into multiple GPU cards and continue to run the next
 mini-batch.
 
@@ -40,8 +43,8 @@ mini-batch.
 
 - For multi-device distributed training, we need to first aggregate the gradient
 variables placed on different devices and then schedule a `SendVars` Operator to
-send the gradient variables to the multiple PServer instances.
-- Schedule `FetchVars` operator to fetch the latest parameter from PServer before running
+send the gradient variables to the multiple PS instances.
+- Schedule the `FetchVars` operator to fetch the latest parameter from the PS before running
 the forward ops.
 - There could be a large number of gradient variables to be sent, so we need to use another
 thread pool (IO Threadpool) whose number of schedulable threads is larger than the
diff --git a/paddle/CMakeLists.txt b/paddle/CMakeLists.txt
index 8b1ca5e16548334ed0c9a6d31b88e0805304579e..d722eec1892206ac44c49e7a12d92be0c54df8c0 100644
--- a/paddle/CMakeLists.txt
+++ b/paddle/CMakeLists.txt
@@ -24,6 +24,6 @@ if(NOT WITH_FLUID_ONLY)
 endif()
 add_subdirectory(testing)
-if(NOT MOBILE_INFERENCE AND NOT RPI)
+if(NOT MOBILE_INFERENCE AND NOT RPI AND NOT WITH_C_API)
   add_subdirectory(fluid)
 endif()
diff --git a/paddle/fluid/framework/CMakeLists.txt b/paddle/fluid/framework/CMakeLists.txt
index ab71e0e63ce18e4f221a046eeb2c39499c1c3816..ed1e70c6460b513c1d2e1add18ac037f71d36944 100644
--- a/paddle/fluid/framework/CMakeLists.txt
+++ b/paddle/fluid/framework/CMakeLists.txt
@@ -5,11 +5,11 @@ proto_library(framework_proto SRCS framework.proto)
 cc_library(ddim SRCS ddim.cc DEPS eigen3 boost)
 cc_test(ddim_test SRCS ddim_test.cc DEPS ddim)
 nv_test(dim_test SRCS dim_test.cu DEPS ddim)
-
+cc_library(data_type SRCS data_type.cc DEPS framework_proto ddim device_context)
 if(WITH_GPU)
-  nv_library(tensor SRCS tensor.cc tensor_util.cu DEPS ddim place memory device_context framework_proto)
+  nv_library(tensor SRCS tensor.cc tensor_util.cu DEPS place memory data_type)
 else()
-  cc_library(tensor SRCS tensor.cc tensor_util.cc DEPS ddim place memory device_context framework_proto)
+  cc_library(tensor SRCS tensor.cc tensor_util.cc DEPS place memory data_type)
 endif()
 cc_test(tensor_test SRCS tensor_test.cc DEPS tensor)
diff --git a/paddle/fluid/framework/data_type.cc b/paddle/fluid/framework/data_type.cc
new file mode 100644
index 0000000000000000000000000000000000000000..b6b93cf422a60c1d8e9cb8b477efd562f9fe4758
--- /dev/null
+++ b/paddle/fluid/framework/data_type.cc
@@ -0,0 +1,102 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
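The new `data_type.cc` that follows replaces the hand-written `if`/`else` ladders from `data_type.h` with a single `DataTypeMap` built once at startup, so supporting a new type (e.g. `SIZE_T`, `UINT8`) means adding one `RegType` line rather than touching four switch statements. A minimal Python sketch of the same bidirectional-registry idea, with purely illustrative names that are not part of the patch:

```python
# Two-way type registry mirroring DataTypeMap: built once, queried both ways.
_cpp_to_proto, _proto_to_cpp, _cpp_to_size = {}, {}, {}

def register_type(cpp_name, proto_enum, size):
    _cpp_to_proto[cpp_name] = proto_enum
    _proto_to_cpp[proto_enum] = cpp_name
    _cpp_to_size[cpp_name] = size

register_type("float", 5, 4)   # proto::VarType::FP32 = 5 in framework.proto
register_type("double", 6, 8)  # proto::VarType::FP64 = 6

def to_data_type(cpp_name):
    if cpp_name in _cpp_to_proto:
        return _cpp_to_proto[cpp_name]
    raise TypeError("Not support %s as tensor type" % cpp_name)
```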
+ +#include "paddle/fluid/framework/data_type.h" +#include +#include +#include + +namespace paddle { +namespace framework { + +struct DataTypeMap { + std::unordered_map cpp_to_proto_; + std::unordered_map proto_to_cpp_; + std::unordered_map proto_to_str_; + std::unordered_map cpp_to_size_; +}; + +static DataTypeMap* InitDataTypeMap(); +static DataTypeMap& gDataTypeMap() { + static DataTypeMap* g_data_type_map_ = InitDataTypeMap(); + return *g_data_type_map_; +} + +template +static inline void RegisterType(DataTypeMap* map, + proto::VarType::Type proto_type, + const std::string& name) { + map->proto_to_cpp_.emplace(static_cast(proto_type), typeid(T)); + map->cpp_to_proto_.emplace(typeid(T), proto_type); + map->proto_to_str_.emplace(static_cast(proto_type), name); + map->cpp_to_size_.emplace(typeid(T), sizeof(T)); +} + +static DataTypeMap* InitDataTypeMap() { + auto retv = new DataTypeMap(); + +#define RegType(cc_type, proto_type) \ + RegisterType(retv, proto_type, #cc_type) + + // NOTE: Add your customize type here. + RegType(platform::float16, proto::VarType::FP16); + RegType(float, proto::VarType::FP32); + RegType(double, proto::VarType::FP64); + RegType(int, proto::VarType::INT32); + RegType(int64_t, proto::VarType::INT64); + RegType(bool, proto::VarType::BOOL); + RegType(size_t, proto::VarType::SIZE_T); + RegType(int16_t, proto::VarType::INT16); + RegType(uint8_t, proto::VarType::UINT8); + +#undef RegType + return retv; +} + +proto::VarType::Type ToDataType(std::type_index type) { + auto it = gDataTypeMap().cpp_to_proto_.find(type); + if (it != gDataTypeMap().cpp_to_proto_.end()) { + return it->second; + } + PADDLE_THROW("Not support %s as tensor type", type.name()); +} + +std::type_index ToTypeIndex(proto::VarType::Type type) { + auto it = gDataTypeMap().proto_to_cpp_.find(static_cast(type)); + if (it != gDataTypeMap().proto_to_cpp_.end()) { + return it->second; + } + PADDLE_THROW("Not support proto::VarType::Type(%d) as tensor type", + static_cast(type)); +} + +std::string DataTypeToString(const proto::VarType::Type type) { + auto it = gDataTypeMap().proto_to_str_.find(static_cast(type)); + if (it != gDataTypeMap().proto_to_str_.end()) { + return it->second; + } + PADDLE_THROW("Not support proto::VarType::Type(%d) as tensor type", + static_cast(type)); +} + +size_t SizeOfType(std::type_index type) { + auto it = gDataTypeMap().cpp_to_size_.find(type); + if (it != gDataTypeMap().cpp_to_size_.end()) { + return it->second; + } + PADDLE_THROW("Not support %s as tensor type", type.name()); +} + +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/data_type.h b/paddle/fluid/framework/data_type.h index 2a528eb3aa562568c92059250f2c9bc5a75ec103..491413db8c8d66fd907801131e89d9303bdef9f2 100644 --- a/paddle/fluid/framework/data_type.h +++ b/paddle/fluid/framework/data_type.h @@ -17,51 +17,14 @@ limitations under the License. */ #include #include "paddle/fluid/framework/framework.pb.h" #include "paddle/fluid/platform/enforce.h" + #include "paddle/fluid/platform/float16.h" namespace paddle { namespace framework { -inline proto::VarType::Type ToDataType(std::type_index type) { - if (typeid(platform::float16).hash_code() == type.hash_code()) { - return proto::VarType::FP16; - } else if (typeid(const float).hash_code() == type.hash_code()) { - // CPPLint complains Using C-style cast. 
Use static_cast() instead - // One fix to this is to replace float with const float because - // typeid(T) == typeid(const T) - // http://en.cppreference.com/w/cpp/language/typeid - return proto::VarType::FP32; - } else if (typeid(const double).hash_code() == type.hash_code()) { - return proto::VarType::FP64; - } else if (typeid(const int).hash_code() == type.hash_code()) { - return proto::VarType::INT32; - } else if (typeid(const int64_t).hash_code() == type.hash_code()) { - return proto::VarType::INT64; - } else if (typeid(const bool).hash_code() == type.hash_code()) { - return proto::VarType::BOOL; - } else { - PADDLE_THROW("Not supported"); - } -} - -inline std::type_index ToTypeIndex(proto::VarType::Type type) { - switch (type) { - case proto::VarType::FP16: - return typeid(platform::float16); - case proto::VarType::FP32: - return typeid(float); - case proto::VarType::FP64: - return typeid(double); - case proto::VarType::INT32: - return typeid(int); - case proto::VarType::INT64: - return typeid(int64_t); - case proto::VarType::BOOL: - return typeid(bool); - default: - PADDLE_THROW("Not support type %d", type); - } -} +extern proto::VarType::Type ToDataType(std::type_index type); +extern std::type_index ToTypeIndex(proto::VarType::Type type); template inline void VisitDataType(proto::VarType::Type type, Visitor visitor) { @@ -84,37 +47,23 @@ inline void VisitDataType(proto::VarType::Type type, Visitor visitor) { case proto::VarType::BOOL: visitor.template operator()(); break; - default: - PADDLE_THROW("Not supported"); - } -} - -inline std::string DataTypeToString(const proto::VarType::Type type) { - switch (type) { - case proto::VarType::FP16: - return "float16"; - case proto::VarType::FP32: - return "float32"; - case proto::VarType::FP64: - return "float64"; + case proto::VarType::UINT8: + visitor.template operator()(); + break; case proto::VarType::INT16: - return "int16"; - case proto::VarType::INT32: - return "int32"; - case proto::VarType::INT64: - return "int64"; - case proto::VarType::BOOL: - return "bool"; + visitor.template operator()(); + break; default: - PADDLE_THROW("Not support type %d", type); + PADDLE_THROW("Not supported %d", type); } } +extern std::string DataTypeToString(const proto::VarType::Type type); +extern size_t SizeOfType(std::type_index type); inline std::ostream& operator<<(std::ostream& out, const proto::VarType::Type& type) { out << DataTypeToString(type); return out; } - } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/details/build_strategy.h b/paddle/fluid/framework/details/build_strategy.h new file mode 100644 index 0000000000000000000000000000000000000000..91bdfe6134ffbd1404336c9d6d1222a505084b2b --- /dev/null +++ b/paddle/fluid/framework/details/build_strategy.h @@ -0,0 +1,36 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
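The `BuildStrategy` struct defined just below folds the former `use_default_grad_scale` and `balance_parameter_opt_between_cards` booleans into two enums. A hedged sketch of how they would be set from Python through the pybind11 bindings added at the end of this patch (the `core` import path is an assumption):

```python
import paddle.fluid.core as core

build_strategy = core.ParallelExecutor.BuildStrategy()
# kReduce shards parameter updates across devices (the old
# balance_parameter_opt_between_cards=True); kAllReduce is the default.
build_strategy.reduce_strategy = \
    core.ParallelExecutor.BuildStrategy.ReduceStrategy.Reduce
# kCoeffNumDevice scales loss@grad by 1/num_devices (the old default
# behavior); kCustomized lets the user feed loss@grad explicitly.
build_strategy.gradient_scale_strategy = \
    core.ParallelExecutor.BuildStrategy.GradientScaleStrategy.CoeffNumDevice
```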
+ +#pragma once + +namespace paddle { +namespace framework { +namespace details { + +struct BuildStrategy { + enum class ReduceStrategy { kAllReduce = 0, kReduce = 1 }; + + enum class GradientScaleStrategy { + kCoeffNumDevice = 0, + kOne = 1, + kCustomized = 2, + }; + + ReduceStrategy reduce_{ReduceStrategy::kAllReduce}; + GradientScaleStrategy gradient_scale_{GradientScaleStrategy::kCoeffNumDevice}; +}; + +} // namespace details +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/details/execution_strategy.h b/paddle/fluid/framework/details/execution_strategy.h new file mode 100644 index 0000000000000000000000000000000000000000..e8d510ec955602b5a3f73ca06caa121886eb150b --- /dev/null +++ b/paddle/fluid/framework/details/execution_strategy.h @@ -0,0 +1,29 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +namespace paddle { +namespace framework { +namespace details { + +struct ExecutionStrategy { + size_t num_threads_{0}; + bool use_event_{true}; + bool allow_op_delay_{false}; +}; + +} // namespace details +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/details/fetch_op_handle.cc b/paddle/fluid/framework/details/fetch_op_handle.cc index b1c9dd0d15223f7d1bf6ea44144589f1de927e3e..224e8e1f6efd7a894591ac51c929517cae7539ce 100644 --- a/paddle/fluid/framework/details/fetch_op_handle.cc +++ b/paddle/fluid/framework/details/fetch_op_handle.cc @@ -48,17 +48,18 @@ void FetchOpHandle::RunImpl() { WaitInputVarGenerated(platform::CPUPlace()); tensors_.resize(inputs_.size()); - auto *var_handle = static_cast(inputs_[0]); - auto &var_name = var_handle->name_; platform::CPUPlace cpu; auto &scopes = *local_scopes_; - for (size_t i = 0; i < scopes.size(); ++i) { - auto &scope = scopes[i]; - auto *var = - scope->FindVar(kLocalExecScopeName)->Get()->FindVar(var_name); + for (size_t i = 0; i < inputs_.size(); ++i) { + auto *var_handle = static_cast(inputs_[i]); + auto &scope = scopes.at(var_handle->scope_idx_); + auto *var = scope->FindVar(kLocalExecScopeName) + ->Get() + ->FindVar(var_handle->name_); PADDLE_ENFORCE_NOT_NULL(var, "Cannot find variable %s in execution scope", - var_name); + var_handle->name_); + auto &t = var->Get(); if (platform::is_gpu_place(t.place())) { #ifdef PADDLE_WITH_CUDA diff --git a/paddle/fluid/framework/details/multi_devices_graph_builder.cc b/paddle/fluid/framework/details/multi_devices_graph_builder.cc index 4755559f8d0c5b5fdeb6b56a28fff8a32ea7f82f..45bad58145a1144dfabdd3e15b38d972d57b105e 100644 --- a/paddle/fluid/framework/details/multi_devices_graph_builder.cc +++ b/paddle/fluid/framework/details/multi_devices_graph_builder.cc @@ -37,31 +37,26 @@ MultiDevSSAGraphBuilder::MultiDevSSAGraphBuilder( const std::string &loss_var_name, const std::unordered_set ¶ms, const std::vector &local_scopes, - platform::NCCLContextMap *nccl_ctxs, bool use_default_grad_scale, - bool balance_parameter_opt_between_cards) + 
platform::NCCLContextMap *nccl_ctxs, const BuildStrategy &strategy) : loss_var_name_(loss_var_name), places_(places), local_scopes_(local_scopes), nccl_ctxs_(nccl_ctxs), - balance_parameter_opt_between_cards_( - balance_parameter_opt_between_cards) { + strategy_(strategy) { #else MultiDevSSAGraphBuilder::MultiDevSSAGraphBuilder( const std::vector &places, const std::string &loss_var_name, const std::unordered_set ¶ms, - const std::vector &local_scopes, bool use_default_grad_scale, - bool balance_parameter_opt_between_cards) + const std::vector &local_scopes, const BuildStrategy &strategy) : loss_var_name_(loss_var_name), places_(places), local_scopes_(local_scopes), - balance_parameter_opt_between_cards_( - balance_parameter_opt_between_cards) { + strategy_(strategy) { #endif for (auto &p : params) { grad_names_.insert(GradVarName(p)); } - use_default_grad_scale_ = use_default_grad_scale; } void MultiDevSSAGraphBuilder::CreateOpHandleIOs(SSAGraph *result, @@ -146,7 +141,8 @@ std::unique_ptr MultiDevSSAGraphBuilder::Build( CreateComputationalOps(&result, *op, 1); } else if (IsScaleLossOp(*op)) { // user can customize loss@grad if not use_default_grad_scale_ - if (use_default_grad_scale_) { + if (strategy_.gradient_scale_ != + BuildStrategy::GradientScaleStrategy::kCustomized) { CreateScaleLossGradOp(&result); } is_forwarding = false; @@ -165,19 +161,22 @@ std::unique_ptr MultiDevSSAGraphBuilder::Build( // broadcast, and each gradient is only broadcast once. for (auto &og : op->OutputArgumentNames()) { if (IsParameterGradientOnce(og, &og_has_been_broadcast)) { - if (balance_parameter_opt_between_cards_) { - CreateReduceOp(&result, og, cur_device_id); - var_name_on_devices[cur_device_id].emplace(og); - bcast_var_name_set[cur_device_id].emplace( - og.substr(0, og.size() - strlen(kGradVarSuffix))); - cur_device_id = (cur_device_id + 1) % places_.size(); - } else { - if (IsSparseGradient(var_types, og)) { - CreateReduceOp(&result, og, 0); - CreateBroadcastOp(&result, og, 0); - } else { - InsertNCCLAllReduceOp(&result, og); - } + switch (strategy_.reduce_) { + case BuildStrategy::ReduceStrategy::kReduce: + CreateReduceOp(&result, og, cur_device_id); + var_name_on_devices[cur_device_id].emplace(og); + bcast_var_name_set[cur_device_id].emplace( + og.substr(0, og.size() - strlen(kGradVarSuffix))); + cur_device_id = (cur_device_id + 1) % places_.size(); + break; + case BuildStrategy::ReduceStrategy::kAllReduce: + if (IsSparseGradient(var_types, og)) { + CreateReduceOp(&result, og, 0); + CreateBroadcastOp(&result, og, 0); + } else { + InsertNCCLAllReduceOp(&result, og); + } + break; } } } @@ -303,7 +302,7 @@ bool MultiDevSSAGraphBuilder::IsParameterGradientOnce( int MultiDevSSAGraphBuilder::GetOpDeviceID( const std::vector> &var_name_on_devices, const OpDesc &op) const { - if (!balance_parameter_opt_between_cards_) { + if (strategy_.reduce_ != BuildStrategy::ReduceStrategy::kReduce) { return -1; } diff --git a/paddle/fluid/framework/details/multi_devices_graph_builder.h b/paddle/fluid/framework/details/multi_devices_graph_builder.h index 3a3e9e3b8538f52962e6a5ccd1a177e58d6c2f6b..4f708521884247fc013f0ae336ab683c3fe7ef2f 100644 --- a/paddle/fluid/framework/details/multi_devices_graph_builder.h +++ b/paddle/fluid/framework/details/multi_devices_graph_builder.h @@ -17,6 +17,7 @@ #include #include +#include "paddle/fluid/framework/details/build_strategy.h" #include "paddle/fluid/framework/details/ssa_graph_builder.h" namespace paddle { @@ -36,15 +37,13 @@ class MultiDevSSAGraphBuilder : public 
SSAGraphBuilder { const std::unordered_set ¶ms, const std::vector &local_scopes, platform::NCCLContextMap *nccl_ctxs, - bool use_default_grad_scale, - bool balance_parameter_opt_between_cards); + const BuildStrategy &strategy); #else MultiDevSSAGraphBuilder(const std::vector &places, const std::string &loss_var_name, const std::unordered_set ¶ms, const std::vector &local_scopes, - bool use_default_grad_scale, - bool balance_parameter_opt_between_cards); + const BuildStrategy &strategy); #endif std::unique_ptr Build(const ProgramDesc &program) const override; @@ -62,8 +61,6 @@ class MultiDevSSAGraphBuilder : public SSAGraphBuilder { #ifdef PADDLE_WITH_CUDA platform::NCCLContextMap *nccl_ctxs_; #endif - bool balance_parameter_opt_between_cards_; - bool use_default_grad_scale_; bool IsScaleLossOp(const OpDesc &op) const; @@ -105,6 +102,9 @@ class MultiDevSSAGraphBuilder : public SSAGraphBuilder { bool IsSparseGradient( const std::unordered_map &var_types, const std::string &og) const; + + private: + BuildStrategy strategy_; }; } // namespace details } // namespace framework diff --git a/paddle/fluid/framework/details/op_handle_base.h b/paddle/fluid/framework/details/op_handle_base.h index fe1735d05dde5f09d5c72c68e5002d16f0083eb5..8f94206a87dbae8a81727ca48718886bbabbe25c 100644 --- a/paddle/fluid/framework/details/op_handle_base.h +++ b/paddle/fluid/framework/details/op_handle_base.h @@ -70,6 +70,14 @@ class OpHandleBase { const std::vector &Inputs() const { return inputs_; } + size_t NoDupInputSize() const { + std::unordered_set res; + for (auto *var : inputs_) { + res.emplace(var); + } + return res.size(); + } + const std::vector &Outputs() const { return outputs_; } protected: diff --git a/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc b/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc index e90523ebe8dc720d10034e3af9b0e51bb7a2fde9..815f739371e77d953a28be99b38ec1b8ff26506c 100644 --- a/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc +++ b/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc @@ -18,18 +18,17 @@ namespace paddle { namespace framework { namespace details { ThreadedSSAGraphExecutor::ThreadedSSAGraphExecutor( - size_t num_threads, bool use_event, - const std::vector &local_scopes, + const ExecutionStrategy &strategy, const std::vector &local_scopes, const std::vector &places, - std::unique_ptr &&graph, bool allow_op_delay) + std::unique_ptr &&graph) : SSAGraphExecutor(std::move(graph)), - pool_(num_threads >= 2 ? new ::ThreadPool(num_threads) : nullptr), + pool_(strategy.num_threads_ >= 2 ? new ::ThreadPool(strategy.num_threads_) + : nullptr), local_scopes_(local_scopes), places_(places), fetch_ctxs_(places), - use_event_(use_event), running_ops_(0), - allow_op_delay_(allow_op_delay) {} + strategy_(strategy) {} FeedFetchList ThreadedSSAGraphExecutor::Run( const std::vector &fetch_tensors) { @@ -86,7 +85,7 @@ FeedFetchList ThreadedSSAGraphExecutor::Run( // // NOTE: DelayedOps have a lower priority. It will be scheduled after all // ready_ops have been performed. 
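Two executor details in the hunks below are easy to miss: `NoDupInputSize()` (added above) deduplicates repeated input variables so an op's pending dependency count is not inflated, and the scheduling loop only drains `delayed_ops` when nothing else can make progress. A small Python rendering of that scheduling rule (pseudocode, not PaddlePaddle API):

```python
def pick_next_batch(ready_ops, delayed_ops, running_ops, allow_op_delay):
    # Delayed ops (multi-device transfers) run only when no compute op is
    # ready and none is in flight, so postponing them never stalls the graph.
    if not ready_ops and allow_op_delay and running_ops == 0:
        return list(delayed_ops)  # flush the low-priority queue
    return list(ready_ops)
```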
- if (ready_ops.empty() && allow_op_delay_ && running_ops_ == 0) { + if (ready_ops.empty() && strategy_.allow_op_delay_ && running_ops_ == 0) { run_all_ops(delayed_ops); } else { run_all_ops(ready_ops); @@ -113,7 +112,7 @@ FeedFetchList ThreadedSSAGraphExecutor::Run( auto &deps = pending_ops[op]; --deps; if (deps == 0) { - if (op->IsMultiDeviceTransfer() && allow_op_delay_) { + if (op->IsMultiDeviceTransfer() && strategy_.allow_op_delay_) { delayed_ops.insert(op); } else { ready_ops.insert(op); @@ -175,7 +174,7 @@ void ThreadedSSAGraphExecutor::InsertFetchOps( void ThreadedSSAGraphExecutor::InsertPendingOp( std::unordered_map *pending_ops, OpHandleBase *op_instance) const { - pending_ops->insert({op_instance, op_instance->Inputs().size()}); + pending_ops->insert({op_instance, op_instance->NoDupInputSize()}); } void ThreadedSSAGraphExecutor::InsertPendingVar( @@ -191,7 +190,7 @@ void ThreadedSSAGraphExecutor::RunOp( auto op_run = [ready_var_q, op, this] { try { VLOG(10) << op << " " << op->Name() << " : " << op->DebugString(); - op->Run(use_event_); + op->Run(strategy_.use_event_); VLOG(10) << op << " " << op->Name() << " Done "; running_ops_--; ready_var_q->Extend(op->Outputs()); diff --git a/paddle/fluid/framework/details/threaded_ssa_graph_executor.h b/paddle/fluid/framework/details/threaded_ssa_graph_executor.h index f18a88526b3238220fc56fd07299643d32c8b58b..1f7f88d75218e757e4555ad093f3cd6558f624dd 100644 --- a/paddle/fluid/framework/details/threaded_ssa_graph_executor.h +++ b/paddle/fluid/framework/details/threaded_ssa_graph_executor.h @@ -23,6 +23,7 @@ #include #include "ThreadPool.h" // ThreadPool in thrird party #include "paddle/fluid/framework/blocking_queue.h" +#include "paddle/fluid/framework/details/execution_strategy.h" #include "paddle/fluid/framework/details/fetch_op_handle.h" #include "paddle/fluid/framework/details/ssa_graph_executor.h" @@ -34,11 +35,10 @@ namespace details { class ThreadedSSAGraphExecutor : public SSAGraphExecutor { public: - ThreadedSSAGraphExecutor(size_t num_threads, bool use_event, + ThreadedSSAGraphExecutor(const ExecutionStrategy &strategy, const std::vector &local_scopes, const std::vector &places, - std::unique_ptr &&graph, - bool allow_op_delay); + std::unique_ptr &&graph); // Run a SSAGraph by a thread pool // Use topological sort algorithm @@ -55,10 +55,8 @@ class ThreadedSSAGraphExecutor : public SSAGraphExecutor { std::vector local_scopes_; std::vector places_; platform::DeviceContextPool fetch_ctxs_; - const bool use_event_; std::unique_ptr exception_; std::atomic running_ops_; - bool allow_op_delay_; void InsertPendingOp(std::unordered_map *pending_ops, OpHandleBase *op_instance) const; @@ -74,6 +72,9 @@ class ThreadedSSAGraphExecutor : public SSAGraphExecutor { std::unordered_map *pending_ops, std::unordered_set *pending_vars, BlockingQueue *ready_vars, FeedFetchList *fetch_data); + + private: + ExecutionStrategy strategy_; }; } // namespace details diff --git a/paddle/fluid/framework/framework.proto b/paddle/fluid/framework/framework.proto index 96f53dc1bc8747e1b8ea84166614f98ff363ae5e..d35125fe8c3c8018c38650dc87b2b1474ded6058 100644 --- a/paddle/fluid/framework/framework.proto +++ b/paddle/fluid/framework/framework.proto @@ -101,6 +101,9 @@ message VarType { FP16 = 4; FP32 = 5; FP64 = 6; + // Tensor is used in C++. 
+ SIZE_T = 19; + UINT8 = 20; // Other types that may need additional descriptions LOD_TENSOR = 7; diff --git a/paddle/fluid/framework/lod_tensor_test.cc b/paddle/fluid/framework/lod_tensor_test.cc index 77e5ec4c7dd14b7ebb6d606b8c401ee714259d40..2ceffc93319359683e87e7fec2d18784c9bf02f3 100644 --- a/paddle/fluid/framework/lod_tensor_test.cc +++ b/paddle/fluid/framework/lod_tensor_test.cc @@ -228,11 +228,12 @@ TEST(LoD, CheckAbsLoD) { ASSERT_FALSE(CheckAbsLoD(abs_lod0)); } -TEST(LoDTensor, RecordIO) { +template +static void TestRecordIO() { LoDTensor tensor; - int* tmp = tensor.mutable_data(make_ddim({4, 5}), platform::CPUPlace()); + T* tmp = tensor.mutable_data(make_ddim({4, 5}), platform::CPUPlace()); for (int i = 0; i < 20; ++i) { - tmp[i] = i; + tmp[i] = static_cast(i); } std::stringstream* stream = new std::stringstream(); @@ -247,7 +248,7 @@ TEST(LoDTensor, RecordIO) { auto assert_tensor_ok = [](const LoDTensor& tensor) { for (int i = 0; i < 20; ++i) { - ASSERT_EQ(tensor.data()[i], i); + ASSERT_EQ(tensor.data()[i], static_cast(i)); } }; @@ -265,5 +266,13 @@ TEST(LoDTensor, RecordIO) { } } +TEST(LoDTensor, RecordIO) { + TestRecordIO(); + TestRecordIO(); + TestRecordIO(); + TestRecordIO(); + TestRecordIO(); +} + } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/op_kernel_type_test.cc b/paddle/fluid/framework/op_kernel_type_test.cc index d37ce149ce3df63692b41289bb03448d54e392f5..db95861c510b52a5b52229541434e6437d3fb9f4 100644 --- a/paddle/fluid/framework/op_kernel_type_test.cc +++ b/paddle/fluid/framework/op_kernel_type_test.cc @@ -27,7 +27,7 @@ TEST(OpKernelType, ToString) { LibraryType::kCUDNN); ASSERT_EQ(paddle::framework::KernelTypeToString(op_kernel_type), - "data_type[float32]:data_layout[NCHW]:place[CPUPlace]:library_type[" + "data_type[float]:data_layout[NCHW]:place[CPUPlace]:library_type[" "CUDNN]"); } diff --git a/paddle/fluid/framework/operator.h b/paddle/fluid/framework/operator.h index a4eb6f706edab9479cbce436311eb96da8845646..2f480e00c100d579e100de17d3feb957f5ef6167 100644 --- a/paddle/fluid/framework/operator.h +++ b/paddle/fluid/framework/operator.h @@ -33,7 +33,6 @@ limitations under the License. 
*/ #include "paddle/fluid/framework/tensor.h" #include "paddle/fluid/platform/device_context.h" #include "paddle/fluid/platform/variant.h" -#include "paddle/utils/Error.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/parallel_executor.cc b/paddle/fluid/framework/parallel_executor.cc index 95e807c0afa45bc4f4feb84d450b2d0584bc3b28..50c3468d556bfe05d6c41906cf35cb671f711b1e 100644 --- a/paddle/fluid/framework/parallel_executor.cc +++ b/paddle/fluid/framework/parallel_executor.cc @@ -52,13 +52,12 @@ std::vector &ParallelExecutor::GetLocalScopes() { } ParallelExecutor::ParallelExecutor( - size_t num_threads, bool use_event, const std::vector &places, const std::unordered_set ¶ms, const std::unordered_set &bcast_vars, const ProgramDesc &main_program, const std::string &loss_var_name, - Scope *scope, const std::vector &local_scopes, bool allow_op_delay, - bool use_default_grad_scale, bool balance_parameter_opt_between_cards, + Scope *scope, const std::vector &local_scopes, + const ExecutionStrategy &exec_strategy, const BuildStrategy &build_strategy, size_t num_trainers, size_t trainer_id) : member_(new ParallelExecutorPrivate(places)) { member_->global_scope_ = scope; @@ -100,18 +99,16 @@ ParallelExecutor::ParallelExecutor( #ifdef PADDLE_WITH_CUDA details::MultiDevSSAGraphBuilder builder( member_->places_, loss_var_name, params, member_->local_scopes_, - member_->nccl_ctxs_.get(), use_default_grad_scale, - balance_parameter_opt_between_cards); + member_->nccl_ctxs_.get(), build_strategy); #else - details::MultiDevSSAGraphBuilder builder( - member_->places_, loss_var_name, params, member_->local_scopes_, - use_default_grad_scale, balance_parameter_opt_between_cards); + details::MultiDevSSAGraphBuilder builder(member_->places_, loss_var_name, + params, member_->local_scopes_, + build_strategy); #endif auto graph = builder.Build(main_program); member_->executor_.reset(new details::ThreadedSSAGraphExecutor( - num_threads, use_event, member_->local_scopes_, places, std::move(graph), - allow_op_delay)); + exec_strategy, member_->local_scopes_, places, std::move(graph))); // Step 3. Create vars in each scope; for (auto *var : main_program.Block(0).AllVars()) { diff --git a/paddle/fluid/framework/parallel_executor.h b/paddle/fluid/framework/parallel_executor.h index 9e279876cfeef20a1921f8bd1c27046a477b9f56..5247e790649e76567f4527d54499d6e95dac5c27 100644 --- a/paddle/fluid/framework/parallel_executor.h +++ b/paddle/fluid/framework/parallel_executor.h @@ -14,57 +14,60 @@ limitations under the License. 
*/ #pragma once +#include #include #include #include +#include "paddle/fluid/framework/details/execution_strategy.h" #include "paddle/fluid/framework/executor.h" #include "paddle/fluid/framework/op_info.h" #include "paddle/fluid/framework/program_desc.h" #include "paddle/fluid/framework/scope.h" #include "paddle/fluid/framework/tensor.h" #include "paddle/fluid/platform/device_context.h" - namespace paddle { namespace framework { class ParallelExecutorPrivate; +using details::BuildStrategy; +using details::ExecutionStrategy; + class ParallelExecutor { DISABLE_COPY_AND_ASSIGN(ParallelExecutor); public: - explicit ParallelExecutor(size_t num_threads, bool use_event, - const std::vector& places, - const std::unordered_set& params, - const std::unordered_set& bcast_vars, - const ProgramDesc& main_program, - const std::string& loss_var_name, Scope* scope, - const std::vector& local_scopes, - bool allow_op_delay, bool use_default_grad_scale, - bool balance_parameter_opt_between_cards, + explicit ParallelExecutor(const std::vector &places, + const std::unordered_set ¶ms, + const std::unordered_set &bcast_vars, + const ProgramDesc &main_program, + const std::string &loss_var_name, Scope *scope, + const std::vector &local_scopes, + const ExecutionStrategy &exec_strategy, + const BuildStrategy &build_strategy, size_t num_trainers = 1, size_t trainer_id = 0); ~ParallelExecutor(); - std::vector& GetLocalScopes(); + std::vector &GetLocalScopes(); /** * Feed tensors to local scopes. The size of tensors should be equal to the * size of local scopes. */ void FeedTensorsIntoLocalScopes( - const std::vector>& tensors); + const std::vector> &tensors); void FeedAndSplitTensorIntoLocalScopes( - const std::unordered_map& tensors); + const std::unordered_map &tensors); - void Run(const std::vector& fetch_tensors, - const std::string& fetched_var_name); + void Run(const std::vector &fetch_tensors, + const std::string &fetched_var_name); - void BCastParamsToGPUs(const std::unordered_set& vars) const; + void BCastParamsToGPUs(const std::unordered_set &vars) const; private: - ParallelExecutorPrivate* member_; + ParallelExecutorPrivate *member_; }; } // namespace framework diff --git a/paddle/fluid/framework/tensor_impl.h b/paddle/fluid/framework/tensor_impl.h index f49d1a47a325b2aac6185073203df124be18b54d..0a1db7758bd9ec0dac133efcbf495de1d690021d 100644 --- a/paddle/fluid/framework/tensor_impl.h +++ b/paddle/fluid/framework/tensor_impl.h @@ -13,54 +13,14 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #pragma once +#include "paddle/fluid/framework/data_type.h" #include "paddle/fluid/memory/memcpy.h" #include "paddle/fluid/platform/enforce.h" #include "paddle/fluid/platform/float16.h" namespace paddle { namespace framework { - -template -struct SizeOfTypeFunctor; - -template -struct SizeOfTypeFunctor { - size_t operator()(std::type_index type) const { - if (typeid(T).hash_code() == type.hash_code()) { - return sizeof(T); - } else { - return 0UL; - } - } -}; - -template <> -struct SizeOfTypeFunctor<> { - size_t operator()(std::type_index type) const { return 0UL; } -}; - -template -struct SizeOfTypeFunctor { - size_t operator()(std::type_index type) const { - SizeOfTypeFunctor head; - size_t head_size = head(type); - if (head_size != 0) { - return head_size; - } - SizeOfTypeFunctor tail; - return tail(type); - } -}; - -static inline size_t SizeOfType(std::type_index type) { - SizeOfTypeFunctor - functor; - size_t size = functor(type); - PADDLE_ENFORCE(size != 0UL, "Cannot get size of type %s", type.name()); - return size; -} - +extern size_t SizeOfType(std::type_index type); inline void Tensor::check_memory_size() const { PADDLE_ENFORCE_NOT_NULL( holder_, "Tensor holds no memory. Call Tensor::mutable_data first."); diff --git a/paddle/fluid/inference/analysis/device.h b/paddle/fluid/inference/analysis/device.h index d77cf799164234d45d5a0e20153748290f7bf584..9fad445edecf09a45551d8db9ce530329037cebc 100644 --- a/paddle/fluid/inference/analysis/device.h +++ b/paddle/fluid/inference/analysis/device.h @@ -11,6 +11,7 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ +#pragma once #pragma once diff --git a/paddle/fluid/inference/tensorrt/convert/op_converter.h b/paddle/fluid/inference/tensorrt/convert/op_converter.h index abc9ebf472498f6653d5bb1113ae2f3ce7e5a923..1cd3ed9a00acead2599420f88499bd0d74c2974b 100644 --- a/paddle/fluid/inference/tensorrt/convert/op_converter.h +++ b/paddle/fluid/inference/tensorrt/convert/op_converter.h @@ -49,7 +49,7 @@ class OpConverter { // convert fluid block to tensorrt network void ConvertBlock(const framework::proto::BlockDesc& block, TensorRTEngine* engine) { - for (size_t i = 0; i < block.ops_size(); i++) { + for (int i = 0; i < block.ops_size(); i++) { const auto& op = block.ops(i); OpConverter::Run(op, engine); } diff --git a/paddle/fluid/operators/CMakeLists.txt b/paddle/fluid/operators/CMakeLists.txt index 5e4788dd47dee44c4fd05a549d9867e1cf9ef044..ace6b083e93140fd5dda4032349f486bd0a9ccbf 100644 --- a/paddle/fluid/operators/CMakeLists.txt +++ b/paddle/fluid/operators/CMakeLists.txt @@ -186,11 +186,7 @@ endif() add_subdirectory(detail) if(WITH_DISTRIBUTE) - if(WITH_GPU) - op_library(gen_nccl_id_op DEPS nccl_common) - else() - set(DEPS_OPS ${DEPS_OPS} gen_nccl_id_op) - endif() + set(DISTRIBUTE_DEPS sendrecvop_grpc grpc++_unsecure grpc_unsecure gpr cares zlib protobuf) set(DISTRIBUTE_COMPILE_FLAGS "-Wno-non-virtual-dtor -Wno-error=non-virtual-dtor -Wno-error=delete-non-virtual-dtor") op_library(send_op DEPS ${DISTRIBUTE_DEPS}) @@ -210,6 +206,12 @@ if(WITH_DISTRIBUTE) set_source_files_properties(send_recv_op_test.cc PROPERTIES COMPILE_FLAGS ${DISTRIBUTE_COMPILE_FLAGS}) cc_test(test_send_recv SRCS send_recv_op_test.cc DEPS prefetch_op send_op listen_and_serv_op sum_op executor) cc_test(test_send_nccl_id SRCS test_send_nccl_id.cc DEPS send_op listen_and_serv_op executor) + if(WITH_GPU) + op_library(gen_nccl_id_op DEPS nccl_common sendrecvop_grpc) + set_source_files_properties(gen_nccl_id_op.cc PROPERTIES COMPILE_FLAGS ${DISTRIBUTE_COMPILE_FLAGS}) + else() + set(DEPS_OPS ${DEPS_OPS} gen_nccl_id_op) + endif() else() set(DEPS_OPS ${DEPS_OPS} send_op prefetch_op recv_op listen_and_serv_op send_vars_op send_barrier_op fetch_barrier_op gen_nccl_id_op) endif() diff --git a/paddle/fluid/operators/detail/grpc_server.cc b/paddle/fluid/operators/detail/grpc_server.cc index e6ee28ea8d920ef80fead258a9bd0d5f6762c879..d09f8479b765ad26cc202bfdb2692828213c7956 100644 --- a/paddle/fluid/operators/detail/grpc_server.cc +++ b/paddle/fluid/operators/detail/grpc_server.cc @@ -306,7 +306,7 @@ void AsyncGRPCServer::TryToRegisterNewPrefetchOne() { } RequestPrefetch* prefetch = new RequestPrefetch(&service_, cq_prefetch_.get(), sync_mode_, scope_, - dev_ctx_, executor_, program_, prefetch_ctx_); + dev_ctx_, executor_, program_, prefetch_ctx_.get()); VLOG(4) << "Create RequestPrefetch status:" << prefetch->Status(); } diff --git a/paddle/fluid/operators/detail/grpc_server.h b/paddle/fluid/operators/detail/grpc_server.h index 18f1bc53d0f561f412a5bbbe018bc3d427ac9ef9..238aaa29634a7eff65429c27aa3538a185723eb2 100644 --- a/paddle/fluid/operators/detail/grpc_server.h +++ b/paddle/fluid/operators/detail/grpc_server.h @@ -64,8 +64,9 @@ class AsyncGRPCServer final { void SetExecutor(framework::Executor *executor) { executor_ = executor; } - void SetPrefetchPreparedCtx(framework::ExecutorPrepareContext *prepared) { - prefetch_ctx_ = prepared; + void SetPrefetchPreparedCtx( + std::unique_ptr prepared) { + prefetch_ctx_.reset(prepared.release()); } int GetSelectedPort() const { return selected_port_; } @@ -116,7 +117,7 @@ class AsyncGRPCServer final { 
std::unique_ptr t_get_; std::unique_ptr t_prefetch_; - framework::ExecutorPrepareContext *prefetch_ctx_; + std::unique_ptr prefetch_ctx_; framework::ProgramDesc *program_; framework::Executor *executor_; int selected_port_; diff --git a/paddle/fluid/operators/detail/grpc_server_test.cc b/paddle/fluid/operators/detail/grpc_server_test.cc index 25b95d608d10d6e456d5f563ce9fbe35d812cb0f..b8db0ad987cdfaec1fc9236c3f26e88891376dce 100644 --- a/paddle/fluid/operators/detail/grpc_server_test.cc +++ b/paddle/fluid/operators/detail/grpc_server_test.cc @@ -100,7 +100,7 @@ void StartServer(const std::string& endpoint) { InitTensorsOnServer(&scope, &place, 10); rpc_service_->SetProgram(&program); - rpc_service_->SetPrefetchPreparedCtx(prepared.get()); + rpc_service_->SetPrefetchPreparedCtx(std::move(prepared)); rpc_service_->SetDevCtx(&ctx); rpc_service_->SetScope(&scope); rpc_service_->SetExecutor(&exe); diff --git a/paddle/fluid/operators/listen_and_serv_op.cc b/paddle/fluid/operators/listen_and_serv_op.cc index a29e0cd52cfccf242a6490822234045e6eb66c0f..abc88d3eb1514e159f4a880f44ecc0c0960a73d9 100644 --- a/paddle/fluid/operators/listen_and_serv_op.cc +++ b/paddle/fluid/operators/listen_and_serv_op.cc @@ -322,8 +322,7 @@ void ListenAndServOp::RunImpl(const framework::Scope &scope, // prepare for prefetch VLOG(3) << "prefetch block id is " << prefetch_block->ID(); auto prefetch_prepared = executor.Prepare(*program, prefetch_block->ID()); - rpc_service_->SetPrefetchPreparedCtx(prefetch_prepared.get()); - prefetch_prepared.release(); + rpc_service_->SetPrefetchPreparedCtx(std::move(prefetch_prepared)); // start the server listening after all member initialized. server_thread_.reset(new std::thread(RunServer, rpc_service_)); diff --git a/paddle/fluid/operators/load_combine_op.cc b/paddle/fluid/operators/load_combine_op.cc index b5522dd246f250f02d69c0ba749ae6043eb810d6..0522a94195786c767194ec727d982a60451e7c62 100644 --- a/paddle/fluid/operators/load_combine_op.cc +++ b/paddle/fluid/operators/load_combine_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include - +#include "paddle/fluid/framework/data_type_transform.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/platform/device_context.h" @@ -31,6 +31,7 @@ class LoadCombineOp : public framework::OperatorBase { void RunImpl(const framework::Scope &scope, const platform::Place &place) const override { auto filename = Attr("file_path"); + auto load_as_fp16 = Attr("load_as_fp16"); std::ifstream fin(filename); PADDLE_ENFORCE(static_cast(fin), @@ -59,17 +60,25 @@ class LoadCombineOp : public framework::OperatorBase { // Get data from fin to tensor DeserializeFromStream(fin, tensor, dev_ctx); - if (platform::is_gpu_place(place)) { - // copy CPU to GPU - framework::LoDTensor cpu_tensor; - cpu_tensor.ShareDataWith(*tensor); - cpu_tensor.set_lod(tensor->lod()); - - // reset tensor + auto in_dtype = framework::ToDataType(tensor->type()); + auto out_dtype = + load_as_fp16 ? 
framework::proto::VarType::FP16 : in_dtype; + + if (in_dtype != out_dtype) { + // convert to float16 tensor + auto in_kernel_type = framework::OpKernelType(in_dtype, place); + auto out_kernel_type = framework::OpKernelType(out_dtype, place); + framework::LoDTensor fp16_tensor; + // copy LoD info to the new tensor + fp16_tensor.set_lod(tensor->lod()); + framework::TransDataType(in_kernel_type, out_kernel_type, *tensor, + &fp16_tensor); + + // reset output tensor out_var->Clear(); tensor = out_var->GetMutable(); - tensor->set_lod(cpu_tensor.lod()); - TensorCopy(cpu_tensor, place, dev_ctx, tensor); + tensor->set_lod(fp16_tensor.lod()); + tensor->ShareDataWith(fp16_tensor); } } } @@ -82,6 +91,13 @@ class LoadCombineOpProtoMaker : public framework::OpProtoAndCheckerMaker { "Out", "(vector) The output LoDTensors that will be read from the input file.") .AsDuplicable(); + AddAttr( + "load_as_fp16", + "(boolean, default false)" + "If true, the tensor will be first loaded and then " + "converted to float16 data type. Otherwise, the tensor will be " + "directly loaded without data type conversion.") + .SetDefault(false); AddAttr("file_path", "(string) " "LoDTensors will be loaded from \"file_path\".") diff --git a/paddle/fluid/operators/math/math_function.cc b/paddle/fluid/operators/math/math_function.cc index d62ea387cc55c7399973b6f35bace491a49666dc..d39154c6f88d6d17c1719eb9a5b048211f4bb52b 100644 --- a/paddle/fluid/operators/math/math_function.cc +++ b/paddle/fluid/operators/math/math_function.cc @@ -38,7 +38,9 @@ template struct SetConstant; template struct Transpose; \ template struct Transpose; \ template struct Transpose; \ - template struct Transpose; + template struct Transpose; \ + template struct Transpose; \ + template struct Transpose; DEFINE_CPU_TRANS(1); DEFINE_CPU_TRANS(2); diff --git a/paddle/fluid/operators/save_load_combine_op_test.cc b/paddle/fluid/operators/save_load_combine_op_test.cc index 47618c51d98eb9f58988f82c0aee0083565d81a6..4743e0d9499b111d8baa921dbb245431713fd7a8 100644 --- a/paddle/fluid/operators/save_load_combine_op_test.cc +++ b/paddle/fluid/operators/save_load_combine_op_test.cc @@ -139,8 +139,9 @@ TEST(SaveLoadCombineOp, CPU) { CheckValues(expect4, actual4, expect_lod4, actual_lod4, numel4); } -// FP16 version of SaveLoadCombineOp Test -TEST(SaveLoadCombineFP16Op, CPU) { +// FP16 version of SaveLoadCombineOp Test, only altering the saving aspect +// to save as FP16. +TEST(SaveCombineFP16Op, CPU) { paddle::framework::Scope scope; paddle::platform::CPUPlace place; @@ -169,7 +170,7 @@ TEST(SaveLoadCombineFP16Op, CPU) { 20, 50, lod4, "test_var4", place, &scope, &expect_lod4); // Set attributes - std::string filename = "check_tensor_fp16.ls"; + std::string filename = "check_tensor_fp16_save.ls"; paddle::framework::AttributeMap attrs; attrs.insert({"file_path", std::string(filename)}); attrs.insert({"save_as_fp16", true}); @@ -216,6 +217,89 @@ TEST(SaveLoadCombineFP16Op, CPU) { actual_lod4, numel4); } +// FP16 version of SaveLoadCombineOp Test, only altering the loading aspect +// to load tensors with FP16 precision. 
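The `LoadCombineFP16Op` test that follows exercises the new `load_as_fp16` attribute end to end. From Python, the attribute can be set by appending a raw `load_combine` op; a hedged sketch in which the variable names, shapes, and file name are placeholders:

```python
import paddle.fluid as fluid

prog = fluid.Program()
block = prog.global_block()
w = block.create_var(name="w", persistable=True)
b = block.create_var(name="b", persistable=True)
block.append_op(
    type="load_combine",
    inputs={},
    outputs={"Out": [w, b]},
    attrs={
        "file_path": "params.bin",
        # New attribute from this patch: deserialize, then cast to FP16.
        "load_as_fp16": True,
    })
```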
+TEST(LoadCombineFP16Op, CPU) { + paddle::framework::Scope scope; + paddle::platform::CPUPlace place; + + std::vector lod1 = {0, 1, 2, 3, 10}; + int numel1 = 100; + paddle::framework::LoD expect_lod1; + float* expect1 = CreateForSaveCombineOp( + 10, 10, lod1, "test_var1", place, &scope, &expect_lod1); + + std::vector lod2 = {0, 2, 5, 10}; + int numel2 = 200; + paddle::framework::LoD expect_lod2; + float* expect2 = CreateForSaveCombineOp( + 10, 20, lod2, "test_var2", place, &scope, &expect_lod2); + + std::vector lod3 = {0, 20}; + int numel3 = 4000; + paddle::framework::LoD expect_lod3; + float* expect3 = CreateForSaveCombineOp( + 20, 200, lod3, "test_var3", place, &scope, &expect_lod3); + + std::vector lod4 = {0, 1, 20}; + int numel4 = 1000; + paddle::framework::LoD expect_lod4; + float* expect4 = CreateForSaveCombineOp( + 20, 50, lod4, "test_var4", place, &scope, &expect_lod4); + + // Set attributes + std::string filename = "check_tensor_fp16_load.ls"; + paddle::framework::AttributeMap attrs; + attrs.insert({"file_path", std::string(filename)}); + + // Run the save_combine_op + auto save_combine_op = paddle::framework::OpRegistry::CreateOp( + "save_combine", + {{"X", {"test_var1", "test_var2", "test_var3", "test_var4"}}}, {}, attrs); + save_combine_op->Run(scope, place); + + // Set up output vars + auto load_var1 = scope.Var("out_var1"); + auto load_var2 = scope.Var("out_var2"); + auto load_var3 = scope.Var("out_var3"); + auto load_var4 = scope.Var("out_var4"); + + attrs.insert({"load_as_fp16", true}); + // Run the load_combine_op + auto load_combine_op = paddle::framework::OpRegistry::CreateOp( + "load_combine", {}, + {{"Out", {"out_var1", "out_var2", "out_var3", "out_var4"}}}, attrs); + load_combine_op->Run(scope, place); + + auto* target1 = load_var1->GetMutable(); + auto* target2 = load_var2->GetMutable(); + auto* target3 = load_var3->GetMutable(); + auto* target4 = load_var4->GetMutable(); + + paddle::framework::LoD actual_lod1, actual_lod2, actual_lod3, actual_lod4; + paddle::platform::float16* actual1 = + GetValuesAfterLoadCombineOp(target1, scope, + &actual_lod1); + paddle::platform::float16* actual2 = + GetValuesAfterLoadCombineOp(target2, scope, + &actual_lod2); + paddle::platform::float16* actual3 = + GetValuesAfterLoadCombineOp(target3, scope, + &actual_lod3); + paddle::platform::float16* actual4 = + GetValuesAfterLoadCombineOp(target4, scope, + &actual_lod4); + + CheckValues(expect1, actual1, expect_lod1, + actual_lod1, numel1); + CheckValues(expect2, actual2, expect_lod2, + actual_lod2, numel2); + CheckValues(expect3, actual3, expect_lod3, + actual_lod3, numel3); + CheckValues(expect4, actual4, expect_lod4, + actual_lod4, numel4); +} + // Test with original SaveLoadTest TEST(SaveLoadTestWithCombineOp, CPU) { paddle::framework::Scope scope; diff --git a/paddle/fluid/operators/smooth_l1_loss_op.cc b/paddle/fluid/operators/smooth_l1_loss_op.cc index c44c5f164b2d84616e9a85813e0ee5219b41df28..622420c1c33a62994c81ad9534c4fa37a4a1fa1a 100644 --- a/paddle/fluid/operators/smooth_l1_loss_op.cc +++ b/paddle/fluid/operators/smooth_l1_loss_op.cc @@ -105,7 +105,7 @@ class SmoothL1LossGradOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext* ctx) const override { - auto in_dims = ctx->GetInputDim("X"); + auto in_dims = ctx->GetInputDim("Diff"); auto out_dims = ctx->GetInputDim(framework::GradVarName("Out")); PADDLE_ENFORCE_GE(out_dims.size(), 2, @@ -127,12 +127,33 @@ class SmoothL1LossGradOp : 
public framework::OperatorWithKernel {
  }
};

+class SmoothL1LossGradMaker : public framework::SingleGradOpDescMaker {
+ public:
+  using framework::SingleGradOpDescMaker::SingleGradOpDescMaker;
+
+ protected:
+  std::unique_ptr<framework::OpDesc> Apply() const override {
+    auto* op = new framework::OpDesc();
+    op->SetType("smooth_l1_loss_grad");
+    op->SetInput("InsideWeight", Input("InsideWeight"));
+    op->SetInput("OutsideWeight", Input("OutsideWeight"));
+    op->SetInput("Diff", Output("Diff"));
+    op->SetInput(framework::GradVarName("Out"), OutputGrad("Out"));
+
+    op->SetAttrMap(Attrs());
+
+    op->SetOutput(framework::GradVarName("X"), InputGrad("X"));
+    op->SetOutput(framework::GradVarName("Y"), InputGrad("Y"));
+    return std::unique_ptr<framework::OpDesc>(op);
+  }
+};
+
 }  // namespace operators
 }  // namespace paddle

 namespace ops = paddle::operators;
 REGISTER_OPERATOR(smooth_l1_loss, ops::SmoothL1LossOp, ops::SmoothL1LossOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>);
+                  ops::SmoothL1LossGradMaker);
 REGISTER_OPERATOR(smooth_l1_loss_grad, ops::SmoothL1LossGradOp);
 REGISTER_OP_CPU_KERNEL(
     smooth_l1_loss,
diff --git a/paddle/fluid/platform/CMakeLists.txt b/paddle/fluid/platform/CMakeLists.txt
index 598fd4d419078a973647f2f8f20e8a12c8115a8b..79e3c26fef51b4d27520a8079de1074d72f89617 100644
--- a/paddle/fluid/platform/CMakeLists.txt
+++ b/paddle/fluid/platform/CMakeLists.txt
@@ -1,4 +1,4 @@
-proto_library(profiler_proto SRCS profiler.proto)
+proto_library(profiler_proto SRCS profiler.proto DEPS framework_proto)
 py_proto_compile(profiler_py_proto SRCS profiler.proto)
 add_custom_target(profiler_py_proto_init ALL COMMAND ${CMAKE_COMMAND} -E touch __init__.py)
diff --git a/paddle/fluid/platform/nccl_helper.h b/paddle/fluid/platform/nccl_helper.h
index e30c1a9ebf08365a9856fb32b1ce5790869e2b33..09367889a9517956ad01ad2847c31e2633cc643d 100644
--- a/paddle/fluid/platform/nccl_helper.h
+++ b/paddle/fluid/platform/nccl_helper.h
@@ -53,7 +53,7 @@ class NCCLGroupGuard {
   }

   inline ~NCCLGroupGuard() {
-    PADDLE_ENFORCE(dynload::ncclGroupEnd());
+    CHECK_EQ(dynload::ncclGroupEnd(), ncclSuccess);
     NCCLMutex().unlock();
   }
 };
diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc
index b62291a99d34457dd17bf2bcafc1fc611419f086..50a1c07251b5bc4e7cc27de63f5457d3f94daef5 100644
--- a/paddle/fluid/pybind/pybind.cc
+++ b/paddle/fluid/pybind/pybind.cc
@@ -494,23 +494,61 @@ All parameter, weight, gradient are variables in Paddle.
   m.def("disable_profiler", platform::DisableProfiler);
   m.def("reset_profiler", platform::ResetProfiler);

-  py::class_<ParallelExecutor>(m, "ParallelExecutor")
-      .def("__init__",
-           [](ParallelExecutor &self, size_t num_threads, bool use_event,
-              const std::vector<platform::Place> &places,
-              const std::unordered_set<std::string> &params,
-              const std::unordered_set<std::string> &bcast_vars,
-              const ProgramDesc &main_program, const std::string &loss_var_name,
-              Scope *scope, std::vector<Scope *> &local_scopes,
-              bool allow_op_delay, bool use_default_grad_scale,
-              bool balance_parameter_opt_between_cards, size_t num_trainers,
-              size_t trainer_id) {
-             new (&self) ParallelExecutor(
-                 num_threads, use_event, places, params, bcast_vars,
-                 main_program, loss_var_name, scope, local_scopes,
-                 allow_op_delay, use_default_grad_scale,
-                 balance_parameter_opt_between_cards, num_trainers, trainer_id);
-           })
+  // -- python binds for parallel executor.
+  py::class_<ParallelExecutor> pe(m, "ParallelExecutor");
+  py::class_<ExecutionStrategy>(pe, "ExecutionStrategy")
+      .def(py::init())
+      .def_property(
+          "num_threads",
+          [](const ExecutionStrategy &self) { return self.num_threads_; },
+          [](ExecutionStrategy &self, size_t num_threads) {
+            self.num_threads_ = num_threads;
+          })
+      .def_property(
+          "use_event",
+          [](const ExecutionStrategy &self) { return self.use_event_; },
+          [](ExecutionStrategy &self, bool use_event) {
+            self.use_event_ = use_event;
+          })
+      .def_property(
+          "allow_op_delay",
+          [](const ExecutionStrategy &self) { return self.allow_op_delay_; },
+          [](ExecutionStrategy &self, bool allow_op_delay) {
+            self.allow_op_delay_ = allow_op_delay;
+          });
+  py::class_<BuildStrategy> build_strategy(pe, "BuildStrategy");
+
+  py::enum_<BuildStrategy::ReduceStrategy>(build_strategy, "ReduceStrategy")
+      .value("Reduce", BuildStrategy::ReduceStrategy::kReduce)
+      .value("AllReduce", BuildStrategy::ReduceStrategy::kAllReduce);
+  py::enum_<BuildStrategy::GradientScaleStrategy>(build_strategy,
+                                                  "GradientScaleStrategy")
+      .value("CoeffNumDevice",
+             BuildStrategy::GradientScaleStrategy::kCoeffNumDevice)
+      .value("One", BuildStrategy::GradientScaleStrategy::kOne)
+      .value("Customized", BuildStrategy::GradientScaleStrategy::kCustomized);
+
+  build_strategy.def(py::init())
+      .def_property(
+          "reduce_strategy",
+          [](const BuildStrategy &self) { return self.reduce_; },
+          [](BuildStrategy &self, BuildStrategy::ReduceStrategy strategy) {
+            self.reduce_ = strategy;
+          })
+      .def_property(
+          "gradient_scale_strategy",
+          [](const BuildStrategy &self) { return self.gradient_scale_; },
+          [](BuildStrategy &self,
+             BuildStrategy::GradientScaleStrategy strategy) {
+            self.gradient_scale_ = strategy;
+          });
+
+  pe.def(py::init<const std::vector<platform::Place> &,
+                  const std::unordered_set<std::string> &,
+                  const std::unordered_set<std::string> &, const ProgramDesc &,
+                  const std::string &, Scope *, std::vector<Scope *> &,
+                  const ExecutionStrategy &, const BuildStrategy &, size_t,
+                  size_t>())
       .def("bcast_params", &ParallelExecutor::BCastParamsToGPUs)
       // NOTE: even we return a vec* to Python use reference policy.
// We still cannot get local_scope from this vector, since the element diff --git a/paddle/scripts/paddle_build.sh b/paddle/scripts/paddle_build.sh index 5bef232cd8fc44ded89ac56a790c8db0955b390a..58a30ab3e522c5b14ac8c72d4b03667de6720425 100755 --- a/paddle/scripts/paddle_build.sh +++ b/paddle/scripts/paddle_build.sh @@ -20,19 +20,15 @@ #================================================= function print_usage() { - RED='\033[0;31m' - BLUE='\033[0;34m' - BOLD='\033[1m' - NONE='\033[0m' - echo -e "\n${RED}Usage${NONE}: - ${BOLD}$0${NONE} [OPTION]" + ${BOLD}${SCRIPT_NAME}${NONE} [OPTION]" echo -e "\n${RED}Options${NONE}: ${BLUE}build${NONE}: run build for x86 platform ${BLUE}build_android${NONE}: run build for android platform ${BLUE}build_ios${NONE}: run build for ios platform ${BLUE}test${NONE}: run all unit tests + ${BLUE}single_test${NONE}: run a single unit test ${BLUE}bind_test${NONE}: parallel tests bind to different GPU ${BLUE}doc${NONE}: generate paddle documents ${BLUE}html${NONE}: convert C++ source code into HTML @@ -45,7 +41,15 @@ function print_usage() { } function init() { + RED='\033[0;31m' + BLUE='\033[0;34m' + BOLD='\033[1m' + NONE='\033[0m' + PADDLE_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}")/../../" && pwd )" + if [ -z "${SCRIPT_NAME}" ]; then + SCRIPT_NAME=$0 + fi } function cmake_gen() { @@ -309,6 +313,25 @@ EOF fi } +function single_test() { + TEST_NAME=$1 + if [ -z "${TEST_NAME}" ]; then + echo -e "${RED}Usage:${NONE}" + echo -e "${BOLD}${SCRIPT_NAME}${NONE} ${BLUE}single_test${NONE} [test_name]" + exit 1 + fi + mkdir -p ${PADDLE_ROOT}/build + cd ${PADDLE_ROOT}/build + if [ ${WITH_TESTING:-ON} == "ON" ] ; then + cat < 0.2: # Smaller value to increase CI speed - # trainer.save_params(save_dirname) - # else: - # print('BatchID {0}, Test Loss {1:0.2}, Acc {2:0.2}'.format( - # event.epoch + 1, float(avg_cost), float(acc))) - # if math.isnan(float(avg_cost)): - # sys.exit("got NaN loss, training failed.") + test_reader = paddle.batch( + paddle.dataset.mnist.test(), batch_size=BATCH_SIZE) + avg_cost, acc = trainer.test( + reader=test_reader, feed_order=['img', 'label']) + + print("avg_cost: %s" % avg_cost) + print("acc : %s" % acc) + + if acc > 0.2: # Smaller value to increase CI speed + trainer.save_params(save_dirname) + else: + print('BatchID {0}, Test Loss {1:0.2}, Acc {2:0.2}'.format( + event.epoch + 1, avg_cost, acc)) + if math.isnan(avg_cost): + sys.exit("got NaN loss, training failed.") + elif isinstance(event, fluid.EndStepEvent): + print("Step {0}, Epoch {1} Metrics {2}".format( + event.step, event.epoch, map(numpy.array, event.metrics))) train_reader = paddle.batch( paddle.reader.shuffle( @@ -108,10 +100,11 @@ def train(use_cuda, save_dirname): feed_order=['img', 'label']) -def infer(use_cuda, save_dirname=None): +def infer(use_cuda, inference_program, save_dirname=None): place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - inferencer = fluid.Inferencer(param_path=save_dirname, place=place) + inferencer = fluid.Inferencer( + infer_func=inference_program, param_path=save_dirname, place=place) batch_size = 1 tensor_img = numpy.random.uniform(-1.0, 1.0, @@ -126,10 +119,16 @@ def main(use_cuda): save_dirname = "recognize_digits_conv.inference.model" # call train() with is_local argument to run distributed train - train(use_cuda=use_cuda, save_dirname=save_dirname) - infer(use_cuda=use_cuda, save_dirname=save_dirname) + train( + use_cuda=use_cuda, + train_program=train_program, + save_dirname=save_dirname) + infer( + use_cuda=use_cuda, + 
inference_program=inference_program, + save_dirname=save_dirname) if __name__ == '__main__': # for use_cuda in (False, True): - main(use_cuda=False) + main(use_cuda=True) diff --git a/python/paddle/fluid/tests/book/high-level-api/recognize_digits/test_recognize_digits_mlp.py b/python/paddle/fluid/tests/book/high-level-api/recognize_digits/test_recognize_digits_mlp.py index f072d70abdba5077bf0ac2ff6ff972e24c8226f5..32653157994f81c46f420c1b55ceddbbbf06f2fe 100644 --- a/python/paddle/fluid/tests/book/high-level-api/recognize_digits/test_recognize_digits_mlp.py +++ b/python/paddle/fluid/tests/book/high-level-api/recognize_digits/test_recognize_digits_mlp.py @@ -40,47 +40,34 @@ def train_program(): predict = inference_program() cost = fluid.layers.cross_entropy(input=predict, label=label) avg_cost = fluid.layers.mean(cost) - # acc = fluid.layers.accuracy(input=predict, label=label) - # return avg_cost, acc - return avg_cost + acc = fluid.layers.accuracy(input=predict, label=label) + return [avg_cost, acc] -def train(use_cuda, save_dirname): +def train(use_cuda, train_program, save_dirname): place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() optimizer = fluid.optimizer.Adam(learning_rate=0.001) trainer = fluid.Trainer( - train_func=train_program, - infer_func=inference_program, - place=place, - optimizer=optimizer) + train_func=train_program, place=place, optimizer=optimizer) def event_handler(event): if isinstance(event, fluid.EndEpochEvent): - # if (event.epoch + 1) % 10 == 0: - trainer.save_inference_model(save_dirname) - - # TODO: Uncomment this part once we are sure that .train is working - # test_reader = paddle.batch( - # paddle.dataset.mnist.test(), batch_size=BATCH_SIZE) - # test_metrics = trainer.test(reader=test_reader) - # avg_cost_set = test_metrics[0] - # acc_set = test_metrics[1] - # - # # get test acc and loss - # acc = numpy.array(acc_set).mean() - # avg_cost = numpy.array(avg_cost_set).mean() - # - # print("avg_cost: %s" % avg_cost) - # print("acc : %s" % acc) - # - # if float(acc) > 0.2: # Smaller value to increase CI speed - # trainer.save_params(save_dirname) - # else: - # print('BatchID {0}, Test Loss {1:0.2}, Acc {2:0.2}'.format( - # event.epoch + 1, float(avg_cost), float(acc))) - # if math.isnan(float(avg_cost)): - # sys.exit("got NaN loss, training failed.") + test_reader = paddle.batch( + paddle.dataset.mnist.test(), batch_size=BATCH_SIZE) + avg_cost, acc = trainer.test( + reader=test_reader, feed_order=['img', 'label']) + + print("avg_cost: %s" % avg_cost) + print("acc : %s" % acc) + + if acc > 0.2: # Smaller value to increase CI speed + trainer.save_params(save_dirname) + else: + print('BatchID {0}, Test Loss {1:0.2}, Acc {2:0.2}'.format( + event.epoch + 1, avg_cost, acc)) + if math.isnan(avg_cost): + sys.exit("got NaN loss, training failed.") train_reader = paddle.batch( paddle.reader.shuffle( @@ -94,10 +81,11 @@ def train(use_cuda, save_dirname): feed_order=['img', 'label']) -def infer(use_cuda, save_dirname=None): +def infer(use_cuda, inference_program, save_dirname=None): place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - inferencer = fluid.Inferencer(param_path=save_dirname, place=place) + inferencer = fluid.Inferencer( + infer_func=inference_program, param_path=save_dirname, place=place) batch_size = 1 tensor_img = numpy.random.uniform(-1.0, 1.0, @@ -112,8 +100,14 @@ def main(use_cuda): save_dirname = "recognize_digits_mlp.inference.model" # call train() with is_local argument to run distributed train - train(use_cuda=use_cuda, 
save_dirname=save_dirname) - infer(use_cuda=use_cuda, save_dirname=save_dirname) + train( + use_cuda=use_cuda, + train_program=train_program, + save_dirname=save_dirname) + infer( + use_cuda=use_cuda, + inference_program=inference_program, + save_dirname=save_dirname) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/book/high-level-api/word2vec/no_test_word2vec_new_api.py b/python/paddle/fluid/tests/book/high-level-api/word2vec/no_test_word2vec_new_api.py index 00ba4acf88b1b543c256a48709d7f514ebbe1e8c..4f861e5aaeca7ce0f73450c09f9ddc1ed7417469 100644 --- a/python/paddle/fluid/tests/book/high-level-api/word2vec/no_test_word2vec_new_api.py +++ b/python/paddle/fluid/tests/book/high-level-api/word2vec/no_test_word2vec_new_api.py @@ -90,7 +90,7 @@ def train_program(is_sparse): return avg_cost -def train(use_cuda, is_sparse, save_path): +def train(use_cuda, train_program, save_path): train_reader = paddle.batch( paddle.dataset.imikolov.train(word_dict, N), BATCH_SIZE) test_reader = paddle.batch( @@ -105,23 +105,21 @@ def train(use_cuda, is_sparse, save_path): print("loss= ", avg_cost) if avg_cost < 5.0: - trainer.save_inference_model(save_path) + trainer.save_params(save_path) return if math.isnan(avg_cost): sys.exit("got NaN loss, training failed.") trainer = fluid.Trainer( - partial(train_program, is_sparse), - partial(inference_program, is_sparse), - fluid.optimizer.SGD(learning_rate=0.001), - place=place) + train_program, fluid.optimizer.SGD(learning_rate=0.001), place=place) trainer.train( reader=train_reader, num_epochs=1, event_handler=event_handler) -def infer(use_cuda, is_sparse, save_path): +def infer(use_cuda, inference_program, save_path): place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - inferencer = fluid.Inferencer(param_path=save_path, place=place) + inferencer = fluid.Inferencer( + infer_func=inference_program, param_path=save_path, place=place) lod = [0, 1] first_word = create_random_lodtensor(lod, place, low=0, high=dict_size - 1) @@ -144,9 +142,9 @@ def main(use_cuda, is_sparse): if use_cuda and not fluid.core.is_compiled_with_cuda(): return - save_path = "word2vec.inference.model" - train(use_cuda, is_sparse, save_path) - infer(use_cuda, is_sparse, save_path) + save_path = "word2vec.params" + train(use_cuda, partial(train_program, is_sparse), save_path) + infer(use_cuda, partial(inference_program, is_sparse), save_path) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/book/test_label_semantic_roles.py b/python/paddle/fluid/tests/book/test_label_semantic_roles.py index 09793760e5504c04ad4b0bfac5c5d7b7047cf85d..f1ee5dfd99e1c8b26280c010c1aaca05a004a5b6 100644 --- a/python/paddle/fluid/tests/book/test_label_semantic_roles.py +++ b/python/paddle/fluid/tests/book/test_label_semantic_roles.py @@ -182,12 +182,6 @@ def train(use_cuda, save_dirname=None, is_local=True): crf_decode = fluid.layers.crf_decoding( input=feature_out, param_attr=fluid.ParamAttr(name='crfw')) - chunk_evaluator = fluid.evaluator.ChunkEvaluator( - input=crf_decode, - label=target, - chunk_scheme="IOB", - num_chunk_types=int(math.ceil((label_dict_len - 1) / 2.0))) - train_data = paddle.batch( paddle.reader.shuffle( paddle.dataset.conll05.test(), buf_size=8192), @@ -203,7 +197,6 @@ def train(use_cuda, save_dirname=None, is_local=True): def train_loop(main_program): exe.run(fluid.default_startup_program()) - embedding_param = fluid.global_scope().find_var( embedding_name).get_tensor() embedding_param.set( @@ -213,27 +206,19 @@ def train(use_cuda, save_dirname=None, 
is_local=True): start_time = time.time() batch_id = 0 for pass_id in xrange(PASS_NUM): - chunk_evaluator.reset(exe) for data in train_data(): - cost, precision, recall, f1_score = exe.run( - main_program, - feed=feeder.feed(data), - fetch_list=[avg_cost] + chunk_evaluator.metrics) - pass_precision, pass_recall, pass_f1_score = chunk_evaluator.eval( - exe) + cost = exe.run(main_program, + feed=feeder.feed(data), + fetch_list=[avg_cost]) + cost = cost[0] if batch_id % 10 == 0: - print("avg_cost:" + str(cost) + " precision:" + str( - precision) + " recall:" + str(recall) + " f1_score:" + - str(f1_score) + " pass_precision:" + str( - pass_precision) + " pass_recall:" + str( - pass_recall) + " pass_f1_score:" + str( - pass_f1_score)) + print("avg_cost:" + str(cost)) if batch_id != 0: print("second per batch: " + str((time.time( ) - start_time) / batch_id)) # Set the threshold low to speed up the CI test - if float(pass_precision) > 0.01: + if float(cost) < 60.0: if save_dirname is not None: # TODO(liuyiqun): Change the target to crf_decode fluid.io.save_inference_model(save_dirname, [ diff --git a/python/paddle/fluid/tests/test_data_feeder.py b/python/paddle/fluid/tests/test_data_feeder.py index 861dd3174a21d59fe12e0b794ecb2a934946ac71..ce3ba3ebc50d7b015f379b5e80b179463a7b231a 100644 --- a/python/paddle/fluid/tests/test_data_feeder.py +++ b/python/paddle/fluid/tests/test_data_feeder.py @@ -13,15 +13,62 @@ # limitations under the License. import paddle.fluid as fluid +import unittest -def test_converter(): - img = fluid.layers.data(name='image', shape=[1, 28, 28]) - label = fluid.layers.data(name='label', shape=[1], dtype='int64') - feeder = fluid.DataFeeder([img, label], fluid.CPUPlace()) - result = feeder.feed([[[0] * 784, [9]], [[1] * 784, [1]]]) - print(result) +class TestDataFeeder(unittest.TestCase): + def test_lod_level_0_converter(self): + img = fluid.layers.data(name='image', shape=[1, 28, 28]) + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + feeder = fluid.DataFeeder([img, label], fluid.CPUPlace()) + result = feeder.feed([([0] * 784, [9]), ([1] * 784, [1])]) + print(result) + + self.assertEqual(result['image'].shape(), [2, 1, 28, 28]) + self.assertEqual(result['label'].shape(), [2, 1]) + self.assertEqual(result['image'].lod(), []) + self.assertEqual(result['label'].lod(), []) + + def test_lod_level_1_converter(self): + # lod_level = 1 + # each sentence has a different number of words + sentences = fluid.layers.data( + name='sentences', shape=[1], dtype='int64', lod_level=1) + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + feeder = fluid.DataFeeder([sentences, label], fluid.CPUPlace()) + + # lod = [[0, 3, 5, 9]] + # data = [[1, 2, 3], [4, 5], [6, 7, 8, 9]] + # label = [1] * len(data) + result = feeder.feed( + [([1, 2, 3], [1]), ([4, 5], [1]), ([6, 7, 8, 9], [1])]) + print(result) + + self.assertEqual(result['sentences'].shape(), [9, 1]) + self.assertEqual(result['label'].shape(), [3, 1]) + self.assertEqual(result['sentences'].lod(), [[0, 3, 5, 9]]) + self.assertEqual(result['label'].lod(), []) + + def test_lod_level_2_converter(self): + # lod_level = 2 + # paragraphs -> sentences -> words + paragraphs = fluid.layers.data( + name='paragraphs', shape=[1], dtype='int64', lod_level=2) + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + feeder = fluid.DataFeeder([paragraphs, label], fluid.CPUPlace()) + + # lod = [[0, 2, 3], [0, 3, 5, 9]] + # data = [[[1, 2, 3], [4, 5]], [[6, 7, 8, 9]]] + # label = [1] * len(data) + result = 
feeder.feed( + [([[1, 2, 3], [4, 5]], [1]), ([[6, 7, 8, 9]], [1])]) + print(result) + + self.assertEqual(result['paragraphs'].shape(), [9, 1]) + self.assertEqual(result['label'].shape(), [2, 1]) + self.assertEqual(result['paragraphs'].lod(), [[0, 2, 3], [0, 3, 5, 9]]) + self.assertEqual(result['label'].lod(), []) if __name__ == '__main__': - test_converter() + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/CMakeLists.txt b/python/paddle/fluid/tests/unittests/CMakeLists.txt index d9190408e151283ece8460286dd67818dd39da3e..2ae9653953c2f5f6a399243bef2c7fb756f9692f 100644 --- a/python/paddle/fluid/tests/unittests/CMakeLists.txt +++ b/python/paddle/fluid/tests/unittests/CMakeLists.txt @@ -28,11 +28,11 @@ function(py_test_modules TARGET_NAME) if(WITH_TESTING) set(options "") set(oneValueArgs "") - set(multiValueArgs MODULES DEPS ARGS ENVS) + set(multiValueArgs MODULES DEPS ENVS) cmake_parse_arguments(py_test_modules "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) add_test(NAME ${TARGET_NAME} COMMAND env PYTHONPATH=${PADDLE_BINARY_DIR}/python ${py_test_modules_ENVS} - ${PYTHON_EXECUTABLE} -u -m unittest --verbose ${py_test_modules_MODULES} ${py_test_modules_ARGS} + ${PYTHON_EXECUTABLE} ${PADDLE_SOURCE_DIR}/tools/test_runner.py ${py_test_modules_MODULES} WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}) endif() endfunction() @@ -66,6 +66,7 @@ list(REMOVE_ITEM TEST_OPS test_fetch_var) list(REMOVE_ITEM TEST_OPS test_parallel_op) list(REMOVE_ITEM TEST_OPS test_dynrnn_static_input) list(REMOVE_ITEM TEST_OPS test_dist_train) +list(REMOVE_ITEM TEST_OPS test_network_with_dtype) # tests that can be bundled together in one python process for speed. if(WITH_FAST_BUNDLE_TEST) @@ -83,6 +84,7 @@ py_test_modules(test_parallel_executor MODULES test_parallel_executor) py_test_modules(test_warpctc_op MODULES test_warpctc_op ENVS FLAGS_warpctc_dir=${WARPCTC_LIB_DIR}) py_test_modules(test_train_dyn_rnn MODULES test_dyn_rnn) py_test_modules(test_mul_op MODULES test_mul_op) +py_test_modules(test_network_with_dtype MODULES test_network_with_dtype) # tests that need to be run in separate process. 
py_test_modules(test_multihead_attention MODULES test_multihead_attention)
diff --git a/python/paddle/fluid/tests/unittests/test_network_with_dtype.py b/python/paddle/fluid/tests/unittests/test_network_with_dtype.py
index fe8aceb3ae42f73590bffe2a372c771654a372a9..d4835dd18405fc7a0d508a780a734922e0abd12c 100644
--- a/python/paddle/fluid/tests/unittests/test_network_with_dtype.py
+++ b/python/paddle/fluid/tests/unittests/test_network_with_dtype.py
@@ -24,33 +24,30 @@ BATCH_SIZE = 20


 class TestNetWithDtype(unittest.TestCase):
-    def set_network(self):
+    def setUp(self):
         self.dtype = "float64"
         self.init_dtype()
-        main = fluid.Program()
-        with fluid.program_guard(main):
-            self.x = fluid.layers.data(name='x', shape=[13], dtype=self.dtype)
-            self.y = fluid.layers.data(name='y', shape=[1], dtype=self.dtype)
-            y_predict = fluid.layers.fc(input=self.x, size=1, act=None)
-            cost = fluid.layers.square_error_cost(input=y_predict, label=self.y)
+    def run_net_on_place(self, place):
+        main = fluid.Program()
+        startup = fluid.Program()
+        with fluid.program_guard(main, startup):
+            x = fluid.layers.data(name='x', shape=[13], dtype=self.dtype)
+            y = fluid.layers.data(name='y', shape=[1], dtype=self.dtype)
+            y_predict = fluid.layers.fc(input=x, size=1, act=None)
+            cost = fluid.layers.square_error_cost(input=y_predict, label=y)
             avg_cost = fluid.layers.mean(cost)
-            self.program = main
-            self.fetch_list = [avg_cost]
+            sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001)
+            sgd_optimizer.minimize(avg_cost)

-        sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001)
-        sgd_optimizer.minimize(avg_cost)
-
-    def run_net_on_place(self, place):
+        fetch_list = [avg_cost]
         train_reader = paddle.batch(
             paddle.dataset.uci_housing.train(), batch_size=BATCH_SIZE)
-        feeder = fluid.DataFeeder(place=place, feed_list=[self.x, self.y])
+        feeder = fluid.DataFeeder(place=place, feed_list=[x, y])
         exe = fluid.Executor(place)
-        exe.run(fluid.default_startup_program())
+        exe.run(startup)
         for data in train_reader():
-            exe.run(self.program,
-                    feed=feeder.feed(data),
-                    fetch_list=self.fetch_list)
+            exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list)
             # the main program is runnable, the datatype is fully supported
             break

@@ -58,14 +55,12 @@ class TestNetWithDtype(unittest.TestCase):
         pass

     def test_cpu(self):
-        self.set_network()
         place = fluid.CPUPlace()
         self.run_net_on_place(place)

     def test_gpu(self):
         if not core.is_compiled_with_cuda():
             return
-        self.set_network()
         place = fluid.CUDAPlace(0)
         self.run_net_on_place(place)

diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor.py b/python/paddle/fluid/tests/unittests/test_parallel_executor.py
index a3be1a8db68c0d9d46746e70d95342447c35e237..056f9e1781997aa1586d972874b652d5b725fe3f 100644
--- a/python/paddle/fluid/tests/unittests/test_parallel_executor.py
+++ b/python/paddle/fluid/tests/unittests/test_parallel_executor.py
@@ -232,14 +232,18 @@ class TestParallelExecutorBase(unittest.TestCase):
             place = fluid.CUDAPlace(0)
             startup_exe = fluid.Executor(place)
             startup_exe.run(startup)
+            exec_strategy = fluid.ExecutionStrategy()
+            exec_strategy.allow_op_delay = allow_op_delay
+
+            build_strategy = fluid.BuildStrategy()
+            build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce if balance_parameter_opt_between_cards else fluid.BuildStrategy.ReduceStrategy.AllReduce

             if use_parallel_executor:
                 exe = fluid.ParallelExecutor(
                     True,
                     loss_name=loss.name,
-                    allow_op_delay=allow_op_delay,
-                    balance_parameter_opt_between_cards=balance_parameter_opt_between_cards
-                )
+
exec_strategy=exec_strategy, + build_strategy=build_strategy) else: exe = fluid.Executor(place=place) @@ -548,7 +552,7 @@ class TestTransformer(TestParallelExecutorBase): class ParallelExecutorTestingDuringTraining(unittest.TestCase): - def check_network_convergence(self, balance_parameter_opt_between_cards): + def check_network_convergence(self, build_strategy=None): main = fluid.Program() startup = fluid.Program() with fluid.program_guard(main, startup): @@ -571,15 +575,13 @@ class ParallelExecutorTestingDuringTraining(unittest.TestCase): use_cuda=True, loss_name=loss.name, main_program=main, - balance_parameter_opt_between_cards=balance_parameter_opt_between_cards - ) + build_strategy=build_strategy) test_exe = fluid.ParallelExecutor( use_cuda=True, main_program=test_program, share_vars_from=train_exe, - balance_parameter_opt_between_cards=balance_parameter_opt_between_cards - ) + build_strategy=build_strategy) for i in xrange(5): test_loss, = test_exe.run([loss.name], feed=feed_dict) @@ -594,10 +596,14 @@ class ParallelExecutorTestingDuringTraining(unittest.TestCase): str(test_loss)) def test_parallel_testing(self): - self.check_network_convergence(False) + build_strategy = fluid.BuildStrategy() + build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.AllReduce + self.check_network_convergence(build_strategy) def test_parallel_testing_with_new_strategy(self): - self.check_network_convergence(True) + build_strategy = fluid.BuildStrategy() + build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce + self.check_network_convergence(build_strategy) import paddle.dataset.conll05 as conll05 @@ -617,7 +623,7 @@ embedding_name = 'emb' def db_lstm(word, predicate, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, mark, - is_sparse, balance_parameter_opt_between_cards, **ignored): + is_sparse, **ignored): # 8 features predicate_embedding = fluid.layers.embedding( input=predicate, @@ -686,9 +692,7 @@ def db_lstm(word, predicate, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, mark, class TestCRFModel(unittest.TestCase): - def check_network_convergence(self, - is_sparse, - balance_parameter_opt_between_cards=False): + def check_network_convergence(self, is_sparse, build_strategy=None): main = fluid.Program() startup = fluid.Program() with fluid.program_guard(main, startup): @@ -739,8 +743,7 @@ class TestCRFModel(unittest.TestCase): pe = fluid.ParallelExecutor( use_cuda=True, loss_name=avg_cost.name, - balance_parameter_opt_between_cards=balance_parameter_opt_between_cards - ) + build_strategy=build_strategy) feeder = fluid.DataFeeder( feed_list=[ @@ -756,19 +759,29 @@ class TestCRFModel(unittest.TestCase): pe.run(feed=feeder.feed(cur_batch), fetch_list=[avg_cost.name]))[0] - def test_update_sparse_parameter(self): - self.check_network_convergence(is_sparse=True) + def test_update_sparse_parameter_all_reduce(self): + build_strategy = fluid.BuildStrategy() + build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.AllReduce + self.check_network_convergence( + is_sparse=True, build_strategy=build_strategy) - def test_update_dense_parameter(self): - self.check_network_convergence(is_sparse=False) + def test_update_dense_parameter_all_reduce(self): + build_strategy = fluid.BuildStrategy() + build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.AllReduce + self.check_network_convergence( + is_sparse=False, build_strategy=build_strategy) - def test_update_sparse_parameter_with_new_strategy(self): + def test_update_sparse_parameter_reduce(self): + build_strategy = 
fluid.BuildStrategy() + build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce self.check_network_convergence( - is_sparse=False, balance_parameter_opt_between_cards=True) + is_sparse=True, build_strategy=build_strategy) - def test_update_dense_parameter_with_new_strategy(self): + def test_update_dense_parameter_reduce(self): + build_strategy = fluid.BuildStrategy() + build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce self.check_network_convergence( - is_sparse=False, balance_parameter_opt_between_cards=True) + is_sparse=False, build_strategy=build_strategy) # test fetch all the variables of global_block @@ -836,7 +849,7 @@ class TestFetchOp(unittest.TestCase): assert not math.isnan(np.sum(ret[i])) and \ not math.isinf(np.sum(ret[i])) - def test_update_sparse_parameter(self): + def test_fetch_op(self): tst_reader = paddle.batch(flowers.test(use_xmap=False), batch_size=16) tst_reader_iter = tst_reader() diff --git a/python/paddle/fluid/tests/unittests/test_split_var.py b/python/paddle/fluid/tests/unittests/test_split_var.py index 79d387f0066672058d1640f4e5fd28ed8913fe4c..0c5e8901b903375c7d4de32943e657b205d8fae9 100644 --- a/python/paddle/fluid/tests/unittests/test_split_var.py +++ b/python/paddle/fluid/tests/unittests/test_split_var.py @@ -21,15 +21,7 @@ import random class TestSplitVar(unittest.TestCase): - def test_check_output(self): - # split below shapes to 10 servers - shapes = [[3, 5], [1024], [28, 784], [8, 1020], [800, 10]] - expected_sizes = [ - [15], [1024], - [2352, 2352, 2352, 2352, 2352, 2352, 2352, 2352, 2352, 784], - [2040, 2040, 2040, 2040], - [1150, 1150, 1150, 1150, 1150, 1150, 1100] - ] + def check_split_output(self, shapes, expected_sizes, min_size): var_list = [] program = fluid.Program() for shape in shapes: @@ -39,7 +31,7 @@ class TestSplitVar(unittest.TestCase): # dtype=core.VarDesc.VarType.LOD_TENSOR, shape=shape) var_list.append(var) - blocks = split_dense_variable(var_list, 10) + blocks = split_dense_variable(var_list, 10, min_size) all_sizes = [] for s in expected_sizes: for s2 in s: @@ -48,6 +40,25 @@ class TestSplitVar(unittest.TestCase): varname, block_id, size = block_str.split(":") self.assertEqual(int(size), all_sizes[i]) + def test_1k(self): + shapes = [[3, 5], [1024], [28, 784], [8, 1020], [800, 10]] + expected_sizes = [ + [15], [1024], + [2352, 2352, 2352, 2352, 2352, 2352, 2352, 2352, 2352, 784], + [2040, 2040, 2040, 2040], + [1150, 1150, 1150, 1150, 1150, 1150, 1100] + ] + + self.check_split_output(shapes, expected_sizes, 1024) + + def test_check_output_8k(self): + shapes = [[3, 5], [1024], [28, 784], [8, 1020], [800, 10], + [6, 33, 33, 33]] + expected_sizes = [[15], [1024], [10976, 10976], [8160], [8000], + [35937, 35937, 35937, 35937, 35937, 35937]] + + self.check_split_output(shapes, expected_sizes, 8192) + if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/trainer.py b/python/paddle/fluid/trainer.py index 67d8be82d5fa850a526f92753a086dac0cab5e2c..d158d586321833fdf046e4e061bfa8460b9a31b5 100644 --- a/python/paddle/fluid/trainer.py +++ b/python/paddle/fluid/trainer.py @@ -20,6 +20,7 @@ import data_feeder import contextlib import io import unique_name +import parallel_executor # optimizer is same as the parameter of Trainer.__init__. 
Rename it to opt_module
import optimizer as opt_module
@@ -48,12 +49,14 @@ class BeginStepEvent(object):
     def __init__(self, epoch_id, step_id):
         self.epoch = epoch_id
         self.step = step_id
+        self.fetch_metrics = True


 class EndStepEvent(object):
-    def __init__(self, epoch_id, step_id):
+    def __init__(self, epoch_id, step_id, metrics):
         self.epoch = epoch_id
         self.step = step_id
+        self.metrics = metrics


 def check_and_get_place(place):
@@ -87,24 +90,23 @@ class Trainer(object):
     Args:
         train_func(callable): A function which will return loss. The loss must be a scalar.
-        infer_func(callable): A function which will return predict, used to save inference model
         optimizer(optimizer.Optimizer): The optimizer should be an instance of Optimizer
         place: The device place of this trainer.
     """

     def __init__(self,
                  train_func,
-                 infer_func,
                  optimizer,
                  param_path=None,
-                 place=None):
+                 place=None,
+                 parallel=False):
+        self.parallel = parallel
         # 1. we need to generate a framework.Program by calling
         # program_func. Reference: fluid.program_guard in
         # test_word2vec.py
         if not isinstance(optimizer, opt_module.Optimizer):
             raise TypeError("The optimizer should be an instance of Optimizer")

-        self.infer_func = infer_func
         self.scope = core.Scope()

         self.startup_program = framework.Program()
@@ -112,14 +114,14 @@ class Trainer(object):
         with framework.program_guard(self.train_program, self.startup_program):
             program_func_outs = train_func()
-            self.test_outputs = program_func_outs if isinstance(
+            self.train_func_outputs = program_func_outs if isinstance(
                 program_func_outs, list) else [program_func_outs]
             self.test_program = self.train_program.clone()
             if not isinstance(optimizer, opt_module.Optimizer):
                 raise TypeError(
                     "The optimizer should be an instance of Optimizer")
             # The first element of program_func_outs is loss.
- loss = self.test_outputs[0] + loss = self.train_func_outputs[0] optimize_ops, params_grads = optimizer.minimize(loss) self.place = check_and_get_place(place) @@ -137,7 +139,40 @@ class Trainer(object): # load params from param_path into scope io.load_persistables(exe, dirname=param_path) + def _transpile_nccl2_dist(self): + # PADDLE_TRAINER_IPS + if "PADDLE_TRAINER_IPS" not in os.environ: + self.nccl_id_var = None + else: + self.trainer_id = int(os.getenv("PADDLE_TRAINER_ID")) + port = os.getenv("PADDLE_PSERVER_PORT") + worker_ips = os.getenv("PADDLE_TRAINER_IPS") + worker_endpoints = [] + for ip in worker_ips.split(","): + worker_endpoints.append(':'.join([ip, port])) + self.num_trainers = len(worker_endpoints) + current_endpoint = os.getenv("POD_IP") + ":" + port + worker_endpoints.remove(current_endpoint) + # TODO(wuyi): use self.nccl_id_var, self.num_trainers and self.trainer_id + # in ParallelExecutor to start + # distributed training using NCCL2 + self.nccl_id_var = self.startup_program.global_block().create_var( + name="NCCLID", persistable=True, type=core.VarDesc.VarType.RAW) + self.startup_program.global_block().append_op( + type="gen_nccl_id", + inputs={}, + outputs={"NCCLID": self.nccl_id_var}, + attrs={ + "endpoint": current_endpoint, + "endpoint_list": worker_endpoints, + "trainer_id": self.trainer_id + }) + def _dist_transpile_if_necessary(self, optimize_ops, params_grads): + self._transpile_nccl2_dist() + if self.nccl_id_var != None: + return + if "PADDLE_TRAINING_ROLE" not in os.environ: return @@ -175,12 +210,7 @@ class Trainer(object): 'TRAINING_ROLE environment variable must be either TRAINER or PSERVER' ) - def train(self, - num_epochs, - event_handler, - reader=None, - parallel=False, - feed_order=None): + def train(self, num_epochs, event_handler, reader=None, feed_order=None): """ Train the model. @@ -188,27 +218,26 @@ class Trainer(object): num_epochs: The number of epoch. An epoch will process all data in reader event_handler: The event handler. A function with type (ev:Event)->void reader: - parallel: True if use multi-CPUs or multi-GPUs feed_order: Feeding order of reader. 
None will follow the defining order in the program

        Returns:

        """
-        if parallel:
-            raise NotImplementedError(
-                "Parallel Executor version of trainer is not implemented")
-
         training_role = os.getenv("PADDLE_TRAINING_ROLE", "")
         if training_role == "PSERVER":
             with self._prog_and_scope_guard():
                 exe = executor.Executor(self.place)
                 exe.run()
                 return
+        if self.parallel:
+            self._train_by_parallel_executor(num_epochs, event_handler, reader,
+                                             feed_order)
+        else:
+            self._train_by_executor(num_epochs, event_handler, reader,
+                                    feed_order)

-        self._train_by_executor(num_epochs, event_handler, reader, feed_order)
-
-    def test(self, reader, feed_order=None):
+    def test(self, reader, feed_order):
         """
         Test the model on given test data

@@ -218,7 +247,8 @@
            order in program
         """
-        return self._test_by_executor(reader, feed_order, self.test_outputs)
+        return self._test_by_executor(reader, feed_order,
+                                      self.train_func_outputs)

     def save_params(self, param_path):
         # reference: save_persistables in io.py
         exe = executor.Executor(self.place)
         io.save_persistables(exe, dirname=param_path)

-    def save_inference_model(self, model_path):
-        inference_program = framework.Program()
-        with framework.program_guard(inference_program):
-            with unique_name.guard():
-                predict_var = self.infer_func()
-        predict_var = self.train_program.block(0).var(predict_var.name)
-        exe = executor.Executor(self.place)
-        io.save_inference_model(model_path, [], [predict_var], exe)
-
     @contextlib.contextmanager
     def _prog_and_scope_guard(self):
         with framework.program_guard(
@@ -261,13 +282,25 @@
             feeder = data_feeder.DataFeeder(
                 feed_list=feed_var_list, place=self.place)
             exe = executor.Executor(self.place)
-            for epoch_id in range(num_epochs):
-                event_handler(BeginEpochEvent(epoch_id))
-                for step_id, data in enumerate(reader()):
-                    event_handler(BeginStepEvent(epoch_id, step_id))
-                    exe.run(feed=feeder.feed(data), fetch_list=[])
-                    event_handler(EndStepEvent(epoch_id, step_id))
-                event_handler(EndEpochEvent(epoch_id))
+            reader = feeder.decorate_reader(reader, multi_devices=False)
+            self._train_by_any_executor(event_handler, exe, num_epochs, reader)
+
+    def _train_by_any_executor(self, event_handler, exe, num_epochs, reader):
+        for epoch_id in range(num_epochs):
+            event_handler(BeginEpochEvent(epoch_id))
+            for step_id, data in enumerate(reader()):
+                begin_event = BeginStepEvent(epoch_id, step_id)
+                event_handler(begin_event)
+                if begin_event.fetch_metrics:
+                    metrics = exe.run(feed=data,
+                                      fetch_list=[
+                                          var.name
+                                          for var in self.train_func_outputs
+                                      ])
+                else:
+                    metrics = exe.run(feed=data, fetch_list=[])
+                event_handler(EndStepEvent(epoch_id, step_id, metrics))
+            event_handler(EndEpochEvent(epoch_id))

     def _test_by_executor(self, reader, feed_order, fetch_list):
         with executor.scope_guard(self.scope):
@@ -286,17 +319,34 @@
             return [x / count for x in accumulated]

+    def _train_by_parallel_executor(self, num_epochs, event_handler, reader,
+                                    feed_order):
+        with self._prog_and_scope_guard():
+            pe = self._get_or_create_parallel_executor()
+            feed_var_list = build_feed_var_list(self.train_program, feed_order)
+            feeder = data_feeder.DataFeeder(
+                feed_list=feed_var_list, place=self.place)
+            reader = feeder.decorate_reader(reader, multi_devices=True)
+            for epoch_id in range(num_epochs):
+                self._train_by_any_executor(event_handler, pe, num_epochs,
+                                            reader)
+
+    def _get_parallel_executor(self):
+        return getattr(self, 'parallel_executor', None)
+
+    def _get_or_create_parallel_executor(self):
+        if self._get_parallel_executor() is None:
+            self.parallel_executor = parallel_executor.ParallelExecutor(
+                use_cuda=isinstance(self.place, core.CUDAPlace),
+                loss_name=self.train_func_outputs[0].name)
+        return self._get_parallel_executor()
+

 def build_feed_var_list(program, feed_order):
     if not isinstance(program, framework.Program):
         raise TypeError("The 'program' should be an object of Program")

-    if feed_order is None:
-        feed_var_list = [
-            var for var in program.global_block().vars.itervalues()
-            if var.is_data
-        ]
-    elif isinstance(feed_order, list):
+    if isinstance(feed_order, list):
         feed_var_list = [
             program.global_block().var(var_name) for var_name in feed_order
         ]
diff --git a/python/paddle/fluid/transpiler/distribute_transpiler.py b/python/paddle/fluid/transpiler/distribute_transpiler.py
index bc9b6fa57c12250aae382b535788fa00678e8535..806cc2fcc16df35ec25cdd55d69f6bb4fdfe283b 100644
--- a/python/paddle/fluid/transpiler/distribute_transpiler.py
+++ b/python/paddle/fluid/transpiler/distribute_transpiler.py
@@ -93,30 +93,33 @@ def same_or_split_var(p_name, var_name):
     return p_name == var_name or p_name.startswith(var_name + ".block")


-def split_dense_variable(var_list,
-                         pserver_count,
-                         min_block_size=1024,
-                         max_block_size=1048576):
+def split_dense_variable(var_list, service_count, min_block_size=8192):
     """
-        We may need to split dense tensor to one or more blocks and put
-        them equally onto parameter server. One block is a sub-tensor
-        aligned by dim[0] of the tensor.
-
-        We need to have a minimal block size so that the calculations in
-        the parameter server side can gain better performance. By default
-        minimum block size is 1024. The max block size is used to prevent
-        very large blocks that may cause send error.
-        :return: A list of VarBlocks. Each VarBlock specifies a shard of
-            the var.
+    We may need to split a dense tensor into one or more blocks and put
+    them equally onto parameter servers. One block is a sub-tensor
+    aligned by dim[0] of the tensor.
+
+    We need to have a minimal block size so that the calculations in
+    the parameter server side can gain better performance. By default the
+    minimum block size is 8K elements (each element may be 16, 32, or 64 bits).
+
+    Args:
+        var_list (list): List of variables.
+        service_count (int): Number of pserver services. A pserver may have two
+            or more listening ports.
+        min_block_size (int): Minimum split block size.
+    Returns:
+        blocks (list[(varname, block_id, current_block_size)]): A list
+            of VarBlocks. Each VarBlock specifies a shard of the var.
     """
     blocks = []
     for var in var_list:
-        split_count = pserver_count
+        split_count = service_count
         var_numel = reduce(lambda x, y: x * y, var.shape)
         max_pserver_count = int(math.floor(var_numel / float(min_block_size)))
         if max_pserver_count == 0:
             max_pserver_count = 1
-        if max_pserver_count < pserver_count:
+        if max_pserver_count < service_count:
             split_count = max_pserver_count
         block_size = int(math.ceil(var_numel / float(split_count)))
@@ -803,9 +806,18 @@ class DistributeTranspiler:
         Create vars for each split.
         NOTE: only grads need to be named for different trainers, use
               add_trainer_suffix to rename the grad vars.
-        :return: A dict mapping from original var name to each var split.
+        Args:
+            program (ProgramDesc): ProgramDesc to which the gradients belong.
+            block_list (list[(varname, block_id, block_size)]): List of gradient blocks.
+            add_trainer_suffix (Bool): Add trainer suffix to new variable's name if set True.
+        Returns:
+            var_mapping (dict(varname->[new_varname_variable])): A dict mapping
+                from original var name to each var split.
        """
+
+        # varname->[(block_id, current_block_size)]
        block_map = dict()
+        var_mapping = dict()
        for block_str in block_list:
            varname, offset, size = block_str.split(":")
@@ -1190,6 +1202,12 @@
        return lr_ops

    def _get_optimize_pass(self):
+        """
+        Get optimizer operators, parameters and gradients from origin_program
+        Returns:
+            opt_ops (list): optimize operators.
+            params_grads (dict): parameter->gradient.
+        """
        block = self.origin_program.global_block()
        opt_ops = []
        params_grads = []
diff --git a/tools/test_runner.py b/tools/test_runner.py
new file mode 100644
index 0000000000000000000000000000000000000000..9dc750b89058cd73355a2f7984d577252c03526d
--- /dev/null
+++ b/tools/test_runner.py
@@ -0,0 +1,48 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+import os
+import sys
+import paddle.fluid as fluid
+import importlib
+import cStringIO
+
+
+def main():
+    sys.path.append(os.getcwd())
+    some_test_failed = False
+    for module_name in sys.argv[1:]:
+        buffer = cStringIO.StringIO()
+        main = fluid.Program()
+        startup = fluid.Program()
+        scope = fluid.core.Scope()
+        with fluid.program_guard(main, startup):
+            with fluid.scope_guard(scope):
+                with fluid.unique_name.guard():
+                    test_loader = unittest.TestLoader()
+                    module = importlib.import_module(module_name)
+                    tests = test_loader.loadTestsFromModule(module)
+                    res = unittest.TextTestRunner(stream=buffer).run(tests)
+                    if not res.wasSuccessful():
+                        some_test_failed = True
+                        print >> sys.stderr, module_name, 'failed\n', buffer.getvalue(
+                        )
+
+    if some_test_failed:
+        exit(1)
+
+
+if __name__ == '__main__':
+    main()
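A minimal sketch (not part of the patch) of how the ExecutionStrategy/BuildStrategy interface bound in pybind.cc above is driven from Python. It mirrors the updated unit tests in this diff; avg_cost, feeder, and train_reader are assumed to come from the usual fluid program-construction boilerplate and are not defined here.

import paddle.fluid as fluid

# How many threads the executor uses and whether op execution may be delayed.
exec_strategy = fluid.ExecutionStrategy()
exec_strategy.num_threads = 4
exec_strategy.allow_op_delay = False

# How gradients are aggregated across devices (kReduce vs. kAllReduce above).
build_strategy = fluid.BuildStrategy()
build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.AllReduce

# The two strategy objects replace the old flat argument list
# (allow_op_delay, balance_parameter_opt_between_cards, ...).
pe = fluid.ParallelExecutor(
    use_cuda=True,
    loss_name=avg_cost.name,  # avg_cost: loss variable built elsewhere
    exec_strategy=exec_strategy,
    build_strategy=build_strategy)

for batch in train_reader():  # train_reader/feeder: assumed boilerplate
    loss_value, = pe.run(feed=feeder.feed(batch),
                         fetch_list=[avg_cost.name])

As in the updated tests, a second ParallelExecutor built with share_vars_from and the same build_strategy can run the cloned test program against the variables trained here.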